path: root/drivers/serial/8250_hp300.c
Commit message                                                                      Author          Age         Files  Lines
* [PATCH] hp300: fix driver_register() return handling, remove dio_module_init()   Bjorn Helgaas   2006-03-25  1      -5/+5
* [PATCH] Serial: Convert 8250_hp300 to use serial8250_{un,}register_port          Russell King    2005-04-26  1      -26/+27
* Linux-2.6.12-rc2                                                                  Linus Torvalds  2005-04-17  1      -0/+329
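The two later commits name specific driver-core and 8250-core APIs: checking the return value of driver_register(), and moving the driver onto serial8250_register_port()/serial8250_unregister_port(). As a rough illustrative sketch only (the hp300_uart_* names, I/O base, and IRQ below are placeholders, not values taken from the real 8250_hp300.c), the registration pattern that conversion introduced looks roughly like:

/*
 * Minimal sketch of the serial8250_{un,}register_port() pattern.
 * All resource values are placeholders; error from the registration
 * call is propagated instead of being ignored, in the spirit of the
 * driver_register() return-handling fix above.
 */
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

static int hp300_uart_line = -1;	/* line number handed back by the 8250 core */

static int __init hp300_uart_init(void)
{
	struct uart_port port = { };

	port.iobase  = 0x3f8;		/* placeholder I/O resource */
	port.irq     = 4;		/* placeholder IRQ */
	port.uartclk = 1843200;
	port.iotype  = UPIO_PORT;
	port.flags   = UPF_BOOT_AUTOCONF;

	/* serial8250_register_port() returns the allocated line or a -errno */
	hp300_uart_line = serial8250_register_port(&port);
	if (hp300_uart_line < 0)
		return hp300_uart_line;

	return 0;
}

static void __exit hp300_uart_exit(void)
{
	if (hp300_uart_line >= 0)
		serial8250_unregister_port(hp300_uart_line);
}

module_init(hp300_uart_init);
module_exit(hp300_uart_exit);
MODULE_LICENSE("GPL");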
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile6
-rw-r--r--drivers/acpi/acpi_i2c.c103
-rw-r--r--drivers/acpi/acpi_memhotplug.c205
-rw-r--r--drivers/acpi/acpi_pad.c8
-rw-r--r--drivers/acpi/acpi_platform.c104
-rw-r--r--drivers/acpi/acpica/Makefile3
-rw-r--r--drivers/acpi/acpica/acdebug.h94
-rw-r--r--drivers/acpi/acpica/acdispat.h11
-rw-r--r--drivers/acpi/acpica/acevents.h6
-rw-r--r--drivers/acpi/acpica/acglobal.h73
-rw-r--r--drivers/acpi/acpica/aclocal.h16
-rw-r--r--drivers/acpi/acpica/acmacros.h163
-rw-r--r--drivers/acpi/acpica/acobject.h7
-rw-r--r--drivers/acpi/acpica/acopcode.h6
-rw-r--r--drivers/acpi/acpica/acparser.h3
-rw-r--r--drivers/acpi/acpica/acpredef.h11
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/acutils.h58
-rw-r--r--drivers/acpi/acpica/amlresrc.h1
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c6
-rw-r--r--drivers/acpi/acpica/dsmthdat.c14
-rw-r--r--drivers/acpi/acpica/dsobject.c6
-rw-r--r--drivers/acpi/acpica/dsopcode.c5
-rw-r--r--drivers/acpi/acpica/dsutils.c33
-rw-r--r--drivers/acpi/acpica/dswexec.c10
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswstate.c26
-rw-r--r--drivers/acpi/acpica/evgpe.c20
-rw-r--r--drivers/acpi/acpica/evgpeblk.c3
-rw-r--r--drivers/acpi/acpica/evgpeutil.c3
-rw-r--r--drivers/acpi/acpica/evrgnini.c7
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c13
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c9
-rw-r--r--drivers/acpi/acpica/exdebug.c10
-rw-r--r--drivers/acpi/acpica/exdump.c20
-rw-r--r--drivers/acpi/acpica/exfield.c4
-rw-r--r--drivers/acpi/acpica/exfldio.c15
-rw-r--r--drivers/acpi/acpica/exmisc.c5
-rw-r--r--drivers/acpi/acpica/exmutex.c9
-rw-r--r--drivers/acpi/acpica/exnames.c9
-rw-r--r--drivers/acpi/acpica/exoparg1.c11
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c3
-rw-r--r--drivers/acpi/acpica/exoparg6.c5
-rw-r--r--drivers/acpi/acpica/exprep.c13
-rw-r--r--drivers/acpi/acpica/exregion.c3
-rw-r--r--drivers/acpi/acpica/exresnte.c9
-rw-r--r--drivers/acpi/acpica/exresolv.c3
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c4
-rw-r--r--drivers/acpi/acpica/exstoren.c11
-rw-r--r--drivers/acpi/acpica/exstorob.c5
-rw-r--r--drivers/acpi/acpica/exsystem.c9
-rw-r--r--drivers/acpi/acpica/exutils.c5
-rw-r--r--drivers/acpi/acpica/hwacpi.c3
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwpci.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c1
-rw-r--r--drivers/acpi/acpica/hwtimer.c6
-rw-r--r--drivers/acpi/acpica/hwvalid.c1
-rw-r--r--drivers/acpi/acpica/hwxface.c1
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c12
-rw-r--r--drivers/acpi/acpica/nsaccess.c7
-rw-r--r--drivers/acpi/acpica/nsalloc.c4
-rw-r--r--drivers/acpi/acpica/nsdump.c10
-rw-r--r--drivers/acpi/acpica/nsinit.c4
-rw-r--r--drivers/acpi/acpica/nsload.c10
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c8
-rw-r--r--drivers/acpi/acpica/nsparse.c8
-rw-r--r--drivers/acpi/acpica/nssearch.c17
-rw-r--r--drivers/acpi/acpica/nsutils.c18
-rw-r--r--drivers/acpi/acpica/nswalk.c10
-rw-r--r--drivers/acpi/acpica/nsxfeval.c20
-rw-r--r--drivers/acpi/acpica/nsxfname.c66
-rw-r--r--drivers/acpi/acpica/nsxfobj.c4
-rw-r--r--drivers/acpi/acpica/psargs.c8
-rw-r--r--drivers/acpi/acpica/psloop.c61
-rw-r--r--drivers/acpi/acpica/psopcode.c29
-rw-r--r--drivers/acpi/acpica/psparse.c13
-rw-r--r--drivers/acpi/acpica/psutils.c4
-rw-r--r--drivers/acpi/acpica/rscalc.c14
-rw-r--r--drivers/acpi/acpica/rslist.c4
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c4
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c3
-rw-r--r--drivers/acpi/acpica/utcache.c323
-rw-r--r--drivers/acpi/acpica/utdebug.c37
-rw-r--r--drivers/acpi/acpica/utids.c104
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c150
-rw-r--r--drivers/acpi/acpica/utmutex.c14
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c692
-rw-r--r--drivers/acpi/acpica/utxface.c5
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/apei/apei-base.c3
-rw-r--r--drivers/acpi/apei/einj.c2
-rw-r--r--drivers/acpi/apei/erst-dbg.c11
-rw-r--r--drivers/acpi/apei/erst.c16
-rw-r--r--drivers/acpi/apei/ghes.c4
-rw-r--r--drivers/acpi/battery.c77
-rw-r--r--drivers/acpi/bus.c21
-rw-r--r--drivers/acpi/container.c27
-rw-r--r--drivers/acpi/device_pm.c667
-rw-r--r--drivers/acpi/dock.c56
-rw-r--r--drivers/acpi/ec.c97
-rw-r--r--drivers/acpi/glue.c65
-rw-r--r--drivers/acpi/hed.c4
-rw-r--r--drivers/acpi/internal.h11
-rw-r--r--drivers/acpi/osl.c226
-rw-r--r--drivers/acpi/pci_bind.c12
-rw-r--r--drivers/acpi/pci_irq.c32
-rw-r--r--drivers/acpi/pci_root.c167
-rw-r--r--drivers/acpi/power.c11
-rw-r--r--drivers/acpi/proc.c11
-rw-r--r--drivers/acpi/processor_driver.c74
-rw-r--r--drivers/acpi/processor_idle.c61
-rw-r--r--drivers/acpi/processor_perflib.c7
-rw-r--r--drivers/acpi/resource.c526
-rw-r--r--drivers/acpi/scan.c156
-rw-r--r--drivers/acpi/sleep.c535
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c40
-rw-r--r--drivers/acpi/utils.c38
-rw-r--r--drivers/acpi/video.c14
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/amba/bus.c36
-rw-r--r--drivers/amba/tegra-ahb.c7
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci_platform.c46
-rw-r--r--drivers/ata/ata_piix.c461
-rw-r--r--drivers/ata/libahci.c8
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c53
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_arasan_cf.c13
-rw-r--r--drivers/ata/pata_at91.c8
-rw-r--r--drivers/ata/pata_bf54x.c6
-rw-r--r--drivers/ata/pata_cmd64x.c6
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5536.c32
-rw-r--r--drivers/ata/pata_ep93xx.c12
-rw-r--r--drivers/ata/pata_icside.c21
-rw-r--r--drivers/ata/pata_imx.c8
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c13
-rw-r--r--drivers/ata/pata_macio.c33
-rw-r--r--drivers/ata/pata_mpc52xx.c27
-rw-r--r--drivers/ata/pata_octeon_cf.c425
-rw-r--r--drivers/ata/pata_of_platform.c10
-rw-r--r--drivers/ata/pata_palmld.c10
-rw-r--r--drivers/ata/pata_pdc2027x.c3
-rw-r--r--drivers/ata/pata_platform.c35
-rw-r--r--drivers/ata/pata_pxa.c6
-rw-r--r--drivers/ata/pata_rb532_cf.c6
-rw-r--r--drivers/ata/pata_rdc.c6
-rw-r--r--drivers/ata/pata_sch.c3
-rw-r--r--drivers/ata/pata_sil680.c3
-rw-r--r--[-rwxr-xr-x]drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_highbank.c16
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_promise.c15
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sx4.c14
-rw-r--r--drivers/ata/sata_vsc.c7
-rw-r--r--drivers/atm/ambassador.c53
-rw-r--r--drivers/atm/eni.c18
-rw-r--r--drivers/atm/firestream.c32
-rw-r--r--drivers/atm/fore200e.c70
-rw-r--r--drivers/atm/he.c36
-rw-r--r--drivers/atm/horizon.c12
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/atm/iphase.c11
-rw-r--r--drivers/atm/iphase.h146
-rw-r--r--drivers/atm/lanai.c28
-rw-r--r--drivers/atm/nicstar.c18
-rw-r--r--drivers/atm/solos-pci.c276
-rw-r--r--drivers/atm/zatm.c31
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c10
-rw-r--r--drivers/base/Kconfig7
-rw-r--r--drivers/base/attribute_container.c2
-rw-r--r--drivers/base/bus.c14
-rw-r--r--drivers/base/core.c31
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/devres.c4
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/dma-buf.c2
-rw-r--r--drivers/base/dma-mapping.c4
-rw-r--r--drivers/base/firmware_class.c54
-rw-r--r--drivers/base/memory.c42
-rw-r--r--drivers/base/node.c86
-rw-r--r--drivers/base/platform.c30
-rw-r--r--drivers/base/power/clock_ops.c6
-rw-r--r--drivers/base/power/domain.c11
-rw-r--r--drivers/base/power/main.c9
-rw-r--r--drivers/base/power/opp.c44
-rw-r--r--drivers/base/power/power.h6
-rw-r--r--drivers/base/power/qos.c327
-rw-r--r--drivers/base/power/sysfs.c94
-rw-r--r--drivers/base/regmap/internal.h24
-rw-r--r--drivers/base/regmap/regmap-debugfs.c173
-rw-r--r--drivers/base/regmap/regmap-irq.c19
-rw-r--r--drivers/base/regmap/regmap.c271
-rw-r--r--drivers/bcma/Kconfig8
-rw-r--r--drivers/bcma/Makefile1
-rw-r--r--drivers/bcma/bcma_private.h27
-rw-r--r--drivers/bcma/driver_chipcommon.c216
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c5
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c47
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c35
-rw-r--r--drivers/bcma/driver_gmac_cmn.c2
-rw-r--r--drivers/bcma/driver_gpio.c103
-rw-r--r--drivers/bcma/driver_mips.c52
-rw-r--r--drivers/bcma/driver_pci.c4
-rw-r--r--drivers/bcma/driver_pci_host.c37
-rw-r--r--drivers/bcma/host_pci.c14
-rw-r--r--drivers/bcma/main.c76
-rw-r--r--drivers/bcma/sprom.c5
-rw-r--r--drivers/block/aoe/aoe.h57
-rw-r--r--drivers/block/aoe/aoeblk.c104
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/aoe/aoecmd.c715
-rw-r--r--drivers/block/aoe/aoedev.c243
-rw-r--r--drivers/block/aoe/aoemain.c2
-rw-r--r--drivers/block/aoe/aoenet.c15
-rw-r--r--drivers/block/cciss.c111
-rw-r--r--drivers/block/cpqarray.c16
-rw-r--r--drivers/block/drbd/Kconfig10
-rw-r--r--drivers/block/drbd/Makefile2
-rw-r--r--drivers/block/drbd/drbd_actlog.c702
-rw-r--r--drivers/block/drbd/drbd_bitmap.c249
-rw-r--r--drivers/block/drbd/drbd_int.h1365
-rw-r--r--drivers/block/drbd/drbd_interval.c207
-rw-r--r--drivers/block/drbd/drbd_interval.h40
-rw-r--r--drivers/block/drbd/drbd_main.c3781
-rw-r--r--drivers/block/drbd/drbd_nl.c3276
-rw-r--r--drivers/block/drbd/drbd_nla.c55
-rw-r--r--drivers/block/drbd/drbd_nla.h8
-rw-r--r--drivers/block/drbd/drbd_proc.c41
-rw-r--r--drivers/block/drbd/drbd_receiver.c3894
-rw-r--r--drivers/block/drbd/drbd_req.c1574
-rw-r--r--drivers/block/drbd/drbd_req.h188
-rw-r--r--drivers/block/drbd/drbd_state.c1863
-rw-r--r--drivers/block/drbd/drbd_state.h161
-rw-r--r--drivers/block/drbd/drbd_strings.c1
-rw-r--r--drivers/block/drbd/drbd_worker.c1237
-rw-r--r--drivers/block/drbd/drbd_wrappers.h11
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c24
-rw-r--r--drivers/block/nvme.c17
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/ps3vram.c4
-rw-r--r--drivers/block/rbd.c1389
-rw-r--r--drivers/block/rbd_types.h2
-rw-r--r--drivers/block/sunvdc.c9
-rw-r--r--drivers/block/swim.c11
-rw-r--r--drivers/block/swim3.c3
-rw-r--r--drivers/block/umem.c3
-rw-r--r--drivers/block/virtio_blk.c20
-rw-r--r--drivers/block/xen-blkback/blkback.c305
-rw-r--r--drivers/block/xen-blkback/common.h16
-rw-r--r--drivers/block/xen-blkback/xenbus.c23
-rw-r--r--drivers/block/xen-blkfront.c203
-rw-r--r--drivers/block/xsysace.c19
-rw-r--r--drivers/bluetooth/ath3k.c10
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c28
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/bluetooth/hci_ldisc.c7
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/bus/omap-ocp2scp.c6
-rw-r--r--drivers/bus/omap_l3_noc.c6
-rw-r--r--drivers/cdrom/gdrom.c18
-rw-r--r--drivers/char/agp/ali-agp.c7
-rw-r--r--drivers/char/agp/amd-k7-agp.c8
-rw-r--r--drivers/char/agp/amd64-agp.c17
-rw-r--r--drivers/char/agp/ati-agp.c7
-rw-r--r--drivers/char/agp/efficeon-agp.c6
-rw-r--r--drivers/char/agp/i460-agp.c8
-rw-r--r--drivers/char/agp/intel-agp.c8
-rw-r--r--drivers/char/agp/intel-agp.h91
-rw-r--r--drivers/char/agp/intel-gtt.c320
-rw-r--r--drivers/char/agp/nvidia-agp.c6
-rw-r--r--drivers/char/agp/sgi-agp.c4
-rw-r--r--drivers/char/agp/sis-agp.c13
-rw-r--r--drivers/char/agp/sworks-agp.c6
-rw-r--r--drivers/char/agp/uninorth-agp.c8
-rw-r--r--drivers/char/agp/via-agp.c7
-rw-r--r--drivers/char/hpet.c5
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/atmel-rng.c4
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c6
-rw-r--r--drivers/char/hw_random/exynos-rng.c6
-rw-r--r--drivers/char/hw_random/n2-drv.c10
-rw-r--r--drivers/char/hw_random/octeon-rng.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c4
-rw-r--r--drivers/char/hw_random/pasemi-rng.c4
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c4
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c4
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c6
-rw-r--r--drivers/char/hw_random/virtio-rng.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c40
-rw-r--r--drivers/char/mbcs.c2
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/pc8736x_gpio.c3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c5
-rw-r--r--drivers/char/ppdev.c6
-rw-r--r--drivers/char/ps3flash.c2
-rw-r--r--drivers/char/random.c40
-rw-r--r--drivers/char/sonypi.c14
-rw-r--r--drivers/char/tb0219.c6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c87
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.h5
-rw-r--r--drivers/char/tpm/tpm_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_tis.c6
-rw-r--r--drivers/char/ttyprintk.c4
-rw-r--r--drivers/char/virtio_console.c330
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c14
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/clk-bcm2835.c2
-rw-r--r--drivers/clk/clk-max77686.c6
-rw-r--r--drivers/clk/clk-nomadik.c1
-rw-r--r--drivers/clk/clk-sunxi.c30
-rw-r--r--drivers/clk/clk-twl6040.c6
-rw-r--r--drivers/clk/clk-wm831x.c6
-rw-r--r--drivers/clk/clk-zynq.c383
-rw-r--r--drivers/clk/mvebu/Kconfig8
-rw-r--r--drivers/clk/mvebu/Makefile3
-rw-r--r--drivers/clk/mvebu/clk-core.c675
-rw-r--r--drivers/clk/mvebu/clk-core.h18
-rw-r--r--drivers/clk/mvebu/clk-cpu.c189
-rw-r--r--drivers/clk/mvebu/clk-cpu.h22
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.c249
-rw-r--r--drivers/clk/mvebu/clk-gating-ctrl.h22
-rw-r--r--drivers/clk/mvebu/clk.c27
-rw-r--r--drivers/clk/spear/spear1310_clock.c1
-rw-r--r--drivers/clk/ux500/abx500-clk.c2
-rw-r--r--drivers/clk/ux500/u8500_clk.c11
-rw-r--r--drivers/clocksource/Kconfig20
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/acpi_pm.c23
-rw-r--r--drivers/clocksource/arm_generic.c8
-rw-r--r--drivers/clocksource/em_sti.c8
-rw-r--r--drivers/clocksource/i8253.c2
-rw-r--r--drivers/clocksource/nomadik-mtu.c230
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c6
-rw-r--r--drivers/clocksource/sunxi_timer.c171
-rw-r--r--drivers/clocksource/time-armada-370-xp.c11
-rw-r--r--drivers/connector/connector.c4
-rw-r--r--drivers/cpufreq/Kconfig5
-rw-r--r--drivers/cpufreq/Kconfig.arm7
-rw-r--r--drivers/cpufreq/Kconfig.x862
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c7
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c7
-rw-r--r--drivers/cpufreq/cpufreq.c37
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c558
-rw-r--r--drivers/cpufreq/cpufreq_governor.c318
-rw-r--r--drivers/cpufreq/cpufreq_governor.h176
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c731
-rw-r--r--drivers/cpufreq/cpufreq_performance.c2
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c2
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c11
-rw-r--r--drivers/cpufreq/freq_table.c2
-rw-r--r--drivers/cpufreq/longhaul.c14
-rw-r--r--drivers/cpufreq/omap-cpufreq.c3
-rw-r--r--drivers/cpufreq/powernow-k8.c4
-rw-r--r--drivers/cpufreq/spear-cpufreq.c291
-rw-r--r--drivers/cpuidle/Kconfig19
-rw-r--r--drivers/cpuidle/Makefile2
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c161
-rw-r--r--drivers/cpuidle/cpuidle.c72
-rw-r--r--drivers/cpuidle/cpuidle.h13
-rw-r--r--drivers/cpuidle/driver.c222
-rw-r--r--drivers/cpuidle/governors/menu.c174
-rw-r--r--drivers/cpuidle/sysfs.c199
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/atmel-aes.c6
-rw-r--r--drivers/crypto/atmel-sha.c6
-rw-r--r--drivers/crypto/atmel-tdes.c6
-rw-r--r--drivers/crypto/bfin_crc.c6
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/crypto/geode-aes.c8
-rw-r--r--drivers/crypto/hifn_795x.c6
-rw-r--r--drivers/crypto/mv_cesa.c2
-rw-r--r--drivers/crypto/n2_core.c46
-rw-r--r--drivers/crypto/nx/nx-842.c20
-rw-r--r--drivers/crypto/nx/nx.c8
-rw-r--r--drivers/crypto/omap-aes.c8
-rw-r--r--drivers/crypto/omap-sham.c14
-rw-r--r--drivers/crypto/picoxcell_crypto.c7
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/talitos.c3
-rw-r--r--drivers/crypto/tegra-aes.c18
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c3
-rw-r--r--drivers/devfreq/Kconfig8
-rw-r--r--drivers/devfreq/devfreq.c926
-rw-r--r--drivers/devfreq/exynos4_bus.c145
-rw-r--r--drivers/devfreq/governor.h17
-rw-r--r--drivers/devfreq/governor_performance.c38
-rw-r--r--drivers/devfreq/governor_powersave.c38
-rw-r--r--drivers/devfreq/governor_simpleondemand.c55
-rw-r--r--drivers/devfreq/governor_userspace.c45
-rw-r--r--drivers/dma/dmatest.c49
-rw-r--r--drivers/dma/dw_dmac.c6
-rw-r--r--drivers/dma/edma.c6
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/imx-dma.c142
-rw-r--r--drivers/dma/imx-sdma.c1
-rw-r--r--drivers/dma/intel_mid_dma.c6
-rw-r--r--drivers/dma/ioat/dca.c32
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h13
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v2.h8
-rw-r--r--drivers/dma/ioat/dma_v3.c10
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/iop-adma.c12
-rw-r--r--drivers/dma/ipu/ipu_idmac.c3
-rw-r--r--drivers/dma/ipu/ipu_irq.c3
-rw-r--r--drivers/dma/mmp_pdma.c8
-rw-r--r--drivers/dma/mmp_tdma.c8
-rw-r--r--drivers/dma/mpc512x_dma.c6
-rw-r--r--drivers/dma/mv_xor.c434
-rw-r--r--drivers/dma/mv_xor.h36
-rw-r--r--drivers/dma/omap-dma.c5
-rw-r--r--drivers/dma/pch_dma.c6
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/ppc4xx/adma.c8
-rw-r--r--drivers/dma/sa11x0-dma.c8
-rw-r--r--drivers/dma/sh/shdma.c8
-rw-r--r--drivers/dma/sirf-dma.c6
-rw-r--r--drivers/dma/ste_dma40.c3
-rw-r--r--drivers/dma/ste_dma40_ll.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c18
-rw-r--r--drivers/dma/timb_dma.c4
-rw-r--r--drivers/edac/Kconfig35
-rw-r--r--drivers/edac/Makefile5
-rw-r--r--drivers/edac/amd64_edac.c8
-rw-r--r--drivers/edac/amd76x_edac.c8
-rw-r--r--drivers/edac/cell_edac.c8
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c7
-rw-r--r--drivers/edac/e7xxx_edac.c7
-rw-r--r--drivers/edac/edac_mc.c6
-rw-r--r--drivers/edac/edac_mc_sysfs.c19
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/highbank_l2_edac.c2
-rw-r--r--drivers/edac/highbank_mc_edac.c6
-rw-r--r--drivers/edac/i3000_edac.c7
-rw-r--r--drivers/edac/i3200_edac.c7
-rw-r--r--drivers/edac/i5000_edac.c7
-rw-r--r--drivers/edac/i5100_edac.c24
-rw-r--r--drivers/edac/i5400_edac.c7
-rw-r--r--drivers/edac/i7300_edac.c9
-rw-r--r--drivers/edac/i7core_edac.c7
-rw-r--r--drivers/edac/i82443bxgx_edac.c8
-rw-r--r--drivers/edac/i82860_edac.c8
-rw-r--r--drivers/edac/i82875p_edac.c8
-rw-r--r--drivers/edac/i82975x_edac.c8
-rw-r--r--drivers/edac/mpc85xx_edac.c8
-rw-r--r--drivers/edac/mv64x60_edac.c10
-rw-r--r--drivers/edac/octeon_edac-l2c.c208
-rw-r--r--drivers/edac/octeon_edac-lmc.c186
-rw-r--r--drivers/edac/octeon_edac-pc.c143
-rw-r--r--drivers/edac/octeon_edac-pci.c111
-rw-r--r--drivers/edac/pasemi_edac.c8
-rw-r--r--drivers/edac/ppc4xx_edac.c27
-rw-r--r--drivers/edac/r82600_edac.c8
-rw-r--r--drivers/edac/sb_edac.c7
-rw-r--r--drivers/edac/tile_edac.c8
-rw-r--r--drivers/edac/x38_edac.c7
-rw-r--r--drivers/eisa/eisa.ids4
-rw-r--r--drivers/extcon/extcon-adc-jack.c6
-rw-r--r--drivers/extcon/extcon-arizona.c7
-rw-r--r--drivers/extcon/extcon-class.c2
-rw-r--r--drivers/extcon/extcon-gpio.c6
-rw-r--r--drivers/extcon/extcon-max77693.c42
-rw-r--r--drivers/extcon/extcon-max8997.c34
-rw-r--r--drivers/firewire/init_ohci1394_dma.c4
-rw-r--r--drivers/firewire/net.c15
-rw-r--r--drivers/firewire/nosy.c4
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firewire/sbp2.c2
-rw-r--r--drivers/firmware/dcdbas.c6
-rw-r--r--drivers/firmware/dmi_scan.c80
-rw-r--r--drivers/firmware/efivars.c679
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/Kconfig54
-rw-r--r--drivers/gpio/Makefile6
-rw-r--r--drivers/gpio/gpio-74x164.c6
-rw-r--r--drivers/gpio/gpio-ab8500.c6
-rw-r--r--drivers/gpio/gpio-adnp.c10
-rw-r--r--drivers/gpio/gpio-adp5520.c6
-rw-r--r--drivers/gpio/gpio-adp5588.c6
-rw-r--r--drivers/gpio/gpio-arizona.c6
-rw-r--r--drivers/gpio/gpio-clps711x.c199
-rw-r--r--drivers/gpio/gpio-cs5535.c6
-rw-r--r--drivers/gpio/gpio-da9052.c14
-rw-r--r--drivers/gpio/gpio-da9055.c204
-rw-r--r--drivers/gpio/gpio-em.c54
-rw-r--r--drivers/gpio/gpio-ep93xx.c2
-rw-r--r--drivers/gpio/gpio-generic.c6
-rw-r--r--drivers/gpio/gpio-ich.c11
-rw-r--r--drivers/gpio/gpio-janz-ttl.c10
-rw-r--r--drivers/gpio/gpio-langwell.c8
-rw-r--r--drivers/gpio/gpio-lpc32xx.c4
-rw-r--r--drivers/gpio/gpio-max7300.c6
-rw-r--r--drivers/gpio/gpio-max7301.c6
-rw-r--r--drivers/gpio/gpio-max730x.c16
-rw-r--r--drivers/gpio/gpio-max732x.c8
-rw-r--r--drivers/gpio/gpio-mc33880.c6
-rw-r--r--drivers/gpio/gpio-mcp23s08.c6
-rw-r--r--drivers/gpio/gpio-ml-ioh.c8
-rw-r--r--drivers/gpio/gpio-mpc5200.c4
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-msm-v2.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c36
-rw-r--r--drivers/gpio/gpio-mxc.c4
-rw-r--r--drivers/gpio/gpio-mxs.c2
-rw-r--r--drivers/gpio/gpio-omap.c8
-rw-r--r--drivers/gpio/gpio-pca953x.c61
-rw-r--r--drivers/gpio/gpio-pcf857x.c29
-rw-r--r--drivers/gpio/gpio-pch.c9
-rw-r--r--drivers/gpio/gpio-pl061.c66
-rw-r--r--drivers/gpio/gpio-pxa.c8
-rw-r--r--drivers/gpio/gpio-rc5t583.c6
-rw-r--r--drivers/gpio/gpio-rdc321x.c6
-rw-r--r--drivers/gpio/gpio-samsung.c80
-rw-r--r--drivers/gpio/gpio-sch.c6
-rw-r--r--drivers/gpio/gpio-sodaville.c4
-rw-r--r--drivers/gpio/gpio-spear-spics.c217
-rw-r--r--drivers/gpio/gpio-sta2x11.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c94
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-sx150x.c6
-rw-r--r--drivers/gpio/gpio-tc3589x.c26
-rw-r--r--drivers/gpio/gpio-tegra.c47
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpio-tps6586x.c15
-rw-r--r--drivers/gpio/gpio-tps65910.c6
-rw-r--r--drivers/gpio/gpio-tps65912.c6
-rw-r--r--drivers/gpio/gpio-ts5500.c466
-rw-r--r--drivers/gpio/gpio-twl4030.c55
-rw-r--r--drivers/gpio/gpio-twl6040.c4
-rw-r--r--drivers/gpio/gpio-viperboard.c517
-rw-r--r--drivers/gpio/gpio-vr41xx.c6
-rw-r--r--drivers/gpio/gpio-vt8500.c4
-rw-r--r--drivers/gpio/gpio-vx855.c6
-rw-r--r--drivers/gpio/gpio-wm831x.c6
-rw-r--r--drivers/gpio/gpio-wm8350.c6
-rw-r--r--drivers/gpio/gpio-wm8994.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c54
-rw-r--r--drivers/gpio/gpiolib.c124
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c17
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c63
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c161
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c)146
-rw-r--r--drivers/gpu/drm/drm_edid.c48
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c76
-rw-r--r--drivers/gpu/drm/drm_hashtab.c38
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_irq.c120
-rw-r--r--drivers/gpu/drm/drm_mm.c86
-rw-r--r--drivers/gpu/drm/drm_modes.c8
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c37
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/exynos/Kconfig32
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c178
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h26
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c57
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c179
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c141
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h69
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c116
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1955
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c236
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c501
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c457
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h80
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1838
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c74
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c136
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c2050
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c839
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c86
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c451
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c428
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h669
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h284
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-rotator.h73
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c10
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c365
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c20
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c71
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c98
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c139
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h483
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c358
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c87
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c420
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c109
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h315
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c763
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c45
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c62
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1091
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1989
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c978
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h123
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c235
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c11
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c90
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c697
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c342
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h37
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c128
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c99
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile38
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/core/falcon.c247
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c1150
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h142
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c98
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c884
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c190
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c184
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h225
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/falcon.h81
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c182
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c92
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c393
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c132
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c141
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c109
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c261
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c764
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c136
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c321
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2547
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c403
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h120
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c530
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2141
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c149
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c337
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c769
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h145
-rw-r--r--drivers/gpu/drm/radeon/ni.c477
-rw-r--r--drivers/gpu/drm/radeon/nid.h87
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r600.c582
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c405
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h9
-rw-r--r--drivers/gpu/drm/radeon/r600d.h86
-rw-r--r--drivers/gpu/drm/radeon/radeon.h47
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c192
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c40
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5152
-rw-r--r--drivers/gpu/drm/radeon/rv515.c124
-rw-r--r--drivers/gpu/drm/radeon/rv770.c105
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h71
-rw-r--r--drivers/gpu/drm/radeon/si.c445
-rw-r--r--drivers/gpu/drm/radeon/sid.h137
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c10
-rw-r--r--drivers/gpu/drm/tegra/Kconfig23
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/dc.c833
-rw-r--r--drivers/gpu/drm/tegra/dc.h388
-rw-r--r--drivers/gpu/drm/tegra/drm.c115
-rw-r--r--drivers/gpu/drm/tegra/drm.h216
-rw-r--r--drivers/gpu/drm/tegra/fb.c56
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1321
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h575
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/drm/tegra/output.c272
-rw-r--r--drivers/gpu/drm/tegra/rgb.c228
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c322
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c28
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c51
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h909
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c274
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c92
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c917
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2019
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c893
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c61
-rw-r--r--drivers/hid/hid-icade.c259
-rw-r--r--drivers/hid/hid-ids.h22
-rw-r--r--drivers/hid/hid-input.c97
-rw-r--r--drivers/hid/hid-multitouch.c109
-rw-r--r--drivers/hid/hid-picolcd_cir.c2
-rw-r--r--drivers/hid/hid-roccat-isku.c44
-rw-r--r--drivers/hid/hid-roccat-isku.h78
-rw-r--r--drivers/hid/hid-roccat-koneplus.c348
-rw-r--r--drivers/hid/hid-roccat-koneplus.h101
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c237
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h16
-rw-r--r--drivers/hid/hid-roccat-lua.c227
-rw-r--r--drivers/hid/hid-roccat-lua.h29
-rw-r--r--drivers/hid/hid-roccat-pyra.c342
-rw-r--r--drivers/hid/hid-roccat-pyra.h24
-rw-r--r--drivers/hid/hid-roccat-savu.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c36
-rw-r--r--drivers/hid/hidraw.c16
-rw-r--r--drivers/hid/i2c-hid/Kconfig18
-rw-r--r--drivers/hid/i2c-hid/Makefile5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c990
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c10
-rw-r--r--drivers/hsi/clients/hsi_char.c8
-rw-r--r--drivers/hv/Kconfig6
-rw-r--r--drivers/hv/Makefile1
-rw-r--r--drivers/hv/channel.c8
-rw-r--r--drivers/hv/channel_mgmt.c8
-rw-r--r--drivers/hv/hv_balloon.c1048
-rw-r--r--drivers/hwmon/Kconfig8
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru.c12
-rw-r--r--drivers/hwmon/abituguru3.c6
-rw-r--r--drivers/hwmon/ad7314.c6
-rw-r--r--drivers/hwmon/ad7414.c4
-rw-r--r--drivers/hwmon/adcxx.c6
-rw-r--r--drivers/hwmon/ads7871.c6
-rw-r--r--drivers/hwmon/adt7411.c6
-rw-r--r--drivers/hwmon/coretemp.c8
-rw-r--r--drivers/hwmon/da9052-hwmon.c6
-rw-r--r--drivers/hwmon/dme1737.c6
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/f71805f.c8
-rw-r--r--drivers/hwmon/f71882fg.c8
-rw-r--r--drivers/hwmon/fam15h_power.c10
-rw-r--r--drivers/hwmon/gpio-fan.c8
-rw-r--r--drivers/hwmon/hih6130.c6
-rw-r--r--drivers/hwmon/hwmon-vid.c10
-rw-r--r--drivers/hwmon/hwmon.c26
-rw-r--r--drivers/hwmon/i5k_amb.c18
-rw-r--r--drivers/hwmon/it87.c930
-rw-r--r--drivers/hwmon/jz4740-hwmon.c6
-rw-r--r--drivers/hwmon/k10temp.c8
-rw-r--r--drivers/hwmon/k8temp.c8
-rw-r--r--drivers/hwmon/lm70.c6
-rw-r--r--drivers/hwmon/lm73.c16
-rw-r--r--drivers/hwmon/lm78.c6
-rw-r--r--drivers/hwmon/max1111.c8
-rw-r--r--drivers/hwmon/max197.c6
-rw-r--r--drivers/hwmon/mc13783-adc.c4
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/pc87360.c8
-rw-r--r--drivers/hwmon/pc87427.c10
-rw-r--r--drivers/hwmon/s3c-hwmon.c6
-rw-r--r--drivers/hwmon/sch5627.c4
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/sht21.c6
-rw-r--r--drivers/hwmon/sis5595.c16
-rw-r--r--drivers/hwmon/smsc47b397.c6
-rw-r--r--drivers/hwmon/tmp102.c6
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c6
-rw-r--r--drivers/hwmon/ultra45_env.c6
-rw-r--r--drivers/hwmon/vexpress.c230
-rw-r--r--drivers/hwmon/via-cputemp.c6
-rw-r--r--drivers/hwmon/via686a.c14
-rw-r--r--drivers/hwmon/vt1211.c8
-rw-r--r--drivers/hwmon/vt8231.c12
-rw-r--r--drivers/hwmon/w83627ehf.c111
-rw-r--r--drivers/hwmon/w83627hf.c95
-rw-r--r--drivers/hwmon/w83781d.c6
-rw-r--r--drivers/hwmon/wm831x-hwmon.c6
-rw-r--r--drivers/hwmon/wm8350-hwmon.c6
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c6
-rw-r--r--drivers/hwspinlock/u8500_hsem.c6
-rw-r--r--drivers/i2c/busses/Kconfig20
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c8
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c10
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c8
-rw-r--r--drivers/i2c/busses/i2c-amd756.c7
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c7
-rw-r--r--drivers/i2c/busses/i2c-at91.c348
-rw-r--r--drivers/i2c/busses/i2c-au1550.c6
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c300
-rw-r--r--drivers/i2c/busses/i2c-cpm.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c6
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c6
-rw-r--r--drivers/i2c/busses/i2c-elektor.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c14
-rw-r--r--drivers/i2c/busses/i2c-highlander.c6
-rw-r--r--drivers/i2c/busses/i2c-hydra.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c41
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c40
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c6
-rw-r--r--drivers/i2c/busses/i2c-isch.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c38
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c18
-rw-r--r--drivers/i2c/busses/i2c-mxs.c14
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c14
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c8
-rw-r--r--drivers/i2c/busses/i2c-ocores.c174
-rw-r--r--drivers/i2c/busses/i2c-octeon.c10
-rw-r--r--drivers/i2c/busses/i2c-omap.c238
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c6
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c6
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c8
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c6
-rw-r--r--drivers/i2c/busses/i2c-piix4.c37
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c6
-rw-r--r--drivers/i2c/busses/i2c-pnx.c6
-rw-r--r--drivers/i2c/busses/i2c-powermac.c16
-rw-r--r--drivers/i2c/busses/i2c-puv3.c6
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c12
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c215
-rw-r--r--drivers/i2c/busses/i2c-s6000.c8
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c8
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c152
-rw-r--r--drivers/i2c/busses/i2c-sirf.c12
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c4
-rw-r--r--drivers/i2c/busses/i2c-sis630.c8
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c8
-rw-r--r--drivers/i2c/busses/i2c-via.c6
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c480
-rw-r--r--drivers/i2c/busses/i2c-xiic.c8
-rw-r--r--drivers/i2c/busses/i2c-xlr.c6
-rw-r--r--drivers/i2c/busses/scx200_acb.c16
-rw-r--r--drivers/i2c/i2c-core.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c153
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c10
-rw-r--r--drivers/ide/aec62xx.c8
-rw-r--r--drivers/ide/alim15x3.c10
-rw-r--r--drivers/ide/amd74xx.c4
-rw-r--r--drivers/ide/atiixp.c4
-rw-r--r--drivers/ide/cmd64x.c4
-rw-r--r--drivers/ide/cs5520.c4
-rw-r--r--drivers/ide/cs5530.c6
-rw-r--r--drivers/ide/cs5535.c5
-rw-r--r--drivers/ide/cy82c693.c11
-rw-r--r--drivers/ide/delkin_cb.c5
-rw-r--r--drivers/ide/hpt366.c42
-rw-r--r--drivers/ide/icside.c15
-rw-r--r--drivers/ide/ide-pci-generic.c4
-rw-r--r--drivers/ide/ide_platform.c14
-rw-r--r--drivers/ide/it8172.c5
-rw-r--r--drivers/ide/it8213.c4
-rw-r--r--drivers/ide/it821x.c10
-rw-r--r--drivers/ide/jmicron.c4
-rw-r--r--drivers/ide/ns87415.c8
-rw-r--r--drivers/ide/opti621.c4
-rw-r--r--drivers/ide/palm_bk3710.c7
-rw-r--r--drivers/ide/pdc202xx_new.c10
-rw-r--r--drivers/ide/pdc202xx_old.c8
-rw-r--r--drivers/ide/piix.c8
-rw-r--r--drivers/ide/pmac.c16
-rw-r--r--drivers/ide/rapide.c7
-rw-r--r--drivers/ide/rz1000.c6
-rw-r--r--drivers/ide/sc1200.c4
-rw-r--r--drivers/ide/scc_pata.c20
-rw-r--r--drivers/ide/serverworks.c4
-rw-r--r--drivers/ide/sgiioc4.c13
-rw-r--r--drivers/ide/siimage.c13
-rw-r--r--drivers/ide/sis5513.c10
-rw-r--r--drivers/ide/sl82c105.c4
-rw-r--r--drivers/ide/slc90e66.c5
-rw-r--r--drivers/ide/tc86c001.c12
-rw-r--r--drivers/ide/triflex.c5
-rw-r--r--drivers/ide/trm290.c6
-rw-r--r--drivers/ide/via82cxxx.c8
-rw-r--r--drivers/idle/intel_idle.c19
-rw-r--r--drivers/iio/Kconfig13
-rw-r--r--drivers/iio/Makefile8
-rw-r--r--drivers/iio/accel/Kconfig3
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c25
-rw-r--r--drivers/iio/adc/Kconfig79
-rw-r--r--drivers/iio/adc/Makefile7
-rw-r--r--drivers/iio/adc/ad7266.c17
-rw-r--r--drivers/iio/adc/ad7298.c (renamed from drivers/staging/iio/adc/ad7298_core.c)207
-rw-r--r--drivers/iio/adc/ad7476.c8
-rw-r--r--drivers/iio/adc/ad7791.c10
-rw-r--r--drivers/iio/adc/ad7793.c (renamed from drivers/staging/iio/adc/ad7793.c)390
-rw-r--r--drivers/iio/adc/ad7887.c378
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c2
-rw-r--r--drivers/iio/adc/at91_adc.c12
-rw-r--r--drivers/iio/adc/lp8788_adc.c6
-rw-r--r--drivers/iio/adc/max1363.c (renamed from drivers/staging/iio/adc/max1363_core.c)343
-rw-r--r--drivers/iio/adc/ti-adc081c.c161
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c260
-rw-r--r--drivers/iio/adc/viperboard_adc.c181
-rw-r--r--drivers/iio/amplifiers/ad8366.c6
-rw-r--r--drivers/iio/buffer_cb.c113
-rw-r--r--drivers/iio/common/hid-sensors/Kconfig15
-rw-r--r--drivers/iio/common/hid-sensors/Makefile3
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c6
-rw-r--r--drivers/iio/dac/Kconfig12
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5064.c18
-rw-r--r--drivers/iio/dac/ad5360.c8
-rw-r--r--drivers/iio/dac/ad5380.c28
-rw-r--r--drivers/iio/dac/ad5421.c6
-rw-r--r--drivers/iio/dac/ad5446.c24
-rw-r--r--drivers/iio/dac/ad5449.c376
-rw-r--r--drivers/iio/dac/ad5504.c12
-rw-r--r--drivers/iio/dac/ad5624r_spi.c12
-rw-r--r--drivers/iio/dac/ad5686.c14
-rw-r--r--drivers/iio/dac/ad5755.c16
-rw-r--r--drivers/iio/dac/ad5764.c6
-rw-r--r--drivers/iio/dac/ad5791.c19
-rw-r--r--drivers/iio/dac/max517.c6
-rw-r--r--drivers/iio/dac/mcp4725.c8
-rw-r--r--drivers/iio/frequency/ad9523.c6
-rw-r--r--drivers/iio/frequency/adf4350.c8
-rw-r--r--drivers/iio/gyro/Kconfig10
-rw-r--r--drivers/iio/gyro/Makefile1
-rw-r--r--drivers/iio/gyro/adis16136.c580
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c25
-rw-r--r--drivers/iio/imu/Kconfig27
-rw-r--r--drivers/iio/imu/Makefile10
-rw-r--r--drivers/iio/imu/adis.c440
-rw-r--r--drivers/iio/imu/adis16480.c924
-rw-r--r--drivers/iio/imu/adis_buffer.c176
-rw-r--r--drivers/iio/imu/adis_trigger.c89
-rw-r--r--drivers/iio/industrialio-buffer.c386
-rw-r--r--drivers/iio/industrialio-core.c105
-rw-r--r--drivers/iio/industrialio-event.c11
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/Kconfig1
-rw-r--r--drivers/iio/light/adjd_s311.c11
-rw-r--r--drivers/iio/light/hid-sensor-als.c24
-rw-r--r--drivers/iio/light/lm3533-als.c19
-rw-r--r--drivers/iio/light/vcnl4000.c8
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c25
-rw-r--r--drivers/infiniband/core/cma.c9
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c7
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h8
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c1
-rw-r--r--drivers/infiniband/hw/amso1100/c2_pd.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c797
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c210
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h33
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c20
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c10
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c27
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/user.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c9
-rw-r--r--drivers/infiniband/hw/nes/nes.c6
-rw-r--r--drivers/infiniband/hw/nes/nes.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c32
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c42
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c13
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c314
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c178
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/gameport/emu10k1-gp.c6
-rw-r--r--drivers/input/gameport/fm801-gp.c6
-rw-r--r--drivers/input/input-mt.c4
-rw-r--r--drivers/input/input.c181
-rw-r--r--drivers/input/joystick/as5011.c29
-rw-r--r--drivers/input/joystick/maplecontrol.c6
-rw-r--r--drivers/input/joystick/walkera0701.c7
-rw-r--r--drivers/input/joystick/xpad.c33
-rw-r--r--drivers/input/keyboard/Kconfig5
-rw-r--r--drivers/input/keyboard/adp5520-keys.c6
-rw-r--r--drivers/input/keyboard/adp5588-keys.c18
-rw-r--r--drivers/input/keyboard/adp5589-keys.c21
-rw-r--r--drivers/input/keyboard/bf54x-keys.c6
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c4
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c103
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c39
-rw-r--r--drivers/input/keyboard/hilkbd.c10
-rw-r--r--drivers/input/keyboard/imx_keypad.c9
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c6
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c6
-rw-r--r--drivers/input/keyboard/lm8323.c6
-rw-r--r--drivers/input/keyboard/lm8333.c6
-rw-r--r--drivers/input/keyboard/locomokbd.c8
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c8
-rw-r--r--drivers/input/keyboard/matrix_keypad.c129
-rw-r--r--drivers/input/keyboard/max7359_keypad.c6
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c6
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c12
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c38
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/omap4-keypad.c10
-rw-r--r--drivers/input/keyboard/opencores-kbd.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c10
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c6
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c6
-rw-r--r--drivers/input/keyboard/qt1070.c8
-rw-r--r--drivers/input/keyboard/qt2160.c31
-rw-r--r--drivers/input/keyboard/samsung-keypad.c109
-rw-r--r--drivers/input/keyboard/sh_keysc.c6
-rw-r--r--drivers/input/keyboard/spear-keyboard.c98
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c142
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c8
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c179
-rw-r--r--drivers/input/keyboard/tegra-kbc.c16
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c6
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c8
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c6
-rw-r--r--drivers/input/matrix-keymap.c23
-rw-r--r--drivers/input/misc/88pm80x_onkey.c6
-rw-r--r--drivers/input/misc/88pm860x_onkey.c6
-rw-r--r--drivers/input/misc/Kconfig39
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/ab8500-ponkey.c6
-rw-r--r--drivers/input/misc/ad714x-i2c.c6
-rw-r--r--drivers/input/misc/ad714x-spi.c6
-rw-r--r--drivers/input/misc/adxl34x-i2c.c6
-rw-r--r--drivers/input/misc/adxl34x-spi.c6
-rw-r--r--drivers/input/misc/arizona-haptics.c255
-rw-r--r--drivers/input/misc/bfin_rotary.c6
-rw-r--r--drivers/input/misc/bma150.c28
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c6
-rw-r--r--drivers/input/misc/cobalt_btns.c6
-rw-r--r--drivers/input/misc/da9052_onkey.c28
-rw-r--r--drivers/input/misc/da9055_onkey.c171
-rw-r--r--drivers/input/misc/dm355evm_keys.c6
-rw-r--r--drivers/input/misc/gp2ap002a00f.c8
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c6
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/misc/kxtj9.c16
-rw-r--r--drivers/input/misc/m68kspkr.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c6
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c6
-rw-r--r--drivers/input/misc/mma8450.c6
-rw-r--r--drivers/input/misc/mpu3050.c8
-rw-r--r--drivers/input/misc/pcap_keys.c6
-rw-r--r--drivers/input/misc/pcf50633-input.c6
-rw-r--r--drivers/input/misc/pcf8574_keypad.c6
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c6
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c6
-rw-r--r--drivers/input/misc/pwm-beeper.c20
-rw-r--r--drivers/input/misc/rb532_button.c6
-rw-r--r--drivers/input/misc/retu-pwrbutton.c99
-rw-r--r--drivers/input/misc/rotary_encoder.c9
-rw-r--r--drivers/input/misc/sgi_btns.c6
-rw-r--r--drivers/input/misc/sparcspkr.c14
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c3
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c6
-rw-r--r--drivers/input/misc/wistron_btns.c20
-rw-r--r--drivers/input/misc/wm831x-on.c11
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c10
-rw-r--r--drivers/input/mouse/gpio_mouse.c6
-rw-r--r--drivers/input/mouse/maplemouse.c6
-rw-r--r--drivers/input/mouse/navpoint.c6
-rw-r--r--drivers/input/mouse/pxa930_trkball.c6
-rw-r--r--drivers/input/mouse/sentelic.c2
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/serio/Kconfig9
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/altera_ps2.c6
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/arc_ps2.c274
-rw-r--r--drivers/input/serio/ct82c710.c6
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c13
-rw-r--r--drivers/input/serio/i8042-io.h2
-rw-r--r--drivers/input/serio/i8042-sparcio.h6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h9
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/maceps2.c8
-rw-r--r--drivers/input/serio/pcips2.c6
-rw-r--r--drivers/input/serio/q40kbd.c6
-rw-r--r--drivers/input/serio/rpckbd.c6
-rw-r--r--drivers/input/serio/sa1111ps2.c12
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/serio/xilinx_ps2.c8
-rw-r--r--drivers/input/tablet/wacom_sys.c58
-rw-r--r--drivers/input/tablet/wacom_wac.c32
-rw-r--r--drivers/input/tablet/wacom_wac.h2
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c8
-rw-r--r--drivers/input/touchscreen/Kconfig18
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c6
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c6
-rw-r--r--drivers/input/touchscreen/ads7846.c10
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c6
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c8
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c8
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c125
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c19
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c6
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c6
-rw-r--r--drivers/input/touchscreen/da9034-ts.c6
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c69
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c28
-rw-r--r--drivers/input/touchscreen/eeti_ts.c6
-rw-r--r--drivers/input/touchscreen/egalax_ts.c8
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c479
-rw-r--r--drivers/input/touchscreen/htcpen.c6
-rw-r--r--drivers/input/touchscreen/ili210x.c6
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c14
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c6
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c6
-rw-r--r--drivers/input/touchscreen/max11801_ts.c8
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c6
-rw-r--r--drivers/input/touchscreen/mms114.c68
-rw-r--r--drivers/input/touchscreen/pcap_ts.c6
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c6
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c6
-rw-r--r--drivers/input/touchscreen/st1232.c8
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c133
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c398
-rw-r--r--drivers/input/touchscreen/ti_tscadc.c486
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c6
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c4
-rw-r--r--drivers/input/touchscreen/tsc2005.c8
-rw-r--r--drivers/input/touchscreen/tsc2007.c6
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c8
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c6
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c6
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c12
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c196
-rw-r--r--drivers/iommu/amd_iommu_init.c34
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/intel-iommu.c67
-rw-r--r--drivers/iommu/omap-iommu-debug.c8
-rw-r--r--drivers/iommu/omap-iommu.c113
-rw-r--r--drivers/iommu/omap-iommu.h225
-rw-r--r--drivers/iommu/omap-iommu2.c334
-rw-r--r--drivers/iommu/omap-iopgtable.h98
-rw-r--r--drivers/iommu/omap-iovmm.c50
-rw-r--r--drivers/iommu/tegra-gart.c6
-rw-r--r--drivers/iommu/tegra-smmu.c14
-rw-r--r--drivers/ipack/Kconfig24
-rw-r--r--drivers/ipack/Makefile (renamed from drivers/staging/ipack/Makefile)2
-rw-r--r--drivers/ipack/carriers/Kconfig7
-rw-r--r--drivers/ipack/carriers/Makefile (renamed from drivers/staging/ipack/bridges/Makefile)0
-rw-r--r--drivers/ipack/carriers/tpci200.c (renamed from drivers/staging/ipack/bridges/tpci200.c)321
-rw-r--r--drivers/ipack/carriers/tpci200.h (renamed from drivers/staging/ipack/bridges/tpci200.h)33
-rw-r--r--drivers/ipack/devices/Kconfig (renamed from drivers/staging/ipack/devices/Kconfig)1
-rw-r--r--drivers/ipack/devices/Makefile (renamed from drivers/staging/ipack/devices/Makefile)0
-rw-r--r--drivers/ipack/devices/ipoctal.c (renamed from drivers/staging/ipack/devices/ipoctal.c)129
-rw-r--r--drivers/ipack/devices/ipoctal.h (renamed from drivers/staging/ipack/devices/ipoctal.h)7
-rw-r--r--drivers/ipack/devices/scc2698.h (renamed from drivers/staging/ipack/devices/scc2698.h)7
-rw-r--r--drivers/ipack/ipack.c (renamed from drivers/staging/ipack/ipack.c)64
-rw-r--r--drivers/irqchip/Kconfig9
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-sunxi.c151
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c203
-rw-r--r--drivers/irqchip/spear-shirq.c316
-rw-r--r--drivers/isdn/capi/capi.c36
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/gigaset/common.c1
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c8
-rw-r--r--drivers/isdn/hardware/avm/c4.c3
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c3
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c9
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c8
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c16
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c10
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c14
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c6
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c5
-rw-r--r--drivers/isdn/hisax/asuscom.c9
-rw-r--r--drivers/isdn/hisax/avm_a1.c3
-rw-r--r--drivers/isdn/hisax/avm_a1p.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c17
-rw-r--r--drivers/isdn/hisax/avma1_cs.c12
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c16
-rw-r--r--drivers/isdn/hisax/bkm_a8.c18
-rw-r--r--drivers/isdn/hisax/callc.c2
-rw-r--r--drivers/isdn/hisax/config.c26
-rw-r--r--drivers/isdn/hisax/diva.c31
-rw-r--r--drivers/isdn/hisax/elsa.c31
-rw-r--r--drivers/isdn/hisax/elsa_cs.c12
-rw-r--r--drivers/isdn/hisax/enternow_pci.c14
-rw-r--r--drivers/isdn/hisax/gazel.c11
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c16
-rw-r--r--drivers/isdn/hisax/hfc_pci.c6
-rw-r--r--drivers/isdn/hisax/hfc_sx.c11
-rw-r--r--drivers/isdn/hisax/hfcscard.c9
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c22
-rw-r--r--drivers/isdn/hisax/icc.c3
-rw-r--r--drivers/isdn/hisax/isac.c7
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/hisax/isurf.c5
-rw-r--r--drivers/isdn/hisax/ix1_micro.c9
-rw-r--r--drivers/isdn/hisax/mic.c3
-rw-r--r--drivers/isdn/hisax/niccy.c6
-rw-r--r--drivers/isdn/hisax/nj_s.c14
-rw-r--r--drivers/isdn/hisax/nj_u.c14
-rw-r--r--drivers/isdn/hisax/s0box.c3
-rw-r--r--drivers/isdn/hisax/saphir.c3
-rw-r--r--drivers/isdn/hisax/sedlbauer.c23
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c12
-rw-r--r--drivers/isdn/hisax/sportster.c6
-rw-r--r--drivers/isdn/hisax/teleint.c3
-rw-r--r--drivers/isdn/hisax/teles0.c3
-rw-r--r--drivers/isdn/hisax/teles3.c9
-rw-r--r--drivers/isdn/hisax/teles_cs.c12
-rw-r--r--drivers/isdn/hisax/telespci.c5
-rw-r--r--drivers/isdn/hisax/w6692.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c8
-rw-r--r--drivers/isdn/i4l/isdn_tty.c4
-rw-r--r--drivers/isdn/mISDN/core.c4
-rw-r--r--drivers/isdn/mISDN/dsp_core.c3
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c5
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/isdn/pcbit/layer2.c2
-rw-r--r--drivers/leds/Kconfig4
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-triggers.c25
-rw-r--r--drivers/leds/leds-88pm860x.c9
-rw-r--r--drivers/leds/leds-adp5520.c12
-rw-r--r--drivers/leds/leds-asic3.c6
-rw-r--r--drivers/leds/leds-atmel-pwm.c2
-rw-r--r--drivers/leds/leds-bd2802.c12
-rw-r--r--drivers/leds/leds-blinkm.c6
-rw-r--r--drivers/leds/leds-clevo-mail.c13
-rw-r--r--drivers/leds/leds-cobalt-qube.c17
-rw-r--r--drivers/leds/leds-cobalt-raq.c17
-rw-r--r--drivers/leds/leds-da903x.c16
-rw-r--r--drivers/leds/leds-da9052.c6
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c50
-rw-r--r--drivers/leds/leds-lm3530.c6
-rw-r--r--drivers/leds/leds-lm3533.c8
-rw-r--r--drivers/leds/leds-lm355x.c12
-rw-r--r--drivers/leds/leds-lm3642.c20
-rw-r--r--drivers/leds/leds-lp3944.c8
-rw-r--r--drivers/leds/leds-lp5521.c21
-rw-r--r--drivers/leds/leds-lp5523.c28
-rw-r--r--drivers/leds/leds-lp8788.c6
-rw-r--r--drivers/leds/leds-lt3593.c28
-rw-r--r--drivers/leds/leds-max8997.c6
-rw-r--r--drivers/leds/leds-mc13783.c10
-rw-r--r--drivers/leds/leds-net48xx.c2
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/leds/leds-ns2.c122
-rw-r--r--drivers/leds/leds-ot200.c6
-rw-r--r--drivers/leds/leds-pca955x.c8
-rw-r--r--drivers/leds/leds-pca9633.c6
-rw-r--r--drivers/leds/leds-pwm.c6
-rw-r--r--drivers/leds/leds-rb532.c8
-rw-r--r--drivers/leds/leds-regulator.c6
-rw-r--r--drivers/leds/leds-renesas-tpu.c31
-rw-r--r--drivers/leds/leds-ss4200.c6
-rw-r--r--drivers/leds/leds-sunfire.c12
-rw-r--r--drivers/leds/leds-tca6507.c6
-rw-r--r--drivers/leds/leds-wm8350.c4
-rw-r--r--drivers/leds/leds-wrap.c2
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c2
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/macintosh/macio_asic.c6
-rw-r--r--drivers/macintosh/mediabay.c3
-rw-r--r--drivers/macintosh/rack-meter.c12
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c18
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c35
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c14
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c13
-rw-r--r--drivers/macintosh/windfarm_pm112.c4
-rw-r--r--drivers/macintosh/windfarm_pm121.c4
-rw-r--r--drivers/macintosh/windfarm_pm72.c2
-rw-r--r--drivers/macintosh/windfarm_pm81.c4
-rw-r--r--drivers/macintosh/windfarm_pm91.c4
-rw-r--r--drivers/macintosh/windfarm_rm31.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c13
-rw-r--r--drivers/md/dm-bio-prison.c25
-rw-r--r--drivers/md/dm-bio-prison.h1
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-delay.c5
-rw-r--r--drivers/md/dm-flakey.c21
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-ioctl.c64
-rw-r--r--drivers/md/dm-kcopyd.c18
-rw-r--r--drivers/md/dm-linear.c6
-rw-r--r--drivers/md/dm-raid.c107
-rw-r--r--drivers/md/dm-raid1.c75
-rw-r--r--drivers/md/dm-snap.c90
-rw-r--r--drivers/md/dm-stripe.c20
-rw-r--r--drivers/md/dm-table.c41
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c245
-rw-r--r--drivers/md/dm-verity.c25
-rw-r--r--drivers/md/dm-zero.c5
-rw-r--r--drivers/md/dm.c88
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/md.c260
-rw-r--r--drivers/md/md.h28
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c16
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h16
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c50
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c20
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-btree.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c16
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c2
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid10.c15
-rw-r--r--drivers/md/raid5.c57
-rw-r--r--drivers/media/common/Kconfig7
-rw-r--r--drivers/media/common/b2c2/Kconfig5
-rw-r--r--drivers/media/common/siano/Kconfig18
-rw-r--r--drivers/media/common/siano/Makefile6
-rw-r--r--drivers/media/common/siano/smscoreapi.c2
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/siano/smsir.h9
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dmxdev.h1
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c10
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.h2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c8
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c26
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.h6
-rw-r--r--drivers/media/dvb-frontends/ds3000.c15
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/mt312.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c6
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c6
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c19
-rw-r--r--drivers/media/dvb-frontends/tda10071.c6
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c1
-rw-r--r--drivers/media/firewire/firedtv.h1
-rw-r--r--drivers/media/i2c/adp1653.c4
-rw-r--r--drivers/media/i2c/adv7180.c8
-rw-r--r--drivers/media/i2c/adv7183.c15
-rw-r--r--drivers/media/i2c/adv7604.c16
-rw-r--r--drivers/media/i2c/as3645a.c10
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c14
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c10
-rw-r--r--drivers/media/i2c/s5k4ecgx.c2
-rw-r--r--drivers/media/i2c/smiapp-pll.c219
-rw-r--r--drivers/media/i2c/smiapp-pll.h61
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c74
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-limits.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg-defs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h2
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c88
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c55
-rw-r--r--drivers/media/i2c/vs6624.c19
-rw-r--r--drivers/media/mmc/siano/Kconfig3
-rw-r--r--drivers/media/mmc/siano/smssdio.c4
-rw-r--r--drivers/media/pci/bt8xx/bt878.c11
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c34
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c16
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c7
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-main.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c1
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c14
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c45
-rw-r--r--drivers/media/pci/cx23885/cimax2.c17
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c16
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c1
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c1
-rw-r--r--drivers/media/pci/cx23885/netup-init.c1
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c44
-rw-r--r--drivers/media/pci/cx25821/cx25821-biffuncs.h6
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c8
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c54
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c47
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c8
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c31
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c7
-rw-r--r--drivers/media/pci/cx88/cx88-core.c12
-rw-r--r--drivers/media/pci/cx88/cx88-input.c8
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c26
-rw-r--r--drivers/media/pci/cx88/cx88-video.c8
-rw-r--r--drivers/media/pci/cx88/cx88.h4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c9
-rw-r--r--drivers/media/pci/dm1105/dm1105.c24
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-main.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c9
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c4
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/mantis/mantis_input.c5
-rw-r--r--drivers/media/pci/mantis/mantis_pci.c2
-rw-r--r--drivers/media/pci/mantis/mantis_uart.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1033.c6
-rw-r--r--drivers/media/pci/meye/meye.c9
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c8
-rw-r--r--drivers/media/pci/ngene/ngene-core.c12
-rw-r--r--drivers/media/pci/ngene/ngene.h5
-rw-r--r--drivers/media/pci/pluto2/pluto2.c25
-rw-r--r--drivers/media/pci/pt1/pt1.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c13
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c26
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c16
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c12
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c15
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c4
-rw-r--r--drivers/media/pci/ttpci/budget-av.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.c20
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c20
-rw-r--r--drivers/media/platform/coda.c10
-rw-r--r--drivers/media/platform/davinci/Kconfig2
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c12
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c20
-rw-r--r--drivers/media/platform/davinci/isif.c9
-rw-r--r--drivers/media/platform/davinci/vpbe.c12
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c318
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c9
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif.c14
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c34
-rw-r--r--drivers/media/platform/davinci/vpif_display.c28
-rw-r--r--drivers/media/platform/davinci/vpss.c6
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c6
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/fsl-viu.c12
-rw-r--r--drivers/media/platform/m2m-deinterlace.c24
-rw-r--r--drivers/media/platform/mem2mem_testdev.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c16
-rw-r--r--drivers/media/platform/omap/omap_vout.c59
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c6
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c38
-rw-r--r--drivers/media/platform/omap/omap_voutlib.h3
-rw-r--r--drivers/media/platform/omap24xxcam.c2
-rw-r--r--drivers/media/platform/omap3isp/isp.c92
-rw-r--r--drivers/media/platform/omap3isp/isp.h9
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c227
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h10
-rw-r--r--drivers/media/platform/omap3isp/isphist.c10
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c41
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h99
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c6
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/s3c-camif/Makefile5
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c1672
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c662
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h393
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c606
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.h269
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-capture.c11
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-core.c4
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-lite.c6
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-m2m.c16
-rw-r--r--drivers/media/platform/s5p-fimc/fimc-mdevice.c10
-rw-r--r--drivers/media/platform/s5p-fimc/mipi-csis.c6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c92
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c14
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c16
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c6
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c8
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c18
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c17
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c6
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c8
-rw-r--r--drivers/media/platform/sh_vou.c6
-rw-r--r--drivers/media/platform/soc_camera/Kconfig1
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c6
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c142
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c8
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c6
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c6
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c20
-rw-r--r--drivers/media/platform/timblogiw.c12
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/vivi.c8
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-cadet.c3
-rw-r--r--drivers/media/radio/radio-isa.c10
-rw-r--r--drivers/media/radio/radio-keene.c1
-rw-r--r--drivers/media/radio/radio-maxiradio.c7
-rw-r--r--drivers/media/radio/radio-sf16fmi.c4
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c19
-rw-r--r--drivers/media/radio/radio-si4713.c1
-rw-r--r--drivers/media/radio/radio-tea5764.c12
-rw-r--r--drivers/media/radio/radio-timb.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c5
-rw-r--r--drivers/media/radio/saa7706h.c8
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c8
-rw-r--r--drivers/media/radio/si4713-i2c.c2
-rw-r--r--drivers/media/radio/tef6862.c8
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c10
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c35
-rw-r--r--drivers/media/rc/fintek-cir.c10
-rw-r--r--drivers/media/rc/gpio-ir-recv.c8
-rw-r--r--drivers/media/rc/iguanair.c10
-rw-r--r--drivers/media/rc/imon.c48
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c4
-rw-r--r--drivers/media/rc/ir-lirc-codec.c4
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c4
-rw-r--r--drivers/media/rc/ir-nec-decoder.c4
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c14
-rw-r--r--drivers/media/rc/ir-rc5-sz-decoder.c6
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c8
-rw-r--r--drivers/media/rc/ir-rx51.c16
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c4
-rw-r--r--drivers/media/rc/ir-sony-decoder.c17
-rw-r--r--drivers/media/rc/ite-cir.c10
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c2
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c2
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c17
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-loopback.c2
-rw-r--r--drivers/media/rc/rc-main.c73
-rw-r--r--drivers/media/rc/redrat3.c10
-rw-r--r--drivers/media/rc/streamzap.c6
-rw-r--r--drivers/media/rc/ttusbir.c10
-rw-r--r--drivers/media/rc/winbond-cir.c119
-rw-r--r--drivers/media/tuners/fc2580.c61
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/tua9001.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c2
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c5
-rw-r--r--drivers/media/usb/au0828/au0828-video.c16
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c8
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c14
-rw-r--r--drivers/media/usb/dvb-usb-v2/it913x.c12
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c8
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c11
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c16
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c146
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c15
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c84
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c16
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/gspca.c3
-rw-r--r--drivers/media/usb/gspca/gspca.h2
-rw-r--r--drivers/media/usb/gspca/jeilinj.c6
-rw-r--r--drivers/media/usb/gspca/kinect.c1
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/usb/gspca/pac7302.c62
-rw-r--r--drivers/media/usb/gspca/sonixb.c14
-rw-r--r--drivers/media/usb/gspca/sonixj.c1
-rw-r--r--drivers/media/usb/gspca/spca506.c3
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c8
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/siano/Kconfig3
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c23
-rw-r--r--drivers/media/usb/stk1160/stk1160.h5
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c5
-rw-r--r--drivers/media/usb/tlg2300/pd-dvb.c1
-rw-r--r--drivers/media/usb/tlg2300/pd-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c20
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c1
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c10
-rw-r--r--drivers/media/usb/usbvision/usbvision.h2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c14
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c10
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c73
-rw-r--r--drivers/media/usb/uvc/uvc_video.c1
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h8
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c3
-rw-r--r--drivers/media/v4l2-core/Kconfig3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c304
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c700
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c40
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c56
-rw-r--r--drivers/memory/tegra20-mc.c4
-rw-r--r--drivers/memory/tegra30-mc.c4
-rw-r--r--drivers/memstick/host/Kconfig10
-rw-r--r--drivers/memstick/host/Makefile1
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c641
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/message/fusion/mptsas.c4
-rw-r--r--drivers/message/fusion/mptscsih.c1
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/message/i2o/README.ioctl12
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/message/i2o/i2o_config.c2
-rw-r--r--drivers/message/i2o/pci.c11
-rw-r--r--drivers/mfd/88pm800.c12
-rw-r--r--drivers/mfd/88pm805.c10
-rw-r--r--drivers/mfd/88pm80x.c2
-rw-r--r--drivers/mfd/88pm860x-core.c104
-rw-r--r--drivers/mfd/Kconfig82
-rw-r--r--drivers/mfd/Makefile14
-rw-r--r--drivers/mfd/ab3100-core.c13
-rw-r--r--drivers/mfd/ab8500-core.c181
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c6
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/adp5520.c6
-rw-r--r--drivers/mfd/arizona-core.c30
-rw-r--r--drivers/mfd/arizona-i2c.c6
-rw-r--r--drivers/mfd/arizona-irq.c19
-rw-r--r--drivers/mfd/arizona-spi.c6
-rw-r--r--drivers/mfd/as3711.c217
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/cs5535-mfd.c12
-rw-r--r--drivers/mfd/da903x.c10
-rw-r--r--drivers/mfd/da9052-core.c277
-rw-r--r--drivers/mfd/da9052-i2c.c67
-rw-r--r--drivers/mfd/da9052-irq.c288
-rw-r--r--drivers/mfd/da9052-spi.c6
-rw-r--r--drivers/mfd/da9055-core.c4
-rw-r--r--drivers/mfd/da9055-i2c.c6
-rw-r--r--drivers/mfd/davinci_voicecodec.c4
-rw-r--r--drivers/mfd/db8500-prcmu.c33
-rw-r--r--drivers/mfd/ezx-pcap.c8
-rw-r--r--drivers/mfd/htc-i2cpld.c12
-rw-r--r--drivers/mfd/intel_msic.c10
-rw-r--r--drivers/mfd/janz-cmodio.c10
-rw-r--r--drivers/mfd/jz4740-adc.c26
-rw-r--r--drivers/mfd/lm3533-core.c18
-rw-r--r--drivers/mfd/lp8788.c4
-rw-r--r--drivers/mfd/lpc_ich.c36
-rw-r--r--drivers/mfd/lpc_sch.c6
-rw-r--r--drivers/mfd/max77686.c20
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/max8907.c4
-rw-r--r--drivers/mfd/max8925-core.c58
-rw-r--r--drivers/mfd/max8925-i2c.c6
-rw-r--r--drivers/mfd/max8997.c73
-rw-r--r--drivers/mfd/mc13xxx-core.c94
-rw-r--r--drivers/mfd/mc13xxx-i2c.c26
-rw-r--r--drivers/mfd/mc13xxx-spi.c33
-rw-r--r--drivers/mfd/mc13xxx.h18
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/mfd/mfd-core.c15
-rw-r--r--drivers/mfd/omap-usb-host.c12
-rw-r--r--drivers/mfd/omap-usb-tll.c8
-rw-r--r--drivers/mfd/omap-usb.h2
-rw-r--r--drivers/mfd/palmas.c6
-rw-r--r--drivers/mfd/pcf50633-adc.c6
-rw-r--r--drivers/mfd/pcf50633-core.c11
-rw-r--r--drivers/mfd/pm8921-core.c8
-rw-r--r--drivers/mfd/pm8xxx-irq.c4
-rw-r--r--drivers/mfd/rc5t583-irq.c2
-rw-r--r--drivers/mfd/rc5t583.c6
-rw-r--r--drivers/mfd/rdc321x-southbridge.c6
-rw-r--r--drivers/mfd/retu-mfd.c263
-rw-r--r--drivers/mfd/rtl8411.c280
-rw-r--r--drivers/mfd/rts5209.c244
-rw-r--r--drivers/mfd/rts5229.c226
-rw-r--r--drivers/mfd/rtsx_pcr.c1271
-rw-r--r--drivers/mfd/rtsx_pcr.h (renamed from drivers/staging/rts_pstor/general.h)15
-rw-r--r--drivers/mfd/sec-irq.c102
-rw-r--r--drivers/mfd/sm501.c16
-rw-r--r--drivers/mfd/sta2x11-mfd.c552
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c6
-rw-r--r--drivers/mfd/stmpe.c215
-rw-r--r--drivers/mfd/syscon.c6
-rw-r--r--drivers/mfd/tc3589x.c25
-rw-r--r--drivers/mfd/tc6387xb.c6
-rw-r--r--drivers/mfd/tc6393xb.c12
-rw-r--r--drivers/mfd/ti-ssp.c6
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c274
-rw-r--r--drivers/mfd/timberdale.c64
-rw-r--r--drivers/mfd/tps6105x.c8
-rw-r--r--drivers/mfd/tps6507x.c21
-rw-r--r--drivers/mfd/tps65090.c318
-rw-r--r--drivers/mfd/tps65217.c18
-rw-r--r--drivers/mfd/tps6586x.c189
-rw-r--r--drivers/mfd/tps65910-irq.c260
-rw-r--r--drivers/mfd/tps65910.c244
-rw-r--r--drivers/mfd/tps65911-comparator.c6
-rw-r--r--drivers/mfd/tps65912-spi.c6
-rw-r--r--drivers/mfd/tps80031.c573
-rw-r--r--drivers/mfd/twl-core.c229
-rw-r--r--drivers/mfd/twl4030-audio.c6
-rw-r--r--drivers/mfd/twl4030-irq.c10
-rw-r--r--drivers/mfd/twl4030-madc.c18
-rw-r--r--drivers/mfd/twl4030-power.c146
-rw-r--r--drivers/mfd/twl6030-irq.c4
-rw-r--r--drivers/mfd/twl6040-irq.c205
-rw-r--r--drivers/mfd/twl6040.c (renamed from drivers/mfd/twl6040-core.c)146
-rw-r--r--drivers/mfd/vexpress-config.c281
-rw-r--r--drivers/mfd/vexpress-sysreg.c483
-rw-r--r--drivers/mfd/viperboard.c137
-rw-r--r--drivers/mfd/vx855.c6
-rw-r--r--drivers/mfd/wl1273-core.c4
-rw-r--r--drivers/mfd/wm5102-tables.c38
-rw-r--r--drivers/mfd/wm831x-spi.c6
-rw-r--r--drivers/mfd/wm8994-core.c68
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c6
-rw-r--r--drivers/misc/ad525x_dpot-spi.c6
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/apds9802als.c6
-rw-r--r--drivers/misc/apds990x.c6
-rw-r--r--drivers/misc/atmel-ssc.c145
-rw-r--r--drivers/misc/bh1770glc.c6
-rw-r--r--drivers/misc/bh1780gli.c6
-rw-r--r--drivers/misc/bmp085-i2c.c4
-rw-r--r--drivers/misc/bmp085-spi.c4
-rw-r--r--drivers/misc/bmp085.c2
-rw-r--r--drivers/misc/cb710/core.c10
-rw-r--r--drivers/misc/cs5535-mfgpt.c6
-rw-r--r--drivers/misc/eeprom/at24.c4
-rw-r--r--drivers/misc/eeprom/at25.c4
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c6
-rw-r--r--drivers/misc/fsa9480.c6
-rw-r--r--drivers/misc/hpilo.c21
-rw-r--r--drivers/misc/ibmasm/module.c6
-rw-r--r--drivers/misc/ioc4.c10
-rw-r--r--drivers/misc/isl29003.c6
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c6
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_spi.c6
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/amthif.c722
-rw-r--r--drivers/misc/mei/hw.h36
-rw-r--r--drivers/misc/mei/init.c368
-rw-r--r--drivers/misc/mei/interface.c91
-rw-r--r--drivers/misc/mei/interrupt.c760
-rw-r--r--drivers/misc/mei/iorw.c455
-rw-r--r--drivers/misc/mei/main.c462
-rw-r--r--drivers/misc/mei/mei_dev.h155
-rw-r--r--drivers/misc/mei/wd.c15
-rw-r--r--drivers/misc/pch_phub.c6
-rw-r--r--drivers/misc/phantom.c10
-rw-r--r--drivers/misc/pti.c15
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c34
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c4
-rw-r--r--drivers/misc/ti-st/st_core.c1
-rw-r--r--drivers/misc/ti-st/st_kim.c67
-rw-r--r--drivers/misc/ti_dac7512.c6
-rw-r--r--drivers/misc/tsl2550.c6
-rw-r--r--drivers/mmc/card/sdio_uart.c24
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/atmel-mci.c4
-rw-r--r--drivers/mmc/host/au1xmmc.c4
-rw-r--r--drivers/mmc/host/bfin_sdh.c6
-rw-r--r--drivers/mmc/host/cb710-mmc.c6
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c6
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c6
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.h2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c12
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mmci.c76
-rw-r--r--drivers/mmc/host/mmci.h4
-rw-r--r--drivers/mmc/host/mvsdio.c92
-rw-r--r--drivers/mmc/host/mxcmmc.c31
-rw-r--r--drivers/mmc/host/omap.c43
-rw-r--r--drivers/mmc/host/omap_hsmmc.c10
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c1328
-rw-r--r--drivers/mmc/host/s3cmci.c6
-rw-r--r--drivers/mmc/host/sdhci-acpi.c312
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c6
-rw-r--r--drivers/mmc/host/sdhci-dove.c8
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c8
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c6
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c6
-rw-r--r--drivers/mmc/host/sdhci-pci.c14
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c6
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c6
-rw-r--r--drivers/mmc/host/sdhci-s3c.c10
-rw-r--r--drivers/mmc/host/sdhci-spear.c12
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c4
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c6
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c6
-rw-r--r--drivers/mmc/host/wbsd.c28
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/ar7part.c7
-rw-r--r--drivers/mtd/bcm63xxpart.c32
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c16
-rw-r--r--drivers/mtd/cmdlinepart.c91
-rw-r--r--drivers/mtd/devices/Kconfig1
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c4
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/docg3.c2
-rw-r--r--drivers/mtd/devices/docprobe.c2
-rw-r--r--drivers/mtd/devices/m25p80.c48
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c20
-rw-r--r--drivers/mtd/devices/spear_smi.c27
-rw-r--r--drivers/mtd/devices/sst25l.c10
-rw-r--r--drivers/mtd/maps/Kconfig14
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/autcpu12-nvram.c6
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c9
-rw-r--r--drivers/mtd/maps/cdb89712.c278
-rw-r--r--drivers/mtd/maps/ck804xrom.c6
-rw-r--r--drivers/mtd/maps/esb2rom.c8
-rw-r--r--drivers/mtd/maps/fortunet.c277
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ichxrom.c8
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c19
-rw-r--r--drivers/mtd/maps/lantiq-flash.c8
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c4
-rw-r--r--drivers/mtd/maps/pci.c8
-rw-r--r--drivers/mtd/maps/physmap_of.c21
-rw-r--r--drivers/mtd/maps/pismo.c31
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c6
-rw-r--r--drivers/mtd/maps/sa1100-flash.c6
-rw-r--r--drivers/mtd/maps/scb2_flash.c12
-rw-r--r--drivers/mtd/maps/sun_uflash.c6
-rw-r--r--drivers/mtd/maps/vmu-flash.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c51
-rw-r--r--drivers/mtd/mtdoops.c15
-rw-r--r--drivers/mtd/nand/Kconfig49
-rw-r--r--drivers/mtd/nand/Makefile6
-rw-r--r--drivers/mtd/nand/ams-delta.c6
-rw-r--r--drivers/mtd/nand/atmel_nand.c34
-rw-r--r--drivers/mtd/nand/au1550nd.c8
-rw-r--r--drivers/mtd/nand/autcpu12.c237
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/Makefile4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h22
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c108
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c413
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c8
-rw-r--r--drivers/mtd/nand/cafe_nand.c12
-rw-r--r--drivers/mtd/nand/cs553x_nand.c3
-rw-r--r--drivers/mtd/nand/davinci_nand.c13
-rw-r--r--drivers/mtd/nand/denali.c162
-rw-r--r--drivers/mtd/nand/denali.h5
-rw-r--r--drivers/mtd/nand/denali_dt.c167
-rw-r--r--drivers/mtd/nand/denali_pci.c144
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c73
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c17
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c6
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/fsmc_nand.c110
-rw-r--r--drivers/mtd/nand/gpio.c34
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c10
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c44
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c15
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c6
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c8
-rw-r--r--drivers/mtd/nand/mxc_nand.c108
-rw-r--r--drivers/mtd/nand/nand_base.c117
-rw-r--r--drivers/mtd/nand/nandsim.c191
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/nomadik_nand.c235
-rw-r--r--drivers/mtd/nand/nuc900_nand.c6
-rw-r--r--drivers/mtd/nand/omap2.c130
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/pasemi_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c9
-rw-r--r--drivers/mtd/nand/sh_flctl.c310
-rw-r--r--drivers/mtd/nand/sharpsl.c6
-rw-r--r--drivers/mtd/nand/socrates_nand.c6
-rw-r--r--drivers/mtd/nand/spia.c176
-rw-r--r--drivers/mtd/ofpart.c5
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c53
-rw-r--r--drivers/mtd/onenand/samsung.c4
-rw-r--r--drivers/mtd/tests/mtd_nandbiterrs.c73
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c6
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c171
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c152
-rw-r--r--drivers/mtd/tests/mtd_readtest.c44
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c88
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c44
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c124
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c73
-rw-r--r--drivers/mtd/ubi/attach.c23
-rw-r--r--drivers/mtd/ubi/build.c12
-rw-r--r--drivers/mtd/ubi/debug.c34
-rw-r--r--drivers/mtd/ubi/debug.h57
-rw-r--r--drivers/mtd/ubi/fastmap.c6
-rw-r--r--drivers/mtd/ubi/gluebi.c28
-rw-r--r--drivers/mtd/ubi/io.c14
-rw-r--r--drivers/mtd/ubi/ubi.h40
-rw-r--r--drivers/mtd/ubi/upd.c6
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/mtd/ubi/wl.c7
-rw-r--r--drivers/net/arcnet/com20020-pci.c6
-rw-r--r--drivers/net/bonding/bond_alb.c197
-rw-r--r--drivers/net/bonding/bond_alb.h28
-rw-r--r--drivers/net/bonding/bond_debugfs.c5
-rw-r--r--drivers/net/bonding/bond_main.c21
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/bonding/bonding.h15
-rw-r--r--drivers/net/can/Kconfig9
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c12
-rw-r--r--drivers/net/can/bfin_can.c7
-rw-r--r--drivers/net/can/c_can/c_can.c22
-rw-r--r--drivers/net/can/c_can/c_can.h3
-rw-r--r--drivers/net/can/c_can/c_can_pci.c8
-rw-r--r--drivers/net/can/c_can/c_can_platform.c36
-rw-r--r--drivers/net/can/cc770/cc770_isa.c18
-rw-r--r--drivers/net/can/cc770/cc770_platform.c18
-rw-r--r--drivers/net/can/dev.c3
-rw-r--r--drivers/net/can/flexcan.c12
-rw-r--r--drivers/net/can/grcan.c1756
-rw-r--r--drivers/net/can/janz-ican3.c28
-rw-r--r--drivers/net/can/mcp251x.c6
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c37
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/mscan/mscan.h1
-rw-r--r--drivers/net/can/pch_can.c8
-rw-r--r--drivers/net/can/sja1000/Kconfig3
-rw-r--r--drivers/net/can/sja1000/ems_pci.c4
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c5
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c8
-rw-r--r--drivers/net/can/sja1000/peak_pci.c7
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c39
-rw-r--r--drivers/net/can/sja1000/sja1000.c8
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c16
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c16
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c1
-rw-r--r--drivers/net/can/sja1000/tscan1.c8
-rw-r--r--drivers/net/can/softing/softing_cs.c11
-rw-r--r--drivers/net/can/softing/softing_main.c14
-rw-r--r--drivers/net/can/ti_hecc.c9
-rw-r--r--drivers/net/can/usb/Kconfig29
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c7
-rw-r--r--drivers/net/can/usb/esd_usb2.c45
-rw-r--r--drivers/net/can/usb/kvaser_usb.c1627
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c5
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h1
-rw-r--r--drivers/net/dsa/Kconfig5
-rw-r--r--drivers/net/ethernet/3com/3c509.c29
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c10
-rw-r--r--drivers/net/ethernet/8390/ax88796.c18
-rw-r--r--drivers/net/ethernet/8390/etherh.c14
-rw-r--r--drivers/net/ethernet/8390/hydra.c18
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c12
-rw-r--r--drivers/net/ethernet/8390/ne3210.c4
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c17
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c12
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c271
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h13
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/alteon/acenic.c31
-rw-r--r--drivers/net/ethernet/amd/a2065.c16
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c8
-rw-r--r--drivers/net/ethernet/amd/ariadne.c10
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c6
-rw-r--r--drivers/net/ethernet/amd/declance.c8
-rw-r--r--drivers/net/ethernet/amd/depca.c10
-rw-r--r--drivers/net/ethernet/amd/hplance.c17
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c12
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c11
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c7
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c17
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1022
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h134
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h141
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c563
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h91
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c31
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c991
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1244
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c189
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h30
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h7
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c788
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h68
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c138
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c943
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c1283
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h112
-rw-r--r--drivers/net/ethernet/cadence/macb.c564
-rw-r--r--drivers/net/ethernet/cadence/macb.h67
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c63
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h136
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c511
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h67
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h459
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c7
-rw-r--r--drivers/net/ethernet/cisco/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c7
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c18
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c16
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c18
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c6
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c24
-rw-r--r--drivers/net/ethernet/dlink/sundance.c95
-rw-r--r--drivers/net/ethernet/dnet.c11
-rw-r--r--drivers/net/ethernet/emulex/Kconfig2
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h62
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c414
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h177
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c80
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c915
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c5
-rw-r--r--drivers/net/ethernet/ethoc.c8
-rw-r--r--drivers/net/ethernet/fealnx.c12
-rw-r--r--drivers/net/ethernet/freescale/Kconfig8
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/fec.c171
-rw-r--r--drivers/net/ethernet/freescale/fec.h119
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c383
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c18
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c20
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c8
-rw-r--r--drivers/net/ethernet/ibm/Kconfig5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c16
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h20
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c24
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c13
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c7
-rw-r--r--drivers/net/ethernet/icplus/ipg.c7
-rw-r--r--drivers/net/ethernet/intel/Kconfig30
-rw-r--r--drivers/net/ethernet/intel/e100.c7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c17
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c13
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c66
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c115
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h36
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h19
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c71
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h7
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c254
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c135
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c377
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c141
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c77
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c369
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c128
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c99
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h112
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c406
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1572
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c102
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c41
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c160
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c120
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c109
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c418
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c168
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c470
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c403
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c4
-rw-r--r--drivers/net/ethernet/marvell/Kconfig24
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c228
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2847
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/marvell/sky2.c35
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c28
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c13
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c32
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c6
-rw-r--r--drivers/net/ethernet/myricom/Kconfig1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c285
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c4
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c8
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c21
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c13
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c8
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/neterion/Kconfig2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.h5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c27
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c41
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig17
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c14
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c12
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c93
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h525
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c119
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c73
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h56
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c754
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c852
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c1309
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1997
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c628
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c960
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c14
-rw-r--r--drivers/net/ethernet/rdc/r6040.c9
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c93
-rw-r--r--drivers/net/ethernet/realtek/8139too.c14
-rw-r--r--drivers/net/ethernet/realtek/atp.c58
-rw-r--r--drivers/net/ethernet/realtek/atp.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c191
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/s6gmac.c6
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig10
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.h13
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c25
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/io.h43
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c23
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h5
-rw-r--r--drivers/net/ethernet/sfc/nic.c81
-rw-r--r--drivers/net/ethernet/sfc/nic.h28
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c17
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c8
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c11
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c7
-rw-r--r--drivers/net/ethernet/sis/sis190.c27
-rw-r--r--drivers/net/ethernet/sis/sis900.c29
-rw-r--r--drivers/net/ethernet/smsc/epic100.c13
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c20
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h16
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c26
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h20
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c36
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c283
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c134
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h46
-rw-r--r--drivers/net/ethernet/sun/cassini.c13
-rw-r--r--drivers/net/ethernet/sun/niu.c121
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c10
-rw-r--r--drivers/net/ethernet/sun/sungem.c7
-rw-r--r--drivers/net/ethernet/sun/sunhme.c20
-rw-r--r--drivers/net/ethernet/sun/sunqe.c12
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c15
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig9
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c635
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c31
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h1
-rw-r--r--drivers/net/ethernet/ti/cpts.c424
-rw-r--r--drivers/net/ethernet/ti/cpts.h145
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c11
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c22
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c10
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c18
-rw-r--r--drivers/net/ethernet/via/via-rhine.c21
-rw-r--r--drivers/net/ethernet/via/via-velocity.c28
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c10
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c10
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c4
-rw-r--r--drivers/net/fddi/defxx.c46
-rw-r--r--drivers/net/fddi/skfp/skfddi.c4
-rw-r--r--drivers/net/hippi/rrunner.c11
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/hyperv/rndis_filter.c10
-rw-r--r--drivers/net/ieee802154/at86rf230.c6
-rw-r--r--drivers/net/ieee802154/fakehard.c6
-rw-r--r--drivers/net/ieee802154/fakelb.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c6
-rw-r--r--drivers/net/irda/au1k_ir.c8
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/ep7211-sir.c73
-rw-r--r--drivers/net/irda/sh_irda.c10
-rw-r--r--drivers/net/irda/sh_sir.c6
-rw-r--r--drivers/net/irda/smsc-ircc2.c6
-rw-r--r--drivers/net/irda/via-ircc.c15
-rw-r--r--drivers/net/irda/vlsi_ir.c6
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/netconsole.c6
-rw-r--r--drivers/net/phy/davicom.c6
-rw-r--r--drivers/net/phy/dp83640.c78
-rw-r--r--drivers/net/phy/icplus.c29
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c14
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c6
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c6
-rw-r--r--drivers/net/phy/mdio-octeon.c6
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/micrel.c44
-rw-r--r--drivers/net/phy/smsc.c95
-rw-r--r--drivers/net/phy/spi_ks8995.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/sungem_phy.c8
-rw-r--r--drivers/net/tun.c928
-rw-r--r--drivers/net/usb/Kconfig22
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_common.c117
-rw-r--r--drivers/net/usb/asix_devices.c19
-rw-r--r--drivers/net/usb/cdc_ether.c45
-rw-r--r--drivers/net/usb/cdc_mbim.c431
-rw-r--r--drivers/net/usb/cdc_ncm.c689
-rw-r--r--drivers/net/usb/dm9601.c153
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/int51x1.c52
-rw-r--r--drivers/net/usb/mcs7830.c85
-rw-r--r--drivers/net/usb/net1080.c110
-rw-r--r--drivers/net/usb/plusb.c11
-rw-r--r--drivers/net/usb/qmi_wwan.c31
-rw-r--r--drivers/net/usb/sierra_net.c47
-rw-r--r--drivers/net/usb/smsc75xx.c1406
-rw-r--r--drivers/net/usb/smsc95xx.c1088
-rw-r--r--drivers/net/usb/smsc95xx.h25
-rw-r--r--drivers/net/usb/usbnet.c260
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c848
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c49
-rw-r--r--drivers/net/vxlan.c293
-rw-r--r--drivers/net/wan/Makefile4
-rw-r--r--drivers/net/wan/dscc4.c7
-rw-r--r--drivers/net/wan/farsync.c10
-rw-r--r--drivers/net/wan/hd64570.c5
-rw-r--r--drivers/net/wan/hd64572.c5
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c7
-rw-r--r--drivers/net/wan/pc300too.c4
-rw-r--r--drivers/net/wan/pci200syn.c4
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wan/wanxlfw.S1
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c1
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h3
-rw-r--r--drivers/net/wimax/i2400m/usb.c6
-rw-r--r--drivers/net/wireless/Kconfig6
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c6
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c85
-rw-r--r--drivers/net/wireless/ath/Kconfig9
-rw-r--r--drivers/net/wireless/ath/Makefile2
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ar5523/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1798
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.h152
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523_hw.h431
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c31
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c410
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h69
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c13
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c92
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c55
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c160
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c47
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c32
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c284
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h172
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c83
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h76
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h338
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c440
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c225
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c211
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c21
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig1
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c11
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c21
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c7
-rw-r--r--drivers/net/wireless/ath/hw.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig29
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c573
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h30
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c603
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c471
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c407
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c157
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c223
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c871
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h362
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h363
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c975
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1116
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/atmel_pci.c6
-rw-r--r--drivers/net/wireless/b43/b43.h5
-rw-r--r--drivers/net/wireless/b43/dma.c7
-rw-r--r--drivers/net/wireless/b43/main.c68
-rw-r--r--drivers/net/wireless/b43/main.h5
-rw-r--r--drivers/net/wireless/b43/pcmcia.c6
-rw-r--r--drivers/net/wireless/b43/pio.c4
-rw-r--r--drivers/net/wireless/b43/sdio.c6
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h5
-rw-r--r--drivers/net/wireless/b43legacy/main.c37
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c58
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c182
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h256
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h98
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c893
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h46
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c716
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c579
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c447
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h215
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c344
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c404
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c2949
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h328
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c723
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h175
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c156
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.h76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c345
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c199
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c1285
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h48
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c471
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c64
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h45
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h3
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c9
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c52
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_geo.c3
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h4
-rw-r--r--drivers/net/wireless/iwlegacy/common.c45
-rw-r--r--drivers/net/wireless/iwlegacy/common.h17
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c49
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c28
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c44
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h57
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c16
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c28
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c16
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h117
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c414
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c1072
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c1233
-rw-r--r--drivers/net/wireless/libertas/cfg.c33
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c39
-rw-r--r--drivers/net/wireless/libertas/if_spi.c6
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c585
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c8
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c8
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c106
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c24
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c10
-rw-r--r--drivers/net/wireless/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/mwifiex/init.c39
-rw-r--r--drivers/net/wireless/mwifiex/join.c6
-rw-r--r--drivers/net/wireless/mwifiex/main.c94
-rw-r--r--drivers/net/wireless/mwifiex/main.h19
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/mwifiex/scan.c64
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c39
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c84
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c26
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c38
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c11
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c7
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c17
-rw-r--r--drivers/net/wireless/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/mwifiex/util.c19
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h2
-rw-r--r--drivers/net/wireless/mwl8k.c75
-rw-r--r--drivers/net/wireless/orinoco/cfg.c11
-rw-r--r--drivers/net/wireless/orinoco/main.h2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c9
-rw-r--r--drivers/net/wireless/p54/eeprom.c5
-rw-r--r--drivers/net/wireless/p54/p54pci.c19
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.c10
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c245
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c44
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c8
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c31
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c9
-rw-r--r--drivers/net/wireless/rtlwifi/core.c5
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c30
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c227
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c90
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c93
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c59
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c95
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c65
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c51
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c97
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/btc.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h163
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c920
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h149
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c745
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h101
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c542
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h160
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c1786
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h151
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c2380
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.h73
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/led.c151
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/led.h39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c2044
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h224
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c109
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h322
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c129
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h98
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/reg.h2097
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.c505
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c380
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/table.c738
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/table.h50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c680
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.h725
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c268
-rw-r--r--drivers/net/wireless/rtlwifi/stats.h46
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c14
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h161
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c6
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c23
-rw-r--r--drivers/net/xen-netback/netback.c115
-rw-r--r--drivers/net/xen-netfront.c37
-rw-r--r--drivers/nfc/Makefile2
-rw-r--r--drivers/nfc/pn533.c18
-rw-r--r--drivers/nfc/pn544/Makefile7
-rw-r--r--drivers/nfc/pn544/i2c.c500
-rw-r--r--drivers/nfc/pn544/pn544.c (renamed from drivers/nfc/pn544_hci.c)679
-rw-r--r--drivers/nfc/pn544/pn544.h32
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/base.c274
-rw-r--r--drivers/of/fdt.c26
-rw-r--r--drivers/of/of_i2c.c2
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/of/pdt.c12
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/lba_pci.c2
-rw-r--r--drivers/parport/Kconfig16
-rw-r--r--drivers/parport/parport_gsc.c23
-rw-r--r--drivers/parport/parport_pc.c55
-rw-r--r--drivers/parport/parport_serial.c21
-rw-r--r--drivers/parport/parport_sunbpp.c6
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/hotplug.c37
-rw-r--r--drivers/pci/hotplug/Kconfig11
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c4
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c11
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c252
-rw-r--r--drivers/pci/hotplug/shpchp.h3
-rw-r--r--drivers/pci/hotplug/shpchp_core.c36
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c6
-rw-r--r--drivers/pci/ioapic.c8
-rw-r--r--drivers/pci/iov.c87
-rw-r--r--drivers/pci/irq.c10
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/pci-acpi.c79
-rw-r--r--drivers/pci/pci-driver.c115
-rw-r--r--drivers/pci/pci-stub.c2
-rw-r--r--drivers/pci/pci-sysfs.c162
-rw-r--r--drivers/pci/pci.c52
-rw-r--r--drivers/pci/pci.h13
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h5
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c23
-rw-r--r--drivers/pci/pcie/aspm.c21
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c22
-rw-r--r--drivers/pci/probe.c62
-rw-r--r--drivers/pci/quirks.c178
-rw-r--r--drivers/pci/remove.c36
-rw-r--r--drivers/pci/rom.c11
-rw-r--r--drivers/pci/setup-bus.c22
-rw-r--r--drivers/pci/xen-pcifront.c15
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c12
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c6
-rw-r--r--drivers/pcmcia/db1xxx_ss.c6
-rw-r--r--drivers/pcmcia/ds.c13
-rw-r--r--drivers/pcmcia/electra_cf.c4
-rw-r--r--drivers/pcmcia/i82092.c6
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pd6729.c8
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_cerf.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c4
-rw-r--r--drivers/pcmcia/sa1100_h3600.c2
-rw-r--r--drivers/pcmcia/sa1100_shannon.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c4
-rw-r--r--drivers/pcmcia/sa1111_jornada720.c2
-rw-r--r--drivers/pcmcia/vrc4171_card.c8
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c8
-rw-r--r--drivers/pcmcia/xxs1500_ss.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c6
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c39
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c578
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c11
-rw-r--r--drivers/pinctrl/pinctrl-at91.c39
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c10
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c477
-rw-r--r--drivers/pinctrl/pinctrl-exynos.h170
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c919
-rw-r--r--drivers/pinctrl/pinctrl-imx.c21
-rw-r--r--drivers/pinctrl/pinctrl-imx23.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx28.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx35.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx53.c8
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c6
-rw-r--r--drivers/pinctrl/pinctrl-mmp2.c6
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c25
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8540.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-stn8815.c3
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c20
-rw-r--r--drivers/pinctrl/pinctrl-pxa168.c6
-rw-r--r--drivers/pinctrl/pinctrl-pxa910.c6
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c235
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h32
-rw-r--r--drivers/pinctrl/pinctrl-single.c92
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c72
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c4
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c6
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c6
-rw-r--r--drivers/pinctrl/pinctrl-u300.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c5
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c13
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h13
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c8
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c8
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c8
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c8
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c8
-rw-r--r--drivers/platform/x86/acer-wmi.c68
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/amilo-rfkill.c4
-rw-r--r--drivers/platform/x86/apple-gmux.c7
-rw-r--r--drivers/platform/x86/asus-laptop.c25
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c10
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c6
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c17
-rw-r--r--drivers/platform/x86/hp-wmi.c8
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c13
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c6
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c4
-rw-r--r--drivers/platform/x86/intel_oaktrail.c6
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c2
-rw-r--r--drivers/platform/x86/samsung-laptop.c14
-rw-r--r--drivers/platform/x86/samsung-q10.c6
-rw-r--r--drivers/platform/x86/sony-laptop.c15
-rw-r--r--drivers/platform/x86/tc1100-wmi.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c13
-rw-r--r--drivers/platform/x86/xo1-rfkill.c6
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/interface.c105
-rw-r--r--drivers/pnp/manager.c25
-rw-r--r--drivers/pnp/pnpacpi/core.c9
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c296
-rw-r--r--drivers/pnp/pnpbios/core.c20
-rw-r--r--drivers/pnp/resource.c16
-rw-r--r--drivers/power/88pm860x_battery.c6
-rw-r--r--drivers/power/88pm860x_charger.c6
-rw-r--r--drivers/power/Kconfig25
-rw-r--r--drivers/power/Makefile5
-rw-r--r--drivers/power/ab8500_bmdata.c519
-rw-r--r--drivers/power/ab8500_btemp.c83
-rw-r--r--drivers/power/ab8500_charger.c90
-rw-r--r--drivers/power/ab8500_fg.c88
-rw-r--r--drivers/power/abx500_chargalg.c62
-rw-r--r--drivers/power/avs/smartreflex.c64
-rw-r--r--drivers/power/bq2415x_charger.c1670
-rw-r--r--drivers/power/bq27x00_battery.c14
-rw-r--r--drivers/power/charger-manager.c42
-rw-r--r--drivers/power/collie_battery.c6
-rw-r--r--drivers/power/da9052-battery.c50
-rw-r--r--drivers/power/ds2780_battery.c6
-rw-r--r--drivers/power/ds2781_battery.c6
-rw-r--r--drivers/power/ds2782_battery.c4
-rw-r--r--drivers/power/generic-adc-battery.c11
-rw-r--r--drivers/power/gpio-charger.c6
-rw-r--r--drivers/power/intel_mid_battery.c8
-rw-r--r--drivers/power/isp1704_charger.c6
-rw-r--r--drivers/power/jz4740-battery.c51
-rw-r--r--drivers/power/lp8727_charger.c4
-rw-r--r--drivers/power/lp8788-charger.c81
-rw-r--r--drivers/power/max17040_battery.c6
-rw-r--r--drivers/power/max17042_battery.c9
-rw-r--r--drivers/power/max8903_charger.c6
-rw-r--r--drivers/power/max8925_power.c61
-rw-r--r--drivers/power/max8997_charger.c6
-rw-r--r--drivers/power/max8998_charger.c6
-rw-r--r--drivers/power/olpc_battery.c8
-rw-r--r--drivers/power/pcf50633-charger.c6
-rw-r--r--drivers/power/power_supply_core.c98
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gpio-poweroff.c126
-rw-r--r--drivers/power/rx51_battery.c251
-rw-r--r--drivers/power/s3c_adc_battery.c2
-rw-r--r--drivers/power/sbs-battery.c6
-rw-r--r--drivers/power/smb347-charger.c2
-rw-r--r--drivers/power/tosa_battery.c6
-rw-r--r--drivers/power/twl4030_charger.c12
-rw-r--r--drivers/power/wm831x_backup.c6
-rw-r--r--drivers/power/wm831x_power.c6
-rw-r--r--drivers/power/wm8350_power.c6
-rw-r--r--drivers/power/wm97xx_battery.c6
-rw-r--r--drivers/power/z2_battery.c6
-rw-r--r--drivers/pps/Kconfig1
-rw-r--r--drivers/pps/clients/pps-gpio.c2
-rw-r--r--drivers/ps3/ps3-lpm.c2
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3av.c2
-rw-r--r--drivers/ptp/Kconfig19
-rw-r--r--drivers/ptp/ptp_chardev.c61
-rw-r--r--drivers/ptp/ptp_pch.c4
-rw-r--r--drivers/pwm/Kconfig39
-rw-r--r--drivers/pwm/Makefile5
-rw-r--r--drivers/pwm/core.c29
-rw-r--r--drivers/pwm/pwm-ab8500.c6
-rw-r--r--drivers/pwm/pwm-bfin.c4
-rw-r--r--drivers/pwm/pwm-imx.c8
-rw-r--r--drivers/pwm/pwm-jz4740.c6
-rw-r--r--drivers/pwm/pwm-lpc32xx.c27
-rw-r--r--drivers/pwm/pwm-mxs.c4
-rw-r--r--drivers/pwm/pwm-puv3.c6
-rw-r--r--drivers/pwm/pwm-pxa.c6
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-spear.c276
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c54
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c68
-rw-r--r--drivers/pwm/pwm-tipwmss.c139
-rw-r--r--drivers/pwm/pwm-tipwmss.h39
-rw-r--r--drivers/pwm/pwm-twl-led.c344
-rw-r--r--drivers/pwm/pwm-twl.c359
-rw-r--r--drivers/pwm/pwm-twl6030.c184
-rw-r--r--drivers/pwm/pwm-vt8500.c98
-rw-r--r--drivers/rapidio/devices/tsi721.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c2
-rw-r--r--drivers/rapidio/rio-scan.c14
-rw-r--r--drivers/rapidio/rio.c6
-rw-r--r--drivers/regulator/88pm8607.c6
-rw-r--r--drivers/regulator/Kconfig54
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/aat2870-regulator.c4
-rw-r--r--drivers/regulator/ab3100.c6
-rw-r--r--drivers/regulator/ab8500.c12
-rw-r--r--drivers/regulator/ad5398.c6
-rw-r--r--drivers/regulator/anatop-regulator.c36
-rw-r--r--drivers/regulator/arizona-ldo1.c136
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/as3711-regulator.c369
-rw-r--r--drivers/regulator/core.c51
-rw-r--r--drivers/regulator/da903x.c6
-rw-r--r--drivers/regulator/da9052-regulator.c16
-rw-r--r--drivers/regulator/da9055-regulator.c641
-rw-r--r--drivers/regulator/db8500-prcmu.c6
-rw-r--r--drivers/regulator/dbx500-prcmu.c5
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fan53555.c6
-rw-r--r--drivers/regulator/fixed.c8
-rw-r--r--drivers/regulator/gpio-regulator.c112
-rw-r--r--drivers/regulator/isl6271a-regulator.c6
-rw-r--r--drivers/regulator/lp3971.c8
-rw-r--r--drivers/regulator/lp3972.c8
-rw-r--r--drivers/regulator/lp872x.c4
-rw-r--r--drivers/regulator/lp8788-buck.c24
-rw-r--r--drivers/regulator/lp8788-ldo.c25
-rw-r--r--drivers/regulator/max1586.c50
-rw-r--r--drivers/regulator/max77686.c185
-rw-r--r--drivers/regulator/max8649.c6
-rw-r--r--drivers/regulator/max8660.c6
-rw-r--r--drivers/regulator/max8907-regulator.c9
-rw-r--r--drivers/regulator/max8925-regulator.c78
-rw-r--r--drivers/regulator/max8952.c6
-rw-r--r--drivers/regulator/max8973-regulator.c505
-rw-r--r--drivers/regulator/max8997.c222
-rw-r--r--drivers/regulator/max8998.c50
-rw-r--r--drivers/regulator/mc13783-regulator.c6
-rw-r--r--drivers/regulator/mc13892-regulator.c6
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c4
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c168
-rw-r--r--drivers/regulator/pcap-regulator.c6
-rw-r--r--drivers/regulator/pcf50633-regulator.c182
-rw-r--r--drivers/regulator/rc5t583-regulator.c6
-rw-r--r--drivers/regulator/s2mps11.c20
-rw-r--r--drivers/regulator/s5m8767.c50
-rw-r--r--drivers/regulator/tps51632-regulator.c342
-rw-r--r--drivers/regulator/tps6105x-regulator.c6
-rw-r--r--drivers/regulator/tps62360-regulator.c8
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps65090-regulator.c254
-rw-r--r--drivers/regulator/tps65217-regulator.c10
-rw-r--r--drivers/regulator/tps6524x-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c189
-rw-r--r--drivers/regulator/tps65910-regulator.c15
-rw-r--r--drivers/regulator/tps65912-regulator.c6
-rw-r--r--drivers/regulator/tps80031-regulator.c788
-rw-r--r--drivers/regulator/twl-regulator.c8
-rw-r--r--drivers/regulator/vexpress.c147
-rw-r--r--drivers/regulator/virtual.c6
-rw-r--r--drivers/regulator/wm831x-dcdc.c31
-rw-r--r--drivers/regulator/wm831x-isink.c6
-rw-r--r--drivers/regulator/wm831x-ldo.c18
-rw-r--r--drivers/regulator/wm8400-regulator.c6
-rw-r--r--drivers/regulator/wm8994-regulator.c6
-rw-r--r--drivers/remoteproc/omap_remoteproc.c6
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c4
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c10
-rw-r--r--drivers/rtc/Kconfig39
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/rtc-88pm80x.c6
-rw-r--r--drivers/rtc/rtc-88pm860x.c10
-rw-r--r--drivers/rtc/rtc-ab8500.c6
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.h75
-rw-r--r--drivers/rtc/rtc-at91sam9.c8
-rw-r--r--drivers/rtc/rtc-au1xxx.c6
-rw-r--r--drivers/rtc/rtc-bfin.c6
-rw-r--r--drivers/rtc/rtc-bq32k.c4
-rw-r--r--drivers/rtc/rtc-bq4802.c6
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/rtc/rtc-da9052.c6
-rw-r--r--drivers/rtc/rtc-da9055.c413
-rw-r--r--drivers/rtc/rtc-davinci.c25
-rw-r--r--drivers/rtc/rtc-dev.c19
-rw-r--r--drivers/rtc/rtc-dm355evm.c6
-rw-r--r--drivers/rtc/rtc-ds1286.c6
-rw-r--r--drivers/rtc/rtc-ds1302.c4
-rw-r--r--drivers/rtc/rtc-ds1305.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c8
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-ds1390.c6
-rw-r--r--drivers/rtc/rtc-ds1511.c8
-rw-r--r--drivers/rtc/rtc-ds1553.c6
-rw-r--r--drivers/rtc/rtc-ds1742.c6
-rw-r--r--drivers/rtc/rtc-ds3232.c8
-rw-r--r--drivers/rtc/rtc-ds3234.c6
-rw-r--r--drivers/rtc/rtc-ep93xx.c6
-rw-r--r--drivers/rtc/rtc-fm3130.c8
-rw-r--r--drivers/rtc/rtc-imxdi.c16
-rw-r--r--drivers/rtc/rtc-isl1208.c5
-rw-r--r--drivers/rtc/rtc-jz4740.c6
-rw-r--r--drivers/rtc/rtc-lpc32xx.c6
-rw-r--r--drivers/rtc/rtc-ls1x.c6
-rw-r--r--drivers/rtc/rtc-m41t93.c6
-rw-r--r--drivers/rtc/rtc-m41t94.c6
-rw-r--r--drivers/rtc/rtc-m48t35.c6
-rw-r--r--drivers/rtc/rtc-m48t59.c6
-rw-r--r--drivers/rtc/rtc-m48t86.c6
-rw-r--r--drivers/rtc/rtc-max6902.c6
-rw-r--r--drivers/rtc/rtc-max8907.c6
-rw-r--r--drivers/rtc/rtc-max8925.c6
-rw-r--r--drivers/rtc/rtc-max8998.c6
-rw-r--r--drivers/rtc/rtc-mpc5121.c8
-rw-r--r--drivers/rtc/rtc-mrst.c12
-rw-r--r--drivers/rtc/rtc-mv.c2
-rw-r--r--drivers/rtc/rtc-mxc.c40
-rw-r--r--drivers/rtc/rtc-nuc900.c6
-rw-r--r--drivers/rtc/rtc-omap.c80
-rw-r--r--drivers/rtc/rtc-pcap.c6
-rw-r--r--drivers/rtc/rtc-pcf2123.c6
-rw-r--r--drivers/rtc/rtc-pcf50633.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c326
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pcf8583.c4
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c6
-rw-r--r--drivers/rtc/rtc-puv3.c6
-rw-r--r--drivers/rtc/rtc-r9701.c6
-rw-r--r--drivers/rtc/rtc-rc5t583.c6
-rw-r--r--drivers/rtc/rtc-rs5c313.c4
-rw-r--r--drivers/rtc/rtc-rs5c348.c6
-rw-r--r--drivers/rtc/rtc-rv3029c2.c8
-rw-r--r--drivers/rtc/rtc-rx8025.c8
-rw-r--r--drivers/rtc/rtc-rx8581.c8
-rw-r--r--drivers/rtc/rtc-s3c.c60
-rw-r--r--drivers/rtc/rtc-snvs.c8
-rw-r--r--drivers/rtc/rtc-spear.c97
-rw-r--r--drivers/rtc/rtc-stk17ta8.c6
-rw-r--r--drivers/rtc/rtc-tegra.c13
-rw-r--r--drivers/rtc/rtc-test.c18
-rw-r--r--drivers/rtc/rtc-tile.c6
-rw-r--r--drivers/rtc/rtc-tps6586x.c356
-rw-r--r--drivers/rtc/rtc-tps65910.c15
-rw-r--r--drivers/rtc/rtc-twl.c38
-rw-r--r--drivers/rtc/rtc-vr41xx.c6
-rw-r--r--drivers/rtc/rtc-vt8500.c34
-rw-r--r--drivers/rtc/rtc-wm831x.c4
-rw-r--r--drivers/rtc/rtc-wm8350.c4
-rw-r--r--drivers/s390/block/dasd.c97
-rw-r--r--drivers/s390/block/dasd_devmap.c36
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c94
-rw-r--r--drivers/s390/block/dasd_fba.c25
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c11
-rw-r--r--drivers/s390/char/con3215.c9
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/sclp.h3
-rw-r--r--drivers/s390/char/sclp_cmd.c81
-rw-r--r--drivers/s390/char/sclp_tty.c4
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c26
-rw-r--r--drivers/s390/cio/chsc.c151
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c10
-rw-r--r--drivers/s390/cio/device.c23
-rw-r--r--drivers/s390/cio/device.h7
-rw-r--r--drivers/s390/cio/device_ops.c17
-rw-r--r--drivers/s390/cio/device_pgid.c10
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/cio/qdio_main.c52
-rw-r--r--drivers/s390/cio/qdio_setup.c9
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c68
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c2
-rw-r--r--drivers/s390/net/claw.c4
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/s390/net/ctcm_mpc.c3
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c49
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/sbus/char/bbc_i2c.c6
-rw-r--r--drivers/sbus/char/display7seg.c6
-rw-r--r--drivers/sbus/char/envctrl.c6
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/sbus/char/uctrl.c6
-rw-r--r--drivers/scsi/3w-9xxx.c4
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/NCR_D700.c12
-rw-r--r--drivers/scsi/NCR_Q720.c2
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/a2091.c9
-rw-r--r--drivers/scsi/aacraid/aachba.c87
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/linit.c15
-rw-r--r--drivers/scsi/advansys.c152
-rw-r--r--drivers/scsi/aha152x.c2
-rw-r--r--drivers/scsi/aha1740.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c23
-rw-r--r--drivers/scsi/arm/acornscsi.c7
-rw-r--r--drivers/scsi/arm/arxescsi.c7
-rw-r--r--drivers/scsi/arm/cumana_1.c8
-rw-r--r--drivers/scsi/arm/cumana_2.c8
-rw-r--r--drivers/scsi/arm/eesox.c7
-rw-r--r--drivers/scsi/arm/oak.c7
-rw-r--r--drivers/scsi/arm/powertec.c8
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/be2iscsi/be.h7
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c236
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h93
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c124
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1063
-rw-r--r--drivers/scsi/be2iscsi/be_main.h152
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c424
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h23
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c45
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/bvme6000_scsi.c6
-rw-r--r--drivers/scsi/csiostor/Kconfig19
-rw-r--r--drivers/scsi/csiostor/Makefile11
-rw-r--r--drivers/scsi/csiostor/csio_attr.c796
-rw-r--r--drivers/scsi/csiostor/csio_defs.h121
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4395
-rw-r--r--drivers/scsi/csiostor/csio_hw.h665
-rw-r--r--drivers/scsi/csiostor/csio_init.c1269
-rw-r--r--drivers/scsi/csiostor/csio_init.h158
-rw-r--r--drivers/scsi/csiostor/csio_isr.c624
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c2135
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h255
-rw-r--r--drivers/scsi/csiostor/csio_mb.c1750
-rw-r--r--drivers/scsi/csiostor/csio_mb.h278
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c913
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h141
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2555
-rw-r--r--drivers/scsi/csiostor/csio_scsi.h342
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1632
-rw-r--r--drivers/scsi/csiostor/csio_wr.h512
-rw-r--r--drivers/scsi/csiostor/t4fw_api_stor.h539
-rw-r--r--drivers/scsi/dc395x.c51
-rw-r--r--drivers/scsi/dmx3191d.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c7
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/gdth.c17
-rw-r--r--drivers/scsi/gvp11.c11
-rw-r--r--drivers/scsi/hpsa.c92
-rw-r--r--drivers/scsi/hptiop.c416
-rw-r--r--drivers/scsi/hptiop.h72
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c32
-rw-r--r--drivers/scsi/ips.c10
-rw-r--r--drivers/scsi/isci/init.c10
-rw-r--r--drivers/scsi/jazz_esp.c6
-rw-r--r--drivers/scsi/lasi700.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c85
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_esp.c6
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/Kconfig67
-rw-r--r--drivers/scsi/mpt3sas/Makefile8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h1164
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h3323
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h560
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h1665
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h346
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h295
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h437
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h56
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4840
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h1139
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c1650
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3297
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h418
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debug.h219
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8166
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c2128
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c434
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h193
-rw-r--r--drivers/scsi/mvme16x_scsi.c8
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c8
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c7
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h14
-rw-r--r--drivers/scsi/mvsas/mv_chips.h2
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.h6
-rw-r--r--drivers/scsi/mvumi.c5
-rw-r--r--drivers/scsi/nsp32.c16
-rw-r--r--drivers/scsi/osd/osd_uld.c28
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c36
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c28
-rw-r--r--drivers/scsi/pmcraid.c31
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qla1280.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c72
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c153
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c83
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c59
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c6
-rw-r--r--drivers/scsi/qlogicfas.c2
-rw-r--r--drivers/scsi/qlogicpti.c20
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_pm.c98
-rw-r--r--drivers/scsi/scsi_sysfs.c11
-rw-r--r--drivers/scsi/scsi_transport_sas.c1
-rw-r--r--drivers/scsi/scsi_transport_srp.c51
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sgiwd93.c4
-rw-r--r--drivers/scsi/sim710.c11
-rw-r--r--drivers/scsi/sni_53c710.c4
-rw-r--r--drivers/scsi/stex.c5
-rw-r--r--drivers/scsi/sun3x_esp.c6
-rw-r--r--drivers/scsi/sun_esp.c30
-rw-r--r--drivers/scsi/sym53c416.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c23
-rw-r--r--drivers/scsi/tmscsim.c23
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/scsi/virtio_scsi.c32
-rw-r--r--drivers/scsi/vmw_pvscsi.c9
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/scsi/zorro7xx.c12
-rw-r--r--drivers/sh/clk/cpg.c91
-rw-r--r--drivers/sh/pfc/gpio.c6
-rw-r--r--drivers/sh/pfc/pinctrl.c20
-rw-r--r--drivers/sn/ioc3.c14
-rw-r--r--drivers/spi/Kconfig31
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-altera.c6
-rw-r--r--drivers/spi/spi-ath79.c6
-rw-r--r--drivers/spi/spi-atmel.c23
-rw-r--r--drivers/spi/spi-bcm63xx.c22
-rw-r--r--drivers/spi/spi-bfin-sport.c8
-rw-r--r--drivers/spi/spi-bfin5xx.c4
-rw-r--r--drivers/spi/spi-bitbang.c27
-rw-r--r--drivers/spi/spi-clps711x.c296
-rw-r--r--drivers/spi/spi-coldfire-qspi.c6
-rw-r--r--drivers/spi/spi-davinci.c6
-rw-r--r--drivers/spi/spi-dw-mmio.c6
-rw-r--r--drivers/spi/spi-dw-pci.c6
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-ep93xx.c6
-rw-r--r--drivers/spi/spi-falcon.c6
-rw-r--r--drivers/spi/spi-fsl-espi.c8
-rw-r--r--drivers/spi/spi-fsl-lib.c4
-rw-r--r--drivers/spi/spi-fsl-spi.c14
-rw-r--r--drivers/spi/spi-gpio.c13
-rw-r--r--drivers/spi/spi-imx.c6
-rw-r--r--drivers/spi/spi-mpc512x-psc.c10
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c8
-rw-r--r--drivers/spi/spi-mpc52xx.c8
-rw-r--r--drivers/spi/spi-mxs.c6
-rw-r--r--drivers/spi/spi-nuc900.c6
-rw-r--r--drivers/spi/spi-oc-tiny.c10
-rw-r--r--drivers/spi/spi-octeon.c6
-rw-r--r--drivers/spi/spi-omap-100k.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c76
-rw-r--r--drivers/spi/spi-orion.c27
-rw-r--r--drivers/spi/spi-pl022.c61
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c6
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c10
-rw-r--r--drivers/spi/spi-s3c24xx.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c48
-rw-r--r--drivers/spi/spi-sh-hspi.c51
-rw-r--r--drivers/spi/spi-sh-msiof.c6
-rw-r--r--drivers/spi/spi-sh.c6
-rw-r--r--drivers/spi/spi-sirf.c6
-rw-r--r--drivers/spi/spi-stmp.c664
-rw-r--r--drivers/spi/spi-tegra20-sflash.c665
-rw-r--r--drivers/spi/spi-tegra20-slink.c1358
-rw-r--r--drivers/spi/spi-ti-ssp.c6
-rw-r--r--drivers/spi/spi-tle62x0.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c12
-rw-r--r--drivers/spi/spi-xcomm.c6
-rw-r--r--drivers/spi/spi-xilinx.c6
-rw-r--r--drivers/spi/spi.c211
-rw-r--r--drivers/spi/spidev.c16
-rw-r--r--drivers/ssb/Kconfig8
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/driver_chipcommon.c178
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c30
-rw-r--r--drivers/ssb/driver_extif.c67
-rw-r--r--drivers/ssb/driver_gige.c14
-rw-r--r--drivers/ssb/driver_gpio.c188
-rw-r--r--drivers/ssb/driver_mipscore.c30
-rw-r--r--drivers/ssb/driver_pcicore.c10
-rw-r--r--drivers/ssb/embedded.c35
-rw-r--r--drivers/ssb/main.c54
-rw-r--r--drivers/ssb/pcihost_wrapper.c6
-rw-r--r--drivers/ssb/ssb_private.h53
-rw-r--r--drivers/staging/Kconfig10
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/binder.c474
-rw-r--r--drivers/staging/android/binder_trace.h327
-rw-r--r--drivers/staging/android/logger.c21
-rw-r--r--drivers/staging/android/lowmemorykiller.c16
-rw-r--r--drivers/staging/bcm/Adapter.h8
-rw-r--r--drivers/staging/bcm/Bcmchar.c149
-rw-r--r--drivers/staging/bcm/Bcmnet.c6
-rw-r--r--drivers/staging/bcm/CmHost.c90
-rw-r--r--drivers/staging/bcm/CmHost.h189
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c2
-rw-r--r--drivers/staging/bcm/HostMIBSInterface.h384
-rw-r--r--drivers/staging/bcm/InterfaceAdapter.h142
-rw-r--r--drivers/staging/bcm/InterfaceDld.c4
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c2
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.h5
-rw-r--r--drivers/staging/bcm/InterfaceInit.c25
-rw-r--r--drivers/staging/bcm/InterfaceInit.h4
-rw-r--r--drivers/staging/bcm/InterfaceIsr.c6
-rw-r--r--drivers/staging/bcm/InterfaceIsr.h4
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c124
-rw-r--r--drivers/staging/bcm/InterfaceMisc.h6
-rw-r--r--drivers/staging/bcm/InterfaceRx.c16
-rw-r--r--drivers/staging/bcm/InterfaceRx.h2
-rw-r--r--drivers/staging/bcm/InterfaceTx.c14
-rw-r--r--drivers/staging/bcm/Ioctl.h482
-rw-r--r--drivers/staging/bcm/LeakyBucket.c6
-rw-r--r--drivers/staging/bcm/Misc.c236
-rw-r--r--drivers/staging/bcm/Prototypes.h36
-rw-r--r--drivers/staging/bcm/Transmit.c2
-rw-r--r--drivers/staging/bcm/cntrl_SignalingInterface.h256
-rw-r--r--drivers/staging/bcm/hostmibs.c12
-rw-r--r--drivers/staging/bcm/nvm.c94
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c6
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h6
-rw-r--r--drivers/staging/ccg/ccg.c8
-rw-r--r--drivers/staging/ccg/u_serial.c5
-rw-r--r--drivers/staging/ced1401/ced_ioc.c37
-rw-r--r--drivers/staging/ced1401/usb1401.c36
-rw-r--r--drivers/staging/ced1401/usb1401.h2
-rw-r--r--drivers/staging/ced1401/userspace/use1401.c8
-rw-r--r--drivers/staging/comedi/Kconfig46
-rw-r--r--drivers/staging/comedi/comedi.h65
-rw-r--r--drivers/staging/comedi/comedi_compat32.c1
-rw-r--r--drivers/staging/comedi/comedi_fops.c8
-rw-r--r--drivers/staging/comedi/comedidev.h70
-rw-r--r--drivers/staging/comedi/drivers.c144
-rw-r--r--drivers/staging/comedi/drivers/8255.c27
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c26
-rw-r--r--drivers/staging/comedi/drivers/Makefile2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c205
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.h73
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c245
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.h74
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c44
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.h46
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c886
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.h271
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c39
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.h47
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c278
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.h76
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c41
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.h43
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c50
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.h57
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c38
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.h44
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h469
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c2036
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h32
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c1367
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c69
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.h71
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c116
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h109
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c287
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.h64
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c197
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.h165
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c542
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.h65
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c469
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.h121
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c57
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.h79
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2016.c460
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2016.h72
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c579
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.h83
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c392
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.h61
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c1704
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.h249
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2777
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.h191
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c320
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h98
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c414
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.h48
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c72
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c396
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c72
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c348
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c69
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c72
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c153
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2016.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c381
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c64
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3001.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c273
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c119
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3300.c5
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c70
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c794
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c23
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c15
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c27
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c81
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c494
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c46
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c73
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c24
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c25
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c8
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c19
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c1468
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c72
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c23
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c93
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c157
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c69
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c106
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c3009
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c681
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c121
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c25
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c8
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h44
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c35
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c73
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c15
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c23
-rw-r--r--drivers/staging/comedi/drivers/das08.c39
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c9
-rw-r--r--drivers/staging/comedi/drivers/das16.c78
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c80
-rw-r--r--drivers/staging/comedi/drivers/das1800.c66
-rw-r--r--drivers/staging/comedi/drivers/das6402.c16
-rw-r--r--drivers/staging/comedi/drivers/das800.c79
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c80
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c14
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c13
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c54
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c9
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c115
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c620
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c27
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c27
-rw-r--r--drivers/staging/comedi/drivers/fl512.c10
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c515
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c22
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c51
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c658
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h12
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c60
-rw-r--r--drivers/staging/comedi/drivers/me4000.c42
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c595
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c10
-rw-r--r--drivers/staging/comedi/drivers/mpc8260cpm.c164
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c64
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c164
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c586
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c24
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c64
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c13
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c58
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c14
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c114
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c336
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c29
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c85
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c78
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h4
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c60
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c51
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c10
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c85
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c71
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c80
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c44
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.c34
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c10
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c34
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c124
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c25
-rw-r--r--drivers/staging/comedi/drivers/poc.c15
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c95
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c950
-rw-r--r--drivers/staging/comedi/drivers/rtd520.h267
-rw-r--r--drivers/staging/comedi/drivers/rti800.c13
-rw-r--r--drivers/staging/comedi/drivers/rti802.c9
-rw-r--r--drivers/staging/comedi/drivers/s526.c8
-rw-r--r--drivers/staging/comedi/drivers/s626.c129
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c44
-rw-r--r--drivers/staging/comedi/drivers/skel.c610
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c26
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c44
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c127
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c211
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c116
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c20
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c6
-rw-r--r--drivers/staging/comedi/proc.c1
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c8
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c14
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c2
-rw-r--r--drivers/staging/csr/Makefile3
-rw-r--r--drivers/staging/csr/bh.c19
-rw-r--r--drivers/staging/csr/csr_framework_ext.c97
-rw-r--r--drivers/staging/csr/csr_framework_ext.h213
-rw-r--r--drivers/staging/csr/csr_framework_ext_types.h41
-rw-r--r--drivers/staging/csr/csr_lib.h188
-rw-r--r--drivers/staging/csr/csr_log.h172
-rw-r--r--drivers/staging/csr/csr_log_configure.h105
-rw-r--r--drivers/staging/csr/csr_log_text.h8
-rw-r--r--drivers/staging/csr/csr_macro.h75
-rw-r--r--drivers/staging/csr/csr_msg_transport.h8
-rw-r--r--drivers/staging/csr/csr_msgconv.c1
-rw-r--r--drivers/staging/csr/csr_msgconv.h9
-rw-r--r--drivers/staging/csr/csr_panic.c20
-rw-r--r--drivers/staging/csr/csr_panic.h53
-rw-r--r--drivers/staging/csr/csr_prim_defs.h7
-rw-r--r--drivers/staging/csr/csr_result.h8
-rw-r--r--drivers/staging/csr/csr_sched.h215
-rw-r--r--drivers/staging/csr/csr_sdio.h8
-rw-r--r--drivers/staging/csr/csr_serialize_primitive_types.c1
-rw-r--r--drivers/staging/csr/csr_time.c9
-rw-r--r--drivers/staging/csr/csr_time.h82
-rw-r--r--drivers/staging/csr/csr_wifi_common.h8
-rw-r--r--drivers/staging/csr/csr_wifi_fsm.h8
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_event.h8
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_types.h10
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card.h9
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.c162
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper.h64
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper_private.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_conversions.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_download.c18
-rw-r--r--drivers/staging/csr/csr_wifi_hip_dump.c32
-rw-r--r--drivers/staging/csr/csr_wifi_hip_send.c2
-rw-r--r--drivers/staging/csr/csr_wifi_hip_signals.h17
-rw-r--r--drivers/staging/csr/csr_wifi_hip_sigs.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_ta_sampling.h9
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi.h10
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c45
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_udi.h9
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifihw.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifiversion.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.c2
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.h8
-rw-r--r--drivers/staging/csr/csr_wifi_hostio_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_lib.h9
-rw-r--r--drivers/staging/csr/csr_wifi_msgconv.h9
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_converter_init.h8
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_lib.h28
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_sef.h10
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_serialize.h9
-rw-r--r--drivers/staging/csr/csr_wifi_nme_converter_init.h8
-rw-r--r--drivers/staging/csr/csr_wifi_nme_lib.h63
-rw-r--r--drivers/staging/csr/csr_wifi_nme_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_nme_serialize.h8
-rw-r--r--drivers/staging/csr/csr_wifi_nme_task.h11
-rw-r--r--drivers/staging/csr/csr_wifi_private_common.h8
-rw-r--r--drivers/staging/csr/csr_wifi_result.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_converter_init.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_lib.h10
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.c67
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.h7
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_serialize.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_free_upstream_contents.c46
-rw-r--r--drivers/staging/csr/csr_wifi_router_lib.h10
-rw-r--r--drivers/staging/csr/csr_wifi_router_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_router_sef.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_serialize.h8
-rw-r--r--drivers/staging/csr/csr_wifi_router_task.h8
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_lib.h9
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_prim.h8
-rw-r--r--drivers/staging/csr/csr_wifi_sme_converter_init.h8
-rw-r--r--drivers/staging/csr/csr_wifi_sme_lib.h10
-rw-r--r--drivers/staging/csr/csr_wifi_sme_prim.h9
-rw-r--r--drivers/staging/csr/csr_wifi_sme_sef.h213
-rw-r--r--drivers/staging/csr/csr_wifi_sme_serialize.h336
-rw-r--r--drivers/staging/csr/csr_wifi_sme_task.h16
-rw-r--r--drivers/staging/csr/csr_wifi_vif_utils.h81
-rw-r--r--drivers/staging/csr/data_tx.c45
-rw-r--r--drivers/staging/csr/drv.c101
-rw-r--r--drivers/staging/csr/firmware.c15
-rw-r--r--drivers/staging/csr/inet.c10
-rw-r--r--drivers/staging/csr/io.c102
-rw-r--r--drivers/staging/csr/mlme.c3
-rw-r--r--drivers/staging/csr/monitor.c10
-rw-r--r--drivers/staging/csr/netdev.c90
-rw-r--r--drivers/staging/csr/os.c18
-rw-r--r--drivers/staging/csr/sdio_mmc.c28
-rw-r--r--drivers/staging/csr/sme_blocking.c307
-rw-r--r--drivers/staging/csr/sme_native.c21
-rw-r--r--drivers/staging/csr/sme_sys.c17
-rw-r--r--drivers/staging/csr/sme_userspace.h2
-rw-r--r--drivers/staging/csr/sme_wext.c49
-rw-r--r--drivers/staging/csr/ul_int.c1
-rw-r--r--drivers/staging/csr/unifi_event.c8
-rw-r--r--drivers/staging/csr/unifi_os.h23
-rw-r--r--drivers/staging/csr/unifi_pdu_processing.c39
-rw-r--r--drivers/staging/csr/unifi_priv.h7
-rw-r--r--drivers/staging/csr/unifi_sme.c15
-rw-r--r--drivers/staging/csr/unifi_wext.h1
-rw-r--r--drivers/staging/cxt1e1/musycc.c2162
-rw-r--r--drivers/staging/cxt1e1/musycc.h236
-rw-r--r--drivers/staging/cxt1e1/sbecrc.c105
-rw-r--r--drivers/staging/dgrp/dgrp_common.h1
-rw-r--r--drivers/staging/dgrp/dgrp_dpa_ops.c2
-rw-r--r--drivers/staging/dgrp/dgrp_driver.c4
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c78
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c4
-rw-r--r--drivers/staging/dgrp/dgrp_sysfs.c21
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c39
-rw-r--r--drivers/staging/et131x/README2
-rw-r--r--drivers/staging/et131x/et131x.c1323
-rw-r--r--drivers/staging/et131x/et131x.h4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c4
-rw-r--r--drivers/staging/fwserial/Kconfig11
-rw-r--r--drivers/staging/fwserial/Makefile2
-rw-r--r--drivers/staging/fwserial/TODO29
-rw-r--r--drivers/staging/fwserial/dma_fifo.c307
-rw-r--r--drivers/staging/fwserial/dma_fifo.h130
-rw-r--r--drivers/staging/fwserial/fwserial.c2943
-rw-r--r--drivers/staging/fwserial/fwserial.h387
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c33
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c29
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c68
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c70
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c23
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c100
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c49
-rw-r--r--drivers/staging/iio/accel/Kconfig21
-rw-r--r--drivers/staging/iio/accel/Makefile5
-rw-r--r--drivers/staging/iio/accel/adis16201.h89
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c476
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c136
-rw-r--r--drivers/staging/iio/accel/adis16201_trigger.c71
-rw-r--r--drivers/staging/iio/accel/adis16203.h80
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c432
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c136
-rw-r--r--drivers/staging/iio/accel/adis16203_trigger.c73
-rw-r--r--drivers/staging/iio/accel/adis16204.h79
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c462
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c134
-rw-r--r--drivers/staging/iio/accel/adis16204_trigger.c73
-rw-r--r--drivers/staging/iio/accel/adis16209.h77
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c496
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c134
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c81
-rw-r--r--drivers/staging/iio/accel/adis16220.h20
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c288
-rw-r--r--drivers/staging/iio/accel/adis16240.h85
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c481
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c132
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c82
-rw-r--r--drivers/staging/iio/accel/kxsd9.c10
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h1
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c16
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c6
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c6
-rw-r--r--drivers/staging/iio/adc/Kconfig73
-rw-r--r--drivers/staging/iio/adc/Makefile15
-rw-r--r--drivers/staging/iio/adc/ad7192.c6
-rw-r--r--drivers/staging/iio/adc/ad7280a.c8
-rw-r--r--drivers/staging/iio/adc/ad7291.c6
-rw-r--r--drivers/staging/iio/adc/ad7298.h75
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c113
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c6
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c2
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c6
-rw-r--r--drivers/staging/iio/adc/ad7780.c6
-rw-r--r--drivers/staging/iio/adc/ad7793.h115
-rw-r--r--drivers/staging/iio/adc/ad7816.c6
-rw-r--r--drivers/staging/iio/adc/ad7887.h99
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c257
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c122
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c6
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c2
-rw-r--r--drivers/staging/iio/adc/adt7310.c881
-rw-r--r--drivers/staging/iio/adc/adt7410.c460
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c8
-rw-r--r--drivers/staging/iio/adc/max1363.h177
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c139
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c13
-rw-r--r--drivers/staging/iio/adc/spear_adc.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c6
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c6
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c18
-rw-r--r--drivers/staging/iio/cdc/ad7152.c8
-rw-r--r--drivers/staging/iio/cdc/ad7746.c8
-rw-r--r--drivers/staging/iio/frequency/ad5930.c6
-rw-r--r--drivers/staging/iio/frequency/ad9832.c6
-rw-r--r--drivers/staging/iio/frequency/ad9834.c6
-rw-r--r--drivers/staging/iio/frequency/ad9850.c6
-rw-r--r--drivers/staging/iio/frequency/ad9852.c6
-rw-r--r--drivers/staging/iio/frequency/ad9910.c6
-rw-r--r--drivers/staging/iio/frequency/ad9951.c6
-rw-r--r--drivers/staging/iio/gyro/Kconfig4
-rw-r--r--drivers/staging/iio/gyro/Makefile1
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c12
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c6
-rw-r--r--drivers/staging/iio/gyro/adis16260.h84
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c496
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c136
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c75
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c6
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c2
-rw-r--r--drivers/staging/iio/iio_hwmon.c6
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c2
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c5
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c10
-rw-r--r--drivers/staging/iio/imu/adis16400.h12
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c161
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c5
-rw-r--r--drivers/staging/iio/light/isl29018.c53
-rw-r--r--drivers/staging/iio/light/isl29028.c6
-rw-r--r--drivers/staging/iio/light/tsl2563.c10
-rw-r--r--drivers/staging/iio/light/tsl2583.c6
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c6
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c6
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c8
-rw-r--r--drivers/staging/iio/meter/ade7753.c6
-rw-r--r--drivers/staging/iio/meter/ade7753.h2
-rw-r--r--drivers/staging/iio/meter/ade7754.c6
-rw-r--r--drivers/staging/iio/meter/ade7754.h2
-rw-r--r--drivers/staging/iio/meter/ade7758.h3
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c6
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c2
-rw-r--r--drivers/staging/iio/meter/ade7759.c6
-rw-r--r--drivers/staging/iio/meter/ade7759.h2
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c6
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c6
-rw-r--r--drivers/staging/iio/meter/ade7854.h2
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c6
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c8
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c6
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c6
-rw-r--r--drivers/staging/imx-drm/Kconfig2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c3
-rw-r--r--drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h2
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c72
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c14
-rw-r--r--drivers/staging/imx-drm/parallel-display.c16
-rw-r--r--drivers/staging/ipack/Kconfig21
-rw-r--r--drivers/staging/ipack/TODO22
-rw-r--r--drivers/staging/ipack/bridges/Kconfig8
-rw-r--r--drivers/staging/ipack/ipack.h217
-rw-r--r--drivers/staging/ipack/ipack_ids.h32
-rw-r--r--drivers/staging/line6/Kconfig37
-rw-r--r--drivers/staging/line6/Makefile2
-rw-r--r--drivers/staging/line6/audio.c8
-rw-r--r--drivers/staging/line6/capture.c13
-rw-r--r--drivers/staging/line6/control.c995
-rw-r--r--drivers/staging/line6/control.h195
-rw-r--r--drivers/staging/line6/driver.c85
-rw-r--r--drivers/staging/line6/driver.h10
-rw-r--r--drivers/staging/line6/dumprequest.c135
-rw-r--r--drivers/staging/line6/dumprequest.h76
-rw-r--r--drivers/staging/line6/midi.c126
-rw-r--r--drivers/staging/line6/midi.h10
-rw-r--r--drivers/staging/line6/midibuf.c6
-rw-r--r--drivers/staging/line6/pcm.c4
-rw-r--r--drivers/staging/line6/pcm.h2
-rw-r--r--drivers/staging/line6/playback.c17
-rw-r--r--drivers/staging/line6/pod.c879
-rw-r--r--drivers/staging/line6/pod.h105
-rw-r--r--drivers/staging/line6/toneport.c8
-rw-r--r--drivers/staging/line6/usbdefs.h10
-rw-r--r--drivers/staging/line6/variax.c484
-rw-r--r--drivers/staging/line6/variax.h60
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c16
-rw-r--r--drivers/staging/media/go7007/go7007-fw.c42
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c2
-rw-r--r--drivers/staging/media/go7007/s2250-board.c13
-rw-r--r--drivers/staging/media/go7007/wis-ov7640.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7113.c20
-rw-r--r--drivers/staging/media/go7007/wis-saa7115.c20
-rw-r--r--drivers/staging/media/go7007/wis-sony-tuner.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw2804.c13
-rw-r--r--drivers/staging/media/go7007/wis-tw9903.c13
-rw-r--r--drivers/staging/media/go7007/wis-uda1342.c13
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c6
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c12
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c6
-rw-r--r--drivers/staging/media/solo6x10/core.c4
-rw-r--r--drivers/staging/net/pc300_drv.c6
-rw-r--r--drivers/staging/nvec/Kconfig1
-rw-r--r--drivers/staging/nvec/nvec.c9
-rw-r--r--drivers/staging/nvec/nvec_kbd.c6
-rw-r--r--drivers/staging/nvec/nvec_paz00.c6
-rw-r--r--drivers/staging/nvec/nvec_power.c6
-rw-r--r--drivers/staging/nvec/nvec_ps2.c6
-rw-r--r--drivers/staging/octeon/ethernet.c12
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c85
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c30
-rw-r--r--drivers/staging/omap-thermal/omap-bandgap.c67
-rw-r--r--drivers/staging/omap-thermal/omap-bandgap.h9
-rw-r--r--drivers/staging/omap-thermal/omap-thermal-common.c71
-rw-r--r--drivers/staging/omapdrm/Kconfig2
-rw-r--r--drivers/staging/omapdrm/Makefile1
-rw-r--r--drivers/staging/omapdrm/TODO3
-rw-r--r--drivers/staging/omapdrm/omap_connector.c119
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c513
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h9
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c108
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.h8
-rw-r--r--drivers/staging/omapdrm/omap_drv.c485
-rw-r--r--drivers/staging/omapdrm/omap_drv.h154
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c135
-rw-r--r--drivers/staging/omapdrm/omap_fb.c25
-rw-r--r--drivers/staging/omapdrm/omap_gem.c46
-rw-r--r--drivers/staging/omapdrm/omap_gem_dmabuf.c11
-rw-r--r--drivers/staging/omapdrm/omap_gem_helpers.c6
-rw-r--r--drivers/staging/omapdrm/omap_irq.c322
-rw-r--r--drivers/staging/omapdrm/omap_plane.c494
-rw-r--r--drivers/staging/ozwpan/ozevent.c2
-rw-r--r--drivers/staging/ozwpan/ozhcd.c7
-rw-r--r--drivers/staging/ozwpan/ozpd.c6
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/panel/panel.c8
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c24
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c38
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c18
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c23
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c321
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c121
-rw-r--r--drivers/staging/rtl8187se/r8180.h28
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c499
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.h4
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h4
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c229
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c2
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c239
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c26
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h5
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c8
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rts5139/Makefile22
-rw-r--r--drivers/staging/rts5139/ms.c96
-rw-r--r--drivers/staging/rts5139/ms.h18
-rw-r--r--drivers/staging/rts5139/ms_mg.c104
-rw-r--r--drivers/staging/rts5139/ms_mg.h14
-rw-r--r--drivers/staging/rts5139/rts51x.c10
-rw-r--r--drivers/staging/rts5139/rts51x_card.c80
-rw-r--r--drivers/staging/rts5139/rts51x_card.h30
-rw-r--r--drivers/staging/rts5139/rts51x_chip.c24
-rw-r--r--drivers/staging/rts5139/rts51x_chip.h16
-rw-r--r--drivers/staging/rts5139/rts51x_fop.c6
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.c238
-rw-r--r--drivers/staging/rts5139/rts51x_scsi.h6
-rw-r--r--drivers/staging/rts5139/sd.c36
-rw-r--r--drivers/staging/rts5139/sd.h12
-rw-r--r--drivers/staging/rts5139/sd_cprm.c124
-rw-r--r--drivers/staging/rts5139/sd_cprm.h18
-rw-r--r--drivers/staging/rts5139/xd.c58
-rw-r--r--drivers/staging/rts5139/xd.h10
-rw-r--r--drivers/staging/rts_pstor/Kconfig16
-rw-r--r--drivers/staging/rts_pstor/Makefile16
-rw-r--r--drivers/staging/rts_pstor/TODO9
-rw-r--r--drivers/staging/rts_pstor/debug.h43
-rw-r--r--drivers/staging/rts_pstor/general.c35
-rw-r--r--drivers/staging/rts_pstor/ms.c4051
-rw-r--r--drivers/staging/rts_pstor/ms.h225
-rw-r--r--drivers/staging/rts_pstor/rtsx.c1105
-rw-r--r--drivers/staging/rts_pstor/rtsx.h186
-rw-r--r--drivers/staging/rts_pstor/rtsx_card.c1233
-rw-r--r--drivers/staging/rts_pstor/rtsx_card.h1093
-rw-r--r--drivers/staging/rts_pstor/rtsx_chip.c2264
-rw-r--r--drivers/staging/rts_pstor/rtsx_chip.h989
-rw-r--r--drivers/staging/rts_pstor/rtsx_scsi.c3137
-rw-r--r--drivers/staging/rts_pstor/rtsx_scsi.h142
-rw-r--r--drivers/staging/rts_pstor/rtsx_sys.h50
-rw-r--r--drivers/staging/rts_pstor/rtsx_transport.c769
-rw-r--r--drivers/staging/rts_pstor/rtsx_transport.h66
-rw-r--r--drivers/staging/rts_pstor/sd.c4570
-rw-r--r--drivers/staging/rts_pstor/sd.h300
-rw-r--r--drivers/staging/rts_pstor/spi.c812
-rw-r--r--drivers/staging/rts_pstor/spi.h65
-rw-r--r--drivers/staging/rts_pstor/trace.h93
-rw-r--r--drivers/staging/rts_pstor/xd.c2052
-rw-r--r--drivers/staging/rts_pstor/xd.h188
-rw-r--r--drivers/staging/sb105x/Kconfig10
-rw-r--r--drivers/staging/sb105x/Makefile3
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h295
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c3198
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h293
-rw-r--r--drivers/staging/sb105x/sb_ser_core.h368
-rw-r--r--drivers/staging/sbe-2t3e3/cpld.c2
-rw-r--r--drivers/staging/sbe-2t3e3/main.c7
-rw-r--r--drivers/staging/sbe-2t3e3/module.c19
-rw-r--r--drivers/staging/sbe-2t3e3/netdev.c5
-rw-r--r--drivers/staging/sep/sep_main.c6
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c287
-rw-r--r--drivers/staging/silicom/bp_mod.c219
-rw-r--r--drivers/staging/silicom/bp_proc.c85
-rw-r--r--drivers/staging/silicom/bypasslib/bplibk.h9
-rw-r--r--drivers/staging/silicom/bypasslib/bypass.c1
-rw-r--r--drivers/staging/slicoss/slicoss.c166
-rw-r--r--drivers/staging/sm7xxfb/sm7xxfb.c6
-rw-r--r--drivers/staging/speakup/synth.c4
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c106
-rw-r--r--drivers/staging/telephony/Kconfig47
-rw-r--r--drivers/staging/telephony/Makefile7
-rw-r--r--drivers/staging/telephony/TODO10
-rw-r--r--drivers/staging/telephony/ixj-ver.h4
-rw-r--r--drivers/staging/telephony/ixj.c10571
-rw-r--r--drivers/staging/telephony/ixj.h1322
-rw-r--r--drivers/staging/telephony/ixj_pcmcia.c187
-rw-r--r--drivers/staging/telephony/phonedev.c166
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c13
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c1
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c12
-rw-r--r--drivers/staging/tidspbridge/dynload/dload_internal.h8
-rw-r--r--drivers/staging/tidspbridge/dynload/reloc.c6
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h1
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c6
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c2
-rw-r--r--drivers/staging/usbip/stub_dev.c15
-rw-r--r--drivers/staging/usbip/stub_rx.c5
-rw-r--r--drivers/staging/usbip/stub_tx.c3
-rw-r--r--drivers/staging/usbip/usbip_common.c40
-rw-r--r--drivers/staging/usbip/usbip_common.h4
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_detach.c9
-rw-r--r--drivers/staging/usbip/vhci.h1
-rw-r--r--drivers/staging/usbip/vhci_hcd.c51
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c6
-rw-r--r--drivers/staging/usbip/vhci_tx.c2
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h2
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c24
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c83
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/hostap.c6
-rw-r--r--drivers/staging/vt6655/rxtx.c2
-rw-r--r--drivers/staging/vt6655/wcmd.c1
-rw-r--r--drivers/staging/vt6656/80211mgr.c2
-rw-r--r--drivers/staging/vt6656/Makefile1
-rw-r--r--drivers/staging/vt6656/bssdb.c43
-rw-r--r--drivers/staging/vt6656/bssdb.h1
-rw-r--r--drivers/staging/vt6656/desc.h28
-rw-r--r--drivers/staging/vt6656/device.h48
-rw-r--r--drivers/staging/vt6656/dpc.c64
-rw-r--r--drivers/staging/vt6656/firmware.c22
-rw-r--r--drivers/staging/vt6656/hostap.c6
-rw-r--r--drivers/staging/vt6656/int.h5
-rw-r--r--drivers/staging/vt6656/iocmd.h33
-rw-r--r--drivers/staging/vt6656/ioctl.c648
-rw-r--r--drivers/staging/vt6656/ioctl.h54
-rw-r--r--drivers/staging/vt6656/iowpa.h8
-rw-r--r--drivers/staging/vt6656/iwctl.c510
-rw-r--r--drivers/staging/vt6656/iwctl.h79
-rw-r--r--drivers/staging/vt6656/key.c55
-rw-r--r--drivers/staging/vt6656/key.h8
-rw-r--r--drivers/staging/vt6656/mac.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c552
-rw-r--r--drivers/staging/vt6656/mib.c1
-rw-r--r--drivers/staging/vt6656/mib.h1
-rw-r--r--drivers/staging/vt6656/rf.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c36
-rw-r--r--drivers/staging/vt6656/rxtx.h8
-rw-r--r--drivers/staging/vt6656/tkip.c40
-rw-r--r--drivers/staging/vt6656/ttype.h16
-rw-r--r--drivers/staging/vt6656/upc.h162
-rw-r--r--drivers/staging/vt6656/usbpipe.c4
-rw-r--r--drivers/staging/vt6656/wcmd.c44
-rw-r--r--drivers/staging/vt6656/wmgr.c66
-rw-r--r--drivers/staging/vt6656/wmgr.h42
-rw-r--r--drivers/staging/vt6656/wpa2.c2
-rw-r--r--drivers/staging/vt6656/wpa2.h4
-rw-r--r--drivers/staging/vt6656/wpactl.c664
-rw-r--r--drivers/staging/vt6656/wpactl.h12
-rw-r--r--drivers/staging/winbond/mds.c7
-rw-r--r--drivers/staging/winbond/wb35rx_f.h12
-rw-r--r--drivers/staging/winbond/wb35rx_s.h62
-rw-r--r--drivers/staging/winbond/wbhal.h4
-rw-r--r--drivers/staging/winbond/wbusb.c14
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c16
-rw-r--r--drivers/staging/wlags49_h2/man/wlags49.42
-rw-r--r--drivers/staging/wlags49_h2/wl_if.h133
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c25
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c7
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c6
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/xgifb/TODO2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c60
-rw-r--r--drivers/staging/xgifb/vb_def.h9
-rw-r--r--drivers/staging/xgifb/vb_init.c47
-rw-r--r--drivers/staging/xgifb/vb_init.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c898
-rw-r--r--drivers/staging/xgifb/vb_struct.h36
-rw-r--r--drivers/staging/xgifb/vb_table.h504
-rw-r--r--drivers/staging/zram/zram_drv.c152
-rw-r--r--drivers/staging/zram/zram_drv.h4
-rw-r--r--drivers/staging/zram/zram_sysfs.c8
-rw-r--r--drivers/target/iscsi/iscsi_target.c84
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c18
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c10
-rw-r--r--drivers/target/loopback/tcm_loop.h1
-rw-r--r--drivers/target/sbp/Kconfig2
-rw-r--r--drivers/target/sbp/sbp_target.c24
-rw-r--r--drivers/target/target_core_alua.c346
-rw-r--r--drivers/target/target_core_alua.h9
-rw-r--r--drivers/target/target_core_configfs.c705
-rw-r--r--drivers/target/target_core_device.c716
-rw-r--r--drivers/target/target_core_fabric_configfs.c42
-rw-r--r--drivers/target/target_core_fabric_lib.c3
-rw-r--r--drivers/target/target_core_file.c279
-rw-r--r--drivers/target/target_core_file.h2
-rw-r--r--drivers/target/target_core_hba.c9
-rw-r--r--drivers/target/target_core_iblock.c501
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h16
-rw-r--r--drivers/target/target_core_pr.c1225
-rw-r--r--drivers/target/target_core_pr.h10
-rw-r--r--drivers/target/target_core_pscsi.c349
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/target/target_core_rd.c126
-rw-r--r--drivers/target/target_core_rd.h1
-rw-r--r--drivers/target/target_core_sbc.c167
-rw-r--r--drivers/target/target_core_spc.c534
-rw-r--r--drivers/target/target_core_stat.c312
-rw-r--r--drivers/target/target_core_tmr.c9
-rw-r--r--drivers/target/target_core_tpg.c29
-rw-r--r--drivers/target/target_core_transport.c695
-rw-r--r--drivers/target/target_core_ua.c20
-rw-r--r--drivers/target/target_core_ua.h2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c14
-rw-r--r--drivers/thermal/Kconfig84
-rw-r--r--drivers/thermal/Makefile17
-rw-r--r--drivers/thermal/cpu_cooling.c107
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c108
-rw-r--r--drivers/thermal/db8500_thermal.c531
-rw-r--r--drivers/thermal/exynos_thermal.c8
-rw-r--r--drivers/thermal/fair_share.c133
-rw-r--r--drivers/thermal/rcar_thermal.c27
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/step_wise.c194
-rw-r--r--drivers/thermal/thermal_core.h53
-rw-r--r--drivers/thermal/thermal_sys.c701
-rw-r--r--drivers/thermal/user_space.c68
-rw-r--r--drivers/tty/Kconfig1
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/bfin_jtag_comm.c6
-rw-r--r--drivers/tty/cyclades.c28
-rw-r--r--drivers/tty/ehv_bytechan.c4
-rw-r--r--drivers/tty/hvc/hvc_opal.c10
-rw-r--r--drivers/tty/hvc/hvc_vio.c8
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c20
-rw-r--r--drivers/tty/hvc/hvsi.c1
-rw-r--r--drivers/tty/ipwireless/network.c5
-rw-r--r--drivers/tty/ipwireless/setup_protocol.h2
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/isicom.c35
-rw-r--r--drivers/tty/moxa.c12
-rw-r--r--drivers/tty/mxser.c35
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/n_tty.c752
-rw-r--r--drivers/tty/nozomi.c23
-rw-r--r--drivers/tty/pty.c83
-rw-r--r--drivers/tty/rocket.c4
-rw-r--r--drivers/tty/serial/68328serial.c2
-rw-r--r--drivers/tty/serial/8250/8250.c109
-rw-r--r--drivers/tty/serial/8250/8250.h37
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c33
-rw-r--r--drivers/tty/serial/8250/8250_early.c46
-rw-r--r--drivers/tty/serial/8250/8250_em.c8
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c10
-rw-r--r--drivers/tty/serial/8250/8250_pci.c390
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c14
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig48
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/altera_uart.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c25
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c96
-rw-r--r--drivers/tty/serial/arc_uart.c746
-rw-r--r--drivers/tty/serial/atmel_serial.c31
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c6
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c6
-rw-r--r--drivers/tty/serial/bfin_uart.c22
-rw-r--r--drivers/tty/serial/clps711x.c595
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c4
-rw-r--r--drivers/tty/serial/efm32-uart.c6
-rw-r--r--drivers/tty/serial/icom.c16
-rw-r--r--drivers/tty/serial/ifx6x60.c172
-rw-r--r--drivers/tty/serial/ifx6x60.h2
-rw-r--r--drivers/tty/serial/ioc3_serial.c2
-rw-r--r--drivers/tty/serial/jsm/jsm.h8
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c9
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c116
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c104
-rw-r--r--drivers/tty/serial/kgdb_nmi.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c6
-rw-r--r--drivers/tty/serial/max3100.c6
-rw-r--r--drivers/tty/serial/max310x.c11
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/mfd.c7
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c6
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c8
-rw-r--r--drivers/tty/serial/mux.c6
-rw-r--r--drivers/tty/serial/mxs-auart.c380
-rw-r--r--drivers/tty/serial/of_serial.c38
-rw-r--r--drivers/tty/serial/omap-serial.c260
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/pxa.c55
-rw-r--r--drivers/tty/serial/sa1100.c4
-rw-r--r--drivers/tty/serial/samsung.c50
-rw-r--r--drivers/tty/serial/sc26xx.c6
-rw-r--r--drivers/tty/serial/sccnxp.c6
-rw-r--r--drivers/tty/serial/serial_core.c257
-rw-r--r--drivers/tty/serial/serial_txx9.c16
-rw-r--r--drivers/tty/serial/sh-sci.c154
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c4
-rw-r--r--drivers/tty/serial/sunhv.c6
-rw-r--r--drivers/tty/serial/sunsab.c8
-rw-r--r--drivers/tty/serial/sunsu.c10
-rw-r--r--drivers/tty/serial/sunzilog.c14
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/uartlite.c14
-rw-r--r--drivers/tty/serial/vr41xx_siu.c8
-rw-r--r--drivers/tty/serial/vt8500_serial.c16
-rw-r--r--drivers/tty/serial/xilinx_uartps.c15
-rw-r--r--drivers/tty/synclink.c7
-rw-r--r--drivers/tty/synclink_gt.c11
-rw-r--r--drivers/tty/synclinkmp.c11
-rw-r--r--drivers/tty/sysrq.c3
-rw-r--r--drivers/tty/tty_audit.c15
-rw-r--r--drivers/tty/tty_buffer.c228
-rw-r--r--drivers/tty/tty_io.c24
-rw-r--r--drivers/tty/tty_ioctl.c21
-rw-r--r--drivers/tty/tty_ldisc.c47
-rw-r--r--drivers/tty/tty_mutex.c4
-rw-r--r--drivers/tty/tty_port.c18
-rw-r--r--drivers/tty/vt/consolemap.c6
-rw-r--r--drivers/tty/vt/selection.c9
-rw-r--r--drivers/tty/vt/vt.c5
-rw-r--r--drivers/tty/vt/vt_ioctl.c1
-rw-r--r--drivers/uio/Kconfig17
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_cif.c4
-rw-r--r--drivers/uio/uio_dmem_genirq.c359
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/uio/uio_pci_generic.c2
-rw-r--r--drivers/uio/uio_pdrv.c1
-rw-r--r--drivers/uio/uio_pdrv_genirq.c3
-rw-r--r--drivers/uio/uio_pruss.c30
-rw-r--r--drivers/uio/uio_sercos3.c4
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c6
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci13xxx_imx.c8
-rw-r--r--drivers/usb/chipidea/ci13xxx_msm.c6
-rw-r--r--drivers/usb/chipidea/ci13xxx_pci.c6
-rw-r--r--drivers/usb/chipidea/core.c9
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/host.c70
-rw-r--r--drivers/usb/chipidea/usbmisc_imx6q.c6
-rw-r--r--drivers/usb/class/cdc-acm.c41
-rw-r--r--drivers/usb/core/devices.c18
-rw-r--r--drivers/usb/core/driver.c27
-rw-r--r--drivers/usb/core/generic.c7
-rw-r--r--drivers/usb/core/hcd.c63
-rw-r--r--drivers/usb/core/hub.c287
-rw-r--r--drivers/usb/core/message.c66
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/urb.c29
-rw-r--r--drivers/usb/core/usb.c13
-rw-r--r--drivers/usb/dwc3/Makefile14
-rw-r--r--drivers/usb/dwc3/core.c96
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/debugfs.c6
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c57
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c24
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c24
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/early/ehci-dbgp.c4
-rw-r--r--drivers/usb/gadget/Kconfig29
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/amd5536udc.c4
-rw-r--r--drivers/usb/gadget/at91_udc.c8
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c6
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/config.c39
-rw-r--r--drivers/usb/gadget/dummy_hcd.c179
-rw-r--r--drivers/usb/gadget/f_acm.c79
-rw-r--r--drivers/usb/gadget/f_ecm.c112
-rw-r--r--drivers/usb/gadget/f_eem.c51
-rw-r--r--drivers/usb/gadget/f_fs.c10
-rw-r--r--drivers/usb/gadget/f_hid.c30
-rw-r--r--drivers/usb/gadget/f_loopback.c28
-rw-r--r--drivers/usb/gadget/f_mass_storage.c63
-rw-r--r--drivers/usb/gadget/f_midi.c14
-rw-r--r--drivers/usb/gadget/f_ncm.c94
-rw-r--r--drivers/usb/gadget/f_obex.c42
-rw-r--r--drivers/usb/gadget/f_phonet.c21
-rw-r--r--drivers/usb/gadget/f_rndis.c94
-rw-r--r--drivers/usb/gadget/f_serial.c38
-rw-r--r--drivers/usb/gadget/f_sourcesink.c104
-rw-r--r--drivers/usb/gadget/f_subset.c75
-rw-r--r--drivers/usb/gadget/f_uac1.c23
-rw-r--r--drivers/usb/gadget/f_uac2.c222
-rw-r--r--drivers/usb/gadget/f_uvc.c138
-rw-r--r--drivers/usb/gadget/file_storage.c3656
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c40
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c14
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c44
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h5
-rw-r--r--drivers/usb/gadget/hid.c4
-rw-r--r--drivers/usb/gadget/inode.c3
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c6
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/mv_udc_core.c8
-rw-r--r--drivers/usb/gadget/net2272.c29
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c14
-rw-r--r--drivers/usb/gadget/printer.c12
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c17
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/storage_common.c165
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c16
-rw-r--r--drivers/usb/gadget/u_serial.c7
-rw-r--r--drivers/usb/gadget/udc-core.c11
-rw-r--r--drivers/usb/host/Kconfig37
-rw-r--r--drivers/usb/host/Makefile5
-rw-r--r--drivers/usb/host/alchemy-common.c614
-rw-r--r--drivers/usb/host/bcma-hcd.c15
-rw-r--r--drivers/usb/host/ehci-atmel.c15
-rw-r--r--drivers/usb/host/ehci-au1xxx.c184
-rw-r--r--drivers/usb/host/ehci-cns3xxx.c155
-rw-r--r--drivers/usb/host/ehci-dbg.c112
-rw-r--r--drivers/usb/host/ehci-fsl.c10
-rw-r--r--drivers/usb/host/ehci-grlib.c20
-rw-r--r--drivers/usb/host/ehci-hcd.c215
-rw-r--r--drivers/usb/host/ehci-hub.c56
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c139
-rw-r--r--drivers/usb/host/ehci-lpm.c84
-rw-r--r--drivers/usb/host/ehci-ls1x.c147
-rw-r--r--drivers/usb/host/ehci-msm.c5
-rw-r--r--drivers/usb/host/ehci-mv.c4
-rw-r--r--drivers/usb/host/ehci-mxc.c129
-rw-r--r--drivers/usb/host/ehci-octeon.c3
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-orion.c58
-rw-r--r--drivers/usb/host/ehci-pci.c171
-rw-r--r--drivers/usb/host/ehci-platform.c102
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c1
-rw-r--r--drivers/usb/host/ehci-ppc-of.c4
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c62
-rw-r--r--drivers/usb/host/ehci-s5p.c16
-rw-r--r--drivers/usb/host/ehci-sched.c139
-rw-r--r--drivers/usb/host/ehci-sh.c9
-rw-r--r--drivers/usb/host/ehci-spear.c59
-rw-r--r--drivers/usb/host/ehci-tegra.c18
-rw-r--r--drivers/usb/host/ehci-timer.c29
-rw-r--r--drivers/usb/host/ehci-vt8500.c21
-rw-r--r--drivers/usb/host/ehci-w90x900.c10
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci-xls.c142
-rw-r--r--drivers/usb/host/ehci.h52
-rw-r--r--drivers/usb/host/fhci-hcd.c8
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c19
-rw-r--r--drivers/usb/host/imx21-hcd.c3
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/isp1760-if.c15
-rw-r--r--drivers/usb/host/ohci-at91.c24
-rw-r--r--drivers/usb/host/ohci-au1xxx.c234
-rw-r--r--drivers/usb/host/ohci-cns3xxx.c166
-rw-r--r--drivers/usb/host/ohci-ep93xx.c4
-rw-r--r--drivers/usb/host/ohci-exynos.c38
-rw-r--r--drivers/usb/host/ohci-hcd.c139
-rw-r--r--drivers/usb/host/ohci-hub.c42
-rw-r--r--drivers/usb/host/ohci-jz4740.c6
-rw-r--r--drivers/usb/host/ohci-nxp.c4
-rw-r--r--drivers/usb/host/ohci-octeon.c2
-rw-r--r--drivers/usb/host/ohci-omap.c7
-rw-r--r--drivers/usb/host/ohci-omap3.c7
-rw-r--r--drivers/usb/host/ohci-pci.c49
-rw-r--r--drivers/usb/host/ohci-platform.c34
-rw-r--r--drivers/usb/host/ohci-pnx8550.c243
-rw-r--r--drivers/usb/host/ohci-ppc-of.c4
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c216
-rw-r--r--drivers/usb/host/ohci-ps3.c4
-rw-r--r--drivers/usb/host/ohci-pxa27x.c8
-rw-r--r--drivers/usb/host/ohci-q.c23
-rw-r--r--drivers/usb/host/ohci-s3c2410.c41
-rw-r--r--drivers/usb/host/ohci-sa1111.c2
-rw-r--r--drivers/usb/host/ohci-sh.c141
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/ohci-spear.c50
-rw-r--r--drivers/usb/host/ohci-tmio.c11
-rw-r--r--drivers/usb/host/ohci-xls.c152
-rw-r--r--drivers/usb/host/pci-quirks.c21
-rw-r--r--drivers/usb/host/r8a66597-hcd.c12
-rw-r--r--drivers/usb/host/sl811-hcd.c6
-rw-r--r--drivers/usb/host/ssb-hcd.c19
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c15
-rw-r--r--drivers/usb/host/uhci-hub.c3
-rw-r--r--drivers/usb/host/uhci-platform.c2
-rw-r--r--drivers/usb/host/uhci-q.c73
-rw-r--r--drivers/usb/host/xhci-hub.c38
-rw-r--r--drivers/usb/host/xhci-mem.c11
-rw-r--r--drivers/usb/host/xhci-pci.c16
-rw-r--r--drivers/usb/host/xhci-ring.c56
-rw-r--r--drivers/usb/host/xhci.c42
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/ezusb.c38
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/musb/am35x.c43
-rw-r--r--drivers/usb/musb/blackfin.c38
-rw-r--r--drivers/usb/musb/cppi_dma.c8
-rw-r--r--drivers/usb/musb/da8xx.c40
-rw-r--r--drivers/usb/musb/davinci.c40
-rw-r--r--drivers/usb/musb/musb_core.c154
-rw-r--r--drivers/usb/musb/musb_core.h5
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_dma.h3
-rw-r--r--drivers/usb/musb/musb_dsps.c149
-rw-r--r--drivers/usb/musb/musb_gadget.c36
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c6
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_io.h21
-rw-r--r--drivers/usb/musb/musbhsdma.c3
-rw-r--r--drivers/usb/musb/musbhsdma.h4
-rw-r--r--drivers/usb/musb/omap2430.c52
-rw-r--r--drivers/usb/musb/omap2430.h2
-rw-r--r--drivers/usb/musb/tusb6010.c45
-rw-r--r--drivers/usb/musb/tusb6010_omap.c12
-rw-r--r--drivers/usb/musb/ux500.c62
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/ab8500-usb.c6
-rw-r--r--drivers/usb/otg/fsl_otg.c6
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/msm_otg.c4
-rw-r--r--drivers/usb/otg/mv_otg.c18
-rw-r--r--drivers/usb/otg/mxs-phy.c59
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c6
-rw-r--r--drivers/usb/otg/twl4030-usb.c48
-rw-r--r--drivers/usb/otg/twl6030-usb.c2
-rw-r--r--drivers/usb/phy/Kconfig13
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/mv_u3d_phy.c4
-rw-r--r--drivers/usb/phy/omap-usb2.c6
-rw-r--r--drivers/usb/phy/rcar-phy.c220
-rw-r--r--drivers/usb/phy/tegra_usb_phy.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c9
-rw-r--r--drivers/usb/renesas_usbhs/common.h1
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c14
-rw-r--r--drivers/usb/renesas_usbhs/mod.c3
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c33
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c48
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c101
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h1
-rw-r--r--drivers/usb/serial/aircable.c5
-rw-r--r--drivers/usb/serial/ark3116.c5
-rw-r--r--drivers/usb/serial/belkin_sa.c5
-rw-r--r--drivers/usb/serial/bus.c10
-rw-r--r--drivers/usb/serial/cp210x.c38
-rw-r--r--drivers/usb/serial/cyberjack.c5
-rw-r--r--drivers/usb/serial/cypress_m8.c5
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/empeg.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c86
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h21
-rw-r--r--drivers/usb/serial/generic.c1
-rw-r--r--drivers/usb/serial/hp4x.c5
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/io_ti.c7
-rw-r--r--drivers/usb/serial/ipaq.c5
-rw-r--r--drivers/usb/serial/ipw.c4
-rw-r--r--drivers/usb/serial/iuu_phoenix.c8
-rw-r--r--drivers/usb/serial/keyspan.c4
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c4
-rw-r--r--drivers/usb/serial/kobil_sct.c2
-rw-r--r--drivers/usb/serial/mct_u232.c4
-rw-r--r--drivers/usb/serial/metro-usb.c2
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/omninet.c4
-rw-r--r--drivers/usb/serial/opticon.c351
-rw-r--r--drivers/usb/serial/option.c149
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/siemens_mpi.c2
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/spcp8x5.c4
-rw-r--r--drivers/usb/serial/ssu100.c2
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c2
-rw-r--r--drivers/usb/serial/vivopay-serial.c3
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/usb/storage/initializers.c76
-rw-r--r--drivers/usb/storage/initializers.h4
-rw-r--r--drivers/usb/storage/realtek_cr.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h329
-rw-r--r--drivers/usb/storage/usb.c13
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/usb/usb-skeleton.c13
-rw-r--r--drivers/usb/wusbcore/devconnect.c13
-rw-r--r--drivers/uwb/Kconfig3
-rw-r--r--drivers/uwb/reset.c1
-rw-r--r--drivers/uwb/umc-bus.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c83
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c4
-rw-r--r--drivers/vfio/vfio.c34
-rw-r--r--drivers/vhost/net.c188
-rw-r--r--drivers/vhost/tcm_vhost.c19
-rw-r--r--drivers/vhost/vhost.c77
-rw-r--r--drivers/vhost/vhost.h16
-rw-r--r--drivers/video/Kconfig25
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/acornfb.c22
-rw-r--r--drivers/video/arcfb.c10
-rw-r--r--drivers/video/arkfb.c10
-rw-r--r--drivers/video/asiliantfb.c18
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/aty128fb.c57
-rw-r--r--drivers/video/aty/atyfb_base.c84
-rw-r--r--drivers/video/aty/mach64_ct.c6
-rw-r--r--drivers/video/aty/mach64_cursor.c2
-rw-r--r--drivers/video/aty/radeon_base.c20
-rw-r--r--drivers/video/aty/radeon_monitor.c24
-rw-r--r--drivers/video/au1100fb.c6
-rw-r--r--drivers/video/au1200fb.c6
-rw-r--r--drivers/video/auo_k1900fb.c6
-rw-r--r--drivers/video/auo_k1901fb.c6
-rw-r--r--drivers/video/auo_k190x.c6
-rw-r--r--drivers/video/backlight/88pm860x_bl.c18
-rw-r--r--drivers/video/backlight/adp5520_bl.c6
-rw-r--r--drivers/video/backlight/adp8860_bl.c14
-rw-r--r--drivers/video/backlight/adp8870_bl.c14
-rw-r--r--drivers/video/backlight/ams369fg06.c6
-rw-r--r--drivers/video/backlight/apple_bl.c4
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c7
-rw-r--r--drivers/video/backlight/backlight.c29
-rw-r--r--drivers/video/backlight/corgi_lcd.c26
-rw-r--r--drivers/video/backlight/da903x_bl.c15
-rw-r--r--drivers/video/backlight/da9052_bl.c2
-rw-r--r--drivers/video/backlight/ep93xx_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c4
-rw-r--r--drivers/video/backlight/hp680_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c18
-rw-r--r--drivers/video/backlight/ili9320.h2
-rw-r--r--drivers/video/backlight/jornada720_bl.c31
-rw-r--r--drivers/video/backlight/l4f00242t03.c9
-rw-r--r--drivers/video/backlight/lcd.c8
-rw-r--r--drivers/video/backlight/ld9040.c4
-rw-r--r--drivers/video/backlight/lm3533_bl.c8
-rw-r--r--drivers/video/backlight/lm3630_bl.c10
-rw-r--r--drivers/video/backlight/lm3639_bl.c10
-rw-r--r--drivers/video/backlight/lms283gf05.c23
-rw-r--r--drivers/video/backlight/locomolcd.c38
-rw-r--r--drivers/video/backlight/lp855x_bl.c55
-rw-r--r--drivers/video/backlight/ltv350qv.c6
-rw-r--r--drivers/video/backlight/max8925_bl.c17
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pandora_bl.c8
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c14
-rw-r--r--drivers/video/backlight/platform_lcd.c8
-rw-r--r--drivers/video/backlight/s6e63m0.c8
-rw-r--r--drivers/video/backlight/tdo24m.c39
-rw-r--r--drivers/video/backlight/tosa_bl.c13
-rw-r--r--drivers/video/backlight/tosa_lcd.c30
-rw-r--r--drivers/video/backlight/vgg2432a4.c16
-rw-r--r--drivers/video/bf537-lq035.c18
-rw-r--r--drivers/video/bf54x-lq043fb.c6
-rw-r--r--drivers/video/bfin-lq035q1-fb.c14
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c6
-rw-r--r--drivers/video/bfin_adv7393fb.c10
-rw-r--r--drivers/video/broadsheetfb.c16
-rw-r--r--drivers/video/bw2.c23
-rw-r--r--drivers/video/carminefb.c16
-rw-r--r--drivers/video/cg14.c12
-rw-r--r--drivers/video/cg3.c26
-rw-r--r--drivers/video/cg6.c12
-rw-r--r--drivers/video/chipsfb.c13
-rw-r--r--drivers/video/cirrusfb.c44
-rw-r--r--drivers/video/clps711xfb.c156
-rw-r--r--drivers/video/cobalt_lcdfb.c8
-rw-r--r--drivers/video/console/newport_con.c11
-rw-r--r--drivers/video/console/softcursor.c3
-rw-r--r--drivers/video/console/sticore.c83
-rw-r--r--drivers/video/cyber2000fb.c23
-rw-r--r--drivers/video/da8xx-fb.c180
-rw-r--r--drivers/video/dnfb.c6
-rw-r--r--drivers/video/efifb.c4
-rw-r--r--drivers/video/ep93xx-fb.c8
-rw-r--r--drivers/video/exynos/exynos_dp_core.c703
-rw-r--r--drivers/video/exynos/exynos_dp_core.h21
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c77
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h3
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c4
-rw-r--r--drivers/video/ffb.c6
-rw-r--r--drivers/video/fm2fb.c14
-rw-r--r--drivers/video/fsl-diu-fb.c207
-rw-r--r--drivers/video/gbefb.c24
-rw-r--r--drivers/video/geode/gx1fb_core.c14
-rw-r--r--drivers/video/geode/gxfb_core.c20
-rw-r--r--drivers/video/geode/lxfb_core.c20
-rw-r--r--drivers/video/grvga.c12
-rw-r--r--drivers/video/gxt4500.c28
-rw-r--r--drivers/video/hecubafb.c10
-rw-r--r--drivers/video/hgafb.c12
-rw-r--r--drivers/video/hitfb.c10
-rw-r--r--drivers/video/hpfb.c9
-rw-r--r--drivers/video/i740fb.c15
-rw-r--r--drivers/video/i810/i810_main.c72
-rw-r--r--drivers/video/i810/i810_main.h2
-rw-r--r--drivers/video/igafb.c2
-rw-r--r--drivers/video/imsttfb.c17
-rw-r--r--drivers/video/imxfb.c53
-rw-r--r--drivers/video/intelfb/intelfbdrv.c29
-rw-r--r--drivers/video/jz4740_fb.c8
-rw-r--r--drivers/video/kyro/fbdev.c21
-rw-r--r--drivers/video/leo.c6
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c20
-rw-r--r--drivers/video/mbx/mbxdebugfs.c4
-rw-r--r--drivers/video/mbx/mbxfb.c18
-rw-r--r--drivers/video/metronomefb.c22
-rw-r--r--drivers/video/msm/mddi.c9
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/mxsfb.c15
-rw-r--r--drivers/video/neofb.c26
-rw-r--r--drivers/video/nuc900fb.c4
-rw-r--r--drivers/video/nvidia/nvidia.c49
-rw-r--r--drivers/video/omap/lcd_inn1510.c7
-rw-r--r--drivers/video/omap/lcd_mipid.c2
-rw-r--r--drivers/video/omap/lcdc.c2
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/omap2/Kconfig7
-rw-r--r--drivers/video/omap2/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c25
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c36
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c40
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c91
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c26
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c45
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c17
-rw-r--r--drivers/video/omap2/displays/panel-taal.c72
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c33
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c24
-rw-r--r--drivers/video/omap2/dss/Kconfig35
-rw-r--r--drivers/video/omap2/dss/Makefile7
-rw-r--r--drivers/video/omap2/dss/apply.c331
-rw-r--r--drivers/video/omap2/dss/core.c72
-rw-r--r--drivers/video/omap2/dss/dispc-compat.c667
-rw-r--r--drivers/video/omap2/dss/dispc-compat.h30
-rw-r--r--drivers/video/omap2/dss/dispc.c1102
-rw-r--r--drivers/video/omap2/dss/display-sysfs.c321
-rw-r--r--drivers/video/omap2/dss/display.c386
-rw-r--r--drivers/video/omap2/dss/dpi.c126
-rw-r--r--drivers/video/omap2/dss/dsi.c247
-rw-r--r--drivers/video/omap2/dss/dss.c138
-rw-r--r--drivers/video/omap2/dss/dss.h124
-rw-r--r--drivers/video/omap2/dss/dss_features.c79
-rw-r--r--drivers/video/omap2/dss/dss_features.h12
-rw-r--r--drivers/video/omap2/dss/hdmi.c158
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c82
-rw-r--r--drivers/video/omap2/dss/manager.c39
-rw-r--r--drivers/video/omap2/dss/output.c90
-rw-r--r--drivers/video/omap2/dss/overlay.c17
-rw-r--r--drivers/video/omap2/dss/rfbi.c23
-rw-r--r--drivers/video/omap2/dss/sdi.c11
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h3
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c11
-rw-r--r--drivers/video/omap2/dss/venc.c11
-rw-r--r--drivers/video/omap2/dss/venc_panel.c19
-rw-r--r--drivers/video/omap2/omapfb/Kconfig1
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c48
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c212
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c6
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h20
-rw-r--r--drivers/video/omap2/vram.c514
-rw-r--r--drivers/video/omap2/vrfb.c142
-rw-r--r--drivers/video/p9100.c6
-rw-r--r--drivers/video/platinumfb.c11
-rw-r--r--drivers/video/pm2fb.c17
-rw-r--r--drivers/video/pm3fb.c17
-rw-r--r--drivers/video/pmag-ba-fb.c6
-rw-r--r--drivers/video/pmagb-b-fb.c12
-rw-r--r--drivers/video/ps3fb.c4
-rw-r--r--drivers/video/pvr2fb.c28
-rw-r--r--drivers/video/pxa168fb.c8
-rw-r--r--drivers/video/pxa3xx-gcu.c8
-rw-r--r--drivers/video/pxafb.c33
-rw-r--r--drivers/video/q40fb.c6
-rw-r--r--drivers/video/riva/fbdev.c45
-rw-r--r--drivers/video/riva/rivafb-i2c.c9
-rw-r--r--drivers/video/s1d13xxxfb.c10
-rw-r--r--drivers/video/s3c-fb.c39
-rw-r--r--drivers/video/s3c2410fb.c16
-rw-r--r--drivers/video/s3fb.c16
-rw-r--r--drivers/video/sa1100fb.c8
-rw-r--r--drivers/video/savage/savagefb_driver.c23
-rw-r--r--drivers/video/sgivwfb.c12
-rw-r--r--drivers/video/sh7760fb.c6
-rw-r--r--drivers/video/sh_mipi_dsi.c73
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c92
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/sis/sis_main.c140
-rw-r--r--drivers/video/sis/sis_main.h20
-rw-r--r--drivers/video/skeletonfb.c17
-rw-r--r--drivers/video/sm501fb.c16
-rw-r--r--drivers/video/ssd1307fb.c397
-rw-r--r--drivers/video/sstfb.c33
-rw-r--r--drivers/video/sunxvr1000.c10
-rw-r--r--drivers/video/sunxvr2500.c12
-rw-r--r--drivers/video/sunxvr500.c12
-rw-r--r--drivers/video/tcx.c6
-rw-r--r--drivers/video/tdfxfb.c30
-rw-r--r--drivers/video/tgafb.c45
-rw-r--r--drivers/video/tmiofb.c8
-rw-r--r--drivers/video/tridentfb.c28
-rw-r--r--drivers/video/uvesafb.c74
-rw-r--r--drivers/video/vermilion/vermilion.c7
-rw-r--r--drivers/video/vfb.c6
-rw-r--r--drivers/video/vga16fb.c10
-rw-r--r--drivers/video/via/dvi.c10
-rw-r--r--drivers/video/via/dvi.h4
-rw-r--r--drivers/video/via/hw.c16
-rw-r--r--drivers/video/via/hw.h4
-rw-r--r--drivers/video/via/lcd.c10
-rw-r--r--drivers/video/via/lcd.h6
-rw-r--r--drivers/video/via/via-core.c19
-rw-r--r--drivers/video/via/via-gpio.c2
-rw-r--r--drivers/video/via/viafbdev.c12
-rw-r--r--drivers/video/vt8500lcdfb.c6
-rw-r--r--drivers/video/vt8623fb.c8
-rw-r--r--drivers/video/w100fb.c10
-rw-r--r--drivers/video/wm8505fb.c6
-rw-r--r--drivers/video/wmt_ge_rops.c6
-rw-r--r--drivers/video/xen-fbfront.c7
-rw-r--r--drivers/video/xilinxfb.c8
-rw-r--r--drivers/virt/Kconfig1
-rw-r--r--drivers/virt/fsl_hypervisor.c3
-rw-r--r--drivers/virtio/virtio.c30
-rw-r--r--drivers/virtio/virtio_balloon.c158
-rw-r--r--drivers/virtio/virtio_mmio.c36
-rw-r--r--drivers/virtio/virtio_pci.c28
-rw-r--r--drivers/virtio/virtio_ring.c46
-rw-r--r--drivers/vlynq/vlynq.c6
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c15
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c15
-rw-r--r--drivers/vme/bridges/vme_tsi148.c15
-rw-r--r--drivers/w1/masters/Kconfig3
-rw-r--r--drivers/w1/masters/ds2482.c13
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/w1/masters/mxc_w1.c26
-rw-r--r--drivers/w1/masters/omap_hdq.c10
-rw-r--r--drivers/w1/masters/w1-gpio.c65
-rw-r--r--drivers/w1/w1.c7
-rw-r--r--drivers/watchdog/Kconfig18
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/acquirewdt.c6
-rw-r--r--drivers/watchdog/advantechwdt.c6
-rw-r--r--drivers/watchdog/ar7_wdt.c6
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c6
-rw-r--r--drivers/watchdog/at91sam9_wdt.c11
-rw-r--r--drivers/watchdog/ath79_wdt.c19
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c6
-rw-r--r--drivers/watchdog/bfin_wdt.c6
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/cpu5wdt.c9
-rw-r--r--drivers/watchdog/cpwd.c6
-rw-r--r--drivers/watchdog/da9052_wdt.c10
-rw-r--r--drivers/watchdog/da9055_wdt.c211
-rw-r--r--drivers/watchdog/davinci_wdt.c17
-rw-r--r--drivers/watchdog/dw_wdt.c6
-rw-r--r--drivers/watchdog/ep93xx_wdt.c6
-rw-r--r--drivers/watchdog/gef_wdt.c4
-rw-r--r--drivers/watchdog/geodewdt.c6
-rw-r--r--drivers/watchdog/hpwdt.c32
-rw-r--r--drivers/watchdog/i6300esb.c10
-rw-r--r--drivers/watchdog/iTCO_wdt.c8
-rw-r--r--drivers/watchdog/ib700wdt.c6
-rw-r--r--drivers/watchdog/ie6xx_wdt.c10
-rw-r--r--drivers/watchdog/imx2_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c6
-rw-r--r--drivers/watchdog/ks8695_wdt.c6
-rw-r--r--drivers/watchdog/lantiq_wdt.c6
-rw-r--r--drivers/watchdog/max63xx_wdt.c6
-rw-r--r--drivers/watchdog/mixcomwd.c2
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c6
-rw-r--r--drivers/watchdog/mpcore_wdt.c25
-rw-r--r--drivers/watchdog/mtx-1_wdt.c6
-rw-r--r--drivers/watchdog/mv64x60_wdt.c6
-rw-r--r--drivers/watchdog/nuc900_wdt.c6
-rw-r--r--drivers/watchdog/nv_tco.c10
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c8
-rw-r--r--drivers/watchdog/omap_wdt.c324
-rw-r--r--drivers/watchdog/orion_wdt.c10
-rw-r--r--drivers/watchdog/pcwd.c8
-rw-r--r--drivers/watchdog/pcwd_pci.c6
-rw-r--r--drivers/watchdog/pnx4008_wdt.c6
-rw-r--r--drivers/watchdog/rc32434_wdt.c6
-rw-r--r--drivers/watchdog/rdc321x_wdt.c6
-rw-r--r--drivers/watchdog/riowd.c6
-rw-r--r--drivers/watchdog/s3c2410_wdt.c12
-rw-r--r--drivers/watchdog/sch311x_wdt.c6
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/sp5100_tco.c331
-rw-r--r--drivers/watchdog/sp5100_tco.h46
-rw-r--r--drivers/watchdog/sp805_wdt.c23
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c6
-rw-r--r--drivers/watchdog/ts72xx_wdt.c6
-rw-r--r--drivers/watchdog/twl4030_wdt.c202
-rw-r--r--drivers/watchdog/via_wdt.c6
-rw-r--r--drivers/watchdog/wdt_pci.c6
-rw-r--r--drivers/watchdog/wm831x_wdt.c6
-rw-r--r--drivers/watchdog/wm8350_wdt.c6
-rw-r--r--drivers/watchdog/xen_wdt.c6
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile7
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/cpu_hotplug.c4
-rw-r--r--drivers/xen/events.c4
-rw-r--r--drivers/xen/gntdev.c130
-rw-r--r--drivers/xen/grant-table.c50
-rw-r--r--drivers/xen/platform-pci.c6
-rw-r--r--drivers/xen/privcmd.c159
-rw-r--r--drivers/xen/swiotlb-xen.c25
-rw-r--r--drivers/xen/xen-acpi-pad.c182
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c133
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c14
-rw-r--r--drivers/xen/xen-selfballoon.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--drivers/zorro/zorro-driver.c4
5298 files changed, 301692 insertions, 185246 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dbdefa3fe775..f5fb0722a63a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -156,4 +156,6 @@ source "drivers/pwm/Kconfig"
source "drivers/irqchip/Kconfig"
+source "drivers/ipack/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index a16a8d001ae0..7863b9fee50b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -145,3 +145,4 @@ obj-$(CONFIG_EXTCON) += extcon/
obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
+obj-$(CONFIG_IPACK_BUS) += ipack/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 119d58db8342..38c5078da11d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -181,6 +181,12 @@ config ACPI_DOCK
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
+config ACPI_I2C
+ def_tristate I2C
+ depends on I2C
+ help
+ ACPI I2C enumeration support.
+
config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
@@ -261,6 +267,15 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
+config ACPI_INITRD_TABLE_OVERRIDE
+ bool "ACPI tables can be passed via uncompressed cpio in initrd"
+ default n
+ help
+ This option provides functionality to override arbitrary ACPI tables
+ via initrd. No functional change if no ACPI tables are passed via
+ initrd, so it is safe to say Y.
+ See Documentation/acpi/initrd_table_override.txt for details.
+
config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82422fe90f81..2a4502becd13 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -21,9 +21,10 @@ obj-y += acpi.o \
acpi-y += osl.o utils.o reboot.o
acpi-y += nvs.o
-# sleep related files
+# Power management related files
acpi-y += wakeup.o
acpi-y += sleep.o
+acpi-$(CONFIG_PM) += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -32,10 +33,12 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
acpi-y += bus.o glue.o
acpi-y += scan.o
+acpi-y += resource.o
acpi-y += processor_core.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
+acpi-y += acpi_platform.o
acpi-y += power.o
acpi-y += event.o
acpi-y += sysfs.o
@@ -67,6 +70,7 @@ obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
+obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c
new file mode 100644
index 000000000000..82045e3f5cac
--- /dev/null
+++ b/drivers/acpi/acpi_i2c.c
@@ -0,0 +1,103 @@
+/*
+ * ACPI I2C enumeration support
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/ioport.h>
+
+ACPI_MODULE_NAME("i2c");
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct i2c_board_info *info = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
+ }
+ } else if (info->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ info->irq = r.start;
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct list_head resource_list;
+ struct i2c_board_info info;
+ struct acpi_device *adev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ memset(&info, 0, sizeof(info));
+ info.acpi_node.handle = handle;
+ info.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_add_resource, &info);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !info.addr)
+ return AE_OK;
+
+ strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+ if (!i2c_new_device(adapter, &info)) {
+ dev_err(&adapter->dev,
+ "failed to add I2C device %s from ACPI\n",
+ dev_name(&adev->dev));
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adapter: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+void acpi_i2c_register_devices(struct i2c_adapter *adapter)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(&adapter->dev);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_add_device, NULL,
+ adapter, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adapter->dev, "failed to enumerate I2C slaves\n");
+}
+EXPORT_SYMBOL_GPL(acpi_i2c_register_devices);
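The new helper above is meant to be called by I2C bus-master drivers once their adapter is registered, so that ACPI-described slaves below the adapter are instantiated automatically. A minimal sketch of that call pattern, assuming a hypothetical adapter driver and that the helper's prototype is visible to it (names marked "hypothetical" are not from the patch):

/* Illustrative sketch only, not part of this patch. */
#include <linux/i2c.h>
#include <linux/platform_device.h>

static struct i2c_adapter example_adapter;	/* hypothetical adapter */

static int example_adapter_probe(struct platform_device *pdev)
{
	int ret;

	/* Register the adapter with the I2C core first */
	ret = i2c_add_adapter(&example_adapter);
	if (ret)
		return ret;

	/* Then walk the ACPI namespace below it and create the slave devices */
	acpi_i2c_register_devices(&example_adapter);
	return 0;
}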
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 24c807f96636..b679bf8478f7 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
#define ACPI_MEMORY_DEVICE_CLASS "memory"
@@ -78,6 +79,7 @@ struct acpi_memory_info {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
unsigned int enabled:1;
+ unsigned int failed:1;
};
struct acpi_memory_device {
@@ -86,8 +88,6 @@ struct acpi_memory_device {
struct list_head res_list;
};
-static int acpi_hotmem_initialized;
-
static acpi_status
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
{
@@ -125,12 +125,20 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
return AE_OK;
}
+static void
+acpi_memory_free_device_resources(struct acpi_memory_device *mem_device)
+{
+ struct acpi_memory_info *info, *n;
+
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
+}
+
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
acpi_status status;
- struct acpi_memory_info *info, *n;
-
if (!list_empty(&mem_device->res_list))
return 0;
@@ -138,9 +146,7 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
acpi_memory_get_resource, mem_device);
if (ACPI_FAILURE(status)) {
- list_for_each_entry_safe(info, n, &mem_device->res_list, list)
- kfree(info);
- INIT_LIST_HEAD(&mem_device->res_list);
+ acpi_memory_free_device_resources(mem_device);
return -EINVAL;
}
@@ -170,7 +176,7 @@ acpi_memory_get_device(acpi_handle handle,
/* Get the parent device */
result = acpi_bus_get_device(phandle, &pdevice);
if (result) {
- printk(KERN_WARNING PREFIX "Cannot get acpi bus device");
+ acpi_handle_warn(phandle, "Cannot get acpi bus device\n");
return -EINVAL;
}
@@ -180,14 +186,14 @@ acpi_memory_get_device(acpi_handle handle,
*/
result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
if (result) {
- printk(KERN_WARNING PREFIX "Cannot add acpi bus");
+ acpi_handle_warn(handle, "Cannot add acpi bus\n");
return -EINVAL;
}
end:
*mem_device = acpi_driver_data(device);
if (!(*mem_device)) {
- printk(KERN_ERR "\n driver data not found");
+ dev_err(&device->dev, "driver data not found\n");
return -ENODEV;
}
@@ -220,15 +226,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
struct acpi_memory_info *info;
int node;
-
- /* Get the range from the _CRS */
- result = acpi_memory_get_device_resources(mem_device);
- if (result) {
- printk(KERN_ERR PREFIX "get_device_resources failed\n");
- mem_device->state = MEMORY_INVALID_STATE;
- return result;
- }
-
node = acpi_get_node(mem_device->device->handle);
/*
* Tell the VM there is more memory here...
@@ -251,13 +248,27 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
node = memory_add_physaddr_to_nid(info->start_addr);
result = add_memory(node, info->start_addr, info->length);
- if (result)
+
+ /*
+ * If the memory block has been used by the kernel, add_memory()
+ * returns -EEXIST. If add_memory() returns the other error, it
+ * means that this memory block is not used by the kernel.
+ */
+ if (result && result != -EEXIST) {
+ info->failed = 1;
continue;
- info->enabled = 1;
+ }
+
+ if (!result)
+ info->enabled = 1;
+ /*
+ * Add num_enable even if add_memory() returns -EEXIST, so the
+ * device is bound to this driver.
+ */
num_enabled++;
}
if (!num_enabled) {
- printk(KERN_ERR PREFIX "add_memory failed\n");
+ dev_err(&mem_device->device->dev, "add_memory failed\n");
mem_device->state = MEMORY_INVALID_STATE;
return -EINVAL;
}
@@ -272,68 +283,31 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
return 0;
}
-static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
+static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- acpi_status status;
- struct acpi_object_list arg_list;
- union acpi_object arg;
- unsigned long long current_status;
-
-
- /* Issue the _EJ0 command */
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
- status = acpi_evaluate_object(mem_device->device->handle,
- "_EJ0", &arg_list, NULL);
- /* Return on _EJ0 failure */
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed"));
- return -ENODEV;
- }
-
- /* Evalute _STA to check if the device is disabled */
- status = acpi_evaluate_integer(mem_device->device->handle, "_STA",
- NULL, &current_status);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* Check for device status. Device should be disabled */
- if (current_status & ACPI_STA_DEVICE_ENABLED)
- return -EINVAL;
+ int result = 0;
+ struct acpi_memory_info *info, *n;
- return 0;
-}
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
+ if (info->failed)
+ /* The kernel does not use this memory block */
+ continue;
-static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
-{
- int result;
- struct acpi_memory_info *info, *n;
+ if (!info->enabled)
+ /*
+ * The kernel uses this memory block, but it may not be
+ * managed by us.
+ */
+ return -EBUSY;
+ result = remove_memory(info->start_addr, info->length);
+ if (result)
+ return result;
- /*
- * Ask the VM to offline this memory range.
- * Note: Assume that this function returns zero on success
- */
- list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
- if (info->enabled) {
- result = remove_memory(info->start_addr, info->length);
- if (result)
- return result;
- }
+ list_del(&info->list);
kfree(info);
}
- /* Power-off and eject the device */
- result = acpi_memory_powerdown_device(mem_device);
- if (result) {
- /* Set the status of the device to invalid */
- mem_device->state = MEMORY_INVALID_STATE;
- return result;
- }
-
- mem_device->state = MEMORY_POWER_OFF_STATE;
return result;
}
@@ -341,6 +315,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_memory_device *mem_device;
struct acpi_device *device;
+ struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
switch (event) {
@@ -353,15 +328,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived DEVICE CHECK notification for device\n"));
if (acpi_memory_get_device(handle, &mem_device)) {
- printk(KERN_ERR PREFIX "Cannot find driver data\n");
- break;
- }
-
- if (acpi_memory_check_device(mem_device))
- break;
-
- if (acpi_memory_enable_device(mem_device)) {
- printk(KERN_ERR PREFIX "Cannot enable memory device\n");
+ acpi_handle_err(handle, "Cannot find driver data\n");
break;
}
@@ -373,40 +340,28 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
"\nReceived EJECT REQUEST notification for device\n"));
if (acpi_bus_get_device(handle, &device)) {
- printk(KERN_ERR PREFIX "Device doesn't exist\n");
+ acpi_handle_err(handle, "Device doesn't exist\n");
break;
}
mem_device = acpi_driver_data(device);
if (!mem_device) {
- printk(KERN_ERR PREFIX "Driver Data is NULL\n");
+ acpi_handle_err(handle, "Driver Data is NULL\n");
break;
}
- /*
- * Currently disabling memory device from kernel mode
- * TBD: Can also be disabled from user mode scripts
- * TBD: Can also be disabled by Callback registration
- * with generic sysfs driver
- */
- if (acpi_memory_disable_device(mem_device)) {
- printk(KERN_ERR PREFIX "Disable memory device\n");
- /*
- * If _EJ0 was called but failed, _OST is not
- * necessary.
- */
- if (mem_device->state == MEMORY_INVALID_STATE)
- return;
-
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ pr_err(PREFIX "No memory, dropping EJECT\n");
break;
}
- /*
- * TBD: Invoke acpi_bus_remove to cleanup data structures
- */
+ ej_event->handle = handle;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ (void *)ej_event);
- /* _EJ0 succeeded; _OST is not necessary */
+ /* eject is performed asynchronously */
return;
-
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
@@ -420,6 +375,15 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
return;
}
+static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
+{
+ if (!mem_device)
+ return;
+
+ acpi_memory_free_device_resources(mem_device);
+ kfree(mem_device);
+}
+
static int acpi_memory_device_add(struct acpi_device *device)
{
int result;
@@ -449,23 +413,16 @@ static int acpi_memory_device_add(struct acpi_device *device)
/* Set the device state */
mem_device->state = MEMORY_POWER_ON_STATE;
- printk(KERN_DEBUG "%s \n", acpi_device_name(device));
-
- /*
- * Early boot code has recognized memory area by EFI/E820.
- * If DSDT shows these memory devices on boot, hotplug is not necessary
- * for them. So, it just returns until completion of this driver's
- * start up.
- */
- if (!acpi_hotmem_initialized)
- return 0;
+ pr_debug("%s\n", acpi_device_name(device));
if (!acpi_memory_check_device(mem_device)) {
/* call add_memory func */
result = acpi_memory_enable_device(mem_device);
- if (result)
- printk(KERN_ERR PREFIX
+ if (result) {
+ dev_err(&device->dev,
"Error in acpi_memory_enable_device\n");
+ acpi_memory_device_free(mem_device);
+ }
}
return result;
}
@@ -473,13 +430,18 @@ static int acpi_memory_device_add(struct acpi_device *device)
static int acpi_memory_device_remove(struct acpi_device *device, int type)
{
struct acpi_memory_device *mem_device = NULL;
-
+ int result;
if (!device || !acpi_driver_data(device))
return -EINVAL;
mem_device = acpi_driver_data(device);
- kfree(mem_device);
+
+ result = acpi_memory_remove_memory(mem_device);
+ if (result)
+ return result;
+
+ acpi_memory_device_free(mem_device);
return 0;
}
@@ -568,7 +530,6 @@ static int __init acpi_memory_device_init(void)
return -ENODEV;
}
- acpi_hotmem_initialized = 1;
return 0;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index af4aad6ee2eb..16fa979f7180 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -286,7 +286,7 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
@@ -309,7 +309,7 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
@@ -332,7 +332,7 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(num);
@@ -457,7 +457,7 @@ static void acpi_pad_notify(acpi_handle handle, u32 event,
dev_name(&device->dev), event, 0);
break;
default:
- printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
+ pr_warn("Unsupported event [0x%x]\n", event);
break;
}
}
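The strict_strtoul() to kstrtoul() conversions above are mechanical: both helpers parse an unsigned long and return 0 on success or a negative errno on failure. A minimal sketch of the parse-and-validate sysfs store pattern involved (the function name and limits are illustrative):

/* Illustrative sketch of the pattern used in the stores above. */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))	/* base 0 accepts decimal, hex and octal */
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	/* ... apply the new value ... */
	return count;
}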
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
new file mode 100644
index 000000000000..db129b9f52cb
--- /dev/null
+++ b/drivers/acpi/acpi_platform.c
@@ -0,0 +1,104 @@
+/*
+ * ACPI support for platform bus type.
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("platform");
+
+/**
+ * acpi_create_platform_device - Create platform device for ACPI device node
+ * @adev: ACPI device node to create a platform device for.
+ *
+ * Check if the given @adev can be represented as a platform device and, if
+ * that's the case, create and register a platform device, populate its common
+ * resources and return a pointer to it. Otherwise, return %NULL.
+ *
+ * The platform device's name will be taken from the @adev's _HID and _UID.
+ */
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+{
+ struct platform_device *pdev = NULL;
+ struct acpi_device *acpi_parent;
+ struct platform_device_info pdevinfo;
+ struct resource_list_entry *rentry;
+ struct list_head resource_list;
+ struct resource *resources;
+ int count;
+
+ /* If the ACPI node already has a physical device attached, skip it. */
+ if (adev->physical_node_count)
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (count <= 0)
+ return NULL;
+
+ resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
+ if (!resources) {
+ dev_err(&adev->dev, "No memory for resources\n");
+ acpi_dev_free_resource_list(&resource_list);
+ return NULL;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node)
+ resources[count++] = rentry->res;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ /*
+ * If the ACPI node has a parent and that parent has a physical device
+ * attached to it, that physical device should be the parent of the
+ * platform device we are about to create.
+ */
+ pdevinfo.parent = NULL;
+ acpi_parent = adev->parent;
+ if (acpi_parent) {
+ struct acpi_device_physical_node *entry;
+ struct list_head *list;
+
+ mutex_lock(&acpi_parent->physical_node_lock);
+ list = &acpi_parent->physical_node_list;
+ if (!list_empty(list)) {
+ entry = list_first_entry(list,
+ struct acpi_device_physical_node,
+ node);
+ pdevinfo.parent = entry->dev;
+ }
+ mutex_unlock(&acpi_parent->physical_node_lock);
+ }
+ pdevinfo.name = dev_name(&adev->dev);
+ pdevinfo.id = -1;
+ pdevinfo.res = resources;
+ pdevinfo.num_res = count;
+ pdevinfo.acpi_node.handle = adev->handle;
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev)) {
+ dev_err(&adev->dev, "platform device creation failed: %ld\n",
+ PTR_ERR(pdev));
+ pdev = NULL;
+ } else {
+ dev_dbg(&adev->dev, "created platform device %s\n",
+ dev_name(&pdev->dev));
+ }
+
+ kfree(resources);
+ return pdev;
+}
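The helper above is intended to be invoked from the ACPI scan code for device nodes that should be exposed on the platform bus. A minimal sketch of a caller, assuming it already holds a struct acpi_device pointer (the wrapper function name is hypothetical):

/* Illustrative sketch only, not part of this patch. */
static void example_make_platform_device(struct acpi_device *adev)
{
	struct platform_device *pdev;

	pdev = acpi_create_platform_device(adev);
	if (!pdev)
		return;	/* no resources, node already bound, or registration failed */

	dev_dbg(&pdev->dev, "created for ACPI node %s\n", dev_name(&adev->dev));
}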
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7f1d40797e80..bc7a03ded064 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -161,3 +161,6 @@ acpi-y += \
utxfinit.o \
utxferror.o \
utxfmutex.o
+
+acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
+
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 5e8abb07724f..432a318c9ed1 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -44,17 +44,28 @@
#ifndef __ACDEBUG_H__
#define __ACDEBUG_H__
-#define ACPI_DEBUG_BUFFER_SIZE 4196
+#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
-struct command_info {
+struct acpi_db_command_info {
char *name; /* Command Name */
u8 min_args; /* Minimum arguments required */
};
-struct argument_info {
+struct acpi_db_command_help {
+ u8 line_count; /* Number of help lines */
+ char *invocation; /* Command Invocation */
+ char *description; /* Command Description */
+};
+
+struct acpi_db_argument_info {
char *name; /* Argument Name */
};
+struct acpi_db_execute_walk {
+ u32 count;
+ u32 max_count;
+};
+
#define PARAM_LIST(pl) pl
#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose)
#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\
@@ -77,59 +88,71 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
/*
* dbcmds - debug commands and output routines
*/
-acpi_status acpi_db_disassemble_method(char *name);
+struct acpi_namespace_node *acpi_db_convert_to_node(char *in_string);
void acpi_db_display_table_info(char *table_arg);
-void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg);
+void acpi_db_display_template(char *buffer_arg);
-void
-acpi_db_set_method_breakpoint(char *location,
- struct acpi_walk_state *walk_state,
- union acpi_parse_object *op);
+void acpi_db_unload_acpi_table(char *name);
-void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op);
+void acpi_db_send_notify(char *name, u32 value);
-void acpi_db_get_bus_info(void);
+void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
-void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
+acpi_status acpi_db_sleep(char *object_arg);
-void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+void acpi_db_display_locks(void);
-void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
+void acpi_db_display_resources(char *object_arg);
-void acpi_db_send_notify(char *name, u32 value);
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
+
+void acpi_db_display_handlers(void);
+
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+ acpi_db_generate_gpe(char *gpe_arg,
+ char *block_arg))
+
+/*
+ * dbmethod - control method commands
+ */
+void
+acpi_db_set_method_breakpoint(char *location,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op);
void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
-acpi_status
-acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
+acpi_status acpi_db_disassemble_method(char *name);
-void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
+void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
-acpi_status acpi_db_find_name_in_namespace(char *name_arg);
+void acpi_db_batch_execute(char *count_arg);
+/*
+ * dbnames - namespace commands
+ */
void acpi_db_set_scope(char *name);
-ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
+void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
-void acpi_db_find_references(char *object_arg);
+void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
-void acpi_db_display_locks(void);
+acpi_status acpi_db_find_name_in_namespace(char *name_arg);
-void acpi_db_display_resources(char *object_arg);
+void acpi_db_check_predefined_names(void);
-ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
+acpi_status
+acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
void acpi_db_check_integrity(void);
-ACPI_HW_DEPENDENT_RETURN_VOID(void
- acpi_db_generate_gpe(char *gpe_arg,
- char *block_arg))
-
-void acpi_db_check_predefined_names(void);
+void acpi_db_find_references(char *object_arg);
-void acpi_db_batch_execute(void);
+void acpi_db_get_bus_info(void);
/*
* dbdisply - debug display commands
@@ -161,7 +184,8 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
/*
* dbexec - debugger control method execution
*/
-void acpi_db_execute(char *name, char **args, u32 flags);
+void
+acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags);
void
acpi_db_create_execution_threads(char *num_threads_arg,
@@ -175,7 +199,8 @@ u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
* dbfileio - Debugger file I/O commands
*/
acpi_object_type
-acpi_db_match_argument(char *user_argument, struct argument_info *arguments);
+acpi_db_match_argument(char *user_argument,
+ struct acpi_db_argument_info *arguments);
void acpi_db_close_debug_file(void);
@@ -208,6 +233,11 @@ acpi_db_command_dispatch(char *input_buffer,
void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
+acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op);
+
+char *acpi_db_get_next_token(char *string,
+ char **next, acpi_object_type * return_type);
+
/*
* dbstats - Generation and display of ACPI table statistics
*/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 5935ba6707e2..ed33ebcdaebe 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -309,10 +309,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state);
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
- *origin, union acpi_operand_object
- *mth_desc, struct acpi_thread_state
- *thread);
+struct acpi_walk_state * acpi_ds_create_walk_state(acpi_owner_id owner_id,
+ union acpi_parse_object
+ *origin,
+ union acpi_operand_object
+ *mth_desc,
+ struct acpi_thread_state
+ *thread);
acpi_status
acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c0a43b38c6a3..e975c6720448 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -84,9 +84,11 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status
+acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ce79100fb5eb..64472e4ec329 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -70,7 +70,7 @@
/*
* Enable "slack" in the AML interpreter? Default is FALSE, and the
- * interpreter strictly follows the ACPI specification. Setting to TRUE
+ * interpreter strictly follows the ACPI specification. Setting to TRUE
* allows the interpreter to ignore certain errors and/or bad AML constructs.
*
* Currently, these features are enabled by this flag:
@@ -155,26 +155,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
/*****************************************************************************
*
- * Debug support
- *
- ****************************************************************************/
-
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
-
-/* Support for dynamic control method tracing mechanism */
-
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
-
-/*****************************************************************************
- *
* ACPI Table globals
*
****************************************************************************/
@@ -259,15 +239,6 @@ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE reg
*
****************************************************************************/
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Lists for tracking memory allocations */
-
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-#endif
-
/* Object caches */
ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
@@ -326,6 +297,15 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
#endif
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Lists for tracking memory allocations */
+
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
+ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
+#endif
+
/*****************************************************************************
*
* Namespace globals
@@ -396,13 +376,35 @@ ACPI_EXTERN struct acpi_gpe_block_info
#if (!ACPI_REDUCED_HARDWARE)
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
*
+ * Debug support
+ *
+ ****************************************************************************/
+
+/* Procedure nesting level for debug output */
+
+extern u32 acpi_gbl_nesting_level;
+
+/* Event counters */
+
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
+/* Support for dynamic control method tracing mechanism */
+
+ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+
+/*****************************************************************************
+ *
* Debugger globals
*
****************************************************************************/
@@ -426,10 +428,11 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[80];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[80];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[40];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[40];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
ACPI_EXTERN char *acpi_gbl_db_buffer;
ACPI_EXTERN char *acpi_gbl_db_filename;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c816ee675094..ff8bd0061e8b 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -262,10 +262,10 @@ struct acpi_create_field_info {
};
typedef
-acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
/*
- * Bitmapped ACPI types. Used internally only
+ * Bitmapped ACPI types. Used internally only
*/
#define ACPI_BTYPE_ANY 0x00000000
#define ACPI_BTYPE_INTEGER 0x00000001
@@ -486,8 +486,10 @@ struct acpi_gpe_device_info {
struct acpi_namespace_node *gpe_device;
};
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context);
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+ gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/* Information about each particular fixed event */
@@ -582,7 +584,7 @@ struct acpi_pscope_state {
};
/*
- * Thread state - one per thread across multiple walk states. Multiple walk
+ * Thread state - one per thread across multiple walk states. Multiple walk
* states are created when there are nested control methods executing.
*/
struct acpi_thread_state {
@@ -645,7 +647,7 @@ union acpi_generic_state {
*
****************************************************************************/
-typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state);
/* Address Range info block */
@@ -1031,6 +1033,7 @@ struct acpi_db_method_info {
acpi_handle method;
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
+ acpi_handle info_gate;
acpi_thread_id *threads;
u32 num_threads;
u32 num_created;
@@ -1041,6 +1044,7 @@ struct acpi_db_method_info {
u32 num_loops;
char pathname[128];
char **args;
+ acpi_object_type *types;
/*
* Arguments to be passed to method for the command
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index a7f68c47f517..5efad99f2169 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -84,29 +84,29 @@
/* These macros reverse the bytes during the move, converting little-endian to big endian */
- /* Big Endian <== Little Endian */
- /* Hi...Lo Lo...Hi */
+ /* Big Endian <== Little Endian */
+ /* Hi...Lo Lo...Hi */
/* 16-bit source, 16/32/64 destination */
#define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
- (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\
- ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
- ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+ ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\
- ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
- ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+ ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
/* 32-bit source, 16/32/64 destination */
#define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
- (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
- (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
- (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\
((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
@@ -196,24 +196,12 @@
#endif
#endif
-/* Macros based on machine integer width */
-
-#if ACPI_MACHINE_WIDTH == 32
-#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_32_TO_16(d, s)
-
-#elif ACPI_MACHINE_WIDTH == 64
-#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_64_TO_16(d, s)
-
-#else
-#error unknown ACPI_MACHINE_WIDTH
-#endif
-
/*
* Fast power-of-two math macros for non-optimized compilers
*/
-#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2)))
-#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2)))
-#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1)))
+#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2)))
+#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2)))
+#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1)))
#define ACPI_DIV_2(a) _ACPI_DIV(a, 1)
#define ACPI_MUL_2(a) _ACPI_MUL(a, 1)
@@ -238,12 +226,12 @@
/*
* Rounding macros (Power of two boundaries only)
*/
-#define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \
- (~(((acpi_size) boundary)-1)))
+#define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \
+ (~(((acpi_size) boundary)-1)))
-#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \
- (((acpi_size) boundary)-1)) & \
- (~(((acpi_size) boundary)-1)))
+#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \
+ (((acpi_size) boundary)-1)) & \
+ (~(((acpi_size) boundary)-1)))
/* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */
@@ -264,7 +252,7 @@
#define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary))
-#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
+#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
/*
* Bitmask creation
@@ -355,7 +343,6 @@
* Ascii error messages can be configured out
*/
#ifndef ACPI_NO_ERROR_MESSAGES
-
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
* the plist contains a set of parens to allow variable-length lists.
@@ -375,18 +362,15 @@
#define ACPI_WARN_PREDEFINED(plist)
#define ACPI_INFO_PREDEFINED(plist)
-#endif /* ACPI_NO_ERROR_MESSAGES */
+#endif /* ACPI_NO_ERROR_MESSAGES */
/*
* Debug macros that are conditionally compiled
*/
#ifdef ACPI_DEBUG_OUTPUT
-
/*
* Function entry tracing
*/
-#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
-
#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
#define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \
@@ -464,45 +448,19 @@
#endif /* ACPI_SIMPLE_RETURN_MACROS */
-#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_TRACE_PTR(a,b)
-#define ACPI_FUNCTION_TRACE_U32(a,b)
-#define ACPI_FUNCTION_TRACE_STR(a,b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_ENTRY()
-
-#define return_VOID return
-#define return_ACPI_STATUS(s) return(s)
-#define return_VALUE(s) return(s)
-#define return_UINT8(s) return(s)
-#define return_UINT32(s) return(s)
-#define return_PTR(s) return(s)
-
-#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_NORMAL_EXEC(a)
-
-#define ACPI_DEBUG_DEFINE(a) a;
#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
#define _VERBOSE_STRUCTURES
-/* Stack and buffer dumping */
+/* Various object display routines for debug */
#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
-#define ACPI_DUMP_OPERANDS(a, b, c) acpi_ex_dump_operands(a, b, c)
-
+#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
-#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a)
-#define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
+#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
#else
/*
@@ -510,25 +468,23 @@
* leaving no executable debug code!
*/
#define ACPI_DEBUG_EXEC(a)
-#define ACPI_NORMAL_EXEC(a) a;
-
-#define ACPI_DEBUG_DEFINE(a) do { } while(0)
-#define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0)
-#define ACPI_FUNCTION_TRACE(a) do { } while(0)
-#define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0)
-#define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0)
-#define ACPI_FUNCTION_TRACE_STR(a, b) do { } while(0)
-#define ACPI_FUNCTION_EXIT do { } while(0)
-#define ACPI_FUNCTION_STATUS_EXIT(s) do { } while(0)
-#define ACPI_FUNCTION_VALUE_EXIT(s) do { } while(0)
-#define ACPI_FUNCTION_ENTRY() do { } while(0)
-#define ACPI_DUMP_STACK_ENTRY(a) do { } while(0)
-#define ACPI_DUMP_OPERANDS(a, b, c) do { } while(0)
-#define ACPI_DUMP_ENTRY(a, b) do { } while(0)
-#define ACPI_DUMP_TABLES(a, b) do { } while(0)
-#define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0)
-#define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0)
-#define ACPI_DUMP_BUFFER(a, b) do { } while(0)
+#define ACPI_DEBUG_ONLY_MEMBERS(a)
+#define ACPI_FUNCTION_TRACE(a)
+#define ACPI_FUNCTION_TRACE_PTR(a, b)
+#define ACPI_FUNCTION_TRACE_U32(a, b)
+#define ACPI_FUNCTION_TRACE_STR(a, b)
+#define ACPI_FUNCTION_EXIT
+#define ACPI_FUNCTION_STATUS_EXIT(s)
+#define ACPI_FUNCTION_VALUE_EXIT(s)
+#define ACPI_FUNCTION_ENTRY()
+#define ACPI_DUMP_STACK_ENTRY(a)
+#define ACPI_DUMP_OPERANDS(a, b, c)
+#define ACPI_DUMP_ENTRY(a, b)
+#define ACPI_DUMP_TABLES(a, b)
+#define ACPI_DUMP_PATHNAME(a, b, c, d)
+#define ACPI_DUMP_BUFFER(a, b)
+#define ACPI_DEBUG_PRINT(pl)
+#define ACPI_DEBUG_PRINT_RAW(pl)
#define return_VOID return
#define return_ACPI_STATUS(s) return(s)
@@ -556,18 +512,6 @@
#define ACPI_DEBUGGER_EXEC(a)
#endif
-#ifdef ACPI_DEBUG_OUTPUT
-/*
- * 1) Set name to blanks
- * 2) Copy the object name
- */
-#define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\
- ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name))
-#else
-
-#define ACPI_ADD_OBJECT_NAME(a,b)
-#endif
-
/*
* Memory allocation tracking (DEBUG ONLY)
*/
@@ -578,13 +522,13 @@
/* Memory allocation */
#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
#endif
#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
#endif
#ifndef ACPI_FREE
-#define ACPI_FREE(a) acpio_os_free(a)
+#define ACPI_FREE(a) acpi_os_free(a)
#endif
#define ACPI_MEM_TRACKING(a)
@@ -592,16 +536,25 @@
/* Memory allocation */
-#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
#define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
#define ACPI_MEM_TRACKING(a) a
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-/* Preemption point */
-#ifndef ACPI_PREEMPTION_POINT
-#define ACPI_PREEMPTION_POINT() /* no preemption */
-#endif
+/*
+ * Macros used for ACPICA utilities only
+ */
+
+/* Generate a UUID */
+
+#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
+ (b) & 0xFF, ((b) >> 8) & 0xFF, \
+ (c) & 0xFF, ((c) >> 8) & 0xFF, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
+
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
#endif /* ACMACROS_H */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 364a1303fb8f..24eb9eac9514 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
@@ -179,7 +178,7 @@ struct acpi_object_method {
union acpi_operand_object *mutex;
u8 *aml_start;
union {
- ACPI_INTERNAL_METHOD implementation;
+ acpi_internal_method implementation;
union acpi_operand_object *handler;
} dispatch;
@@ -198,7 +197,7 @@ struct acpi_object_method {
/******************************************************************************
*
- * Objects that can be notified. All share a common notify_info area.
+ * Objects that can be notified. All share a common notify_info area.
*
*****************************************************************************/
@@ -235,7 +234,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
/******************************************************************************
*
- * Fields. All share a common header/info field.
+ * Fields. All share a common header/info field.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 9440d053fbb3..d786a5128b78 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -54,7 +54,7 @@
#define _UNK 0x6B
/*
- * Reserved ASCII characters. Do not use any of these for
+ * Reserved ASCII characters. Do not use any of these for
* internal opcodes, since they are used to differentiate
* name strings from AML opcodes
*/
@@ -63,7 +63,7 @@
#define _PFX 0x6D
/*
- * All AML opcodes and the parse-time arguments for each. Used by the AML
+ * All AML opcodes and the parse-time arguments for each. Used by the AML
* parser Each list is compressed into a 32-bit number and stored in the
* master opcode table (in psopcode.c).
*/
@@ -193,7 +193,7 @@
#define ARGP_ZERO_OP ARG_NONE
/*
- * All AML opcodes and the runtime arguments for each. Used by the AML
+ * All AML opcodes and the runtime arguments for each. Used by the AML
* interpreter Each list is compressed into a 32-bit number and stored
* in the master opcode table (in psopcode.c).
*
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index b725d780d34d..eefcf47a61a0 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -150,8 +150,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state);
void
acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
- union acpi_parse_object **op,
- u32 * arg_list, u32 * arg_count);
+ union acpi_parse_object **op, u32 *arg_list, u32 *arg_count);
acpi_status
acpi_ps_push_scope(struct acpi_parse_state *parser_state,
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 3080c017f5ba..9dfa1c83bd4e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -150,8 +150,7 @@ enum acpi_return_package_types {
* is saved here (rather than in a separate table) in order to minimize the
* overall size of the stored data.
*/
-static const union acpi_predefined_info predefined_names[] =
-{
+static const union acpi_predefined_info predefined_names[] = {
{{"_AC0", 0, ACPI_RTYPE_INTEGER}},
{{"_AC1", 0, ACPI_RTYPE_INTEGER}},
{{"_AC2", 0, ACPI_RTYPE_INTEGER}},
@@ -538,7 +537,8 @@ static const union acpi_predefined_info predefined_names[] =
/* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
- {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
+ {{"_WAK", 1,
+ ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
/* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
@@ -551,11 +551,12 @@ static const union acpi_predefined_info predefined_names[] =
};
#if 0
+
/* This is an internally implemented control method, no need to check */
- {{"_OSI", 1, ACPI_RTYPE_INTEGER}},
+{ {
+"_OSI", 1, ACPI_RTYPE_INTEGER}},
/* TBD: */
-
_PRT - currently ignore reversed entries. attempt to fix here?
think about possibly fixing package elements like _BIF, etc.
#endif
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f196e2c9a71f..937e66c65d1e 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -53,7 +53,7 @@
****************************************************************************/
/*
- * Walk state - current state of a parse tree walk. Used for both a leisurely
+ * Walk state - current state of a parse tree walk. Used for both a leisurely
* stroll through the tree (for whatever reason), and for control method
* execution.
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 5035327ebccc..b0f5f92b674a 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -69,6 +69,22 @@ extern const char *acpi_gbl_siz_decode[];
extern const char *acpi_gbl_trs_decode[];
extern const char *acpi_gbl_ttp_decode[];
extern const char *acpi_gbl_typ_decode[];
+extern const char *acpi_gbl_ppc_decode[];
+extern const char *acpi_gbl_ior_decode[];
+extern const char *acpi_gbl_dts_decode[];
+extern const char *acpi_gbl_ct_decode[];
+extern const char *acpi_gbl_sbt_decode[];
+extern const char *acpi_gbl_am_decode[];
+extern const char *acpi_gbl_sm_decode[];
+extern const char *acpi_gbl_wm_decode[];
+extern const char *acpi_gbl_cph_decode[];
+extern const char *acpi_gbl_cpo_decode[];
+extern const char *acpi_gbl_dp_decode[];
+extern const char *acpi_gbl_ed_decode[];
+extern const char *acpi_gbl_bpb_decode[];
+extern const char *acpi_gbl_sb_decode[];
+extern const char *acpi_gbl_fc_decode[];
+extern const char *acpi_gbl_pt_decode[];
#endif
/* Types for Resource descriptor entries */
@@ -79,14 +95,14 @@ extern const char *acpi_gbl_typ_decode[];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
-acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+acpi_status(*acpi_walk_aml_callback) (u8 *aml,
u32 length,
u32 offset,
u8 resource_index, void **context);
typedef
acpi_status(*acpi_pkg_callback) (u8 object_type,
- union acpi_operand_object * source_object,
+ union acpi_operand_object *source_object,
union acpi_generic_state * state,
void *context);
@@ -202,7 +218,9 @@ extern const u8 _acpi_ctype[];
#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
-#endif /* ACPI_USE_SYSTEM_CLIBRARY */
+#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
+
+#define ACPI_IS_ASCII(c) ((c) < 0x80)
/*
* utcopy - Object construction and conversion interfaces
@@ -210,11 +228,11 @@ extern const u8 _acpi_ctype[];
acpi_status
acpi_ut_build_simple_object(union acpi_operand_object *obj,
union acpi_object *user_obj,
- u8 * data_space, u32 * buffer_space_used);
+ u8 *data_space, u32 *buffer_space_used);
acpi_status
acpi_ut_build_package_object(union acpi_operand_object *obj,
- u8 * buffer, u32 * space_used);
+ u8 *buffer, u32 *space_used);
acpi_status
acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj,
@@ -287,9 +305,10 @@ acpi_ut_ptr_exit(u32 line_number,
const char *function_name,
const char *module_name, u32 component_id, u8 *ptr);
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+void
+acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id);
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset);
void acpi_ut_report_error(char *module_name, u32 line_number);
@@ -337,15 +356,19 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
*/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id);
+ struct acpi_pnp_device_id ** return_id);
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id);
+ struct acpi_pnp_device_id ** return_id);
+
+acpi_status
+acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id);
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
- struct acpica_device_id_list **return_cid_list);
+ struct acpi_pnp_device_id_list ** return_cid_list);
/*
* utlock - reader/writer locks
@@ -479,15 +502,19 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void acpi_ut_strupr(char *src_string);
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
void acpi_ut_print_string(char *string, u8 max_length);
u8 acpi_ut_valid_acpi_name(u32 name);
-acpi_name acpi_ut_repair_name(char *name);
+void acpi_ut_repair_name(char *name);
u8 acpi_ut_valid_acpi_char(char character, u32 position);
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer);
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
/* Values for Base above (16=Hex, 10=Decimal) */
@@ -508,12 +535,12 @@ acpi_ut_display_init_pathname(u8 type,
* utresrc
*/
acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
+acpi_ut_walk_aml_resources(u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function,
void **context);
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
+acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
@@ -524,8 +551,7 @@ u8 acpi_ut_get_resource_header_length(void *aml);
u8 acpi_ut_get_resource_type(void *aml);
acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
- u8 ** end_tag);
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
/*
* utmutex - mutex support
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index af4947956ec2..968449685e06 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: amlresrc.h - AML resource descriptors
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 465f02134b89..57895db3231a 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -280,7 +280,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
/*
* Get the return value and save as the last result
- * value. This is the only place where walk_state->return_desc
+ * value. This is the only place where walk_state->return_desc
* is set to anything other than zero!
*/
walk_state->return_desc = walk_state->operands[0];
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 3da6fd8530c5..b5b904ee815f 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -277,7 +277,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
*
* RETURN: Status
*
- * DESCRIPTION: Process all named fields in a field declaration. Names are
+ * DESCRIPTION: Process all named fields in a field declaration. Names are
* entered into the namespace.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index aa9a5d4e4052..52eb4e01622a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -170,7 +170,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
*
* RETURN: Status
*
- * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
+ * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
* increments the thread count, and waits at the method semaphore
* for clearance to execute.
*
@@ -444,7 +444,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* RETURN: Status
*
* DESCRIPTION: Restart a method that was preempted by another (nested) method
- * invocation. Handle the return value (if any) from the callee.
+ * invocation. Handle the return value (if any) from the callee.
*
******************************************************************************/
@@ -530,7 +530,7 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* RETURN: None
*
- * DESCRIPTION: Terminate a control method. Delete everything that the method
+ * DESCRIPTION: Terminate a control method. Delete everything that the method
* created, delete all locals and arguments, and delete the parse
* tree if requested.
*
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8d55cebaa656..9a83b7e0f3ba 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -76,7 +76,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* RETURN: Status
*
* DESCRIPTION: Initialize the data structures that hold the method's arguments
- * and locals. The data struct is an array of namespace nodes for
+ * and locals. The data struct is an array of namespace nodes for
* each - this allows ref_of and de_ref_of to work properly for these
* special data types.
*
@@ -129,7 +129,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
*
* RETURN: None
*
- * DESCRIPTION: Delete method locals and arguments. Arguments are only
+ * DESCRIPTION: Delete method locals and arguments. Arguments are only
* deleted if this method was called from another method.
*
******************************************************************************/
@@ -183,7 +183,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
*
* RETURN: Status
*
- * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
+ * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
* of ACPI operand objects, either null terminated or whose length
* is defined by max_param_count.
*
@@ -401,7 +401,7 @@ acpi_ds_method_data_get_value(u8 type,
* This means that either 1) The expected argument was
* not passed to the method, or 2) A local variable
* was referenced by the method (via the ASL)
- * before it was initialized. Either case is an error.
+ * before it was initialized. Either case is an error.
*/
/* If slack enabled, init the local_x/arg_x to an Integer of value zero */
@@ -465,7 +465,7 @@ acpi_ds_method_data_get_value(u8 type,
*
* RETURN: None
*
- * DESCRIPTION: Delete the entry at Opcode:Index. Inserts
+ * DESCRIPTION: Delete the entry at Opcode:Index. Inserts
* a null into the stack slot after the object is deleted.
*
******************************************************************************/
@@ -523,7 +523,7 @@ acpi_ds_method_data_delete_value(u8 type,
*
* RETURN: Status
*
- * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
+ * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
* as the new value for the Arg or Local and the reference count
* for obj_desc is incremented.
*
@@ -566,7 +566,7 @@ acpi_ds_store_object_to_local(u8 type,
/*
* If the reference count on the object is more than one, we must
- * take a copy of the object before we store. A reference count
+ * take a copy of the object before we store. A reference count
* of exactly 1 means that the object was just created during the
* evaluation of an expression, and we can safely use it since it
* is not used anywhere else.
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 68592dd34960..c9f15d3a3686 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -293,7 +293,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*
* Second arg is the buffer data (optional) byte_list can be either
- * individual bytes or a string initializer. In either case, a
+ * individual bytes or a string initializer. In either case, a
* byte_list appears in the AML.
*/
arg = op->common.value.arg; /* skip first arg */
@@ -568,7 +568,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
/*
* Because of the execution pass through the non-control-method
- * parts of the table, we can arrive here twice. Only init
+ * parts of the table, we can arrive here twice. Only init
* the named object node the first time through
*/
if (acpi_ns_get_attached_object(node)) {
@@ -618,7 +618,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Initialize a namespace object from a parser Op and its
- * associated arguments. The namespace object is a more compact
+ * associated arguments. The namespace object is a more compact
* representation of the Op and its arguments.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index aa34d8984d34..d09c6b4bab2c 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: dsopcode - Dispatcher suport for regions and fields
+ * Module Name: dsopcode - Dispatcher support for regions and fields
*
*****************************************************************************/
@@ -649,7 +649,8 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
(op->common.parent->common.aml_opcode !=
AML_VAR_PACKAGE_OP)
- && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
+ && (op->common.parent->common.aml_opcode !=
+ AML_NAME_OP))) {
walk_state->result_obj = obj_desc;
}
}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 73a5447475f5..afeb99f49482 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("dsutils")
*
* RETURN: None.
*
- * DESCRIPTION: Clear and remove a reference on an implicit return value. Used
+ * DESCRIPTION: Clear and remove a reference on an implicit return value. Used
* to delete "stale" return values (if enabled, the return value
* from every operator is saved at least momentarily, in case the
* parent method exits.)
@@ -107,7 +107,7 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
*
* DESCRIPTION: Implements the optional "implicit return". We save the result
* of every ASL operator and control method invocation in case the
- * parent method exit. Before storing a new return value, we
+ * parent method exit. Before storing a new return value, we
* delete the previous return value.
*
******************************************************************************/
@@ -198,7 +198,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*
* If there is no parent, or the parent is a scope_op, we are executing
* at the method level. An executing method typically has no parent,
- * since each method is parsed separately. A method invoked externally
+ * since each method is parsed separately. A method invoked externally
* via execute_control_method has a scope_op as the parent.
*/
if ((!op->common.parent) ||
@@ -223,7 +223,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
}
/*
- * Decide what to do with the result based on the parent. If
+ * Decide what to do with the result based on the parent. If
* the parent opcode will not use the result, delete the object.
* Otherwise leave it as is, it will be deleted when it is used
* as an operand later.
@@ -266,7 +266,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/*
* These opcodes allow term_arg(s) as operands and therefore
- * the operands can be method calls. The result is used.
+ * the operands can be method calls. The result is used.
*/
goto result_used;
@@ -284,7 +284,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
AML_BANK_FIELD_OP)) {
/*
* These opcodes allow term_arg(s) as operands and therefore
- * the operands can be method calls. The result is used.
+ * the operands can be method calls. The result is used.
*/
goto result_used;
}
@@ -329,9 +329,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*
* RETURN: Status
*
- * DESCRIPTION: Used after interpretation of an opcode. If there is an internal
+ * DESCRIPTION: Used after interpretation of an opcode. If there is an internal
* result descriptor, check if the parent opcode will actually use
- * this result. If not, delete the result now so that it will
+ * this result. If not, delete the result now so that it will
* not become orphaned.
*
******************************************************************************/
@@ -376,7 +376,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
*
* RETURN: Status
*
- * DESCRIPTION: Resolve all operands to their values. Used to prepare
+ * DESCRIPTION: Resolve all operands to their values. Used to prepare
* arguments to a control method invocation (a call from one
* method to another.)
*
@@ -391,7 +391,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
/*
* Attempt to resolve each of the valid operands
- * Method arguments are passed by reference, not by value. This means
+ * Method arguments are passed by reference, not by value. This means
* that the actual objects are passed, not copies of the objects.
*/
for (i = 0; i < walk_state->num_operands; i++) {
@@ -451,7 +451,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
* RETURN: Status
*
* DESCRIPTION: Translate a parse tree object that is an argument to an AML
- * opcode to the equivalent interpreter object. This may include
+ * opcode to the equivalent interpreter object. This may include
* looking up a name or entering a new name into the internal
* namespace.
*
@@ -496,9 +496,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/*
* Special handling for buffer_field declarations. This is a deferred
* opcode that unfortunately defines the field name as the last
- * parameter instead of the first. We get here when we are performing
+ * parameter instead of the first. We get here when we are performing
* the deferred execution, so the actual name of the field is already
- * in the namespace. We don't want to attempt to look it up again
+ * in the namespace. We don't want to attempt to look it up again
* because we may be executing in a different scope than where the
* actual opcode exists.
*/
@@ -560,7 +560,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* indicate this to the interpreter, set the
* object to the root
*/
- obj_desc = ACPI_CAST_PTR(union
+ obj_desc =
+ ACPI_CAST_PTR(union
acpi_operand_object,
acpi_gbl_root_node);
status = AE_OK;
@@ -604,8 +605,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/*
* If the name is null, this means that this is an
* optional result parameter that was not specified
- * in the original ASL. Create a Zero Constant for a
- * placeholder. (Store to a constant is a Noop.)
+ * in the original ASL. Create a Zero Constant for a
+ * placeholder. (Store to a constant is a Noop.)
*/
opcode = AML_ZERO_OP; /* Has no arguments! */
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 642f3c053e87..58593931be96 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -57,7 +57,7 @@ ACPI_MODULE_NAME("dswexec")
/*
* Dispatch table for opcode classes
*/
-static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = {
+static acpi_execute_op acpi_gbl_op_type_dispatch[] = {
acpi_ex_opcode_0A_0T_1R,
acpi_ex_opcode_1A_0T_0R,
acpi_ex_opcode_1A_0T_1R,
@@ -204,7 +204,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Descending callback used during the execution of control
- * methods. This is where most operators and operands are
+ * methods. This is where most operators and operands are
* dispatched to the interpreter.
*
****************************************************************************/
@@ -297,7 +297,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
if (walk_state->walk_type & ACPI_WALK_METHOD) {
/*
* Found a named object declaration during method execution;
- * we must enter this object into the namespace. The created
+ * we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
*
@@ -348,7 +348,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Ascending callback used during the execution of control
- * methods. The only thing we really need to do here is to
+ * methods. The only thing we really need to do here is to
* notice the beginning of IF, ELSE, and WHILE blocks.
*
****************************************************************************/
@@ -432,7 +432,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if (ACPI_SUCCESS(status)) {
/*
* Dispatch the request to the appropriate interpreter handler
- * routine. There is one routine per opcode "type" based upon the
+ * routine. There is one routine per opcode "type" based upon the
* number of opcode arguments and return type.
*/
status =
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 89c0114210c0..379835748357 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -254,7 +254,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_ut_get_type_name(node->type),
acpi_ut_get_node_name(node)));
- return (AE_AML_OPERAND_TYPE);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
@@ -602,7 +602,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
region_space,
walk_state);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
acpi_ex_exit_interpreter();
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index d0e6555061e4..3e65a15a735f 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -51,8 +51,9 @@
ACPI_MODULE_NAME("dswstate")
/* Local prototypes */
-static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
-static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
+static acpi_status
+acpi_ds_result_stack_push(struct acpi_walk_state *walk_state);
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state);
/*******************************************************************************
*
@@ -347,7 +348,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
*
* RETURN: Status
*
- * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
+ * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
* deleted by this routine.
*
******************************************************************************/
@@ -491,7 +492,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
* RETURN: A walk_state object popped from the thread's stack
*
* DESCRIPTION: Remove and return the walkstate object that is at the head of
- * the walk stack for the given walk list. NULL indicates that
+ * the walk stack for the given walk list. NULL indicates that
* the list is empty.
*
******************************************************************************/
@@ -531,14 +532,17 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
* RETURN: Pointer to the new walk state.
*
- * DESCRIPTION: Allocate and initialize a new walk state. The current walk
+ * DESCRIPTION: Allocate and initialize a new walk state. The current walk
* state is set to this new state.
*
******************************************************************************/
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
- *origin, union acpi_operand_object
- *method_desc, struct acpi_thread_state
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
+ union acpi_parse_object
+ *origin,
+ union acpi_operand_object
+ *method_desc,
+ struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
@@ -653,7 +657,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
/*
* Setup the current scope.
* Find a Named Op that has a namespace node associated with it.
- * search upwards from this Op. Current scope is the first
+ * search upwards from this Op. Current scope is the first
* Op with a namespace node.
*/
extra_op = parser_state->start_op;
@@ -704,13 +708,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
if (!walk_state) {
- return;
+ return_VOID;
}
if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
- return;
+ return_VOID;
}
/* There should not be any open scopes */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index ef0193d74b5d..36d120574423 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -89,7 +89,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
/* Set the mask bit only if there are references to this GPE */
if (gpe_event_info->runtime_count) {
- ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run,
+ (u8)register_bit);
}
return_ACPI_STATUS(AE_OK);
@@ -106,8 +107,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
* DESCRIPTION: Clear a GPE of stale events and enable it.
*
******************************************************************************/
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
@@ -131,8 +131,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
}
/* Enable the requested GPE */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
@@ -150,7 +150,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
@@ -191,7 +192,8 @@ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info
*
******************************************************************************/
-acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status
+acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
@@ -208,7 +210,8 @@ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_i
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
- status = acpi_hw_low_set_gpe(gpe_event_info,
+ status =
+ acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_DISABLE);
}
@@ -306,7 +309,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A Non-NULL gpe_device means this is a GPE Block Device */
- obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ obj_desc =
+ acpi_ns_get_attached_object((struct acpi_namespace_node *)
gpe_device);
if (!obj_desc || !obj_desc->device.gpe_block) {
return (NULL);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 8cf4c104c7b7..1571a61a7833 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -486,7 +486,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index + gpe_block->block_base_number));
+ gpe_index +
+ gpe_block->block_base_number));
continue;
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index cb50dd91bc18..228a0c3b1d49 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -374,7 +374,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
- } else if ((gpe_event_info->
+ } else
+ if ((gpe_event_info->
flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_NOTIFY) {
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 4c1c8261166f..1474241bfc7e 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -227,8 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
- status =
- acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
@@ -350,8 +349,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
- struct acpica_device_id *hid;
- struct acpica_device_id_list *cid;
+ struct acpi_pnp_device_id *hid;
+ struct acpi_pnp_device_id_list *cid;
u32 i;
u8 match;
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 7587eb6c9584..ae668f32cf16 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -398,7 +398,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
*
******************************************************************************/
acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 87c5f2332260..3f30e753b652 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -221,7 +221,8 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
if (wake_device == ACPI_ROOT_OBJECT) {
device_node = acpi_gbl_root_node;
} else {
- device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ device_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
}
/* Validate WakeDevice is of type Device */
@@ -324,7 +325,8 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
*
******************************************************************************/
-acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+acpi_status
+acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
@@ -567,7 +569,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(gpe_device);
@@ -650,7 +652,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(gpe_device);
@@ -694,8 +696,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
* the FADT-defined gpe blocks. Otherwise, the GPE block device.
*
******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device)
{
struct acpi_gpe_device_info info;
acpi_status status;
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bfb062e4c4b4..4492a4e03022 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -516,8 +516,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
string_length--;
}
- return_desc = acpi_ut_create_string_object((acpi_size)
- string_length);
+ return_desc =
+ acpi_ut_create_string_object((acpi_size) string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 691d4763102c..66554bc6f9a8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -78,7 +78,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
(target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
/*
* Dereference an existing alias so that we don't create a chain
- * of aliases. With this code, we guarantee that an alias is
+ * of aliases. With this code, we guarantee that an alias is
* always exactly one level of indirection away from the
* actual aliased name.
*/
@@ -90,7 +90,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
/*
* For objects that can never change (i.e., the NS node will
* permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
+ * the object to the new NS node. For other objects (such as
* Integers, buffers, etc.), we have to point the Alias node
* to the original Node.
*/
@@ -139,7 +139,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
/*
* The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
+ * to the same object. The reference count of the object has an
* additional reference to prevent deletion out from under either the
* target node or the alias Node
*/
@@ -243,8 +243,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
/* Init object and attach to NS node */
- obj_desc->mutex.sync_level =
- (u8) walk_state->operands[1]->integer.value;
+ obj_desc->mutex.sync_level = (u8)walk_state->operands[1]->integer.value;
obj_desc->mutex.node =
(struct acpi_namespace_node *)walk_state->operands[0];
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index bc5b9a6a1316..d7c9f51608a7 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -145,10 +145,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
case ACPI_TYPE_BUFFER:
acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
- acpi_ut_dump_buffer2(source_desc->buffer.pointer,
- (source_desc->buffer.length < 256) ?
- source_desc->buffer.length : 256,
- DB_BYTE_DISPLAY);
+ acpi_ut_dump_buffer(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY, 0);
break;
case ACPI_TYPE_STRING:
@@ -190,7 +190,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
acpi_os_printf("Table Index 0x%X\n",
source_desc->reference.value);
- return;
+ return_VOID;
default:
break;
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 213c081776fc..858b43a7dcf6 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -464,7 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
- if (!((ACPI_LV_EXEC & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_EXEC & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
@@ -777,7 +778,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
* PARAMETERS: title - Descriptive text
* value - Value to be displayed
*
- * DESCRIPTION: Object dump output formatting functions. These functions
+ * DESCRIPTION: Object dump output formatting functions. These functions
* reduce the number of format strings required and keeps them
* all in one place for easy modification.
*
@@ -810,7 +811,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
- if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
@@ -940,10 +942,11 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
acpi_os_printf("[Buffer] Length %.2X = ",
obj_desc->buffer.length);
if (obj_desc->buffer.length) {
- acpi_ut_dump_buffer(ACPI_CAST_PTR
- (u8, obj_desc->buffer.pointer),
- obj_desc->buffer.length,
- DB_DWORD_DISPLAY, _COMPONENT);
+ acpi_ut_debug_dump_buffer(ACPI_CAST_PTR
+ (u8,
+ obj_desc->buffer.pointer),
+ obj_desc->buffer.length,
+ DB_DWORD_DISPLAY, _COMPONENT);
} else {
acpi_os_printf("\n");
}
@@ -996,7 +999,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
- if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return_VOID;
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index dc092f5b35d6..ebc55fbf3ff7 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -59,7 +59,7 @@ ACPI_MODULE_NAME("exfield")
*
* RETURN: Status
*
- * DESCRIPTION: Read from a named field. Returns either an Integer or a
+ * DESCRIPTION: Read from a named field. Returns either an Integer or a
* Buffer, depending on the size of the field.
*
******************************************************************************/
@@ -149,7 +149,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* Allocate a buffer for the contents of the field.
*
* If the field is larger than the current integer width, create
- * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
+ * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index a7784152ed30..aa2ccfb7cb61 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -54,8 +54,7 @@ ACPI_MODULE_NAME("exfldio")
/* Local prototypes */
static acpi_status
acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
- u32 field_datum_byte_offset,
- u64 *value, u32 read_write);
+ u32 field_datum_byte_offset, u64 *value, u32 read_write);
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value);
@@ -155,7 +154,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
#endif
/*
- * Validate the request. The entire request from the byte offset for a
+ * Validate the request. The entire request from the byte offset for a
* length of one field datum (access width) must fit within the region.
* (Region length is specified in bytes)
*/
@@ -183,7 +182,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
obj_desc->common_field.access_byte_width) {
/*
* This is the case where the access_type (acc_word, etc.) is wider
- * than the region itself. For example, a region of length one
+ * than the region itself. For example, a region of length one
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
@@ -321,7 +320,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
*
* DESCRIPTION: Check if a value is out of range of the field being written.
* Used to check if the values written to Index and Bank registers
- * are out of range. Normally, the value is simply truncated
+ * are out of range. Normally, the value is simply truncated
* to fit the field, but this case is most likely a serious
* coding error in the ASL.
*
@@ -370,7 +369,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
*
* RETURN: Status
*
- * DESCRIPTION: Read or Write a single datum of a field. The field_type is
+ * DESCRIPTION: Read or Write a single datum of a field. The field_type is
* demultiplexed here to handle the different types of fields
* (buffer_field, region_field, index_field, bank_field)
*
@@ -860,7 +859,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
/*
* We must have a buffer that is at least as long as the field
- * we are writing to. This is because individual fields are
+ * we are writing to. This is because individual fields are
* indivisible and partial writes are not supported -- as per
* the ACPI specification.
*/
@@ -875,7 +874,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/*
* Copy the original data to the new buffer, starting
- * at Byte zero. All unused (upper) bytes of the
+ * at Byte zero. All unused (upper) bytes of the
* buffer will be 0.
*/
ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 271c0c57ea10..84058705ed12 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
@@ -254,7 +253,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
ACPI_FUNCTION_TRACE(ex_do_concatenate);
/*
- * Convert the second operand if necessary. The first operand
+ * Convert the second operand if necessary. The first operand
* determines the type of the second operand, (See the Data Types
* section of the ACPI specification.) Both object types are
* guaranteed to be either Integer/String/Buffer by the operand
@@ -573,7 +572,7 @@ acpi_ex_do_logical_op(u16 opcode,
ACPI_FUNCTION_TRACE(ex_do_logical_op);
/*
- * Convert the second operand if necessary. The first operand
+ * Convert the second operand if necessary. The first operand
* determines the type of the second operand, (See the Data Types
* section of the ACPI 3.0+ specification.) Both object types are
* guaranteed to be either Integer/String/Buffer by the operand
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index bcceda5be9e3..d1f449d93dcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
@@ -305,7 +304,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
ACPI_FUNCTION_TRACE(ex_release_mutex_object);
if (obj_desc->mutex.acquisition_depth == 0) {
- return (AE_NOT_ACQUIRED);
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
/* Match multiple Acquires with multiple Releases */
@@ -462,7 +461,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
union acpi_operand_object *next = thread->acquired_mutex_list;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_ENTRY();
+ ACPI_FUNCTION_NAME(ex_release_all_mutexes);
/* Traverse the list of owned mutexes, releasing each one */
@@ -474,6 +473,10 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
obj_desc->mutex.next = NULL;
obj_desc->mutex.acquisition_depth = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Force-releasing held mutex: %p\n",
+ obj_desc));
+
/* Release the mutex, special case for Global Lock */
if (obj_desc == acpi_gbl_global_lock_mutex) {
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index fcc75fa27d32..2ff578a16adc 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exnames - interpreter/scanner name load/execute
@@ -53,8 +52,7 @@ ACPI_MODULE_NAME("exnames")
/* Local prototypes */
static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs);
-static acpi_status
-acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
+static acpi_status acpi_ex_name_segment(u8 **in_aml_address, char *name_string);
/*******************************************************************************
*
@@ -64,7 +62,7 @@ acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
* (-1)==root, 0==none
* num_name_segs - count of 4-character name segments
*
- * RETURN: A pointer to the allocated string segment. This segment must
+ * RETURN: A pointer to the allocated string segment. This segment must
* be deleted by the caller.
*
* DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name
@@ -178,7 +176,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
- for (index = 0; (index < ACPI_NAME_SIZE)
+ for (index = 0;
+ (index < ACPI_NAME_SIZE)
&& (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 9ba8c73cea16..bbf01e9bf057 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
@@ -606,7 +605,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
}
/*
- * Set result to ONES (TRUE) if Value == 0. Note:
+ * Set result to ONES (TRUE) if Value == 0. Note:
* return_desc->Integer.Value is initially == 0 (FALSE) from above.
*/
if (!operand[0]->integer.value) {
@@ -618,7 +617,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
case AML_INCREMENT_OP: /* Increment (Operand) */
/*
- * Create a new integer. Can't just get the base integer and
+ * Create a new integer. Can't just get the base integer and
* increment it because it may be an Arg or Field.
*/
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -686,7 +685,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/*
* Note: The operand is not resolved at this point because we want to
- * get the associated object, not its value. For example, we don't
+ * get the associated object, not its value. For example, we don't
* want to resolve a field_unit to its value, we want the actual
* field_unit object.
*/
@@ -727,7 +726,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/*
* The type of the base object must be integer, buffer, string, or
- * package. All others are not supported.
+ * package. All others are not supported.
*
* NOTE: Integer is not specifically supported by the ACPI spec,
* but is supported implicitly via implicit operand conversion.
@@ -965,7 +964,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
case ACPI_TYPE_PACKAGE:
/*
- * Return the referenced element of the package. We must
+ * Return the referenced element of the package. We must
* add another reference to the referenced object, however.
*/
return_desc =
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 879e8a277b94..ee5634a074c4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -123,7 +123,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
/*
* Dispatch the notify to the appropriate handler
* NOTE: the request is queued for execution after this method
- * completes. The notify handlers are NOT invoked synchronously
+ * completes. The notify handlers are NOT invoked synchronously
* from this thread -- because handlers may in turn run other
* control methods.
*/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 71fcc65c9ffa..2c89b4651f08 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
@@ -158,7 +157,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */
/*
- * Create the return object. The Source operand is guaranteed to be
+ * Create the return object. The Source operand is guaranteed to be
* either a String or a Buffer, so just use its type.
*/
return_desc = acpi_ut_create_internal_object((operand[0])->
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 0786b8659061..3e08695c3b30 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
@@ -198,7 +197,7 @@ acpi_ex_do_match(u32 match_op,
return (FALSE);
}
- return logical_result;
+ return (logical_result);
}
/*******************************************************************************
@@ -269,7 +268,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
* and the next should be examined.
*
* Upon finding a match, the loop will terminate via "break" at
- * the bottom. If it terminates "normally", match_value will be
+ * the bottom. If it terminates "normally", match_value will be
* ACPI_UINT64_MAX (Ones) (its initial value) indicating that no
* match was found.
*/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 81eca60d2748..ba9db4de7c89 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exprep - ACPI AML (p-code) execution - field prep utilities
@@ -78,8 +77,8 @@ acpi_ex_generate_access(u32 field_bit_offset,
* any_acc keyword.
*
* NOTE: Need to have the region_length in order to check for boundary
- * conditions (end-of-region). However, the region_length is a deferred
- * operation. Therefore, to complete this implementation, the generation
+ * conditions (end-of-region). However, the region_length is a deferred
+ * operation. Therefore, to complete this implementation, the generation
* of this access width must be deferred until the region length has
* been evaluated.
*
@@ -308,7 +307,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
* RETURN: Status
*
* DESCRIPTION: Initialize the areas of the field object that are common
- * to the various types of fields. Note: This is very "sensitive"
+ * to the various types of fields. Note: This is very "sensitive"
* code because we are solving the general case for field
* alignment.
*
@@ -336,13 +335,13 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
obj_desc->common_field.bit_length = field_bit_length;
/*
- * Decode the access type so we can compute offsets. The access type gives
+ * Decode the access type so we can compute offsets. The access type gives
* two pieces of information - the width of each field access and the
* necessary byte_alignment (address granularity) of the access.
*
* For any_acc, the access_bit_width is the largest width that is both
* necessary and possible in an attempt to access the whole field in one
- * I/O operation. However, for any_acc, the byte_alignment is always one
+ * I/O operation. However, for any_acc, the byte_alignment is always one
* byte.
*
* For all Buffer Fields, the byte_alignment is always one byte.
@@ -363,7 +362,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
/*
* base_byte_offset is the address of the start of the field within the
- * region. It is the byte address of the first *datum* (field-width data
+ * region. It is the byte address of the first *datum* (field-width data
* unit) of the field. (i.e., the first datum that contains at least the
* first *bit* of the field.)
*
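
Editor's note on the exprep.c hunks above: the comment describes how base_byte_offset is the byte address of the first datum containing the field, derived from the field's bit offset and the access byte alignment. A minimal sketch of that arithmetic follows; the helper name and values are illustrative only, not ACPICA's exact code.

#include <stdint.h>
#include <stdio.h>

/* Round a byte offset down to the required access alignment */
static uint32_t round_down(uint32_t value, uint32_t alignment)
{
    return value - (value % alignment);
}

int main(void)
{
    uint32_t field_bit_offset = 37;   /* bit offset of the field within the region */
    uint32_t byte_alignment   = 2;    /* e.g. word access implies 2-byte alignment */

    /* Byte address of the first datum that contains the field's first bit */
    uint32_t base_byte_offset = round_down(field_bit_offset / 8, byte_alignment);

    /* Bit offset of the field relative to that first datum */
    uint32_t start_bit = field_bit_offset - (base_byte_offset * 8);

    printf("base_byte_offset=%u start_bit=%u\n", base_byte_offset, start_bit);
    return 0;
}
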
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 1f1ce0c3d2f8..1db2c0bfde0b 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exregion - ACPI default op_region (address space) handlers
@@ -202,7 +201,7 @@ acpi_ex_system_memory_space_handler(u32 function,
* Perform the memory read or write
*
* Note: For machines that do not support non-aligned transfers, the target
- * address was checked for alignment above. We do not attempt to break the
+ * address was checked for alignment above. We do not attempt to break the
* transfer up into smaller (byte-size) chunks because the AML specifically
* asked for a transfer width that the hardware may require.
*/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fa50e77e64a8..6239956786eb 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresnte - AML Interpreter object resolution
@@ -58,8 +57,8 @@ ACPI_MODULE_NAME("exresnte")
* PARAMETERS: object_ptr - Pointer to a location that contains
* a pointer to a NS node, and will receive a
* pointer to the resolved object.
- * walk_state - Current state. Valid only if executing AML
- * code. NULL if simply resolving an object
+ * walk_state - Current state. Valid only if executing AML
+ * code. NULL if simply resolving an object
*
* RETURN: Status
*
@@ -67,7 +66,7 @@ ACPI_MODULE_NAME("exresnte")
*
* Note: for some of the data types, the pointer attached to the Node
* can be either a pointer to an actual internal object or a pointer into the
- * AML stream itself. These types are currently:
+ * AML stream itself. These types are currently:
*
* ACPI_TYPE_INTEGER
* ACPI_TYPE_STRING
@@ -89,7 +88,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
/*
- * The stack pointer points to a struct acpi_namespace_node (Node). Get the
+ * The stack pointer points to a struct acpi_namespace_node (Node). Get the
* object that is attached to the Node.
*/
node = *object_ptr;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index bbf40ac27585..cc176b245e22 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresolv - AML Interpreter object resolution
@@ -327,7 +326,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
*
* RETURN: Status
*
- * DESCRIPTION: Return the base object and type. Traverse a reference list if
+ * DESCRIPTION: Return the base object and type. Traverse a reference list if
* necessary to get to the base object.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index f232fbabdea8..b9ebff2f6a09 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresop - AML Interpreter operand/object resolution
@@ -87,7 +86,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) {
/*
* Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference
- * objects and thus allow them to be targets. (As per the ACPI
+ * objects and thus allow them to be targets. (As per the ACPI
* specification, a store to a constant is a noop.)
*/
if ((this_type == ACPI_TYPE_INTEGER) &&
@@ -337,7 +336,8 @@ acpi_ex_resolve_operands(u16 opcode,
if ((opcode == AML_STORE_OP) &&
((*stack_ptr)->common.type ==
ACPI_TYPE_LOCAL_REFERENCE)
- && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
+ && ((*stack_ptr)->reference.class ==
+ ACPI_REFCLASS_INDEX)) {
goto next_operand;
}
break;
@@ -638,7 +638,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (acpi_gbl_enable_interpreter_slack) {
/*
* Enable original behavior of Store(), allowing any and all
- * objects as the source operand. The ACPI spec does not
+ * objects as the source operand. The ACPI spec does not
* allow this, however.
*/
break;
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 5fffe7ab5ece..90431f12f831 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -374,7 +374,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
* with the input value.
*
* When storing into an object the data is converted to the
- * target object type then stored in the object. This means
+ * target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
@@ -491,7 +491,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
acpi_ut_get_object_type_name(source_desc),
source_desc, node));
- /* No conversions for all other types. Just attach the source object */
+ /* No conversions for all other types. Just attach the source object */
status = acpi_ns_attach_object(node, source_desc,
source_desc->common.type);
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b35bed52e061..87153bbc4b43 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstoren - AML Interpreter object store support,
@@ -61,7 +60,7 @@ ACPI_MODULE_NAME("exstoren")
*
* RETURN: Status, resolved object in source_desc_ptr.
*
- * DESCRIPTION: Resolve an object. If the object is a reference, dereference
+ * DESCRIPTION: Resolve an object. If the object is a reference, dereference
* it and return the actual object in the source_desc_ptr.
*
******************************************************************************/
@@ -93,7 +92,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
/*
* Stores into a Field/Region or into a Integer/Buffer/String
- * are all essentially the same. This case handles the
+ * are all essentially the same. This case handles the
* "interchangeable" types Integer, String, and Buffer.
*/
if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
@@ -167,7 +166,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
*
* RETURN: Status
*
- * DESCRIPTION: "Store" an object to another object. This may include
+ * DESCRIPTION: "Store" an object to another object. This may include
* converting the source type to the target type (implicit
* conversion), and a copy of the value of the source to
* the target.
@@ -178,14 +177,14 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
* with the input value.
*
* When storing into an object the data is converted to the
- * target object type then stored in the object. This means
+ * target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
* This module allows destination types of Number, String,
* Buffer, and Package.
*
- * Assumes parameters are already validated. NOTE: source_desc
+ * Assumes parameters are already validated. NOTE: source_desc
* resolution (from a reference object) must be performed by
* the caller if necessary.
*
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 53c248473547..b5f339cb1305 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstorob - AML Interpreter object store support, store to object
@@ -108,7 +107,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
#ifdef ACPI_OBSOLETE_BEHAVIOR
/*
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
- * truncated if the string is smaller than the buffer. However, "other"
+ * truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the defacto
* standard. ACPI 3.0A changes this behavior such that the buffer
* is no longer truncated.
@@ -117,7 +116,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/*
* OBSOLETE BEHAVIOR:
* If the original source was a string, we must truncate the buffer,
- * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
+ * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
* copy must not truncate the original buffer.
*/
if (original_src_type == ACPI_TYPE_STRING) {
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index b760641e2fc6..c8a0ad5c1f55 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exsystem - Interface to OS services
@@ -59,7 +58,7 @@ ACPI_MODULE_NAME("exsystem")
* RETURN: Status
*
* DESCRIPTION: Implements a semaphore wait with a check to see if the
- * semaphore is available immediately. If it is not, the
+ * semaphore is available immediately. If it is not, the
* interpreter is released before waiting.
*
******************************************************************************/
@@ -104,7 +103,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
* RETURN: Status
*
* DESCRIPTION: Implements a mutex wait with a check to see if the
- * mutex is available immediately. If it is not, the
+ * mutex is available immediately. If it is not, the
* interpreter is released before waiting.
*
******************************************************************************/
@@ -152,7 +151,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
* DESCRIPTION: Suspend running thread for specified amount of time.
* Note: ACPI specification requires that Stall() does not
* relinquish the processor, and delays longer than 100 usec
- * should use Sleep() instead. We allow stalls up to 255 usec
+ * should use Sleep() instead. We allow stalls up to 255 usec
* for compatibility with other interpreters and existing BIOSs.
*
******************************************************************************/
@@ -254,7 +253,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
* RETURN: Status
*
* DESCRIPTION: Provides an access point to perform synchronization operations
- * within the AML. This operation is a request to wait for an
+ * within the AML. This operation is a request to wait for an
* event.
*
******************************************************************************/
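
Editor's note on the exsystem.c hunks above: the DESCRIPTION text outlines the pattern of probing a semaphore or mutex with a zero timeout and releasing the interpreter only when the wait would actually block. A rough sketch of that pattern, with placeholder stand-ins for the OS layer (the names below are assumptions, not the ACPICA OSL interfaces):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the OS layer (illustrative only) */
static int sem_count = 0;                      /* fake counting semaphore */

static bool os_wait_semaphore(unsigned timeout_ms)
{
    (void)timeout_ms;
    if (sem_count > 0) { sem_count--; return true; }
    return false;                              /* would block / timed out */
}

static void interpreter_exit(void)  { puts("interpreter released"); }
static void interpreter_enter(void) { puts("interpreter re-acquired"); }

/* Probe with zero timeout; only release the interpreter if we must block */
static bool wait_semaphore(unsigned timeout_ms)
{
    if (os_wait_semaphore(0)) {
        return true;                           /* available immediately */
    }
    interpreter_exit();
    bool acquired = os_wait_semaphore(timeout_ms);
    interpreter_enter();
    return acquired;
}

int main(void)
{
    sem_count = 1;
    printf("first wait: %d\n", wait_semaphore(100));   /* succeeds immediately */
    printf("second wait: %d\n", wait_semaphore(100));  /* takes the blocking path */
    return 0;
}
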
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index d1ab7917eed7..264d22d8018c 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exutils - interpreter/scanner utilities
@@ -45,12 +44,12 @@
/*
* DEFINE_AML_GLOBALS is tested in amlcode.h
* to determine whether certain global names should be "defined" or only
- * "declared" in the current compilation. This enhances maintainability
+ * "declared" in the current compilation. This enhances maintainability
* by enabling a single header file to embody all knowledge of the names
* in question.
*
* Exactly one module of any executable should #define DEFINE_GLOBALS
- * before #including the header files which use this convention. The
+ * before #including the header files which use this convention. The
* names in question will be defined and initialized in that module,
* and declared as extern in all other modules which #include those
* header files.
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index a1e71d0ef57b..90a9aea1cee9 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
@@ -136,7 +135,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
*
* RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY
*
- * DESCRIPTION: Return current operating state of system. Determined by
+ * DESCRIPTION: Return current operating state of system. Determined by
* querying the SCI_EN bit.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index db4076580e2b..64560045052d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
@@ -339,7 +338,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context)
+ struct acpi_gpe_block_info * gpe_block,
+ void *context)
{
u32 i;
acpi_status status;
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 1455ddcdc32c..65bc3453a29c 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -259,7 +259,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
status = acpi_hw_get_pci_device_info(pci_id, info->device,
&bus_number, &is_bridge);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
info = info->next;
@@ -271,7 +271,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
pci_id->segment, pci_id->bus, pci_id->device,
pci_id->function, status, bus_number, is_bridge));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 4af6d20ef077..f4e57503576b 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -1,4 +1,3 @@
-
/*******************************************************************************
*
* Module Name: hwregs - Read/write access functions for the various ACPI
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b6411f16832f..bfdce22f3798 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
@@ -101,8 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status =
- acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
return_ACPI_STATUS(status);
}
@@ -129,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
* a versatile and accurate timer.
*
* Note that this function accommodates only a single timer
- * rollover. Thus for 24-bit timers, this function should only
+ * rollover. Thus for 24-bit timers, this function should only
* be used for calculating durations less than ~4.6 seconds
* (~20 minutes for 32-bit timers) -- calculations below:
*
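
Editor's note on the hwtimer.c hunk above: the comment states that duration calculations tolerate only one timer rollover, roughly 4.6 seconds for a 24-bit PM timer running at 3.579545 MHz. A small sketch of the tick-delta arithmetic that makes this limit concrete (illustrative, not the ACPICA routine itself):

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545u   /* Hz, per the ACPI specification */

/* Elapsed ticks between two 24-bit PM timer readings, allowing one rollover */
static uint32_t ticks_elapsed_24(uint32_t start, uint32_t end)
{
    if (end >= start) {
        return end - start;                    /* no rollover */
    }
    return (0x00FFFFFFu - start) + end;        /* one rollover occurred */
}

int main(void)
{
    uint32_t delta = ticks_elapsed_24(0x00FFFF00u, 0x00000100u);
    uint64_t usec  = ((uint64_t)delta * 1000000u) / PM_TIMER_FREQUENCY;

    printf("delta=%u ticks (~%llu us)\n", delta, (unsigned long long)usec);
    return 0;
}
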
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index c99d546b217f..b6aae58299dc 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwvalid - I/O request validation
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 7bfd649d1996..05a154c3c9ac 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwxface - Public ACPICA hardware interfaces
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 0ff1ecea5c3a..ae443fe2ebf6 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -49,8 +49,7 @@
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
-static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
+static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/*
* Dispatch table used to efficiently branch to the various sleep
@@ -234,8 +233,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
* function.
*
******************************************************************************/
-static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
+static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
{
acpi_status status;
struct acpi_sleep_functions *sleep_functions =
@@ -369,8 +367,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
- status =
- acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
+ status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -396,8 +393,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
status =
- acpi_hw_sleep_dispatch(sleep_state,
- ACPI_WAKE_PREP_FUNCTION_ID);
+ acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
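
Editor's note on the hwxfsleep.c hunks above: acpi_hw_sleep_dispatch() indexes a small table of sleep handlers by function_id. The general function-pointer dispatch pattern looks roughly like the sketch below; the names, IDs, and table layout here are illustrative, not the ACPICA definitions (which pair legacy and extended handlers per entry).

#include <stdio.h>

typedef int acpi_status_t;
#define AE_OK 0

typedef acpi_status_t (*sleep_fn)(unsigned char sleep_state);

static acpi_status_t do_sleep(unsigned char s)     { printf("sleep S%u\n", s);     return AE_OK; }
static acpi_status_t do_wake_prep(unsigned char s) { printf("wake prep S%u\n", s); return AE_OK; }
static acpi_status_t do_wake(unsigned char s)      { printf("wake S%u\n", s);      return AE_OK; }

enum { SLEEP_FUNCTION_ID, WAKE_PREP_FUNCTION_ID, WAKE_FUNCTION_ID, NUM_FUNCTION_IDS };

/* Dispatch table: one handler per sleep-related operation */
static const sleep_fn sleep_dispatch[NUM_FUNCTION_IDS] = {
    do_sleep, do_wake_prep, do_wake
};

static acpi_status_t hw_sleep_dispatch(unsigned char sleep_state, unsigned function_id)
{
    if (function_id >= NUM_FUNCTION_IDS) {
        return -1;   /* invalid function id */
    }
    return sleep_dispatch[function_id](sleep_state);
}

int main(void)
{
    hw_sleep_dispatch(3, SLEEP_FUNCTION_ID);
    hw_sleep_dispatch(3, WAKE_PREP_FUNCTION_ID);
    return 0;
}
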
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 23db53ce2293..d70eaf39dfdf 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -110,11 +110,11 @@ acpi_status acpi_ns_root_initialize(void)
status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
ACPI_IMODE_LOAD_PASS2,
ACPI_NS_NO_UPSEARCH, NULL, &new_node);
-
- if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create predefined name %s",
init_val->name));
+ continue;
}
/*
@@ -179,8 +179,7 @@ acpi_status acpi_ns_root_initialize(void)
/* Build an object around the static string */
- obj_desc->string.length =
- (u32) ACPI_STRLEN(val);
+ obj_desc->string.length = (u32)ACPI_STRLEN(val);
obj_desc->string.pointer = val;
obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index ac389e5bb594..15143c44f5e5 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -332,7 +332,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
*
* RETURN: None.
*
- * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
+ * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
* stored within the subtree.
*
******************************************************************************/
@@ -418,7 +418,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
* RETURN: Status
*
* DESCRIPTION: Delete entries within the namespace that are owned by a
- * specific ID. Used to delete entire ACPI tables. All
+ * specific ID. Used to delete entire ACPI tables. All
* reference counts are updated.
*
* MUTEX: Locks namespace during deletion walk.
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 2526aaf945ee..924b3c71473a 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -209,14 +209,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
"Invalid ACPI Object Type 0x%08X", type));
}
- if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
- this_node->name.integer =
- acpi_ut_repair_name(this_node->name.ascii);
-
- ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
- this_node->name.integer));
- }
-
acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
}
@@ -700,7 +692,7 @@ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level)
*
* PARAMETERS: search_base - Root of subtree to be dumped, or
* NS_ALL to dump the entire namespace
- * max_depth - Maximum depth of dump. Use INT_MAX
+ * max_depth - Maximum depth of dump. Use INT_MAX
* for an effectively unlimited depth.
*
* RETURN: None
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 95ffe8dfa1f1..4328e2adfeb9 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -96,8 +96,8 @@ acpi_status acpi_ns_initialize_objects(void)
/* Walk entire namespace from the supplied root */
status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL,
- &info, NULL);
+ ACPI_UINT32_MAX, acpi_ns_init_one_object,
+ NULL, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 76935ff29289..911f99127b99 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -80,8 +80,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
/*
* Parse the table and load the namespace with all named
- * objects found within. Control methods are NOT parsed
- * at this time. In fact, the control methods cannot be
+ * objects found within. Control methods are NOT parsed
+ * at this time. In fact, the control methods cannot be
* parsed until the entire namespace is loaded, because
* if a control method makes a forward reference (call)
* to another control method, we can't continue parsing
@@ -122,7 +122,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
}
/*
- * Now we can parse the control methods. We always parse
+ * Now we can parse the control methods. We always parse
* them here for a sanity check, and if configured for
* just-in-time parsing, we delete the control method
* parse trees.
@@ -166,7 +166,7 @@ acpi_status acpi_ns_load_namespace(void)
}
/*
- * Load the namespace. The DSDT is required,
+ * Load the namespace. The DSDT is required,
* but the SSDT and PSDT tables are optional.
*/
status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
@@ -283,7 +283,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
* RETURN: Status
*
* DESCRIPTION: Shrinks the namespace, typically in response to an undocking
- * event. Deletes an entire subtree starting from (and
+ * event. Deletes an entire subtree starting from (and
* including) the given handle.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 96e0eb609bb4..55a175eadcc3 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -195,7 +195,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
ACPI_ERROR((AE_INFO,
"Invalid Namespace Node (%p) while traversing namespace",
next_node));
- return 0;
+ return (0);
}
size += ACPI_PATH_SEGMENT_LENGTH;
next_node = next_node->parent;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index d6c9a3cc6716..e69f7fa2579d 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("nsobject")
* RETURN: Status
*
* DESCRIPTION: Record the given object as the value associated with the
- * name whose acpi_handle is passed. If Object is NULL
+ * name whose acpi_handle is passed. If Object is NULL
* and Type is ACPI_TYPE_ANY, set the name as having no value.
* Note: Future may require that the Node->Flags field be passed
* as a parameter.
@@ -133,7 +133,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
((struct acpi_namespace_node *)object)->object) {
/*
* Value passed is a name handle and that name has a
- * non-null value. Use that name's value and type.
+ * non-null value. Use that name's value and type.
*/
obj_desc = ((struct acpi_namespace_node *)object)->object;
object_type = ((struct acpi_namespace_node *)object)->type;
@@ -321,7 +321,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
*
* RETURN: Status
*
- * DESCRIPTION: Low-level attach data. Create and attach a Data object.
+ * DESCRIPTION: Low-level attach data. Create and attach a Data object.
*
******************************************************************************/
@@ -377,7 +377,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
*
* RETURN: Status
*
- * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
+ * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
* is responsible for the actual data.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index ec7ba2d3463c..233f756d5cfa 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -168,11 +168,11 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
/*
* AML Parse, pass 1
*
- * In this pass, we load most of the namespace. Control methods
- * are not parsed until later. A parse tree is not created. Instead,
- * each Parser Op subtree is deleted when it is finished. This saves
+ * In this pass, we load most of the namespace. Control methods
+ * are not parsed until later. A parse tree is not created. Instead,
+ * each Parser Op subtree is deleted when it is finished. This saves
* a great deal of memory, and allows a small cache of parse objects
- * to service the entire parse. The second pass of the parse then
+ * to service the entire parse. The second pass of the parse then
* performs another complete parse of the AML.
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 456cc859f869..1d2d8ffc1bc5 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -314,22 +314,7 @@ acpi_ns_search_and_enter(u32 target_name,
* this problem, and we want to be able to enable ACPI support for them,
* even though there are a few bad names.
*/
- if (!acpi_ut_valid_acpi_name(target_name)) {
- target_name =
- acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
-
- /* Report warning only if in strict mode or debug mode */
-
- if (!acpi_gbl_enable_interpreter_slack) {
- ACPI_WARNING((AE_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- ACPI_CAST_PTR(char, &target_name)));
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- ACPI_CAST_PTR(char, &target_name)));
- }
- }
+ acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
/* Try to find the name in the namespace level specified by the caller */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index ef753a41e087..b5b4cb72a8a8 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -530,7 +530,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
((num_segments > 0) ? (num_segments - 1) : 0) + 1;
/*
- * Check to see if we're still in bounds. If not, there's a problem
+ * Check to see if we're still in bounds. If not, there's a problem
* with internal_name (invalid format).
*/
if (required_length > internal_name_length) {
@@ -557,10 +557,14 @@ acpi_ns_externalize_name(u32 internal_name_length,
(*converted_name)[j++] = '.';
}
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
+ /* Copy and validate the 4-char name segment */
+
+ ACPI_MOVE_NAME(&(*converted_name)[j],
+ &internal_name[names_index]);
+ acpi_ut_repair_name(&(*converted_name)[j]);
+
+ j += ACPI_NAME_SIZE;
+ names_index += ACPI_NAME_SIZE;
}
}
@@ -681,7 +685,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
- * root of the name space. If Name is fully
+ * root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
* flags - Used to indicate whether to perform upsearch or
@@ -689,7 +693,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
* return_node - Where the Node is returned
*
* DESCRIPTION: Look up a name relative to a given scope and return the
- * corresponding Node. NOTE: Scope can be null.
+ * corresponding Node. NOTE: Scope can be null.
*
* MUTEX: Locks namespace
*
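
Editor's note on the acpi_ns_externalize_name() hunk above: the patch replaces four byte-by-byte copies with a single 4-byte move plus a name repair; internal ACPI names are fixed 4-character segments that are joined with '.' when externalized. A simplified sketch of that conversion (illustrative; it ignores the backslash, carat, and multi-name prefixes the real routine handles):

#include <stdio.h>
#include <string.h>

#define ACPI_NAME_SIZE 4

/* Join fixed 4-char internal name segments into a dotted external path */
static void externalize_name(const char *segments, unsigned num_segments,
                             char *out, size_t out_size)
{
    size_t j = 0;

    for (unsigned i = 0; i < num_segments; i++) {
        if (i > 0 && j + 1 < out_size) {
            out[j++] = '.';
        }
        if (j + ACPI_NAME_SIZE < out_size) {
            memcpy(&out[j], &segments[i * ACPI_NAME_SIZE], ACPI_NAME_SIZE);
            j += ACPI_NAME_SIZE;
        }
    }
    out[j] = '\0';
}

int main(void)
{
    /* Two internal segments: "_SB_" and "PCI0" */
    const char internal[] = "_SB_PCI0";
    char external[32];

    externalize_name(internal, 2, external, sizeof(external));
    printf("%s\n", external);   /* prints _SB_.PCI0 */
    return 0;
}
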
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 730bccc5e7f7..0483877f26b8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -60,8 +60,8 @@ ACPI_MODULE_NAME("nswalk")
* RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
* none is found.
*
- * DESCRIPTION: Return the next peer node within the namespace. If Handle
- * is valid, Scope is ignored. Otherwise, the first node
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ * is valid, Scope is ignored. Otherwise, the first node
* within Scope is returned.
*
******************************************************************************/
@@ -97,8 +97,8 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
* RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
* none is found.
*
- * DESCRIPTION: Return the next peer node within the namespace. If Handle
- * is valid, Scope is ignored. Otherwise, the first node
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ * is valid, Scope is ignored. Otherwise, the first node
* within Scope is returned.
*
******************************************************************************/
@@ -305,7 +305,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
/*
* Depth first search: Attempt to go down another level in the
- * namespace if we are allowed to. Don't go any further if we have
+ * namespace if we are allowed to. Don't go any further if we have
* reached the caller specified maximum depth or if the user
* function has specified that the maximum depth has been reached.
*/
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 9692e6702333..d6a9f77972b6 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,16 +61,16 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
- * terminated by NULL. May be NULL
+ * terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
- * any). If NULL, no value is returned.
+ * any). If NULL, no value is returned.
* return_type - Expected type of return object
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
- * parameters if necessary. One of "Handle" or "Pathname" must
+ * parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
@@ -155,15 +155,15 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
- * terminated by NULL. May be NULL
+ * terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
- * any). If NULL, no value is returned.
+ * any). If NULL, no value is returned.
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
- * parameters if necessary. One of "Handle" or "Pathname" must
+ * parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
@@ -542,15 +542,15 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
acpi_status status;
struct acpi_namespace_node *node;
u32 flags;
- struct acpica_device_id *hid;
- struct acpica_device_id_list *cid;
+ struct acpi_pnp_device_id *hid;
+ struct acpi_pnp_device_id_list *cid;
u32 i;
u8 found;
int no_match;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(obj_handle);
@@ -656,7 +656,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
* DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
* starting (and ending) at the object specified by start_handle.
* The user_function is called whenever an object of type
- * Device is found. If the user function returns
+ * Device is found. If the user function returns
* a non-zero value, the search is terminated immediately and this
* value is returned to the caller.
*
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 08e9610b34ca..811c6f13f476 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -53,8 +53,8 @@
ACPI_MODULE_NAME("nsxfname")
/* Local prototypes */
-static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
- struct acpica_device_id *source,
+static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
+ struct acpi_pnp_device_id *source,
char *string_area);
/******************************************************************************
@@ -69,8 +69,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
* RETURN: Status
*
* DESCRIPTION: This routine will search for a caller specified name in the
- * name space. The caller can restrict the search region by
- * specifying a non NULL parent. The parent value is itself a
+ * name space. The caller can restrict the search region by
+ * specifying a non NULL parent. The parent value is itself a
* namespace handle.
*
******************************************************************************/
@@ -149,7 +149,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
* RETURN: Pointer to a string containing the fully qualified Name.
*
* DESCRIPTION: This routine returns the fully qualified name associated with
- * the Handle parameter. This and the acpi_pathname_to_handle are
+ * the Handle parameter. This and the acpi_pathname_to_handle are
* complementary functions.
*
******************************************************************************/
@@ -202,8 +202,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
- ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node),
- ACPI_NAME_SIZE);
+ ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node));
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
@@ -219,20 +218,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
*
* FUNCTION: acpi_ns_copy_device_id
*
- * PARAMETERS: dest - Pointer to the destination DEVICE_ID
- * source - Pointer to the source DEVICE_ID
+ * PARAMETERS: dest - Pointer to the destination PNP_DEVICE_ID
+ * source - Pointer to the source PNP_DEVICE_ID
* string_area - Pointer to where to copy the dest string
*
* RETURN: Pointer to the next string area
*
- * DESCRIPTION: Copy a single DEVICE_ID, including the string data.
+ * DESCRIPTION: Copy a single PNP_DEVICE_ID, including the string data.
*
******************************************************************************/
-static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
- struct acpica_device_id *source,
+static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
+ struct acpi_pnp_device_id *source,
char *string_area)
{
- /* Create the destination DEVICE_ID */
+
+ /* Create the destination PNP_DEVICE_ID */
dest->string = string_area;
dest->length = source->length;
@@ -256,8 +256,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
* namespace node and possibly by running several standard
* control methods (Such as in the case of a device.)
*
- * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA,
- * _ADR, _sx_w, and _sx_d methods.
+ * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB,
+ * _STA, _ADR, _sx_w, and _sx_d methods.
*
* Note: Allocates the return buffer, must be freed by the caller.
*
@@ -269,9 +269,10 @@ acpi_get_object_info(acpi_handle handle,
{
struct acpi_namespace_node *node;
struct acpi_device_info *info;
- struct acpica_device_id_list *cid_list = NULL;
- struct acpica_device_id *hid = NULL;
- struct acpica_device_id *uid = NULL;
+ struct acpi_pnp_device_id_list *cid_list = NULL;
+ struct acpi_pnp_device_id *hid = NULL;
+ struct acpi_pnp_device_id *uid = NULL;
+ struct acpi_pnp_device_id *sub = NULL;
char *next_id_string;
acpi_object_type type;
acpi_name name;
@@ -316,7 +317,7 @@ acpi_get_object_info(acpi_handle handle,
if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
/*
* Get extra info for ACPI Device/Processor objects only:
- * Run the Device _HID, _UID, and _CID methods.
+ * Run the Device _HID, _UID, _SUB, and _CID methods.
*
* Note: none of these methods are required, so they may or may
* not be present for this device. The Info->Valid bitfield is used
@@ -339,6 +340,14 @@ acpi_get_object_info(acpi_handle handle,
valid |= ACPI_VALID_UID;
}
+ /* Execute the Device._SUB method */
+
+ status = acpi_ut_execute_SUB(node, &sub);
+ if (ACPI_SUCCESS(status)) {
+ info_size += sub->length;
+ valid |= ACPI_VALID_SUB;
+ }
+
/* Execute the Device._CID method */
status = acpi_ut_execute_CID(node, &cid_list);
@@ -348,7 +357,7 @@ acpi_get_object_info(acpi_handle handle,
info_size +=
(cid_list->list_size -
- sizeof(struct acpica_device_id_list));
+ sizeof(struct acpi_pnp_device_id_list));
valid |= ACPI_VALID_CID;
}
}
@@ -418,16 +427,17 @@ acpi_get_object_info(acpi_handle handle,
next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids);
if (cid_list) {
- /* Point past the CID DEVICE_ID array */
+ /* Point past the CID PNP_DEVICE_ID array */
next_id_string +=
((acpi_size) cid_list->count *
- sizeof(struct acpica_device_id));
+ sizeof(struct acpi_pnp_device_id));
}
/*
- * Copy the HID, UID, and CIDs to the return buffer. The variable-length
- * strings are copied to the reserved area at the end of the buffer.
+ * Copy the HID, UID, SUB, and CIDs to the return buffer.
+ * The variable-length strings are copied to the reserved area
+ * at the end of the buffer.
*
* For HID and CID, check if the ID is a PCI Root Bridge.
*/
@@ -445,6 +455,11 @@ acpi_get_object_info(acpi_handle handle,
uid, next_id_string);
}
+ if (sub) {
+ next_id_string = acpi_ns_copy_device_id(&info->subsystem_id,
+ sub, next_id_string);
+ }
+
if (cid_list) {
info->compatible_id_list.count = cid_list->count;
info->compatible_id_list.list_size = cid_list->list_size;
@@ -481,6 +496,9 @@ acpi_get_object_info(acpi_handle handle,
if (uid) {
ACPI_FREE(uid);
}
+ if (sub) {
+ ACPI_FREE(sub);
+ }
if (cid_list) {
ACPI_FREE(cid_list);
}
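
Editor's note on the acpi_get_object_info() hunks above: the routine sizes the return buffer first (the fixed structure plus the lengths of each ID string, now including _SUB) and then packs the variable-length strings into the reserved area at the end of the buffer, as the updated comment says. The general size-then-pack pattern, sketched with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct id_ref { char *string; size_t length; };

struct device_info {
    struct id_ref hid;
    struct id_ref uid;
    /* variable-length string area follows the structure */
};

static struct device_info *build_info(const char *hid, const char *uid)
{
    size_t hid_len = strlen(hid) + 1;
    size_t uid_len = strlen(uid) + 1;

    /* Pass 1: compute total size = fixed struct + all string data */
    size_t size = sizeof(struct device_info) + hid_len + uid_len;

    struct device_info *info = malloc(size);
    if (!info) {
        return NULL;
    }

    /* Pass 2: copy strings into the reserved tail area and point at them */
    char *next = (char *)(info + 1);

    memcpy(next, hid, hid_len);
    info->hid.string = next;
    info->hid.length = hid_len;
    next += hid_len;

    memcpy(next, uid, uid_len);
    info->uid.string = next;
    info->uid.length = uid_len;

    return info;
}

int main(void)
{
    struct device_info *info = build_info("PNP0A03", "0");
    if (info) {
        printf("HID=%s UID=%s\n", info->hid.string, info->uid.string);
        free(info);
    }
    return 0;
}
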
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 6766fc4f088f..9d029dac6b64 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -220,8 +220,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
*
* RETURN: Status
*
- * DESCRIPTION: Return the next peer object within the namespace. If Handle is
- * valid, Scope is ignored. Otherwise, the first object within
+ * DESCRIPTION: Return the next peer object within the namespace. If Handle is
+ * valid, Scope is ignored. Otherwise, the first object within
* Scope is returned.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 844464c4f901..cb79e2d4d743 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -120,7 +120,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
* RETURN: Pointer to end-of-package +1
*
* DESCRIPTION: Get next package length and return a pointer past the end of
- * the package. Consumes the package length field
+ * the package. Consumes the package length field
*
******************************************************************************/
@@ -147,8 +147,8 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
* RETURN: Pointer to the start of the name string (pointer points into
* the AML.
*
- * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name
- * prefix characters. Set parser state to point past the string.
+ * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name
+ * prefix characters. Set parser state to point past the string.
* (Name is consumed from the AML.)
*
******************************************************************************/
@@ -220,7 +220,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
*
* DESCRIPTION: Get next name (if method call, return # of required args).
* Names are looked up in the internal namespace to determine
- * if the name represents a control method. If a method
+ * if the name represents a control method. If a method
* is found, the number of arguments to the method is returned.
* This information is critical for parsing to continue correctly.
*
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 799162c1b6df..5607805aab26 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -133,18 +133,46 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
case AML_CLASS_UNKNOWN:
- /* The opcode is unrecognized. Just skip unknown opcodes */
+ /* The opcode is unrecognized. Complain and skip unknown opcodes */
- ACPI_ERROR((AE_INFO,
- "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
- walk_state->opcode, walk_state->parser_state.aml,
- walk_state->aml_offset));
+ if (walk_state->pass_number == 2) {
+ ACPI_ERROR((AE_INFO,
+ "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header))));
- ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
+ ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48);
- /* Assume one-byte bad opcode */
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * This is executed for the disassembler only. Output goes
+ * to the disassembled ASL output file.
+ */
+ acpi_os_printf
+ ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header)));
+
+ /* Dump the context surrounding the invalid opcode */
+
+ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
+ aml - 16), 48, DB_BYTE_DISPLAY,
+ walk_state->aml_offset +
+ sizeof(struct acpi_table_header) -
+ 16);
+ acpi_os_printf(" */\n");
+#endif
+ }
+
+ /* Increment past one-byte or two-byte opcode */
walk_state->parser_state.aml++;
+ if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
+ walk_state->parser_state.aml++;
+ }
+
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
default:
@@ -519,11 +547,18 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
if ((op_info->class ==
AML_CLASS_EXECUTE) && (!arg)) {
ACPI_WARNING((AE_INFO,
- "Detected an unsupported executable opcode "
- "at module-level: [0x%.4X] at table offset 0x%.4X",
- op->common.aml_opcode,
- (u32)((aml_op_start - walk_state->parser_state.aml_start)
- + sizeof(struct acpi_table_header))));
+ "Unsupported module-level executable opcode "
+ "0x%.2X at table offset 0x%.4X",
+ op->common.
+ aml_opcode,
+ (u32)
+ (ACPI_PTR_DIFF
+ (aml_op_start,
+ walk_state->
+ parser_state.
+ aml_start) +
+ sizeof(struct
+ acpi_table_header))));
}
}
break;
@@ -843,8 +878,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
*op = NULL;
}
- ACPI_PREEMPTION_POINT();
-
return_ACPI_STATUS(AE_OK);
}
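
Editor's note on the psloop.c hunks above: the new code skips past an unrecognized opcode, advancing one byte in the normal case and two bytes when the decoded opcode exceeds 0xFF, which can only happen for the 0x5B extended-opcode prefix. A minimal sketch of that skip logic (illustrative; the surrounding parser state is simplified away):

#include <stdint.h>
#include <stdio.h>

#define AML_EXTENDED_PREFIX 0x5B

/* Return how many bytes the opcode at *aml occupies */
static unsigned opcode_length(const uint8_t *aml)
{
    return (aml[0] == AML_EXTENDED_PREFIX) ? 2u : 1u;
}

int main(void)
{
    const uint8_t stream[] = { 0x5B, 0x99, 0x70 };   /* an extended op, then a 1-byte op */
    unsigned offset = 0;

    /* Skip an unknown opcode: one byte, or two if it was an extended (0x5B xx) opcode */
    offset += opcode_length(&stream[offset]);
    printf("resume parsing at offset %u\n", offset);   /* prints 2 */
    return 0;
}
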
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index ed1d457bd5ca..1793d934aa30 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -59,7 +59,7 @@ static const u8 acpi_gbl_argument_count[] =
*
* DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>
* The name is a simple ascii string, the operand specifier is an
- * ascii string with one letter per operand. The letter specifies
+ * ascii string with one letter per operand. The letter specifies
* the operand type.
*
******************************************************************************/
@@ -183,7 +183,7 @@ static const u8 acpi_gbl_argument_count[] =
******************************************************************************/
/*
- * Master Opcode information table. A summary of everything we know about each
+ * Master Opcode information table. A summary of everything we know about each
* opcode, all in one place.
*/
const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
@@ -392,10 +392,12 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
+ AML_CONSTANT),
/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
+ AML_CONSTANT),
/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
@@ -495,7 +497,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_NSNODE | AML_NAMED | AML_DEFER),
/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD),
/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
@@ -519,12 +522,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD),
/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
- ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
- AML_DEFER),
+ ACPI_TYPE_LOCAL_BANK_FIELD,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD | AML_DEFER),
/* Internal opcodes that map to invalid AML opcodes */
@@ -632,7 +636,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE),
/* ACPI 3.0 opcodes */
@@ -695,7 +700,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/*
* This table is indexed by the second opcode of the extended opcode
- * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
*/
static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
/* 0 1 2 3 4 5 6 7 */
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 01985703bb98..2494caf47755 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -43,9 +43,9 @@
/*
* Parse the AML and build an operation tree as most interpreters,
- * like Perl, do. Parsing is done by hand rather than with a YACC
+ * like Perl, do. Parsing is done by hand rather than with a YACC
* generated parser to tightly constrain stack and dynamic memory
- * usage. At the same time, parsing is kept flexible and the code
+ * usage. At the same time, parsing is kept flexible and the code
* fairly compact by parsing based on a list of AML opcode
* templates in aml_op_info[]
*/
@@ -379,7 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
case AE_CTRL_FALSE:
/*
* Either an IF/WHILE Predicate was false or we encountered a BREAK
- * opcode. In both cases, we do not execute the rest of the
+ * opcode. In both cases, we do not execute the rest of the
* package; We simply close out the parent (finishing the walk of
* this branch of the tree) and continue execution at the parent
* level.
@@ -459,8 +459,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
/* Executing a control method - additional cleanup */
- acpi_ds_terminate_control_method(
- walk_state->method_desc, walk_state);
+ acpi_ds_terminate_control_method(walk_state->
+ method_desc,
+ walk_state);
}
acpi_ds_delete_walk_state(walk_state);
@@ -487,7 +488,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
acpi_gbl_current_walk_list = thread;
/*
- * Execute the walk loop as long as there is a valid Walk State. This
+ * Execute the walk loop as long as there is a valid Walk State. This
* handles nested control method invocations without recursion.
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 8736ad5f04d3..4137dcb352d1 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -108,7 +108,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
* RETURN: Pointer to the new Op, null on failure
*
* DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on
- * opcode. A cache of opcodes is available for the pure
+ * opcode. A cache of opcodes is available for the pure
* GENERIC_OP, since this is by far the most commonly used.
*
******************************************************************************/
@@ -164,7 +164,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
*
* RETURN: None.
*
- * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list
+ * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list
* or actually free it.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index de12469d1c9c..147feb6aa2a0 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -457,6 +457,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Get the number of vendor data bytes
*/
extra_struct_bytes = resource_length;
+
+ /*
+ * There is already one byte included in the minimum
+ * descriptor size. If there are extra struct bytes,
+ * subtract one from the count.
+ */
+ if (extra_struct_bytes) {
+ extra_struct_bytes--;
+ }
break;
case ACPI_RESOURCE_NAME_END_TAG:
@@ -601,7 +610,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
/*
* Calculate the size of the return buffer.
* The base size is the number of elements * the sizes of the
- * structures. Additional space for the strings is added below.
+ * structures. Additional space for the strings is added below.
* The minus one is to subtract the size of the u8 Source[1]
* member because it is added below.
*
@@ -664,8 +673,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
(*sub_object_list)->string.
length + 1);
} else {
- temp_size_needed +=
- acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
}
} else {
/*
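
Editor's note on the rscalc.c vendor-descriptor hunk above: the new comment explains the off-by-one, namely that the minimum structure size already includes one data byte, so the extra byte count taken from the descriptor length must be reduced by one. The arithmetic, spelled out with illustrative values:

#include <stdio.h>

int main(void)
{
    unsigned resource_length    = 5;   /* vendor data bytes in the AML descriptor */
    unsigned extra_struct_bytes = resource_length;

    /* One data byte is already counted in the minimum structure size */
    if (extra_struct_bytes) {
        extra_struct_bytes--;
    }

    printf("extra bytes beyond the minimum structure: %u\n", extra_struct_bytes);
    return 0;
}
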
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 46b5324b22d6..8b64db9a3fd2 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -109,7 +109,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
ACPI_ERROR((AE_INFO,
"Invalid/unsupported resource descriptor: Type 0x%2.2X",
resource_index));
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
/* Convert the AML byte stream resource to a local resource struct */
@@ -200,7 +200,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
ACPI_ERROR((AE_INFO,
"Invalid/unsupported resource descriptor: Type 0x%2.2X",
resource->type));
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
status = acpi_rs_convert_resource_to_aml(resource,
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 57deae166577..77d1db29a725 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -77,7 +77,7 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
- ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
+ ACPI_MOVE_NAME(header.signature, signature);
ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 70f9d787c82c..f540ae462925 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -526,6 +526,8 @@ void acpi_tb_terminate(void)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index b6cea30da638..285e24b97382 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -354,7 +354,7 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
sum = (u8) (sum + *(buffer++));
}
- return sum;
+ return (sum);
}
/*******************************************************************************
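
Editor's note on the tbutils.c hunk above: acpi_tb_checksum() simply sums the table bytes modulo 256, and a valid ACPI table is constructed so that this sum, including the checksum field itself, is zero. A self-contained sketch of that verification (illustrative field offset and table contents):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 8-bit additive checksum over a buffer */
static uint8_t checksum8(const uint8_t *buffer, uint32_t length)
{
    uint8_t sum = 0;

    while (length--) {
        sum = (uint8_t)(sum + *buffer++);
    }
    return sum;
}

int main(void)
{
    uint8_t table[16];

    memset(table, 0x11, sizeof(table));
    table[9] = 0;                                   /* pretend byte 9 is the checksum field */
    table[9] = (uint8_t)(0x100 - checksum8(table, sizeof(table)));

    /* A correctly checksummed table sums to zero over its full length */
    printf("table valid: %s\n", checksum8(table, sizeof(table)) == 0 ? "yes" : "no");
    return 0;
}
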
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 21101262e47a..f5632780421d 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -236,7 +236,7 @@ acpi_get_table_header(char *signature,
sizeof(struct
acpi_table_header));
if (!header) {
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
ACPI_MEMCPY(out_table_header, header,
sizeof(struct acpi_table_header));
@@ -244,7 +244,7 @@ acpi_get_table_header(char *signature,
sizeof(struct
acpi_table_header));
} else {
- return AE_NOT_FOUND;
+ return (AE_NOT_FOUND);
}
} else {
ACPI_MEMCPY(out_table_header,
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index f87cc63e69a1..a5e1e4e47098 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -211,7 +211,7 @@ static acpi_status acpi_tb_load_namespace(void)
* DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must
* be a valid ACPI table with a valid ACPI table header.
* Note1: Mainly intended to support hotplug addition of SSDTs.
- * Note2: Does not copy the incoming table. User is reponsible
+ * Note2: Does not copy the incoming table. User is responsible
* to ensure that the table is not deleted or unmapped.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 74e720800037..28f330230f99 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -67,7 +67,6 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
- ACPI_FUNCTION_ENTRY();
/*
* The signature and checksum must both be correct
@@ -108,7 +107,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* RETURN: Status, RSDP physical address
*
* DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
- * pointer structure. If it is found, set *RSDP to point to it.
+ * pointer structure. If it is found, set *RSDP to point to it.
*
* NOTE1: The RSDP must be either in the first 1K of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
new file mode 100644
index 000000000000..e1d40ed26390
--- /dev/null
+++ b/drivers/acpi/acpica/utcache.c
@@ -0,0 +1,323 @@
+/******************************************************************************
+ *
+ * Module Name: utcache - local cache allocation routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utcache")
+
+#ifdef ACPI_USE_LOCAL_CACHE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_create_cache
+ *
+ * PARAMETERS: cache_name - Ascii name for the cache
+ * object_size - Size of each cached object
+ * max_depth - Maximum depth of the cache (in objects)
+ * return_cache - Where the new cache object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a cache object
+ *
+ ******************************************************************************/
+acpi_status
+acpi_os_create_cache(char *cache_name,
+ u16 object_size,
+ u16 max_depth, struct acpi_memory_list ** return_cache)
+{
+ struct acpi_memory_list *cache;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache_name || !return_cache || (object_size < 16)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Create the cache object */
+
+ cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
+ if (!cache) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Populate the cache object and return it */
+
+ ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+ cache->link_offset = 8;
+ cache->list_name = cache_name;
+ cache->object_size = object_size;
+ cache->max_depth = max_depth;
+
+ *return_cache = cache;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_purge_cache
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+{
+ char *next;
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Walk the list of objects in this cache */
+
+ while (cache->list_head) {
+
+ /* Delete and unlink one cached state object */
+
+ next = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)cache->
+ list_head)[cache->
+ link_offset])));
+ ACPI_FREE(cache->list_head);
+
+ cache->list_head = next;
+ cache->current_depth--;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_delete_cache
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache and delete the
+ * cache object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Purge all objects in the cache */
+
+ status = acpi_os_purge_cache(cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Now we can delete the cache object */
+
+ acpi_os_free(cache);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_release_object
+ *
+ * PARAMETERS: cache - Handle to cache object
+ * object - The object to be released
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release an object to the specified cache. If cache is full,
+ * the object is deleted.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache || !object) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* If cache is full, just free this object */
+
+ if (cache->current_depth >= cache->max_depth) {
+ ACPI_FREE(object);
+ ACPI_MEM_TRACKING(cache->total_freed++);
+ }
+
+ /* Otherwise put this object back into the cache */
+
+ else {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Mark the object as cached */
+
+ ACPI_MEMSET(object, 0xCA, cache->object_size);
+ ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
+
+ /* Put the object at the head of the cache list */
+
+ *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)object)[cache->
+ link_offset]))) =
+ cache->list_head;
+ cache->list_head = object;
+ cache->current_depth++;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_acquire_object
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: the acquired object. NULL on error
+ *
+ * DESCRIPTION: Get an object from the specified cache. If cache is empty,
+ * the object is allocated.
+ *
+ ******************************************************************************/
+
+void *acpi_os_acquire_object(struct acpi_memory_list *cache)
+{
+ acpi_status status;
+ void *object;
+
+ ACPI_FUNCTION_NAME(os_acquire_object);
+
+ if (!cache) {
+ return (NULL);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ ACPI_MEM_TRACKING(cache->requests++);
+
+ /* Check the cache first */
+
+ if (cache->list_head) {
+
+ /* There is an object available, use it */
+
+ object = cache->list_head;
+ cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)
+ object)[cache->
+ link_offset])));
+
+ cache->current_depth--;
+
+ ACPI_MEM_TRACKING(cache->hits++);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p from %s cache\n", object,
+ cache->list_name));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ /* Clear (zero) the previously used Object */
+
+ ACPI_MEMSET(object, 0, cache->object_size);
+ } else {
+ /* The cache is empty, create a new object */
+
+ ACPI_MEM_TRACKING(cache->total_allocated++);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ if ((cache->total_allocated - cache->total_freed) >
+ cache->max_occupied) {
+ cache->max_occupied =
+ cache->total_allocated - cache->total_freed;
+ }
+#endif
+
+ /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ object = ACPI_ALLOCATE_ZEROED(cache->object_size);
+ if (!object) {
+ return (NULL);
+ }
+ }
+
+ return (object);
+}
+#endif /* ACPI_USE_LOCAL_CACHE */
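The new utcache.c supplies the acpi_os_* cache hooks locally when ACPI_USE_LOCAL_CACHE is defined: acquire pops a zeroed object off the free list or falls back to ACPI_ALLOCATE_ZEROED, release pushes the object back (or simply frees it once max_depth is reached), and delete purges the list before freeing the cache itself. A usage sketch under those assumptions; the object type and cache name below are invented for illustration, and object_size must be at least 16 bytes:

struct my_obj {				/* any object of at least 16 bytes */
	u64 payload[2];
};

static acpi_status cache_demo(void)
{
	struct acpi_memory_list *cache;
	struct my_obj *obj;
	acpi_status status;

	status = acpi_os_create_cache("MyObjCache", sizeof(struct my_obj),
				      16, &cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	obj = acpi_os_acquire_object(cache);	/* zeroed on return */
	if (!obj) {
		(void)acpi_os_delete_cache(cache);
		return (AE_NO_MEMORY);
	}

	/* ... use obj ... */

	(void)acpi_os_release_object(cache, obj);	/* back on the free list */
	return (acpi_os_delete_cache(cache));		/* purge, then free the cache */
}
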
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index e810894149ae..5d95166245ae 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -47,8 +47,9 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
+
#ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id;
+static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF;
static char *acpi_gbl_fn_entry_str = "----Entry";
static char *acpi_gbl_fn_exit_str = "----Exit-";
@@ -109,7 +110,7 @@ void acpi_ut_track_stack_ptr(void)
* RETURN: Updated pointer to the function name
*
* DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- * This allows compiler macros such as __func__ to be used
+ * This allows compiler macros such as __FUNCTION__ to be used
* with no change to the debug output.
*
******************************************************************************/
@@ -222,7 +223,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print)
*
* RETURN: None
*
- * DESCRIPTION: Print message with no headers. Has same interface as
+ * DESCRIPTION: Print message with no headers. Has same interface as
* debug_print so that the same macros can be used.
*
******************************************************************************/
@@ -258,7 +259,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print_raw)
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -290,7 +291,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -299,6 +300,7 @@ acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
const char *module_name, u32 component_id, void *pointer)
{
+
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
@@ -319,7 +321,7 @@ acpi_ut_trace_ptr(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -350,7 +352,7 @@ acpi_ut_trace_str(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -380,7 +382,7 @@ acpi_ut_trace_u32(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -412,7 +414,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit status also.
*
******************************************************************************/
@@ -453,7 +455,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
@@ -485,7 +487,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
@@ -511,7 +513,7 @@ acpi_ut_ptr_exit(u32 line_number,
* PARAMETERS: buffer - Buffer to dump
* count - Amount to dump, in bytes
* display - BYTE, WORD, DWORD, or QWORD display
- * component_ID - Caller's component ID
+ * offset - Beginning buffer offset (display only)
*
* RETURN: None
*
@@ -519,7 +521,7 @@ acpi_ut_ptr_exit(u32 line_number,
*
******************************************************************************/
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
+void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
{
u32 i = 0;
u32 j;
@@ -541,7 +543,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
/* Print current offset */
- acpi_os_printf("%6.4X: ", i);
+ acpi_os_printf("%6.4X: ", (base_offset + i));
/* Print 16 hex chars */
@@ -623,7 +625,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_dump_buffer
+ * FUNCTION: acpi_ut_debug_dump_buffer
*
* PARAMETERS: buffer - Buffer to dump
* count - Amount to dump, in bytes
@@ -636,7 +638,8 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
*
******************************************************************************/
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+void
+acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id)
{
/* Only dump the buffer if tracing is enabled */
@@ -646,5 +649,5 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
return;
}
- acpi_ut_dump_buffer2(buffer, count, display);
+ acpi_ut_dump_buffer(buffer, count, display, 0);
}
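After the rename, acpi_ut_dump_buffer() is the unconditional hex/ASCII dump (its new last argument is only added to the printed offsets) and acpi_ut_debug_dump_buffer() is the wrapper that dumps only when table tracing is enabled for the caller's component. A call sketch, assuming the DB_BYTE_DISPLAY and ACPI_TABLES constants from the standard ACPICA headers:

static void dump_sketch(u8 *buffer)
{
	/* Unconditional dump of 64 bytes, offsets printed starting at 0x100 */
	acpi_ut_dump_buffer(buffer, 64, DB_BYTE_DISPLAY, 0x100);

	/* Dumped only when table tracing is enabled for the ACPI_TABLES component */
	acpi_ut_debug_dump_buffer(buffer, 64, DB_BYTE_DISPLAY, ACPI_TABLES);
}
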
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 5d84e1954575..774c3aefbf5d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -67,10 +67,10 @@ ACPI_MODULE_NAME("utids")
******************************************************************************/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id)
+ struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
- struct acpica_device_id *hid;
+ struct acpi_pnp_device_id *hid;
u32 length;
acpi_status status;
@@ -94,16 +94,17 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
/* Allocate a buffer for the HID */
hid =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size) length);
if (!hid) {
status = AE_NO_MEMORY;
goto cleanup;
}
- /* Area for the string starts after DEVICE_ID struct */
+ /* Area for the string starts after PNP_DEVICE_ID struct */
- hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id));
+ hid->string =
+ ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id));
/* Convert EISAID to a string or simply copy existing string */
@@ -126,6 +127,73 @@ cleanup:
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_execute_SUB
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the _SUB is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _SUB control method that returns the subsystem
+ * ID of the device. The _SUB value is always a string containing
+ * either a valid PNP or ACPI ID.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_pnp_device_id *sub;
+ u32 length;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_SUB);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB,
+ ACPI_BTYPE_STRING, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the size of the String to be returned, includes null terminator */
+
+ length = obj_desc->string.length + 1;
+
+ /* Allocate a buffer for the SUB */
+
+ sub =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
+ (acpi_size) length);
+ if (!sub) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for the string starts after PNP_DEVICE_ID struct */
+
+ sub->string =
+ ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id));
+
+ /* Simply copy existing string */
+
+ ACPI_STRCPY(sub->string, obj_desc->string.pointer);
+ sub->length = length;
+ *return_id = sub;
+
+ cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_execute_UID
*
* PARAMETERS: device_node - Node for the device
@@ -144,10 +212,10 @@ cleanup:
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id)
+ struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
- struct acpica_device_id *uid;
+ struct acpi_pnp_device_id *uid;
u32 length;
acpi_status status;
@@ -171,16 +239,17 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
/* Allocate a buffer for the UID */
uid =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size) length);
if (!uid) {
status = AE_NO_MEMORY;
goto cleanup;
}
- /* Area for the string starts after DEVICE_ID struct */
+ /* Area for the string starts after PNP_DEVICE_ID struct */
- uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id));
+ uid->string =
+ ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id));
/* Convert an Integer to string, or just copy an existing string */
@@ -226,11 +295,11 @@ cleanup:
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
- struct acpica_device_id_list **return_cid_list)
+ struct acpi_pnp_device_id_list **return_cid_list)
{
union acpi_operand_object **cid_objects;
union acpi_operand_object *obj_desc;
- struct acpica_device_id_list *cid_list;
+ struct acpi_pnp_device_id_list *cid_list;
char *next_id_string;
u32 string_area_size;
u32 length;
@@ -288,11 +357,12 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/*
* Now that we know the length of the CIDs, allocate return buffer:
* 1) Size of the base structure +
- * 2) Size of the CID DEVICE_ID array +
+ * 2) Size of the CID PNP_DEVICE_ID array +
* 3) Size of the actual CID strings
*/
- cid_list_size = sizeof(struct acpica_device_id_list) +
- ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size;
+ cid_list_size = sizeof(struct acpi_pnp_device_id_list) +
+ ((count - 1) * sizeof(struct acpi_pnp_device_id)) +
+ string_area_size;
cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
if (!cid_list) {
@@ -300,10 +370,10 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
goto cleanup;
}
- /* Area for CID strings starts after the CID DEVICE_ID array */
+ /* Area for CID strings starts after the CID PNP_DEVICE_ID array */
next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
- ((acpi_size) count * sizeof(struct acpica_device_id));
+ ((acpi_size) count * sizeof(struct acpi_pnp_device_id));
/* Copy/convert the CIDs to the return buffer */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index d88a8aaab2a6..49563674833a 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -81,7 +81,7 @@ typedef union uint64_overlay {
* RETURN: Status (Checks for divide-by-zero)
*
* DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
- * divide and modulo. The result is a 64-bit quotient and a
+ * divide and modulo. The result is a 64-bit quotient and a
* 32-bit remainder.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 33c6cf7ff467..9286a69eb9aa 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -201,8 +199,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
*/
acpi_gbl_owner_id_mask[j] |= (1 << k);
- acpi_gbl_last_owner_id_index = (u8) j;
- acpi_gbl_next_owner_id_offset = (u8) (k + 1);
+ acpi_gbl_last_owner_id_index = (u8)j;
+ acpi_gbl_next_owner_id_offset = (u8)(k + 1);
/*
* Construct encoded ID from the index and bit position
@@ -252,7 +250,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* control method or unloading a table. Either way, we would
* ignore any error anyway.
*
- * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
*
******************************************************************************/
@@ -339,6 +337,73 @@ void acpi_ut_strupr(char *src_string)
return;
}
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to lowercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strlwr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, lowercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOLOWER(*string);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_stricmp
+ *
+ * PARAMETERS: string1 - first string to compare
+ * string2 - second string to compare
+ *
+ * RETURN: int that signifies string relationship. Zero means strings
+ * are equal.
+ *
+ * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
+ * strings with no case sensitivity)
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+ int c1;
+ int c2;
+
+ do {
+ c1 = tolower((int)*string1);
+ c2 = tolower((int)*string2);
+
+ string1++;
+ string2++;
+ }
+ while ((c1 == c2) && (c1));
+
+ return (c1 - c2);
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_print_string
@@ -469,8 +534,8 @@ u32 acpi_ut_dword_byte_swap(u32 value)
* RETURN: None
*
* DESCRIPTION: Set the global integer bit width based upon the revision
- * of the DSDT. For Revision 1 and 0, Integers are 32 bits.
- * For Revision 2 and above, Integers are 64 bits. Yes, this
+ * of the DSDT. For Revision 1 and 0, Integers are 32 bits.
+ * For Revision 2 and above, Integers are 64 bits. Yes, this
* makes a difference.
*
******************************************************************************/
@@ -606,7 +671,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position)
*
* RETURN: TRUE if the name is valid, FALSE otherwise
*
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
+ * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
* 1) Upper case alpha
* 2) numeric
* 3) underscore
@@ -638,29 +703,59 @@ u8 acpi_ut_valid_acpi_name(u32 name)
* RETURN: Repaired version of the name
*
* DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
- * return the new name.
+ * return the new name. NOTE: the Name parameter must reside in
+ * read/write memory, cannot be a const.
+ *
+ * An ACPI Name must consist of valid ACPI characters. We will repair the name
+ * if necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
*
******************************************************************************/
-acpi_name acpi_ut_repair_name(char *name)
+void acpi_ut_repair_name(char *name)
{
- u32 i;
- char new_name[ACPI_NAME_SIZE];
+ u32 i;
+ u8 found_bad_char = FALSE;
+ u32 original_name;
+
+ ACPI_FUNCTION_NAME(ut_repair_name);
+
+ ACPI_MOVE_NAME(&original_name, name);
+
+ /* Check each character in the name */
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- new_name[i] = name[i];
+ if (acpi_ut_valid_acpi_char(name[i], i)) {
+ continue;
+ }
/*
* Replace a bad character with something printable, yet technically
* still invalid. This prevents any collisions with existing "good"
* names in the namespace.
*/
- if (!acpi_ut_valid_acpi_char(name[i], i)) {
- new_name[i] = '*';
- }
+ name[i] = '*';
+ found_bad_char = TRUE;
}
- return (*(u32 *) new_name);
+ if (found_bad_char) {
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ name));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ name));
+ }
+ }
}
/*******************************************************************************
@@ -681,7 +776,7 @@ acpi_name acpi_ut_repair_name(char *name)
*
******************************************************************************/
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
{
u32 this_digit = 0;
u64 return_value = 0;
@@ -754,14 +849,14 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
/* Convert ASCII 0-9 to Decimal value */
- this_digit = ((u8) * string) - '0';
+ this_digit = ((u8)*string) - '0';
} else if (base == 10) {
/* Digit is out of range; possible in to_integer case only */
term = 1;
} else {
- this_digit = (u8) ACPI_TOUPPER(*string);
+ this_digit = (u8)ACPI_TOUPPER(*string);
if (ACPI_IS_XDIGIT((char)this_digit)) {
/* Convert ASCII Hex char to value */
@@ -788,8 +883,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
valid_digits++;
- if (sign_of0x && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
/*
* This is to_integer operation case.
* No any restrictions for string-to-integer conversion,
@@ -800,7 +896,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
/* Divide the digit into the correct position */
- (void)acpi_ut_short_divide((dividend - (u64) this_digit),
+ (void)acpi_ut_short_divide((dividend - (u64)this_digit),
base, &quotient, NULL);
if (return_value > quotient) {
@@ -890,7 +986,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
******************************************************************************/
acpi_status
-acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
+acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void *target_object,
acpi_pkg_callback walk_callback, void *context)
{
@@ -917,10 +1013,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
/*
* Check for:
- * 1) An uninitialized package element. It is completely
+ * 1) An uninitialized package element. It is completely
* legal to declare a package and leave it uninitialized
* 2) Not an internal object - can be a namespace node instead
- * 3) Any type other than a package. Packages are handled in else
+ * 3) Any type other than a package. Packages are handled in else
* case below.
*/
if ((!this_source_obj) ||
@@ -939,7 +1035,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
state->pkg.source_object->package.count) {
/*
* We've handled all of the objects at this level, This means
- * that we have just completed a package. That package may
+ * that we have just completed a package. That package may
* have contained one or more packages itself.
*
* Delete this state and pop the previous state (package).
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 296baa676bc5..5ccf57c0d87e 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -193,6 +193,8 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].mutex = NULL;
acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+ return_VOID;
}
/*******************************************************************************
@@ -226,9 +228,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
/*
* Mutex debug code, for internal debugging only.
*
- * Deadlock prevention. Check if this thread owns any mutexes of value
- * greater than or equal to this one. If so, the thread has violated
- * the mutex ordering rule. This indicates a coding error somewhere in
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than or equal to this one. If so, the thread has violated
+ * the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
@@ -319,9 +321,9 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
/*
* Mutex debug code, for internal debugging only.
*
- * Deadlock prevention. Check if this thread owns any mutexes of value
- * greater than this one. If so, the thread has violated the mutex
- * ordering rule. This indicates a coding error somewhere in
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than this one. If so, the thread has violated the mutex
+ * ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 655f0799a391..5c52ca78f6fa 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -77,7 +77,7 @@ acpi_ut_get_element_length(u8 object_type,
*
* NOTE: We always allocate the worst-case object descriptor because
* these objects are cached, and we want them to be
- * one-size-satisifies-any-request. This in itself may not be
+ * one-size-satisifies-any-request. This in itself may not be
* the most memory efficient, but the efficiency of the object
* cache should more than make up for this!
*
@@ -370,9 +370,9 @@ u8 acpi_ut_valid_internal_object(void *object)
* line_number - Caller's line number (for error output)
* component_id - Caller's component ID (for error output)
*
- * RETURN: Pointer to newly allocated object descriptor. Null on error
+ * RETURN: Pointer to newly allocated object descriptor. Null on error
*
- * DESCRIPTION: Allocate a new object descriptor. Gracefully handle
+ * DESCRIPTION: Allocate a new object descriptor. Gracefully handle
* error conditions.
*
******************************************************************************/
@@ -554,7 +554,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/*
* Account for the space required by the object rounded up to the next
- * multiple of the machine word size. This keeps each object aligned
+ * multiple of the machine word size. This keeps each object aligned
* on a machine word boundary. (preventing alignment faults on some
* machines.)
*/
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a1c988260073..cee0473ba813 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -147,7 +147,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
*
* RETURN: The new state object. NULL on failure.
*
- * DESCRIPTION: Create a generic state object. Attempt to obtain one from
+ * DESCRIPTION: Create a generic state object. Attempt to obtain one from
* the global state cache; If none available, create a new one.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
new file mode 100644
index 000000000000..a424a9e3fea4
--- /dev/null
+++ b/drivers/acpi/acpica/uttrack.c
@@ -0,0 +1,692 @@
+/******************************************************************************
+ *
+ * Module Name: uttrack - Memory allocation tracking routines (debug only)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * These procedures are used for tracking memory leaks in the subsystem, and
+ * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set.
+ *
+ * Each memory allocation is tracked via a doubly linked list. Each
+ * element contains the caller's component, module name, function name, and
+ * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call
+ * acpi_ut_track_allocation to add an element to the list; deletion
+ * occurs in the body of acpi_ut_free.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("uttrack")
+
+/* Local prototypes */
+static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
+ acpi_debug_mem_block
+ *allocation);
+
+static acpi_status
+acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
+ acpi_size size,
+ u8 alloc_type,
+ u32 component, const char *module, u32 line);
+
+static acpi_status
+acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
+ u32 component, const char *module, u32 line);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_list
+ *
+ * PARAMETERS: cache_name - Ascii name for the cache
+ * object_size - Size of each cached object
+ * return_cache - Where the new cache object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a local memory list for tracking purposes
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+ u16 object_size, struct acpi_memory_list **return_cache)
+{
+ struct acpi_memory_list *cache;
+
+ cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
+ if (!cache) {
+ return (AE_NO_MEMORY);
+ }
+
+ ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+
+ cache->list_name = list_name;
+ cache->object_size = object_size;
+
+ *return_cache = cache;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_and_track
+ *
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: The subsystem's equivalent of malloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_and_track(acpi_size size,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *allocation;
+ acpi_status status;
+
+ allocation =
+ acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
+ component, module, line);
+ if (!allocation) {
+ return (NULL);
+ }
+
+ status = acpi_ut_track_allocation(allocation, size,
+ ACPI_MEM_MALLOC, component, module,
+ line);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_free(allocation);
+ return (NULL);
+ }
+
+ acpi_gbl_global_list->total_allocated++;
+ acpi_gbl_global_list->total_size += (u32)size;
+ acpi_gbl_global_list->current_total_size += (u32)size;
+ if (acpi_gbl_global_list->current_total_size >
+ acpi_gbl_global_list->max_occupied) {
+ acpi_gbl_global_list->max_occupied =
+ acpi_gbl_global_list->current_total_size;
+ }
+
+ return ((void *)&allocation->user_space);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_zeroed_and_track
+ *
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+ u32 component,
+ const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *allocation;
+ acpi_status status;
+
+ allocation =
+ acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
+ component, module, line);
+ if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_ERROR((module, line,
+ "Could not allocate size %u", (u32)size));
+ return (NULL);
+ }
+
+ status = acpi_ut_track_allocation(allocation, size,
+ ACPI_MEM_CALLOC, component, module,
+ line);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_free(allocation);
+ return (NULL);
+ }
+
+ acpi_gbl_global_list->total_allocated++;
+ acpi_gbl_global_list->total_size += (u32)size;
+ acpi_gbl_global_list->current_total_size += (u32)size;
+ if (acpi_gbl_global_list->current_total_size >
+ acpi_gbl_global_list->max_occupied) {
+ acpi_gbl_global_list->max_occupied =
+ acpi_gbl_global_list->current_total_size;
+ }
+
+ return ((void *)&allocation->user_space);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_free_and_track
+ *
+ * PARAMETERS: allocation - Address of the memory to deallocate
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Frees the memory at Allocation
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_free_and_track(void *allocation,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *debug_block;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_free, allocation);
+
+ if (NULL == allocation) {
+ ACPI_ERROR((module, line, "Attempt to delete a NULL address"));
+
+ return_VOID;
+ }
+
+ debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
+ (((char *)allocation) -
+ sizeof(struct acpi_debug_mem_header)));
+
+ acpi_gbl_global_list->total_freed++;
+ acpi_gbl_global_list->current_total_size -= debug_block->size;
+
+ status = acpi_ut_remove_allocation(debug_block,
+ component, module, line);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
+ }
+
+ acpi_os_free(debug_block);
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_find_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ *
+ * RETURN: Three cases:
+ * 1) List is empty, NULL is returned.
+ * 2) Element was found. Returns Allocation parameter.
+ * 3) Element was not found. Returns position where it should be
+ * inserted into the list.
+ *
+ * DESCRIPTION: Searches for an element in the global allocation tracking list.
+ * If the element is not found, returns the location within the
+ * list where the element should be inserted.
+ *
+ * Note: The list is ordered by larger-to-smaller addresses.
+ *
+ * This global list is used to detect memory leaks in ACPICA as
+ * well as other issues such as an attempt to release the same
+ * internal object more than once. Although expensive as far
+ * as cpu time, this list is much more helpful for finding these
+ * types of issues than using memory leak detectors outside of
+ * the ACPICA code.
+ *
+ ******************************************************************************/
+
+static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
+ acpi_debug_mem_block
+ *allocation)
+{
+ struct acpi_debug_mem_block *element;
+
+ element = acpi_gbl_global_list->list_head;
+ if (!element) {
+ return (NULL);
+ }
+
+ /*
+ * Search for the address.
+ *
+ * Note: List is ordered by larger-to-smaller addresses, on the
+ * assumption that a new allocation usually has a larger address
+ * than previous allocations.
+ */
+ while (element > allocation) {
+
+ /* Check for end-of-list */
+
+ if (!element->next) {
+ return (element);
+ }
+
+ element = element->next;
+ }
+
+ if (element == allocation) {
+ return (element);
+ }
+
+ return (element->previous);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_track_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ * size - Size of the allocation
+ * alloc_type - MEM_MALLOC or MEM_CALLOC
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Inserts an element into the global allocation tracking list.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
+ acpi_size size,
+ u8 alloc_type,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_memory_list *mem_list;
+ struct acpi_debug_mem_block *element;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ mem_list = acpi_gbl_global_list;
+ status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Search the global list for this address to make sure it is not
+ * already present. This will catch several kinds of problems.
+ */
+ element = acpi_ut_find_allocation(allocation);
+ if (element == allocation) {
+ ACPI_ERROR((AE_INFO,
+ "UtTrackAllocation: Allocation (%p) already present in global list!",
+ allocation));
+ goto unlock_and_exit;
+ }
+
+ /* Fill in the instance data */
+
+ allocation->size = (u32)size;
+ allocation->alloc_type = alloc_type;
+ allocation->component = component;
+ allocation->line = line;
+
+ ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
+ allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+
+ if (!element) {
+
+ /* Insert at list head */
+
+ if (mem_list->list_head) {
+ ((struct acpi_debug_mem_block *)(mem_list->list_head))->
+ previous = allocation;
+ }
+
+ allocation->next = mem_list->list_head;
+ allocation->previous = NULL;
+
+ mem_list->list_head = allocation;
+ } else {
+ /* Insert after element */
+
+ allocation->next = element->next;
+ allocation->previous = element;
+
+ if (element->next) {
+ (element->next)->previous = allocation;
+ }
+
+ element->next = allocation;
+ }
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Deletes an element from the global allocation tracking list.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_memory_list *mem_list;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_remove_allocation);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ mem_list = acpi_gbl_global_list;
+ if (NULL == mem_list->list_head) {
+
+ /* No allocations! */
+
+ ACPI_ERROR((module, line,
+ "Empty allocation list, nothing to free!"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink */
+
+ if (allocation->previous) {
+ (allocation->previous)->next = allocation->next;
+ } else {
+ mem_list->list_head = allocation->next;
+ }
+
+ if (allocation->next) {
+ (allocation->next)->previous = allocation->previous;
+ }
+
+ /* Mark the segment as deleted */
+
+ ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
+ allocation->size));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_allocation_info
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print some info about the outstanding allocations.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_allocation_info(void)
+{
+/*
+ struct acpi_memory_list *mem_list;
+*/
+
+ ACPI_FUNCTION_TRACE(ut_dump_allocation_info);
+
+/*
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Current allocations",
+ mem_list->current_count,
+ ROUND_UP_TO_1K (mem_list->current_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
+ mem_list->max_concurrent_count,
+ ROUND_UP_TO_1K (mem_list->max_concurrent_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
+ running_object_count,
+ ROUND_UP_TO_1K (running_object_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
+ running_alloc_count,
+ ROUND_UP_TO_1K (running_alloc_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Current Nodes",
+ acpi_gbl_current_node_count,
+ ROUND_UP_TO_1K (acpi_gbl_current_node_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Max Nodes",
+ acpi_gbl_max_concurrent_node_count,
+ ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
+ sizeof (struct acpi_namespace_node)))));
+*/
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_allocations
+ *
+ * PARAMETERS: component - Component(s) to dump info for.
+ * module - Module to dump info for. NULL means all.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print a list of all outstanding allocations.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_allocations(u32 component, const char *module)
+{
+ struct acpi_debug_mem_block *element;
+ union acpi_descriptor *descriptor;
+ u32 num_outstanding = 0;
+ u8 descriptor_type;
+
+ ACPI_FUNCTION_TRACE(ut_dump_allocations);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_VOID;
+ }
+
+ /*
+ * Walk the allocation list.
+ */
+ if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
+ return_VOID;
+ }
+
+ element = acpi_gbl_global_list->list_head;
+ while (element) {
+ if ((element->component & component) &&
+ ((module == NULL)
+ || (0 == ACPI_STRCMP(module, element->module)))) {
+ descriptor =
+ ACPI_CAST_PTR(union acpi_descriptor,
+ &element->user_space);
+
+ if (element->size <
+ sizeof(struct acpi_common_descriptor)) {
+ acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ "[Not a Descriptor - too small]\n",
+ descriptor, element->size,
+ element->module, element->line);
+ } else {
+ /* Ignore allocated objects that are in a cache */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
+ ACPI_DESC_TYPE_CACHED) {
+ acpi_os_printf
+ ("%p Length 0x%04X %9.9s-%u [%s] ",
+ descriptor, element->size,
+ element->module, element->line,
+ acpi_ut_get_descriptor_name
+ (descriptor));
+
+ /* Validate the descriptor type using Type field and length */
+
+ descriptor_type = 0; /* Not a valid descriptor type */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE
+ (descriptor)) {
+ case ACPI_DESC_TYPE_OPERAND:
+ if (element->size ==
+ sizeof(union
+ acpi_operand_object))
+ {
+ descriptor_type =
+ ACPI_DESC_TYPE_OPERAND;
+ }
+ break;
+
+ case ACPI_DESC_TYPE_PARSER:
+ if (element->size ==
+ sizeof(union
+ acpi_parse_object)) {
+ descriptor_type =
+ ACPI_DESC_TYPE_PARSER;
+ }
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+ if (element->size ==
+ sizeof(struct
+ acpi_namespace_node))
+ {
+ descriptor_type =
+ ACPI_DESC_TYPE_NAMED;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Display additional info for the major descriptor types */
+
+ switch (descriptor_type) {
+ case ACPI_DESC_TYPE_OPERAND:
+ acpi_os_printf
+ ("%12.12s RefCount 0x%04X\n",
+ acpi_ut_get_type_name
+ (descriptor->object.common.
+ type),
+ descriptor->object.common.
+ reference_count);
+ break;
+
+ case ACPI_DESC_TYPE_PARSER:
+ acpi_os_printf
+ ("AmlOpcode 0x%04hX\n",
+ descriptor->op.asl.
+ aml_opcode);
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+ acpi_os_printf("%4.4s\n",
+ acpi_ut_get_node_name
+ (&descriptor->
+ node));
+ break;
+
+ default:
+ acpi_os_printf("\n");
+ break;
+ }
+ }
+ }
+
+ num_outstanding++;
+ }
+
+ element = element->next;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+
+ /* Print summary */
+
+ if (!num_outstanding) {
+ ACPI_INFO((AE_INFO, "No outstanding allocations"));
+ } else {
+ ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ num_outstanding, num_outstanding));
+ }
+
+ return_VOID;
+}
+
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
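These wrappers take effect only in a debug build with ACPI_DBG_TRACK_ALLOCATIONS set, where (per the usual acmacros.h mapping, stated here as an assumption) ACPI_ALLOCATE and ACPI_FREE route through them; each live block gains a struct acpi_debug_mem_header and sits on a global, address-ordered list so leaks and double-frees can be reported. A direct-call sketch follows; in real code the macros supply the component/module/line arguments, so the values used below are placeholders:

static acpi_status tracked_alloc_demo(void)
{
	void *buf;

	/* Roughly what ACPI_ALLOCATE(128) routes to in a tracked build */
	buf = acpi_ut_allocate_and_track(128, ACPI_UTILITIES, "example", 1);
	if (!buf) {
		return (AE_NO_MEMORY);
	}

	/* ... use buf ... */

	acpi_ut_free_and_track(buf, ACPI_UTILITIES, "example", 1);

	/* Anything never freed is reported as an outstanding allocation */
	acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
	return (AE_OK);
}
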
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index b09632b4f5b3..390db0ca5e2e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -147,7 +147,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
* RETURN: status - the status of the call
*
* DESCRIPTION: This function is called to get information about the current
- * state of the ACPI subsystem. It will return system information
+ * state of the ACPI subsystem. It will return system information
* in the out_buffer.
*
* If the function fails an appropriate status will be returned
@@ -238,7 +238,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
}
acpi_gbl_init_handler = handler;
- return AE_OK;
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
@@ -263,6 +263,7 @@ acpi_status acpi_purge_cached_objects(void)
(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
+
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6d63cc39b9ae..d4d3826140d8 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -408,7 +408,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MOVE_32_TO_32(&bad_name,
ACPI_CAST_PTR(u32, internal_name));
- acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+ acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 00a783661d0b..46f80e2c92f7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
*access_bit_width < 32)
*access_bit_width = 32;
+ else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
+ *access_bit_width < 64)
+ *access_bit_width = 64;
if ((bit_width + bit_offset) > *access_bit_width) {
pr_warning(FW_BUG APEI_PFX
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 8e1793649ec0..8d457b55c55a 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -367,7 +367,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
* This will cause resource conflict with regular memory. So
* remove it from trigger table resources.
*/
- if (param_extension && (type & 0x0038) && param2) {
+ if ((param_extension || acpi5) && (type & 0x0038) && param2) {
struct apei_resources addr_resources;
apei_resources_init(&addr_resources);
trigger_param_region = einj_get_trigger_parameter_region(
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 903549df809b..04ab5c9d3ced 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -111,8 +111,17 @@ retry_next:
if (rc)
goto out;
/* no more record */
- if (id == APEI_ERST_INVALID_RECORD_ID)
+ if (id == APEI_ERST_INVALID_RECORD_ID) {
+ /*
+ * If the persistent store is empty initially, the function
+ * 'erst_read' below will return "-ENOENT" value. This causes
+ * 'retry_next' label is entered again. The returned value
+ * should be zero indicating the read operation is EOF.
+ */
+ len = 0;
+
goto out;
+ }
retry:
rc = len = erst_read(id, erst_dbg_buf, erst_dbg_buf_len);
/* The record may be cleared by others, try read next record */
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e4d9d24eb73d..6d894bfd8b8f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -931,14 +931,14 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
-static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
+static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
- u64 *id, unsigned int part,
+ u64 *id, unsigned int part, int count,
size_t size, struct pstore_info *psi);
-static int erst_clearer(enum pstore_type_id type, u64 id,
- struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi);
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
@@ -987,7 +987,7 @@ static int erst_close_pstore(struct pstore_info *psi)
return 0;
}
-static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
+static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
struct pstore_info *psi)
{
@@ -1055,7 +1055,7 @@ out:
}
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
- u64 *id, unsigned int part,
+ u64 *id, unsigned int part, int count,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
@@ -1101,8 +1101,8 @@ static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
return ret;
}
-static int erst_clearer(enum pstore_type_id type, u64 id,
- struct pstore_info *psi)
+static int erst_clearer(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi)
{
return erst_clear(id);
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 1599566ed1fe..7ae2750bb457 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -901,7 +901,7 @@ static unsigned long ghes_esource_prealloc_size(
return prealloc_size;
}
-static int __devinit ghes_probe(struct platform_device *ghes_dev)
+static int ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
@@ -994,7 +994,7 @@ err:
return rc;
}
-static int __devexit ghes_remove(struct platform_device *ghes_dev)
+static int ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 45e3e1759fb8..7efaeaa53b88 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
@@ -95,6 +96,18 @@ enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
+ /* On Lenovo Thinkpad models from 2010 and 2011, the power unit
+ switches between mWh and mAh depending on whether the system
+ is running on battery or not. When mAh is the unit, most
+ reported values are incorrect and need to be adjusted by
+ 10000/design_voltage. Verified on x201, t410, t410s, and x220.
+ Pre-2010 and 2012 models appear to always report in mWh and
+ are thus unaffected (tested with t42, t61, t500, x200, x300,
+ and x230). Also, in mid-2012 Lenovo issued a BIOS update for
+ the 2011 models that fixes the issue (tested on x220 with a
+ post-1.29 BIOS), but as of Nov. 2012, no such update is
+ available for the 2010 models. */
+ ACPI_BATTERY_QUIRK_THINKPAD_MAH,
};
struct acpi_battery {
@@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
kfree(buffer.pointer);
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
+ battery->power_unit && battery->design_voltage) {
+ battery->design_capacity = battery->design_capacity *
+ 10000 / battery->design_voltage;
+ battery->full_charge_capacity = battery->full_charge_capacity *
+ 10000 / battery->design_voltage;
+ battery->design_capacity_warning =
+ battery->design_capacity_warning *
+ 10000 / battery->design_voltage;
+ /* Curiously, design_capacity_low, unlike the rest of them,
+ is correct. */
+ /* capacity_granularity_* equal 1 on the systems tested, so
+ it's impossible to tell if they would need an adjustment
+ or not if their values were higher. */
+ }
return result;
}
@@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
+ battery->power_unit && battery->design_voltage) {
+ battery->capacity_now = battery->capacity_now *
+ 10000 / battery->design_voltage;
+ }
return result;
}
@@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
mutex_unlock(&battery->sysfs_lock);
}
+static void find_battery(const struct dmi_header *dm, void *private)
+{
+ struct acpi_battery *battery = (struct acpi_battery *)private;
+ /* Note: the hardcoded offsets below have been extracted from
+ the source code of dmidecode. */
+ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
+ const u8 *dmi_data = (const u8 *)(dm + 1);
+ int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
+ if (dm->length >= 18)
+ dmi_capacity *= dmi_data[17];
+ if (battery->design_capacity * battery->design_voltage / 1000
+ != dmi_capacity &&
+ battery->design_capacity * 10 == dmi_capacity)
+ set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ &battery->flags);
+ }
+}
+
/*
* According to the ACPI spec, some kinds of primary batteries can
* report percentage battery remaining capacity directly to OS.
@@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
}
+
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
+ return;
+
+ if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
+ const char *s;
+ s = dmi_get_system_info(DMI_PRODUCT_VERSION);
+ if (s && !strnicmp(s, "ThinkPad", 8)) {
+ dmi_walk(find_battery, battery);
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ &battery->flags) &&
+ battery->design_voltage) {
+ battery->design_capacity =
+ battery->design_capacity *
+ 10000 / battery->design_voltage;
+ battery->full_charge_capacity =
+ battery->full_charge_capacity *
+ 10000 / battery->design_voltage;
+ battery->design_capacity_warning =
+ battery->design_capacity_warning *
+ 10000 / battery->design_voltage;
+ battery->capacity_now = battery->capacity_now *
+ 10000 / battery->design_voltage;
+ }
+ }
+ }
}
static int acpi_battery_update(struct acpi_battery *battery)
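A rough illustration of the rescaling applied by the ThinkPad quirk above; the helper name and the numbers are hypothetical, only the 10000/design_voltage factor comes from the patch:

    /* Rescale a value the firmware reported in mAh-based units. */
    static u32 thinkpad_mah_adjust(u32 reported, u32 design_voltage_mv)
    {
        /* e.g. 62160 reported at 11100 mV -> 62160 * 10000 / 11100 = 56000 */
        return reported * 10000 / design_voltage_mv;
    }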
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d59175efc428..1f0d457ecbcf 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -257,7 +257,15 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
}
-static int __acpi_bus_set_power(struct acpi_device *device, int state)
+/**
+ * acpi_device_set_power - Set power state of an ACPI device.
+ * @device: Device to set the power state of.
+ * @state: New power state to set.
+ *
+ * Callers must ensure that the device is power manageable before using this
+ * function.
+ */
+int acpi_device_set_power(struct acpi_device *device, int state)
{
int result = 0;
acpi_status status = AE_OK;
@@ -298,6 +306,12 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
* a lower-powered state.
*/
if (state < device->power.state) {
+ if (device->power.state >= ACPI_STATE_D3_HOT &&
+ state != ACPI_STATE_D0) {
+ printk(KERN_WARNING PREFIX
+ "Cannot transition to non-D0 state from D3\n");
+ return -ENODEV;
+ }
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, state);
if (result)
@@ -341,6 +355,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
return result;
}
+EXPORT_SYMBOL(acpi_device_set_power);
int acpi_bus_set_power(acpi_handle handle, int state)
@@ -359,7 +374,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
return -ENODEV;
}
- return __acpi_bus_set_power(device, state);
+ return acpi_device_set_power(device, state);
}
EXPORT_SYMBOL(acpi_bus_set_power);
@@ -402,7 +417,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p)
if (result)
return result;
- result = __acpi_bus_set_power(device, state);
+ result = acpi_device_set_power(device, state);
if (!result && state_p)
*state_p = state;
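A minimal sketch (not part of the patch) of how a caller is expected to honour the "power manageable" requirement stated in the new kernel-doc for acpi_device_set_power(); 'adev' stands for any valid struct acpi_device and the target state is arbitrary:

    int error = 0;

    /* Only ask ACPI to change the state if the device exposes power control. */
    if (acpi_device_power_manageable(adev))
        error = acpi_device_set_power(adev, ACPI_STATE_D3);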
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 1f9f7d7d7bc5..811910b50b75 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -92,17 +92,24 @@ static int is_device_present(acpi_handle handle)
return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
}
+static bool is_container_device(const char *hid)
+{
+ const struct acpi_device_id *container_id;
+
+ for (container_id = container_device_ids;
+ container_id->id[0]; container_id++) {
+ if (!strcmp((char *)container_id->id, hid))
+ return true;
+ }
+
+ return false;
+}
+
/*******************************************************************/
static int acpi_container_add(struct acpi_device *device)
{
struct acpi_container *container;
-
- if (!device) {
- printk(KERN_ERR PREFIX "device is NULL\n");
- return -EINVAL;
- }
-
container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
if (!container)
return -ENOMEM;
@@ -164,7 +171,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
case ACPI_NOTIFY_BUS_CHECK:
/* Fall through */
case ACPI_NOTIFY_DEVICE_CHECK:
- printk(KERN_WARNING "Container driver received %s event\n",
+ pr_debug("Container driver received %s event\n",
(type == ACPI_NOTIFY_BUS_CHECK) ?
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
@@ -185,7 +192,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
result = container_device_add(&device, handle);
if (result) {
- printk(KERN_WARNING "Failed to add container\n");
+ acpi_handle_warn(handle, "Failed to add container\n");
break;
}
@@ -232,10 +239,8 @@ container_walk_namespace_cb(acpi_handle handle,
goto end;
}
- if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") &&
- strcmp(hid, "PNP0A06")) {
+ if (!is_container_device(hid))
goto end;
- }
switch (*action) {
case INSTALL_NOTIFY_HANDLER:
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
new file mode 100644
index 000000000000..c6ff606c6d5b
--- /dev/null
+++ b/drivers/acpi/device_pm.c
@@ -0,0 +1,667 @@
+/*
+ * drivers/acpi/device_pm.c - ACPI device power management routines.
+ *
+ * Copyright (C) 2012, Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
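+ * @handler: Notify handler to install for PM wakeup events.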
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events. For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler, void *context)
+{
+ acpi_status status = AE_ALREADY_EXISTS;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler, context);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = true;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
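+ * @handler: Notify handler to remove.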
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (!adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_remove_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = false;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_device_power_state - Get preferred power state of ACPI device.
+ * @dev: Device whose preferred target power state to return.
+ * @adev: ACPI device node corresponding to @dev.
+ * @target_state: System state to match the resultant device state.
+ * @d_max_in: Deepest low-power state to take into consideration.
+ * @d_min_p: Location to store the upper limit of the allowed states range.
+ * Return value: Preferred power state of the device on success, -ENODEV
+ * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
+ *
+ * Find the lowest power (highest number) ACPI device power state that the
+ * device can be in while the system is in the state represented by
+ * @target_state. If @d_min_p is set, the highest power (lowest number) device
+ * power state that @dev can be in for the given system sleep state is stored
+ * at the location pointed to by it.
+ *
+ * Callers must ensure that @dev and @adev are valid pointers and that @adev
+ * actually corresponds to @dev before using this function.
+ */
+int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
+ u32 target_state, int d_max_in, int *d_min_p)
+{
+ char acpi_method[] = "_SxD";
+ unsigned long long d_min, d_max;
+ bool wakeup = false;
+
+ if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
+ return -EINVAL;
+
+ if (d_max_in > ACPI_STATE_D3_HOT) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
+ if (stat == PM_QOS_FLAGS_ALL)
+ d_max_in = ACPI_STATE_D3_HOT;
+ }
+
+ acpi_method[2] = '0' + target_state;
+ /*
+ * If the sleep state is S0, the lowest limit from ACPI is D3,
+ * but if the device has _S0W, we will use the value from _S0W
+ * as the lowest limit from ACPI. Finally, we will constrain
+ * the lowest limit with the specified one.
+ */
+ d_min = ACPI_STATE_D0;
+ d_max = ACPI_STATE_D3;
+
+ /*
+ * If present, _SxD methods return the minimum D-state (highest power
+ * state) we can use for the corresponding S-states. Otherwise, the
+ * minimum D-state is D0 (ACPI 3.x).
+ *
+ * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
+ * provided -- that's our fault recovery, we ignore retval.
+ */
+ if (target_state > ACPI_STATE_S0) {
+ acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min);
+ wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
+ && adev->wakeup.sleep_state >= target_state;
+ } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
+ PM_QOS_FLAGS_NONE) {
+ wakeup = adev->wakeup.flags.valid;
+ }
+
+ /*
+ * If _PRW says we can wake up the system from the target sleep state,
+ * the D-state returned by _SxD is sufficient for that (we assume a
+ * wakeup-aware driver if wake is set). Still, if _SxW exists
+ * (ACPI 3.x), it should return the maximum (lowest power) D-state that
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (wakeup) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+ status = acpi_evaluate_integer(adev->handle, acpi_method, NULL,
+ &d_max);
+ if (ACPI_FAILURE(status)) {
+ if (target_state != ACPI_STATE_S0 ||
+ status != AE_NOT_FOUND)
+ d_max = d_min;
+ } else if (d_max < d_min) {
+ /* Warn the user of the broken DSDT */
+ printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+ acpi_method);
+ /* Sanitize it */
+ d_min = d_max;
+ }
+ }
+
+ if (d_max_in < d_min)
+ return -EINVAL;
+ if (d_min_p)
+ *d_min_p = d_min;
+ /* constrain d_max with specified lowest limit (max number) */
+ if (d_max > d_max_in) {
+ for (d_max = d_max_in; d_max > d_min; d_max--) {
+ if (adev->power.states[d_max].flags.valid)
+ break;
+ }
+ }
+ return d_max;
+}
+EXPORT_SYMBOL_GPL(acpi_device_power_state);
+
+/**
+ * acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
+ * @dev: Device whose preferred target power state to return.
+ * @d_min_p: Location to store the upper limit of the allowed states range.
+ * @d_max_in: Deepest low-power state to take into consideration.
+ * Return value: Preferred power state of the device on success, -ENODEV
+ * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
+ *
+ * The caller must ensure that @dev is valid before using this function.
+ */
+int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ return -ENODEV;
+ }
+
+ return acpi_device_power_state(dev, adev, acpi_target_system_state(),
+ d_max_in, d_min_p);
+}
+EXPORT_SYMBOL(acpi_pm_device_sleep_state);
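A hedged sketch of how a driver's suspend path might use the helper exported above; the fallback policy shown is illustrative, not mandated by the patch:

    int d_min, d_max;

    d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3);
    if (d_max < 0)
        d_max = ACPI_STATE_D3; /* no ACPI constraints known; assume the deepest state */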
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * acpi_wakeup_device - Wakeup notification handler for ACPI devices.
+ * @handle: ACPI handle of the device the notification is for.
+ * @event: Type of the signaled event.
+ * @context: Device corresponding to @handle.
+ */
+static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
+{
+ struct device *dev = context;
+
+ if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
+ pm_wakeup_event(dev, 0);
+ pm_runtime_resume(dev);
+ }
+}
+
+/**
+ * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
+ * @adev: ACPI device to enable/disable the remote wakeup for.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ *
+ * Enable/disable the GPE associated with @adev so that it can generate
+ * wakeup signals for the device in response to external (remote) events and
+ * enable/disable device wakeup power.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+
+ if (enable) {
+ acpi_status res;
+ int error;
+
+ error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
+ if (error)
+ return error;
+
+ res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(res)) {
+ acpi_disable_wakeup_device_power(adev);
+ return -EIO;
+ }
+ } else {
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
+ }
+ return 0;
+}
+
+/**
+ * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
+ * @phys_dev: Device to enable/disable remote wakeup for.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
+{
+ struct acpi_device *adev;
+ acpi_handle handle;
+
+ if (!device_run_wake(phys_dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(phys_dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return __acpi_device_run_wake(adev, enable);
+}
+EXPORT_SYMBOL(acpi_pm_device_run_wake);
+#else
+static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
+ void *context) {}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * __acpi_device_sleep_wake - Enable or disable a device to wake up the system.
+ * @adev: ACPI device to enable/disable to wake up the system.
+ * @target_state: System state the device is supposed to wake up from.
+ * @enable: Whether to enable or disable @adev to wake up the system.
+ */
+int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
+ bool enable)
+{
+ return enable ?
+ acpi_enable_wakeup_device_power(adev, target_state) :
+ acpi_disable_wakeup_device_power(adev);
+}
+
+/**
+ * acpi_pm_device_sleep_wake - Enable or disable a device to wake up the system.
+ * @dev: Device to enable/disable to wake up the system from sleep states.
+ * @enable: Whether to enable or disable @dev to wake up the system.
+ */
+int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+{
+ acpi_handle handle;
+ struct acpi_device *adev;
+ int error;
+
+ if (!device_can_wakeup(dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ return -ENODEV;
+ }
+
+ error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
+ enable);
+ if (!error)
+ dev_info(dev, "System wakeup %s by ACPI\n",
+ enable ? "enabled" : "disabled");
+
+ return error;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
+ * @dev: Device to get the ACPI node for.
+ */
+static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ return handle && !acpi_bus_get_device(handle, &adev) ? adev : NULL;
+}
+
+/**
+ * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
+ * @dev: Device to put into a low-power state.
+ * @adev: ACPI device node corresponding to @dev.
+ * @system_state: System state to choose the device state for.
+ */
+static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev,
+ u32 system_state)
+{
+ int power_state;
+
+ if (!acpi_device_power_manageable(adev))
+ return 0;
+
+ power_state = acpi_device_power_state(dev, adev, system_state,
+ ACPI_STATE_D3, NULL);
+ if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3)
+ return -EIO;
+
+ return acpi_device_set_power(adev, power_state);
+}
+
+/**
+ * acpi_dev_pm_full_power - Put ACPI device into the full-power state.
+ * @adev: ACPI device node to put into the full-power state.
+ */
+static int acpi_dev_pm_full_power(struct acpi_device *adev)
+{
+ return acpi_device_power_manageable(adev) ?
+ acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
+ * @dev: Device to put into a low-power state.
+ *
+ * Put the given device into a runtime low-power state using the standard ACPI
+ * mechanism. Set up remote wakeup if desired, choose the state to put the
+ * device into (this checks if remote wakeup is expected to work too), and set
+ * the power state of the device.
+ */
+int acpi_dev_runtime_suspend(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ bool remote_wakeup;
+ int error;
+
+ if (!adev)
+ return 0;
+
+ remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
+ PM_QOS_FLAGS_NONE;
+ error = __acpi_device_run_wake(adev, remote_wakeup);
+ if (remote_wakeup && error)
+ return -EAGAIN;
+
+ error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
+ if (error)
+ __acpi_device_run_wake(adev, false);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
+
+/**
+ * acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
+ * @dev: Device to put into the full-power state.
+ *
+ * Put the given device into the full-power state using the standard ACPI
+ * mechanism at run time. Set the power state of the device to ACPI D0 and
+ * disable remote wakeup.
+ */
+int acpi_dev_runtime_resume(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ int error;
+
+ if (!adev)
+ return 0;
+
+ error = acpi_dev_pm_full_power(adev);
+ __acpi_device_run_wake(adev, false);
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
+
+/**
+ * acpi_subsys_runtime_suspend - Suspend device using ACPI.
+ * @dev: Device to suspend.
+ *
+ * Carry out the generic runtime suspend procedure for @dev and use ACPI to put
+ * it into a runtime low-power state.
+ */
+int acpi_subsys_runtime_suspend(struct device *dev)
+{
+ int ret = pm_generic_runtime_suspend(dev);
+ return ret ? ret : acpi_dev_runtime_suspend(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
+
+/**
+ * acpi_subsys_runtime_resume - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state and carry out the
+ * generic runtime resume procedure for it.
+ */
+int acpi_subsys_runtime_resume(struct device *dev)
+{
+ int ret = acpi_dev_runtime_resume(dev);
+ return ret ? ret : pm_generic_runtime_resume(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
+ * @dev: Device to put into a low-power state.
+ *
+ * Put the given device into a low-power state during system transition to a
+ * sleep state using the standard ACPI mechanism. Set up system wakeup if
+ * desired, choose the state to put the device into (this checks if system
+ * wakeup is expected to work too), and set the power state of the device.
+ */
+int acpi_dev_suspend_late(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ u32 target_state;
+ bool wakeup;
+ int error;
+
+ if (!adev)
+ return 0;
+
+ target_state = acpi_target_system_state();
+ wakeup = device_may_wakeup(dev);
+ error = __acpi_device_sleep_wake(adev, target_state, wakeup);
+ if (wakeup && error)
+ return error;
+
+ error = acpi_dev_pm_low_power(dev, adev, target_state);
+ if (error)
+ __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
+
+/**
+ * acpi_dev_resume_early - Put device into the full-power state using ACPI.
+ * @dev: Device to put into the full-power state.
+ *
+ * Put the given device into the full-power state using the standard ACPI
+ * mechanism during system transition to the working state. Set the power
+ * state of the device to ACPI D0 and disable remote wakeup.
+ */
+int acpi_dev_resume_early(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ int error;
+
+ if (!adev)
+ return 0;
+
+ error = acpi_dev_pm_full_power(adev);
+ __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
+
+/**
+ * acpi_subsys_prepare - Prepare device for system transition to a sleep state.
+ * @dev: Device to prepare.
+ */
+int acpi_subsys_prepare(struct device *dev)
+{
+ /*
+ * Follow PCI and resume devices suspended at run time before running
+ * their system suspend callbacks.
+ */
+ pm_runtime_resume(dev);
+ return pm_generic_prepare(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
+
+/**
+ * acpi_subsys_suspend_late - Suspend device using ACPI.
+ * @dev: Device to suspend.
+ *
+ * Carry out the generic late suspend procedure for @dev and use ACPI to put
+ * it into a low-power state during system transition into a sleep state.
+ */
+int acpi_subsys_suspend_late(struct device *dev)
+{
+ int ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_dev_suspend_late(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
+
+/**
+ * acpi_subsys_resume_early - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state and carry out the
+ * generic early resume procedure for it during system transition into the
+ * working state.
+ */
+int acpi_subsys_resume_early(struct device *dev)
+{
+ int ret = acpi_dev_resume_early(dev);
+ return ret ? ret : pm_generic_resume_early(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+#endif /* CONFIG_PM_SLEEP */
+
+static struct dev_pm_domain acpi_general_pm_domain = {
+ .ops = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = acpi_subsys_runtime_suspend,
+ .runtime_resume = acpi_subsys_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+#ifdef CONFIG_PM_SLEEP
+ .prepare = acpi_subsys_prepare,
+ .suspend_late = acpi_subsys_suspend_late,
+ .resume_early = acpi_subsys_resume_early,
+ .poweroff_late = acpi_subsys_suspend_late,
+ .restore_early = acpi_subsys_resume_early,
+#endif
+ },
+};
+
+/**
+ * acpi_dev_pm_attach - Prepare device for ACPI power management.
+ * @dev: Device to prepare.
+ * @power_on: Whether or not to power on the device.
+ *
+ * If @dev has a valid ACPI handle that has a valid struct acpi_device object
+ * attached to it, install a wakeup notification handler for the device and
+ * add it to the general ACPI PM domain. If @power_on is set, the device will
+ * be put into the ACPI D0 state before the function returns.
+ *
+ * This assumes that @dev's bus type uses generic power management callbacks
+ * (or doesn't use any power management callbacks at all).
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+int acpi_dev_pm_attach(struct device *dev, bool power_on)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+
+ if (!adev)
+ return -ENODEV;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
+ dev->pm_domain = &acpi_general_pm_domain;
+ if (power_on) {
+ acpi_dev_pm_full_power(adev);
+ __acpi_device_run_wake(adev, false);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_dev_pm_detach - Remove ACPI power management from the device.
+ * @dev: Device to take care of.
+ * @power_off: Whether or not to try to remove power from the device.
+ *
+ * Remove the device from the general ACPI PM domain and remove its wakeup
+ * notifier. If @power_off is set, additionally remove power from the device if
+ * possible.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void acpi_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+
+ if (adev && dev->pm_domain == &acpi_general_pm_domain) {
+ dev->pm_domain = NULL;
+ acpi_remove_pm_notifier(adev, acpi_wakeup_device);
+ if (power_off) {
+ /*
+ * If the device's PM QoS resume latency limit or flags
+ * have been exposed to user space, they have to be
+ * hidden at this point, so that they don't affect the
+ * choice of the low-power state to put the device into.
+ */
+ dev_pm_qos_hide_latency_limit(dev);
+ dev_pm_qos_hide_flags(dev);
+ __acpi_device_run_wake(adev, false);
+ acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
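An illustrative probe/remove pairing for a platform driver relying on the general ACPI PM domain introduced above; the driver and function names are hypothetical:

    static int example_probe(struct platform_device *pdev)
    {
        /* Power the device up and join the general ACPI PM domain. */
        int ret = acpi_dev_pm_attach(&pdev->dev, true);
        if (ret)
            return ret;
        /* ... ordinary probe work ... */
        return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
        /* Leave the domain and optionally let ACPI remove power. */
        acpi_dev_pm_detach(&pdev->dev, true);
        return 0;
    }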
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 88eb14304667..f32bd47b35e0 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/stddef.h>
+#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -460,12 +461,8 @@ static void handle_dock(struct dock_station *ds, int dock)
struct acpi_object_list arg_list;
union acpi_object arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
-
- printk(KERN_INFO PREFIX "%s - %s\n",
- (char *)name_buffer.pointer, dock ? "docking" : "undocking");
+ acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
@@ -474,11 +471,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
- " _DCK\n", (char *)name_buffer.pointer));
+ acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
+ status);
kfree(buffer.pointer);
- kfree(name_buffer.pointer);
}
static inline void dock(struct dock_station *ds)
@@ -525,9 +521,11 @@ static void dock_lock(struct dock_station *ds, int lock)
status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
if (lock)
- printk(KERN_WARNING PREFIX "Locking device failed\n");
+ acpi_handle_warn(ds->handle,
+ "Locking device failed (0x%x)\n", status);
else
- printk(KERN_WARNING PREFIX "Unlocking device failed\n");
+ acpi_handle_warn(ds->handle,
+ "Unlocking device failed (0x%x)\n", status);
}
}
@@ -667,7 +665,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
dock_lock(ds, 0);
eject_dock(ds);
if (dock_present(ds)) {
- printk(KERN_ERR PREFIX "Unable to undock!\n");
+ acpi_handle_err(ds->handle, "Unable to undock!\n");
return -EBUSY;
}
complete_undock(ds);
@@ -715,7 +713,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
- printk(KERN_ERR PREFIX "Unable to dock!\n");
+ acpi_handle_err(handle, "Unable to dock!\n");
complete_dock(ds);
break;
}
@@ -743,7 +741,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
dock_event(ds, event, UNDOCK_EVENT);
break;
default:
- printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
+ acpi_handle_err(handle, "Unknown dock event %d\n", event);
}
}
@@ -987,7 +985,7 @@ err_rmgroup:
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
- printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
+ acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
return ret;
}
@@ -1016,51 +1014,39 @@ static int dock_remove(struct dock_station *ds)
}
/**
- * find_dock - look for a dock station
+ * find_dock_and_bay - look for dock stations and bays
* @handle: acpi handle of a device
* @lvl: unused
- * @context: counter of dock stations found
+ * @context: unused
* @rv: unused
*
- * This is called by acpi_walk_namespace to look for dock stations.
+ * This is called by acpi_walk_namespace to look for dock stations and bays.
*/
static __init acpi_status
-find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
+find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- if (is_dock(handle))
+ if (is_dock(handle) || is_ejectable_bay(handle))
dock_add(handle);
return AE_OK;
}
-static __init acpi_status
-find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- /* If bay is a dock, it's already handled */
- if (is_ejectable_bay(handle) && !is_dock(handle))
- dock_add(handle);
- return AE_OK;
-}
-
static int __init dock_init(void)
{
if (acpi_disabled)
return 0;
- /* look for a dock station */
+ /* look for dock stations and bays */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL);
+ ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
- /* look for bay */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL);
if (!dock_station_count) {
- printk(KERN_INFO PREFIX "No dock devices found.\n");
+ pr_info(PREFIX "No dock devices found.\n");
return 0;
}
register_acpi_bus_notifier(&dock_acpi_notifier);
- printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
+ pr_info(PREFIX "%s: %d docks/bays found\n",
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a51df9681319..354007d490d1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -158,10 +158,10 @@ static int ec_transaction_done(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ec->curr_lock, flags);
+ spin_lock_irqsave(&ec->lock, flags);
if (!ec->curr || ec->curr->done)
ret = 1;
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -175,32 +175,38 @@ static void start_transaction(struct acpi_ec *ec)
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
- spin_lock_irqsave(&ec->curr_lock, flags);
- if (!ec->curr)
+ struct transaction *t = ec->curr;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ if (!t)
goto unlock;
- if (ec->curr->wlen > ec->curr->wi) {
+ if (t->wlen > t->wi) {
if ((status & ACPI_EC_FLAG_IBF) == 0)
acpi_ec_write_data(ec,
- ec->curr->wdata[ec->curr->wi++]);
+ t->wdata[t->wi++]);
else
goto err;
- } else if (ec->curr->rlen > ec->curr->ri) {
+ } else if (t->rlen > t->ri) {
if ((status & ACPI_EC_FLAG_OBF) == 1) {
- ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec);
- if (ec->curr->rlen == ec->curr->ri)
- ec->curr->done = true;
+ t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ if (t->rlen == t->ri)
+ t->done = true;
} else
goto err;
- } else if (ec->curr->wlen == ec->curr->wi &&
+ } else if (t->wlen == t->wi &&
(status & ACPI_EC_FLAG_IBF) == 0)
- ec->curr->done = true;
+ t->done = true;
goto unlock;
err:
- /* false interrupt, state didn't change */
- if (in_interrupt())
- ++ec->curr->irq_count;
+ /*
+ * If the SCI bit is set, don't count this as a false IRQ;
+ * otherwise an IRQ that simply hasn't been handled yet would
+ * be mistaken for a false one.
+ */
+ if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
+ ++t->irq_count;
+
unlock:
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static int acpi_ec_sync_query(struct acpi_ec *ec);
@@ -238,9 +244,9 @@ static int ec_poll(struct acpi_ec *ec)
if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
break;
pr_debug(PREFIX "controller reset, restart transaction\n");
- spin_lock_irqsave(&ec->curr_lock, flags);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -253,17 +259,17 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ spin_lock_irqsave(&ec->lock, tmp);
/* following two actions should be kept atomic */
ec->curr = t;
start_transaction(ec);
if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ spin_lock_irqsave(&ec->lock, tmp);
ec->curr = NULL;
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -292,7 +298,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
return -EINVAL;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
status = -EINVAL;
goto unlock;
@@ -310,7 +316,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start\n");
+ pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
/* It has to be disabled, so that it doesn't trigger. */
@@ -326,8 +333,9 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ec_storm_threshold) {
- pr_info(PREFIX "GPE storm detected, "
- "transactions will use polling mode\n");
+ pr_info(PREFIX "GPE storm detected(%d GPEs), "
+ "transactions will use polling mode\n",
+ t->irq_count);
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
pr_debug(PREFIX "transaction end\n");
@@ -335,7 +343,7 @@ end:
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
return status;
}
@@ -403,7 +411,7 @@ int ec_burst_disable(void)
EXPORT_SYMBOL(ec_burst_disable);
-int ec_read(u8 addr, u8 * val)
+int ec_read(u8 addr, u8 *val)
{
int err;
u8 temp_data;
@@ -468,10 +476,10 @@ void acpi_ec_block_transactions(void)
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
/* Prevent transactions from being carried out */
set_bit(EC_FLAGS_BLOCKED, &ec->flags);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions(void)
@@ -481,10 +489,10 @@ void acpi_ec_unblock_transactions(void)
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions_early(void)
@@ -536,9 +544,9 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
handler->handle = handle;
handler->func = func;
handler->data = data;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_add(&handler->node, &ec->list);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
return 0;
}
@@ -547,14 +555,14 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
struct acpi_ec_query_handler *handler, *tmp;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
if (query_bit == handler->query_bit) {
list_del(&handler->node);
kfree(handler);
}
}
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
@@ -601,9 +609,9 @@ static void acpi_ec_gpe_query(void *ec_cxt)
struct acpi_ec *ec = ec_cxt;
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
acpi_ec_sync_query(ec);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
static int ec_check_sci(struct acpi_ec *ec, u8 state)
@@ -622,10 +630,11 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, void *data)
{
struct acpi_ec *ec = data;
+ u8 status = acpi_ec_read_status(ec);
- pr_debug(PREFIX "~~~> interrupt\n");
+ pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
- advance_transaction(ec, acpi_ec_read_status(ec));
+ advance_transaction(ec, status);
if (ec_transaction_done(ec) &&
(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
wake_up(&ec->wait);
@@ -691,10 +700,10 @@ static struct acpi_ec *make_acpi_ec(void)
if (!ec)
return NULL;
ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
- mutex_init(&ec->lock);
+ mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
- spin_lock_init(&ec->curr_lock);
+ spin_lock_init(&ec->lock);
return ec;
}
@@ -853,12 +862,12 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
ec = acpi_driver_data(device);
ec_remove_handlers(ec);
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
list_del(&handler->node);
kfree(handler);
}
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
release_region(ec->data_addr, 1);
release_region(ec->command_addr, 1);
device->driver_data = NULL;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 08373086cd7e..35da18113216 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -18,9 +18,14 @@
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
-#define DBG(x...) printk(PREFIX x)
+#define DBG(fmt, ...) \
+ printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
#else
-#define DBG(x...) do { } while(0)
+#define DBG(fmt, ...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
+} while (0)
#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
@@ -130,46 +135,59 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
- struct acpi_device_physical_node *physical_node;
+ struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
int retval = -EINVAL;
- if (dev->archdata.acpi_handle) {
- dev_warn(dev, "Drivers changed 'acpi_handle'\n");
- return -EINVAL;
+ if (ACPI_HANDLE(dev)) {
+ if (handle) {
+ dev_warn(dev, "ACPI handle is already set\n");
+ return -EINVAL;
+ } else {
+ handle = ACPI_HANDLE(dev);
+ }
}
+ if (!handle)
+ return -EINVAL;
get_device(dev);
status = acpi_bus_get_device(handle, &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
- physical_node = kzalloc(sizeof(struct acpi_device_physical_node),
- GFP_KERNEL);
+ physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
+
+ /* Sanity check. */
+ list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+ if (pn->dev == dev) {
+ dev_warn(dev, "Already associated with ACPI node\n");
+ goto err_free;
+ }
+
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
- mutex_unlock(&acpi_dev->physical_node_lock);
- kfree(physical_node);
- goto err;
+ goto err_free;
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
acpi_dev->physical_node_count++;
+
mutex_unlock(&acpi_dev->physical_node_lock);
- dev->archdata.acpi_handle = handle;
+ if (!ACPI_HANDLE(dev))
+ ACPI_HANDLE_SET(dev, acpi_dev->handle);
if (!physical_node->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
@@ -187,8 +205,14 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
return 0;
err:
+ ACPI_HANDLE_SET(dev, NULL);
put_device(dev);
return retval;
+
+ err_free:
+ mutex_unlock(&acpi_dev->physical_node_lock);
+ kfree(physical_node);
+ goto err;
}
static int acpi_unbind_one(struct device *dev)
@@ -198,11 +222,10 @@ static int acpi_unbind_one(struct device *dev)
acpi_status status;
struct list_head *node, *next;
- if (!dev->archdata.acpi_handle)
+ if (!ACPI_HANDLE(dev))
return 0;
- status = acpi_bus_get_device(dev->archdata.acpi_handle,
- &acpi_dev);
+ status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
@@ -228,7 +251,7 @@ static int acpi_unbind_one(struct device *dev)
sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
- dev->archdata.acpi_handle = NULL;
+ ACPI_HANDLE_SET(dev, NULL);
/* acpi_bind_one() increased the refcount by one */
put_device(dev);
kfree(entry);
@@ -248,6 +271,10 @@ static int acpi_platform_notify(struct device *dev)
acpi_handle handle;
int ret = -EINVAL;
+ ret = acpi_bind_one(dev, NULL);
+ if (!ret)
+ goto out;
+
if (!dev->bus || !dev->parent) {
/* bridge devices generally don't have a bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
@@ -261,16 +288,16 @@ static int acpi_platform_notify(struct device *dev)
}
if ((ret = type->find_device(dev, &handle)) != 0)
DBG("Can't get handler for %s\n", dev_name(dev));
- end:
+ end:
if (!ret)
acpi_bind_one(dev, handle);
+ out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(dev->archdata.acpi_handle,
- ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 20a0f2c3ca3b..a0cc796932f7 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -61,7 +61,7 @@ static void acpi_hed_notify(struct acpi_device *device, u32 event)
blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
}
-static int __devinit acpi_hed_add(struct acpi_device *device)
+static int acpi_hed_add(struct acpi_device *device)
{
/* Only one hardware error device */
if (hed_handle)
@@ -70,7 +70,7 @@ static int __devinit acpi_hed_add(struct acpi_device *device)
return 0;
}
-static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+static int acpi_hed_remove(struct acpi_device *device, int type)
{
hed_handle = NULL;
return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ca75b9ce0489..3c407cdc1ec1 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -58,11 +58,11 @@ struct acpi_ec {
unsigned long data_addr;
unsigned long global_lock;
unsigned long flags;
- struct mutex lock;
+ struct mutex mutex;
wait_queue_head_t wait;
struct list_head list;
struct transaction *curr;
- spinlock_t curr_lock;
+ spinlock_t lock;
};
extern struct acpi_ec *first_ec;
@@ -93,4 +93,11 @@ static inline int suspend_nvs_save(void) { return 0; }
static inline void suspend_nvs_restore(void) {}
#endif
+/*--------------------------------------------------------------------------
+ Platform bus support
+ -------------------------------------------------------------------------- */
+struct platform_device;
+
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9eaf708f5885..bd22f8667eed 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
return acpi_rsdp;
#endif
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -534,6 +534,137 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
+
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum = (u8) (sum + *(buffer++));
+ return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+ ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+ ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+ ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+ ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+ ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+ ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+ ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+ ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+/* Non-fatal errors: Affected tables/files are ignored */
+#define INVALID_TABLE(x, path, name) \
+ { pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+/* Must not be raised above 10 without modifying the code below */
+#define ACPI_OVERRIDE_TABLES 10
+
+void __init acpi_initrd_override(void *data, size_t size)
+{
+ int sig, no, table_nr = 0, total_offset = 0;
+ long offset = 0;
+ struct acpi_table_header *table;
+ char cpio_path[32] = "kernel/firmware/acpi/";
+ struct cpio_data file;
+ struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
+ char *p;
+
+ if (data == NULL || size == 0)
+ return;
+
+ for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
+ file = find_cpio_data(cpio_path, data, size, &offset);
+ if (!file.data)
+ break;
+
+ data += offset;
+ size -= offset;
+
+ if (file.size < sizeof(struct acpi_table_header))
+ INVALID_TABLE("Table smaller than ACPI header",
+ cpio_path, file.name);
+
+ table = file.data;
+
+ for (sig = 0; table_sigs[sig]; sig++)
+ if (!memcmp(table->signature, table_sigs[sig], 4))
+ break;
+
+ if (!table_sigs[sig])
+ INVALID_TABLE("Unknown signature",
+ cpio_path, file.name);
+ if (file.size != table->length)
+ INVALID_TABLE("File length does not match table length",
+ cpio_path, file.name);
+ if (acpi_table_checksum(file.data, table->length))
+ INVALID_TABLE("Bad table checksum",
+ cpio_path, file.name);
+
+ pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+ table->signature, cpio_path, file.name, table->length);
+
+ all_tables_size += table->length;
+ early_initrd_files[table_nr].data = file.data;
+ early_initrd_files[table_nr].size = file.size;
+ table_nr++;
+ }
+ if (table_nr == 0)
+ return;
+
+ acpi_tables_addr =
+ memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ all_tables_size, PAGE_SIZE);
+ if (!acpi_tables_addr) {
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Calling e820_add_reserve alone does not work: the tables become
+ * invalid later because the memory gets reused.
+ * memblock_reserve works as expected and the tables won't get modified,
+ * but it's not enough on X86, because ioremap (used by
+ * acpi_os_map_memory) will later complain that the pages to be mapped
+ * are not marked "reserved".
+ * Using both memblock_reserve and e820_add_region (via
+ * arch_reserve_mem_area) works fine.
+ */
+ memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size);
+ arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+ p = early_ioremap(acpi_tables_addr, all_tables_size);
+
+ for (no = 0; no < table_nr; no++) {
+ memcpy(p + total_offset, early_initrd_files[no].data,
+ early_initrd_files[no].size);
+ total_offset += early_initrd_files[no].size;
+ }
+ early_iounmap(p, all_tables_size);
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
+
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn(PREFIX
+ "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+}
+
+
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
struct acpi_table_header ** new_table)
@@ -547,24 +678,73 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
if (strncmp(existing_table->signature, "DSDT", 4) == 0)
*new_table = (struct acpi_table_header *)AmlCode;
#endif
- if (*new_table != NULL) {
- printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
- "this is unsafe: tainting kernel\n",
- existing_table->signature,
- existing_table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
+ if (*new_table != NULL)
+ acpi_table_taint(existing_table);
return AE_OK;
}
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address * new_address,
- u32 *new_table_length)
+ acpi_physical_address *address,
+ u32 *table_length)
{
- return AE_SUPPORT;
-}
+#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+#else
+ int table_offset = 0;
+ struct acpi_table_header *table;
+
+ *table_length = 0;
+ *address = 0;
+
+ if (!acpi_tables_addr)
+ return AE_OK;
+
+ do {
+ if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
+ WARN_ON(1);
+ return AE_OK;
+ }
+
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
+ table_offset += table->length;
+
+ if (memcmp(existing_table->signature, table->signature, 4)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ /* Only override tables with matching oem id */
+ if (memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table,
+ ACPI_HEADER_SIZE);
+ continue;
+ }
+
+ table_offset -= table->length;
+ *table_length = table->length;
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ *address = acpi_tables_addr + table_offset;
+ break;
+ } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+
+ if (*address != 0)
+ acpi_table_taint(existing_table);
+ return AE_OK;
+#endif
+}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
@@ -932,7 +1112,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
* having a static work_struct.
*/
- dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
+ dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
if (!dpc)
return AE_NO_MEMORY;
@@ -944,17 +1124,22 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
* because the hotplug code may call driver .remove() functions,
* which invoke flush_scheduled_work/acpi_os_wait_events_complete
* to flush these workqueues.
+ *
+ * To prevent lockdep from complaining unnecessarily, make sure that
+ * there is a different static lockdep key for each workqueue by using
+ * INIT_WORK() for each of them separately.
*/
- queue = hp ? kacpi_hotplug_wq :
- (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
- dpc->wait = hp ? 1 : 0;
-
- if (queue == kacpi_hotplug_wq)
+ if (hp) {
+ queue = kacpi_hotplug_wq;
+ dpc->wait = 1;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- else if (queue == kacpi_notify_wq)
+ } else if (type == OSL_NOTIFY_HANDLER) {
+ queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- else
+ } else {
+ queue = kacpid_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ }
/*
* On some machines, a software-initiated SMI causes corruption unless
@@ -986,6 +1171,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
{
return __acpi_os_execute(0, function, context, 1);
}
+EXPORT_SYMBOL(acpi_os_hotplug_execute);
void acpi_os_wait_events_complete(void)
{
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 2ef04098cc1d..a1dee29beed3 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -45,11 +45,12 @@ static int acpi_pci_unbind(struct acpi_device *device)
device_set_run_wake(&dev->dev, false);
pci_acpi_remove_pm_notifier(device);
+ acpi_power_resource_unregister_device(&dev->dev, device->handle);
if (!dev->subordinate)
goto out;
- acpi_pci_irq_del_prt(dev->subordinate);
+ acpi_pci_irq_del_prt(pci_domain_nr(dev->bus), dev->subordinate->number);
device->ops.bind = NULL;
device->ops.unbind = NULL;
@@ -63,7 +64,7 @@ static int acpi_pci_bind(struct acpi_device *device)
{
acpi_status status;
acpi_handle handle;
- struct pci_bus *bus;
+ unsigned char bus;
struct pci_dev *dev;
dev = acpi_get_pci_dev(device->handle);
@@ -71,6 +72,7 @@ static int acpi_pci_bind(struct acpi_device *device)
return 0;
pci_acpi_add_pm_notifier(device, dev);
+ acpi_power_resource_register_device(&dev->dev, device->handle);
if (device->wakeup.flags.run_wake)
device_set_run_wake(&dev->dev, true);
@@ -100,11 +102,11 @@ static int acpi_pci_bind(struct acpi_device *device)
goto out;
if (dev->subordinate)
- bus = dev->subordinate;
+ bus = dev->subordinate->number;
else
- bus = dev->bus;
+ bus = dev->bus->number;
- acpi_pci_irq_add_prt(device->handle, bus);
+ acpi_pci_irq_add_prt(device->handle, pci_domain_nr(dev->bus), bus);
out:
pci_dev_put(dev);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 0eefa12e648c..68a921d03247 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -184,7 +184,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
struct acpi_pci_routing_table *prt)
{
struct acpi_prt_entry *entry;
@@ -198,8 +198,8 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
- entry->id.segment = pci_domain_nr(bus);
- entry->id.bus = bus->number;
+ entry->id.segment = segment;
+ entry->id.bus = bus;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
@@ -244,7 +244,7 @@ static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -273,7 +273,7 @@ int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, bus, entry);
+ acpi_pci_irq_add_entry(handle, segment, bus, entry);
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -282,17 +282,16 @@ int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
return 0;
}
-void acpi_pci_irq_del_prt(struct pci_bus *bus)
+void acpi_pci_irq_del_prt(int segment, int bus)
{
struct acpi_prt_entry *entry, *tmp;
printk(KERN_DEBUG
"ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ segment, bus);
spin_lock(&acpi_prt_lock);
list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (pci_domain_nr(bus) == entry->id.segment
- && bus->number == entry->id.bus) {
+ if (segment == entry->id.segment && bus == entry->id.bus) {
list_del(&entry->list);
kfree(entry);
}
@@ -459,19 +458,19 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
*/
if (gsi < 0) {
u32 dev_gsi;
- dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
- printk(" - using ISA IRQ %d\n", dev->irq);
+ dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+ pin_name(pin), dev->irq);
acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
- return 0;
} else {
- printk("\n");
- return 0;
+ dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
+ pin_name(pin));
}
+ return 0;
}
rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
@@ -495,11 +494,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
-/* FIXME: implement x86/x86_64 version */
-void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
-{
-}
-
void acpi_pci_irq_disable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index bce469c0b48a..7928d4dc7056 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -445,7 +445,7 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static int __devinit acpi_pci_root_add(struct acpi_device *device)
+static int acpi_pci_root_add(struct acpi_device *device)
{
unsigned long long segment, bus;
acpi_status status;
@@ -454,6 +454,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
+ bool is_osc_granted = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -501,85 +502,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
- root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
-
- /*
- * All supported architectures that use ACPI have support for
- * PCI domains, so we indicate this in _OSC support capabilities.
- */
- flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
- acpi_pci_osc_support(root, flags);
-
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&root->node, &acpi_pci_roots);
- mutex_unlock(&acpi_pci_root_lock);
-
printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
/*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- printk(KERN_ERR PREFIX
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto out_del_root;
- }
-
- /*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- result = acpi_pci_bind_root(device);
- if (result)
- goto out_del_root;
-
- /*
* PCI Routing Table
* -----------------
* Evaluate and parse _PRT, if exists.
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->bus);
+ result = acpi_pci_irq_add_prt(device->handle, root->segment,
+ root->secondary.start);
+
+ root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
/*
- * Scan and bind all _ADR-Based Devices
+ * All supported architectures that use ACPI have support for
+ * PCI domains, so we indicate this in _OSC support capabilities.
*/
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
+ flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ acpi_pci_osc_support(root, flags);
/* Indicate support for various _OSC capabilities. */
- if (pci_ext_cfg_avail(root->bus->self))
+ if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
- if (pcie_aspm_support_enabled())
+ if (pcie_aspm_support_enabled()) {
flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
- OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+ OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+ }
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
if (flags != base_flags) {
status = acpi_pci_osc_support(root, flags);
if (ACPI_FAILURE(status)) {
- dev_info(root->bus->bridge, "ACPI _OSC support "
+ dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
pcie_no_aspm();
flags = base_flags;
}
}
-
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
@@ -588,40 +551,81 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
- dev_dbg(root->bus->bridge,
+ dev_dbg(&device->dev,
"PCIe errors handled by BIOS.\n");
else
flags |= OSC_PCI_EXPRESS_AER_CONTROL;
}
- dev_info(root->bus->bridge,
+ dev_info(&device->dev,
"Requesting ACPI _OSC control (0x%02x)\n", flags);
status = acpi_pci_osc_control_set(device->handle, &flags,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_SUCCESS(status)) {
- dev_info(root->bus->bridge,
+ is_osc_granted = true;
+ dev_info(&device->dev,
"ACPI _OSC control (0x%02x) granted\n", flags);
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- /*
- * We have ASPM control, but the FADT indicates
- * that it's unsupported. Clear it.
- */
- pcie_clear_aspm(root->bus);
- }
} else {
- dev_info(root->bus->bridge,
+ is_osc_granted = false;
+ dev_info(&device->dev,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
- pr_info("ACPI _OSC control for PCIe not granted, "
- "disabling ASPM\n");
- pcie_no_aspm();
}
} else {
- dev_info(root->bus->bridge,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
+ dev_info(&device->dev,
+ "Unable to request _OSC control "
+ "(_OSC support mask: 0x%02x)\n", flags);
+ }
+
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ mutex_lock(&acpi_pci_root_lock);
+ list_add_tail(&root->node, &acpi_pci_roots);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ printk(KERN_ERR PREFIX
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto out_del_root;
+ }
+
+ /*
+ * Attach ACPI-PCI Context
+ * -----------------------
+ * Thus binding the ACPI and PCI devices.
+ */
+ result = acpi_pci_bind_root(device);
+ if (result)
+ goto out_del_root;
+
+ /*
+ * Scan and bind all _ADR-Based Devices
+ */
+ list_for_each_entry(child, &device->children, node)
+ acpi_pci_bridge_scan(child);
+
+ /* ASPM setting */
+ if (is_osc_granted) {
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
+ pcie_clear_aspm(root->bus);
+ } else {
+ pr_info("ACPI _OSC control for PCIe not granted, "
+ "disabling ASPM\n");
+ pcie_no_aspm();
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
@@ -634,6 +638,8 @@ out_del_root:
mutex_lock(&acpi_pci_root_lock);
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
+
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
end:
kfree(root);
return result;
@@ -644,12 +650,19 @@ static int acpi_pci_root_start(struct acpi_device *device)
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
+ if (system_state != SYSTEM_BOOTING)
+ pci_assign_unassigned_bus_resources(root->bus);
+
mutex_lock(&acpi_pci_root_lock);
list_for_each_entry(driver, &acpi_pci_drivers, node)
if (driver->add)
driver->add(root);
mutex_unlock(&acpi_pci_root_lock);
+	/* must be done after a hot-added ioapic has been registered */
+ if (system_state != SYSTEM_BOOTING)
+ pci_enable_bridges(root->bus);
+
pci_bus_add_devices(root->bus);
return 0;
@@ -657,17 +670,29 @@ static int acpi_pci_root_start(struct acpi_device *device)
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
+ acpi_status status;
+ acpi_handle handle;
struct acpi_pci_root *root = acpi_driver_data(device);
struct acpi_pci_driver *driver;
+ pci_stop_root_bus(root->bus);
+
mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(driver, &acpi_pci_drivers, node)
+ list_for_each_entry_reverse(driver, &acpi_pci_drivers, node)
if (driver->remove)
driver->remove(root);
+ mutex_unlock(&acpi_pci_root_lock);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
+ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
+ if (ACPI_SUCCESS(status))
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
+
+ pci_remove_root_bus(root->bus);
+
+ mutex_lock(&acpi_pci_root_lock);
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
kfree(root);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 40e38a06ba85..6e7b9d523812 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -445,11 +445,8 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
return -ENODEV;
ret = acpi_bus_get_device(handle, &acpi_dev);
- if (ret)
- goto no_power_resource;
-
- if (!acpi_dev->power.flags.power_resources)
- goto no_power_resource;
+ if (ret || !acpi_dev->power.flags.power_resources)
+ return -ENODEV;
powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
if (!powered_device)
@@ -471,10 +468,6 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
}
return ret;
-
-no_power_resource:
- printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
- return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 27adb090bb30..ef98796b3824 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file,
struct list_head *node, *next;
char strbuf[5];
char str[5] = "";
- unsigned int len = count;
- if (len > 4)
- len = 4;
- if (len < 0)
- return -EFAULT;
+ if (count > 4)
+ count = 4;
- if (copy_from_user(strbuf, buffer, len))
+ if (copy_from_user(strbuf, buffer, count))
return -EFAULT;
- strbuf[len] = '\0';
+ strbuf[count] = '\0';
sscanf(strbuf, "%s", str);
mutex_lock(&acpi_device_lock);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bd4e5dca3ff7..e83311bf1ebd 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -44,6 +44,7 @@
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/cpu.h>
@@ -282,7 +283,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
/* Declared with "Processor" statement; match ProcessorID */
status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ dev_err(&device->dev,
+ "Failed to evaluate processor object (0x%x)\n",
+ status);
return -ENODEV;
}
@@ -301,8 +304,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Evaluating processor _UID [%#x]\n", status);
+ dev_err(&device->dev,
+ "Failed to evaluate processor _UID (0x%x)\n",
+ status);
return -ENODEV;
}
device_declaration = 1;
@@ -345,7 +349,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
if (!object.processor.pblk_address)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
else if (object.processor.pblk_length != 6)
- printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
+ dev_err(&device->dev, "Invalid PBLK length [%d]\n",
object.processor.pblk_length);
else {
pr->throttling.address = object.processor.pblk_address;
@@ -430,8 +434,8 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
* Initialize missing things
*/
if (pr->flags.need_hotplug_init) {
- printk(KERN_INFO "Will online and init hotplugged "
- "CPU: %d\n", pr->id);
+ pr_info("Will online and init hotplugged CPU: %d\n",
+ pr->id);
WARN(acpi_processor_start(pr), "Failed to start CPU:"
" %d\n", pr->id);
pr->flags.need_hotplug_init = 0;
@@ -492,14 +496,16 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
&pr->cdev->device.kobj,
"thermal_cooling");
if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ dev_err(&device->dev,
+ "Failed to create sysfs link 'thermal_cooling'\n");
goto err_thermal_unregister;
}
result = sysfs_create_link(&pr->cdev->device.kobj,
&device->dev.kobj,
"device");
if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ dev_err(&pr->cdev->device,
+ "Failed to create sysfs link 'device'\n");
goto err_remove_sysfs_thermal;
}
@@ -561,8 +567,9 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
*/
if (per_cpu(processor_device_array, pr->id) != NULL &&
per_cpu(processor_device_array, pr->id) != device) {
- printk(KERN_WARNING "BIOS reported wrong ACPI id "
- "for the processor\n");
+ dev_warn(&device->dev,
+ "BIOS reported wrong ACPI id %d for the processor\n",
+ pr->id);
result = -ENODEV;
goto err_free_cpumask;
}
@@ -695,8 +702,8 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
static void acpi_processor_hotplug_notify(acpi_handle handle,
u32 event, void *data)
{
- struct acpi_processor *pr;
struct acpi_device *device = NULL;
+ struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
int result;
@@ -716,7 +723,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
result = acpi_processor_device_add(handle, &device);
if (result) {
- printk(KERN_ERR PREFIX "Unable to add the device\n");
+ acpi_handle_err(handle, "Unable to add the device\n");
break;
}
@@ -728,20 +735,29 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
"received ACPI_NOTIFY_EJECT_REQUEST\n"));
if (acpi_bus_get_device(handle, &device)) {
- printk(KERN_ERR PREFIX
- "Device don't exist, dropping EJECT\n");
+ acpi_handle_err(handle,
+ "Device don't exist, dropping EJECT\n");
break;
}
- pr = acpi_driver_data(device);
- if (!pr) {
- printk(KERN_ERR PREFIX
- "Driver data is NULL, dropping EJECT\n");
+ if (!acpi_driver_data(device)) {
+ acpi_handle_err(handle,
+ "Driver data is NULL, dropping EJECT\n");
break;
}
- /* REVISIT: update when eject is supported */
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- break;
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ acpi_handle_err(handle, "No memory, dropping EJECT\n");
+ break;
+ }
+
+ ej_event->handle = handle;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ (void *)ej_event);
+
+ /* eject is performed asynchronously */
+ return;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -841,7 +857,7 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
* and do it when the CPU gets online the first time
* TBD: Cleanup above functions and try to do this more elegant.
*/
- printk(KERN_INFO "CPU %d got hotplugged\n", pr->id);
+ pr_info("CPU %d got hotplugged\n", pr->id);
pr->flags.need_hotplug_init = 1;
return AE_OK;
@@ -852,8 +868,22 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
if (cpu_online(pr->id))
cpu_down(pr->id);
+ get_online_cpus();
+ /*
+ * The cpu might become online again at this point. So we check whether
+ * the cpu has been onlined or not. If the cpu became online, it means
+ * that someone wants to use the cpu. So acpi_processor_handle_eject()
+ * returns -EAGAIN.
+ */
+ if (unlikely(cpu_online(pr->id))) {
+ put_online_cpus();
+ pr_warn("Failed to remove CPU %d, because other task "
+ "brought the CPU back online\n", pr->id);
+ return -EAGAIN;
+ }
arch_unregister_cpu(pr->id);
acpi_unmap_lsapic(pr->id);
+ put_online_cpus();
return (0);
}
#else
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e8086c725305..ed9a1cc690be 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- ktime_t kt1, kt2;
- s64 idle_time;
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
- local_irq_disable();
-
-
lapic_timer_state_broadcast(pr, cx, 1);
- kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
- kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
-
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
- local_irq_enable();
lapic_timer_state_broadcast(pr, cx, 0);
return index;
@@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
- ktime_t kt1, kt2;
- s64 idle_time_ns;
- s64 idle_time;
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
- local_irq_disable();
-
-
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
return -EINVAL;
}
}
@@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
- kt1 = ktime_get_real();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
- idle_time = idle_time_ns;
- do_div(idle_time, NSEC_PER_USEC);
-
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(idle_time_ns);
+ sched_clock_idle_wakeup_event(0);
- local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
@@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
- ktime_t kt1, kt2;
- s64 idle_time_ns;
- s64 idle_time;
-
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
@@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
- local_irq_disable();
acpi_safe_halt();
- local_irq_enable();
return -EBUSY;
}
}
- local_irq_disable();
-
-
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
return -EINVAL;
}
}
@@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
*/
lapic_timer_state_broadcast(pr, cx, 1);
- kt1 = ktime_get_real();
/*
* disable bus master
* bm_check implies we need ARB_DIS
@@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
- idle_time = idle_time_ns;
- do_div(idle_time, NSEC_PER_USEC);
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
+ sched_clock_idle_wakeup_event(0);
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(idle_time_ns);
-
- local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
@@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
/**
@@ -1009,6 +958,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
return -EINVAL;
}
+ if (!dev)
+ return -EINVAL;
+
dev->cpu = pr->id;
if (max_cstate == 0)
@@ -1200,6 +1152,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
}
/* Populate Updated C-state information */
+ acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 836bfe069042..53e7ac9403a7 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+ /*
+		 * MSR C001_0064+:
+		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid
+		 * (bit 63 is bit 31 of the high dword returned by rdmsr above).
+ */
+ if (!(hi & BIT(31)))
+ return;
+
fid = lo & 0x3f;
did = (lo >> 6) & 7;
if (boot_cpu_data.x86 == 0x10)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
new file mode 100644
index 000000000000..a3868f6c222a
--- /dev/null
+++ b/drivers/acpi/resource.c
@@ -0,0 +1,526 @@
+/*
+ * drivers/acpi/resource.c - ACPI device resources interpretation.
+ *
+ * Copyright (C) 2012, Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_X86
+#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+#else
+#define valid_IRQ(i) (true)
+#endif
+
+static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect,
+ bool window)
+{
+ unsigned long flags = IORESOURCE_MEM;
+
+ if (len == 0)
+ flags |= IORESOURCE_DISABLED;
+
+ if (write_protect == ACPI_READ_WRITE_MEMORY)
+ flags |= IORESOURCE_MEM_WRITEABLE;
+
+ if (window)
+ flags |= IORESOURCE_WINDOW;
+
+ return flags;
+}
+
+static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
+ u8 write_protect)
+{
+ res->start = start;
+ res->end = start + len - 1;
+ res->flags = acpi_dev_memresource_flags(len, write_protect, false);
+}
+
+/**
+ * acpi_dev_resource_memory - Extract ACPI memory resource information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents a memory resource and
+ * if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+{
+ struct acpi_resource_memory24 *memory24;
+ struct acpi_resource_memory32 *memory32;
+ struct acpi_resource_fixed_memory32 *fixed_memory32;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &ares->data.memory24;
+ acpi_dev_get_memresource(res, memory24->minimum,
+ memory24->address_length,
+ memory24->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &ares->data.memory32;
+ acpi_dev_get_memresource(res, memory32->minimum,
+ memory32->address_length,
+ memory32->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &ares->data.fixed_memory32;
+ acpi_dev_get_memresource(res, fixed_memory32->address,
+ fixed_memory32->address_length,
+ fixed_memory32->write_protect);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
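For illustration only (hypothetical values, not part of this change), a read-write FixedMemory32 descriptor at 0xFED40000 with length 0x5000 would be converted as follows:

	struct resource res;

	/* assuming ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32 with the
	 * hypothetical values above */
	if (acpi_dev_resource_memory(ares, &res)) {
		/* res.start == 0xFED40000, res.end == 0xFED44FFF,
		 * res.flags == IORESOURCE_MEM | IORESOURCE_MEM_WRITEABLE */
	}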
+
+static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode,
+ bool window)
+{
+ int flags = IORESOURCE_IO;
+
+ if (io_decode == ACPI_DECODE_16)
+ flags |= IORESOURCE_IO_16BIT_ADDR;
+
+ if (start > end || end >= 0x10003)
+ flags |= IORESOURCE_DISABLED;
+
+ if (window)
+ flags |= IORESOURCE_WINDOW;
+
+ return flags;
+}
+
+static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
+ u8 io_decode)
+{
+ u64 end = start + len - 1;
+
+ res->start = start;
+ res->end = end;
+ res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false);
+}
+
+/**
+ * acpi_dev_resource_io - Extract ACPI I/O resource information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an I/O resource and
+ * if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+{
+ struct acpi_resource_io *io;
+ struct acpi_resource_fixed_io *fixed_io;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IO:
+ io = &ares->data.io;
+ acpi_dev_get_ioresource(res, io->minimum,
+ io->address_length,
+ io->io_decode);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ fixed_io = &ares->data.fixed_io;
+ acpi_dev_get_ioresource(res, fixed_io->address,
+ fixed_io->address_length,
+ ACPI_DECODE_10);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
+
+/**
+ * acpi_dev_resource_address_space - Extract ACPI address space information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an address space resource
+ * and if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_address_space(struct acpi_resource *ares,
+ struct resource *res)
+{
+ acpi_status status;
+ struct acpi_resource_address64 addr;
+ bool window;
+ u64 len;
+ u8 io_decode;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ break;
+ default:
+ return false;
+ }
+
+ status = acpi_resource_to_address64(ares, &addr);
+ if (ACPI_FAILURE(status))
+ return true;
+
+ res->start = addr.minimum;
+ res->end = addr.maximum;
+ window = addr.producer_consumer == ACPI_PRODUCER;
+
+ switch(addr.resource_type) {
+ case ACPI_MEMORY_RANGE:
+ len = addr.maximum - addr.minimum + 1;
+ res->flags = acpi_dev_memresource_flags(len,
+ addr.info.mem.write_protect,
+ window);
+ break;
+ case ACPI_IO_RANGE:
+ io_decode = addr.granularity == 0xfff ?
+ ACPI_DECODE_10 : ACPI_DECODE_16;
+ res->flags = acpi_dev_ioresource_flags(addr.minimum,
+ addr.maximum,
+ io_decode, window);
+ break;
+ case ACPI_BUS_NUMBER_RANGE:
+ res->flags = IORESOURCE_BUS;
+ break;
+ default:
+ res->flags = 0;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
+
+/**
+ * acpi_dev_resource_ext_address_space - Extract ACPI extended address space information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an extended address space
+ * resource and if that's the case, use the information in it to populate the
+ * generic resource object pointed to by @res.
+ */
+bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
+ struct resource *res)
+{
+ struct acpi_resource_extended_address64 *ext_addr;
+ bool window;
+ u64 len;
+ u8 io_decode;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
+ return false;
+
+ ext_addr = &ares->data.ext_address64;
+
+ res->start = ext_addr->minimum;
+ res->end = ext_addr->maximum;
+ window = ext_addr->producer_consumer == ACPI_PRODUCER;
+
+ switch(ext_addr->resource_type) {
+ case ACPI_MEMORY_RANGE:
+ len = ext_addr->maximum - ext_addr->minimum + 1;
+ res->flags = acpi_dev_memresource_flags(len,
+ ext_addr->info.mem.write_protect,
+ window);
+ break;
+ case ACPI_IO_RANGE:
+ io_decode = ext_addr->granularity == 0xfff ?
+ ACPI_DECODE_10 : ACPI_DECODE_16;
+ res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
+ ext_addr->maximum,
+ io_decode, window);
+ break;
+ case ACPI_BUS_NUMBER_RANGE:
+ res->flags = IORESOURCE_BUS;
+ break;
+ default:
+ res->flags = 0;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
+
+/**
+ * acpi_dev_irq_flags - Determine IRQ resource flags.
+ * @triggering: Triggering type as provided by ACPI.
+ * @polarity: Interrupt polarity as provided by ACPI.
+ * @shareable: Whether or not the interrupt is shareable.
+ */
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
+{
+ unsigned long flags;
+
+ if (triggering == ACPI_LEVEL_SENSITIVE)
+ flags = polarity == ACPI_ACTIVE_LOW ?
+ IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL;
+ else
+ flags = polarity == ACPI_ACTIVE_LOW ?
+ IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE;
+
+ if (shareable == ACPI_SHARED)
+ flags |= IORESOURCE_IRQ_SHAREABLE;
+
+ return flags | IORESOURCE_IRQ;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
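As a quick illustration (not part of this change), a shared, active-low, level-triggered interrupt descriptor maps to the following resource flags:

	unsigned long flags;

	flags = acpi_dev_irq_flags(ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW,
				   ACPI_SHARED);
	/* flags == IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL |
	 *          IORESOURCE_IRQ_SHAREABLE */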
+
+static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
+{
+ res->start = gsi;
+ res->end = gsi;
+ res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED;
+}
+
+static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
+ u8 triggering, u8 polarity, u8 shareable)
+{
+ int irq, p, t;
+
+ if (!valid_IRQ(gsi)) {
+ acpi_dev_irqresource_disabled(res, gsi);
+ return;
+ }
+
+ /*
+	 * In IO-APIC mode, use the overridden attributes. Two reasons:
+ * 1. BIOS bug in DSDT
+ * 2. BIOS uses IO-APIC mode Interrupt Source Override
+ */
+ if (!acpi_get_override_irq(gsi, &t, &p)) {
+ u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+ if (triggering != trig || polarity != pol) {
+ pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
+ t ? "edge" : "level", p ? "low" : "high");
+ triggering = trig;
+ polarity = pol;
+ }
+ }
+
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
+ if (irq >= 0) {
+ res->start = irq;
+ res->end = irq;
+ } else {
+ acpi_dev_irqresource_disabled(res, gsi);
+ }
+}
+
+/**
+ * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
+ * @ares: Input ACPI resource object.
+ * @index: Index into the array of GSIs represented by the resource.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an interrupt resource
+ * and @index does not exceed the resource's interrupt count (true is returned
+ * in that case regardless of the results of the other checks). If that's the
+ * case, register the GSI corresponding to @index from the array of interrupts
+ * represented by the resource and populate the generic resource object pointed
+ * to by @res accordingly. If the registration of the GSI is not successful,
+ * IORESOURCE_DISABLED will be set in that object's flags.
+ */
+bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
+ struct resource *res)
+{
+ struct acpi_resource_irq *irq;
+ struct acpi_resource_extended_irq *ext_irq;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ /*
+ * Per spec, only one interrupt per descriptor is allowed in
+ * _CRS, but some firmware violates this, so parse them all.
+ */
+ irq = &ares->data.irq;
+ if (index >= irq->interrupt_count) {
+ acpi_dev_irqresource_disabled(res, 0);
+ return false;
+ }
+ acpi_dev_get_irqresource(res, irq->interrupts[index],
+ irq->triggering, irq->polarity,
+ irq->sharable);
+ break;
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ ext_irq = &ares->data.extended_irq;
+ if (index >= ext_irq->interrupt_count) {
+ acpi_dev_irqresource_disabled(res, 0);
+ return false;
+ }
+ acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
+ ext_irq->triggering, ext_irq->polarity,
+ ext_irq->sharable);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
+
+/**
+ * acpi_dev_free_resource_list - Free a resource list returned by %acpi_dev_get_resources().
+ * @list: The head of the resource list to free.
+ */
+void acpi_dev_free_resource_list(struct list_head *list)
+{
+ struct resource_list_entry *rentry, *re;
+
+ list_for_each_entry_safe(rentry, re, list, node) {
+ list_del(&rentry->node);
+ kfree(rentry);
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
+
+struct res_proc_context {
+ struct list_head *list;
+ int (*preproc)(struct acpi_resource *, void *);
+ void *preproc_data;
+ int count;
+ int error;
+};
+
+static acpi_status acpi_dev_new_resource_entry(struct resource *r,
+ struct res_proc_context *c)
+{
+ struct resource_list_entry *rentry;
+
+ rentry = kmalloc(sizeof(*rentry), GFP_KERNEL);
+ if (!rentry) {
+ c->error = -ENOMEM;
+ return AE_NO_MEMORY;
+ }
+ rentry->res = *r;
+ list_add_tail(&rentry->node, c->list);
+ c->count++;
+ return AE_OK;
+}
+
+static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
+ void *context)
+{
+ struct res_proc_context *c = context;
+ struct resource r;
+ int i;
+
+ if (c->preproc) {
+ int ret;
+
+ ret = c->preproc(ares, c->preproc_data);
+ if (ret < 0) {
+ c->error = ret;
+ return AE_CTRL_TERMINATE;
+ } else if (ret > 0) {
+ return AE_OK;
+ }
+ }
+
+ memset(&r, 0, sizeof(r));
+
+ if (acpi_dev_resource_memory(ares, &r)
+ || acpi_dev_resource_io(ares, &r)
+ || acpi_dev_resource_address_space(ares, &r)
+ || acpi_dev_resource_ext_address_space(ares, &r))
+ return acpi_dev_new_resource_entry(&r, c);
+
+ for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) {
+ acpi_status status;
+
+ status = acpi_dev_new_resource_entry(&r, c);
+ if (ACPI_FAILURE(status))
+ return status;
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_dev_get_resources - Get current resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ * @preproc: The caller's preprocessing routine.
+ * @preproc_data: Pointer passed to the caller's preprocessing routine.
+ *
+ * Evaluate the _CRS method for the given device node and process its output by
+ * (1) executing the @preproc() routine provided by the caller, passing the
+ * resource pointer and @preproc_data to it as arguments, for each ACPI resource
+ * returned and (2) converting all of the returned ACPI resources into struct
+ * resource objects if possible. If the return value of @preproc() in step (1)
+ * is different from 0, step (2) is not applied to the given ACPI resource and
+ * if that value is negative, the whole processing is aborted and that value is
+ * returned as the final error code.
+ *
+ * The resultant struct resource objects are put on the list pointed to by
+ * @list, that must be empty initially, as members of struct resource_list_entry
+ * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
+ * free that list.
+ *
+ * The number of resources in the output list is returned on success, an error
+ * code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data)
+{
+ struct res_proc_context c;
+ acpi_handle not_used;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, &not_used);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
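A minimal usage sketch (hypothetical caller, not part of this change): a driver holding a struct acpi_device *adev could collect its _CRS resources without any preprocessing callback and release them again like this:

	struct resource_list_entry *rentry;
	LIST_HEAD(res_list);
	int count;

	count = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
	if (count < 0)
		return count;	/* _CRS evaluation or conversion failed */

	list_for_each_entry(rentry, &res_list, node) {
		/* rentry->res is a struct resource (memory, I/O, bus or IRQ) */
	}

	acpi_dev_free_resource_list(&res_list);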
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1fcb8678665c..c88be6c37c30 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -29,6 +29,27 @@ extern struct acpi_device *acpi_root;
static const char *dummy_hid = "device";
+/*
+ * The following ACPI IDs are known to be suitable for representing as
+ * platform devices.
+ */
+static const struct acpi_device_id acpi_platform_device_ids[] = {
+
+ { "PNP0D40" },
+
+ /* Haswell LPSS devices */
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { "INT33C4", 0 },
+ { "INT33C5", 0 },
+ { "INT33C6", 0 },
+ { "INT33C7", 0 },
+
+ { }
+};
+
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
@@ -97,6 +118,7 @@ void acpi_bus_hot_remove_device(void *context)
struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
struct acpi_device *device;
acpi_handle handle = ej_event->handle;
+ acpi_handle temp;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
@@ -117,13 +139,16 @@ void acpi_bus_hot_remove_device(void *context)
goto err_out;
}
+ /* device has been freed */
+ device = NULL;
+
/* power off device */
status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
printk(KERN_WARNING PREFIX
"Power-off device failed\n");
- if (device->flags.lockable) {
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
@@ -157,6 +182,7 @@ err_out:
kfree(context);
return;
}
+EXPORT_SYMBOL(acpi_bus_hot_remove_device);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
@@ -216,6 +242,25 @@ acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *bu
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+static ssize_t acpi_device_uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
+}
+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+
+static ssize_t acpi_device_adr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "0x%08x\n",
+ (unsigned int)(acpi_dev->pnp.bus_address));
+}
+static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+
static ssize_t
acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -259,11 +304,21 @@ static ssize_t description_show(struct device *dev,
}
static DEVICE_ATTR(description, 0444, description_show, NULL);
+static ssize_t
+acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
+}
+static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
acpi_handle temp;
+ unsigned long long sun;
int result = 0;
/*
@@ -300,6 +355,21 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ if (dev->flags.bus_address)
+ result = device_create_file(&dev->dev, &dev_attr_adr);
+ if (dev->pnp.unique_id)
+ result = device_create_file(&dev->dev, &dev_attr_uid);
+
+ status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
+ if (ACPI_SUCCESS(status)) {
+ dev->pnp.sun = (unsigned long)sun;
+ result = device_create_file(&dev->dev, &dev_attr_sun);
+ if (result)
+ goto end;
+ } else {
+ dev->pnp.sun = (unsigned long)-1;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -331,6 +401,14 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (ACPI_SUCCESS(status))
device_remove_file(&dev->dev, &dev_attr_eject);
+ status = acpi_get_handle(dev->handle, "_SUN", &temp);
+ if (ACPI_SUCCESS(status))
+ device_remove_file(&dev->dev, &dev_attr_sun);
+
+ if (dev->pnp.unique_id)
+ device_remove_file(&dev->dev, &dev_attr_uid);
+ if (dev->flags.bus_address)
+ device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
if (dev->handle)
@@ -340,8 +418,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
ACPI Bus operations
-------------------------------------------------------------------------- */
-int acpi_match_device_ids(struct acpi_device *device,
- const struct acpi_device_id *ids)
+static const struct acpi_device_id *__acpi_match_device(
+ struct acpi_device *device, const struct acpi_device_id *ids)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
@@ -351,14 +429,44 @@ int acpi_match_device_ids(struct acpi_device *device,
* driver for it.
*/
if (!device->status.present)
- return -ENODEV;
+ return NULL;
for (id = ids; id->id[0]; id++)
list_for_each_entry(hwid, &device->pnp.ids, list)
if (!strcmp((char *) id->id, hwid->id))
- return 0;
+ return id;
- return -ENOENT;
+ return NULL;
+}
+
+/**
+ * acpi_match_device - Match a struct device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id objects to match against.
+ * @dev: The device structure to match.
+ *
+ * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
+ * object for that handle and use that object to match against a given list of
+ * device IDs.
+ *
+ * Return a pointer to the first matching ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+ const struct device *dev)
+{
+ struct acpi_device *adev;
+
+ if (!ids || !ACPI_HANDLE(dev)
+ || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev)))
+ return NULL;
+
+ return __acpi_match_device(adev, ids);
+}
+EXPORT_SYMBOL_GPL(acpi_match_device);
+
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids)
+{
+ return __acpi_match_device(device, ids) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
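For illustration (hypothetical driver code, not part of this change), acpi_match_device() is intended to be called with a struct device, typically from a probe routine, to find out which ID matched and fetch its driver data:

	/* my_acpi_ids, my_cfg and my_probe are made-up names for this sketch */
	static const struct acpi_device_id my_acpi_ids[] = {
		{ "INT33C2", (kernel_ulong_t)&my_cfg },
		{ }
	};

	static int my_probe(struct platform_device *pdev)
	{
		const struct acpi_device_id *id;

		id = acpi_match_device(my_acpi_ids, &pdev->dev);
		if (!id)
			return -ENODEV;
		/* id->driver_data carries the matched entry's private data */
		return 0;
	}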
@@ -377,6 +485,7 @@ static void acpi_device_release(struct device *dev)
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_free_ids(acpi_dev);
+ kfree(acpi_dev->pnp.unique_id);
kfree(acpi_dev);
}
@@ -859,8 +968,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
{
struct acpi_device_id button_device_ids[] = {
- {"PNP0C0D", 0},
{"PNP0C0C", 0},
+ {"PNP0C0D", 0},
{"PNP0C0E", 0},
{"", 0},
};
@@ -872,6 +981,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
device->wakeup.flags.run_wake = 1;
+ if (!acpi_match_device_ids(device, &button_device_ids[1])) {
+ /* Do not use Lid/sleep button for S5 wakeup */
+ if (device->wakeup.sleep_state == ACPI_STATE_S5)
+ device->wakeup.sleep_state = ACPI_STATE_S4;
+ }
device_set_wakeup_capable(&device->dev, true);
return;
}
@@ -965,8 +1079,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
* D3hot is only valid if _PR3 present.
*/
if (ps->resources.count ||
- (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
+ (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
ps->flags.valid = 1;
+ ps->flags.os_accessible = 1;
+ }
ps->power = -1; /* Unknown - driver assigned */
ps->latency = -1; /* Unknown - driver assigned */
@@ -982,6 +1098,11 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
+ /* Presence of _PS3 or _PRx means we can put the device into D3 cold */
+ if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set ||
+ device->power.flags.power_resources)
+ device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
+
acpi_bus_init_power(device);
return 0;
@@ -1013,11 +1134,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
device->flags.ejectable = 1;
}
- /* Presence of _LCK indicates 'lockable' */
- status = acpi_get_handle(device->handle, "_LCK", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.lockable = 1;
-
/* Power resources cannot be power manageable. */
if (device->device_type == ACPI_BUS_TYPE_POWER)
return 0;
@@ -1185,7 +1301,7 @@ static void acpi_device_set_id(struct acpi_device *device)
{
acpi_status status;
struct acpi_device_info *info;
- struct acpica_device_id_list *cid_list;
+ struct acpi_pnp_device_id_list *cid_list;
int i;
switch (device->device_type) {
@@ -1212,6 +1328,9 @@ static void acpi_device_set_id(struct acpi_device *device)
device->pnp.bus_address = info->address;
device->flags.bus_address = 1;
}
+ if (info->valid & ACPI_VALID_UID)
+ device->pnp.unique_id = kstrdup(info->unique_id.string,
+ GFP_KERNEL);
kfree(info);
@@ -1227,7 +1346,7 @@ static void acpi_device_set_id(struct acpi_device *device)
acpi_add_id(device, ACPI_DOCK_HID);
else if (!acpi_ibm_smbus_match(device))
acpi_add_id(device, ACPI_SMBUS_IBM_HID);
- else if (!acpi_device_hid(device) &&
+ else if (list_empty(&device->pnp.ids) &&
ACPI_IS_ROOT_DEVICE(device->parent)) {
acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
@@ -1483,8 +1602,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
*/
device = NULL;
acpi_bus_get_device(handle, &device);
- if (ops->acpi_op_add && !device)
+ if (ops->acpi_op_add && !device) {
acpi_add_single_object(&device, handle, type, sta, ops);
+ /* Is the device a known good platform device? */
+ if (device
+ && !acpi_match_device_ids(device, acpi_platform_device_ids))
+ acpi_create_platform_device(device);
+ }
if (!device)
return AE_CTRL_DEPTH;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fdcdbb652915..2fcc67d34b11 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -18,7 +18,6 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -81,6 +80,12 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
+u32 acpi_target_system_state(void)
+{
+ return acpi_target_sleep_state;
+}
+
static bool pwr_btn_event_pending;
/*
@@ -98,6 +103,21 @@ void __init acpi_nvs_nosave(void)
}
/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * but says nothing about saving NVS during S3. Not all versions of Windows
+ * save NVS on S3 suspend either, and it is clear that not all systems need
+ * NVS to be saved at S3 time. To improve suspend/resume time, allow the
+ * user to disable saving NVS on S3 if their system does not require it, but
+ * continue to save/restore NVS for S4 as specified.
+ */
+static bool nvs_nosave_s3;
+
+void __init acpi_nvs_nosave_s3(void)
+{
+ nvs_nosave_s3 = true;
+}
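A plausible consumer (illustrative only, mirroring the existing init_nvs_nosave() DMI callbacks rather than anything added here) would be a DMI quirk callback that disables NVS saving for S3 on a specific machine:

	static int __init init_nvs_nosave_s3(const struct dmi_system_id *d)
	{
		acpi_nvs_nosave_s3();
		return 0;
	}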
+
+/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
@@ -109,6 +129,180 @@ void __init acpi_old_suspend_ordering(void)
old_suspend_ordering = true;
}
+static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+{
+ acpi_old_suspend_ordering();
+ return 0;
+}
+
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+ acpi_nvs_nosave();
+ return 0;
+}
+
+static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Abit KN9 (nForce4 variant)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "HP xw4600 Workstation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Panasonic CF51-2L",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Matsushita Electric Industrial Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW21E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB17FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR11M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Everex StepNote Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1Z1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-NW130D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCCW29FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Averatec AV1020-ED2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI DELUXE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI Premium",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR26GN_P",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1S1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW520F",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
+ {},
+};
+
+static void acpi_sleep_dmi_check(void)
+{
+ dmi_check_system(acpisleep_dmi_table);
+}
+
/**
* acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
*/
@@ -224,6 +418,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -243,7 +438,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
u32 acpi_state = acpi_suspend_states[pm_state];
int error = 0;
- error = nvs_nosave ? 0 : suspend_nvs_alloc();
+ error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
if (error)
return error;
@@ -382,167 +577,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
-
-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
-{
- old_suspend_ordering = true;
- return 0;
-}
-
-static int __init init_nvs_nosave(const struct dmi_system_id *d)
-{
- acpi_nvs_nosave();
- return 0;
-}
-
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
- {
- .callback = init_old_suspend_ordering,
- .ident = "Abit KN9 (nForce4 variant)",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "HP xw4600 Workstation",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Panasonic CF51-2L",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR,
- "Matsushita Electric Industrial Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-FW21E",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCEB17FX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-SR11M",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Everex StepNote Series",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCEB1Z1E",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-NW130D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCCW29FX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Averatec AV1020-ED2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus A8N-SLI DELUXE",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus A8N-SLI Premium",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-SR26GN_P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-FW520F",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Asus K54C",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Asus K54HR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
- },
- },
- {},
-};
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
@@ -681,177 +715,6 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM
-/**
- * acpi_pm_device_sleep_state - return preferred power state of ACPI device
- * in the system sleep state given by %acpi_target_sleep_state
- * @dev: device to examine; its driver model wakeup flags control
- * whether it should be able to wake up the system
- * @d_min_p: used to store the upper limit of allowed states range
- * @d_max_in: specify the lowest allowed states
- * Return value: preferred power state of the device on success, -ENODEV
- * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
- *
- * Find the lowest power (highest number) ACPI device power state that
- * device @dev can be in while the system is in the sleep state represented
- * by %acpi_target_sleep_state. If @wake is nonzero, the device should be
- * able to wake up the system from this sleep state. If @d_min_p is set,
- * the highest power (lowest number) device power state of @dev allowed
- * in this system sleep state is stored at the location pointed to by it.
- *
- * The caller must ensure that @dev is valid before using this function.
- * The caller is also responsible for figuring out if the device is
- * supposed to be able to wake up the system and passing this information
- * via @wake.
- */
-
-int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
-{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
- struct acpi_device *adev;
- char acpi_method[] = "_SxD";
- unsigned long long d_min, d_max;
-
- if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
- return -EINVAL;
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
- printk(KERN_DEBUG "ACPI handle has no context!\n");
- return -ENODEV;
- }
-
- acpi_method[2] = '0' + acpi_target_sleep_state;
- /*
- * If the sleep state is S0, the lowest limit from ACPI is D3,
- * but if the device has _S0W, we will use the value from _S0W
- * as the lowest limit from ACPI. Finally, we will constrain
- * the lowest limit with the specified one.
- */
- d_min = ACPI_STATE_D0;
- d_max = ACPI_STATE_D3;
-
- /*
- * If present, _SxD methods return the minimum D-state (highest power
- * state) we can use for the corresponding S-states. Otherwise, the
- * minimum D-state is D0 (ACPI 3.x).
- *
- * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
- * provided -- that's our fault recovery, we ignore retval.
- */
- if (acpi_target_sleep_state > ACPI_STATE_S0)
- acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
-
- /*
- * If _PRW says we can wake up the system from the target sleep state,
- * the D-state returned by _SxD is sufficient for that (we assume a
- * wakeup-aware driver if wake is set). Still, if _SxW exists
- * (ACPI 3.x), it should return the maximum (lowest power) D-state that
- * can wake the system. _S0W may be valid, too.
- */
- if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
- adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
- acpi_status status;
-
- acpi_method[3] = 'W';
- status = acpi_evaluate_integer(handle, acpi_method, NULL,
- &d_max);
- if (ACPI_FAILURE(status)) {
- if (acpi_target_sleep_state != ACPI_STATE_S0 ||
- status != AE_NOT_FOUND)
- d_max = d_min;
- } else if (d_max < d_min) {
- /* Warn the user of the broken DSDT */
- printk(KERN_WARNING "ACPI: Wrong value from %s\n",
- acpi_method);
- /* Sanitize it */
- d_min = d_max;
- }
- }
-
- if (d_max_in < d_min)
- return -EINVAL;
- if (d_min_p)
- *d_min_p = d_min;
- /* constrain d_max with specified lowest limit (max number) */
- if (d_max > d_max_in) {
- for (d_max = d_max_in; d_max > d_min; d_max--) {
- if (adev->power.states[d_max].flags.valid)
- break;
- }
- }
- return d_max;
-}
-EXPORT_SYMBOL(acpi_pm_device_sleep_state);
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * acpi_pm_device_run_wake - Enable/disable wake-up for given device.
- * @phys_dev: Device to enable/disable the platform to wake-up the system for.
- * @enable: Whether enable or disable the wake-up functionality.
- *
- * Find the ACPI device object corresponding to @pci_dev and try to
- * enable/disable the GPE associated with it.
- */
-int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
-{
- struct acpi_device *dev;
- acpi_handle handle;
-
- if (!device_run_wake(phys_dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
- dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
- __func__);
- return -ENODEV;
- }
-
- if (enable) {
- acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- } else {
- acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- acpi_disable_wakeup_device_power(dev);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(acpi_pm_device_run_wake);
-
-/**
- * acpi_pm_device_sleep_wake - enable or disable the system wake-up
- * capability of given device
- * @dev: device to handle
- * @enable: 'true' - enable, 'false' - disable the wake-up capability
- */
-int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
-{
- acpi_handle handle;
- struct acpi_device *adev;
- int error;
-
- if (!device_can_wakeup(dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
- dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__);
- return -ENODEV;
- }
-
- error = enable ?
- acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
- acpi_disable_wakeup_device_power(adev);
- if (!error)
- dev_info(dev, "wake-up capability %s by ACPI\n",
- enable ? "enabled" : "disabled");
-
- return error;
-}
-#endif /* CONFIG_PM_SLEEP */
-
static void acpi_power_off_prepare(void)
{
/* Prepare to power off the system */
@@ -873,13 +736,13 @@ int __init acpi_sleep_init(void)
u8 type_a, type_b;
#ifdef CONFIG_SUSPEND
int i = 0;
-
- dmi_check_system(acpisleep_dmi_table);
#endif
if (acpi_disabled)
return 0;
+ acpi_sleep_dmi_check();
+
sleep_states[ACPI_STATE_S0] = 1;
printk(KERN_INFO PREFIX "(supports S0");
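
The sleep quirks above all go through one mechanism: a DMI match table walked once at boot, where each entry names a board via DMI_MATCH() pairs and points at a callback that sets a behaviour flag. A minimal sketch of the same pattern for a hypothetical driver follows; the flag, vendor strings and function names are illustrative only, not part of this patch.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static bool example_use_legacy_mode;	/* hypothetical quirk flag */

static int __init example_set_legacy_mode(const struct dmi_system_id *d)
{
	example_use_legacy_mode = true;
	pr_info("%s detected, using legacy mode\n", d->ident);
	return 0;
}

static const struct dmi_system_id example_quirk_table[] __initconst = {
	{
		.callback = example_set_legacy_mode,
		.ident = "Example Vendor Model X",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
		},
	},
	{}	/* terminator required by dmi_check_system() */
};

static void __init example_quirk_check(void)
{
	/* returns the number of matching entries; callbacks run on match */
	dmi_check_system(example_quirk_table);
}
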
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 7c3f98ba4afe..ea61ca9129cd 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -476,7 +476,7 @@ static void fixed_event_count(u32 event_number)
return;
}
-static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
if (event_type == ACPI_EVENT_TYPE_GPE)
@@ -638,7 +638,7 @@ void acpi_irq_stats_init(void)
if (all_counters == NULL)
goto fail;
- status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+ status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
if (ACPI_FAILURE(status))
goto fail;
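
The rename above only touches the Linux-side handler; the ACPICA hook is unchanged. A hedged sketch of the same acpi_install_global_event_handler() usage, counting GPEs into a private counter (the example_ names are made up):

#include <linux/acpi.h>
#include <linux/errno.h>

static u32 example_gpe_count;	/* illustrative counter */

static void example_global_event_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE)
		example_gpe_count++;
}

static int example_install_handler(void)
{
	acpi_status status;

	status = acpi_install_global_event_handler(example_global_event_handler,
						   NULL);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}
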
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 804204d41999..506fbd4b5733 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -900,14 +900,14 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (tz->trips.passive.flags.valid)
tz->thermal_zone =
thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops,
+ &acpi_thermal_zone_ops, NULL,
tz->trips.passive.tsp*100,
tz->polling_frequency*100);
else
tz->thermal_zone =
thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, 0,
- tz->polling_frequency*100);
+ &acpi_thermal_zone_ops, NULL,
+ 0, tz->polling_frequency*100);
if (IS_ERR(tz->thermal_zone))
return -ENODEV;
@@ -984,6 +984,38 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
}
}
+/*
+ * On some platforms, the AML code has a dependency on the
+ * evaluation order of _TMP and _CRT/_HOT/_PSV/_ACx:
+ * 1. On the HP Pavilion G4-1016tx, _TMP must be invoked after
+ * _CRT/_HOT/_PSV/_ACx, or else the system powers off.
+ * 2. On the HP Compaq 6715b/6715s, _PSV returns 0
+ * if _TMP has never been evaluated.
+ *
+ * As this dependency is entirely transparent to the OS, evaluate
+ * all of them once, in the order _CRT/_HOT/_PSV/_ACx, then
+ * _TMP, before they are actually used.
+ */
+static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
+{
+ acpi_handle handle = tz->device->handle;
+ unsigned long long value;
+ int i;
+
+ acpi_evaluate_integer(handle, "_CRT", NULL, &value);
+ acpi_evaluate_integer(handle, "_HOT", NULL, &value);
+ acpi_evaluate_integer(handle, "_PSV", NULL, &value);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, name, NULL, &value);
+ if (status == AE_NOT_FOUND)
+ break;
+ }
+ acpi_evaluate_integer(handle, "_TMP", NULL, &value);
+}
+
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
int result = 0;
@@ -992,6 +1024,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
if (!tz)
return -EINVAL;
+ acpi_thermal_aml_dependency_fix(tz);
+
/* Get trip points [_CRT, _PSV, etc.] (required) */
result = acpi_thermal_get_trip_points(tz);
if (result)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 462f7e300363..744371304313 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -28,6 +28,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/hardirq.h>
+#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -457,3 +459,39 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
#endif
}
EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+
+/**
+ * acpi_handle_printk - print a message with ACPI prefix and object path
+ *
+ * This function is called through acpi_handle_<level> macros and prints
+ * a message with ACPI prefix and object path. This function acquires
+ * the global namespace mutex to obtain an object path. In interrupt
+ * context, it shows the object path as <n/a>.
+ */
+void
+acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ struct acpi_buffer buffer = {
+ .length = ACPI_ALLOCATE_BUFFER,
+ .pointer = NULL
+ };
+ const char *path;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (in_interrupt() ||
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+ path = "<n/a>";
+ else
+ path = buffer.pointer;
+
+ printk("%sACPI: %s: %pV", level, path, &vaf);
+
+ va_end(args);
+ kfree(buffer.pointer);
+}
+EXPORT_SYMBOL(acpi_handle_printk);
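
A short usage sketch for the helper just added: callers normally go through the acpi_handle_<level> convenience macros, which forward the log level to acpi_handle_printk(); the direct call below is the equivalent spelled out. The handle and status variables are assumed to exist in the caller.

	/* usually written as acpi_handle_info(handle, ...) */
	acpi_handle_printk(KERN_INFO, handle,
			   "device configured, status %#x\n", status);
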
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 0230cb6cbb3a..ac9a69cd45f5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
+static int video_ignore_initial_backlight(const struct dmi_system_id *d)
+{
+ use_bios_initial_backlight = 0;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Folio 13-2000",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
+ },
+ },
{}
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b728880ef10e..4ac2593234e7 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "X360"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Asus UL30VT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
+ },
+ },
{ },
};
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e8eb91bd0d28..cdbad3a454a0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -45,7 +45,6 @@ static int amba_match(struct device *dev, struct device_driver *drv)
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-#ifdef CONFIG_HOTPLUG
static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct amba_device *pcdev = to_amba_device(dev);
@@ -58,9 +57,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
-#else
-#define amba_uevent NULL
-#endif
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
@@ -546,7 +542,8 @@ EXPORT_SYMBOL_GPL(amba_device_add);
static struct amba_device *
amba_aphb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid, u64 dma_mask)
+ void *pdata, unsigned int periphid, u64 dma_mask,
+ struct resource *resbase)
{
struct amba_device *dev;
int ret;
@@ -563,7 +560,7 @@ amba_aphb_device_add(struct device *parent, const char *name,
dev->dev.platform_data = pdata;
dev->dev.parent = parent;
- ret = amba_device_add(dev, &iomem_resource);
+ ret = amba_device_add(dev, resbase);
if (ret) {
amba_device_put(dev);
return ERR_PTR(ret);
@@ -578,7 +575,7 @@ amba_apb_device_add(struct device *parent, const char *name,
void *pdata, unsigned int periphid)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, 0);
+ periphid, 0, &iomem_resource);
}
EXPORT_SYMBOL_GPL(amba_apb_device_add);
@@ -588,10 +585,33 @@ amba_ahb_device_add(struct device *parent, const char *name,
void *pdata, unsigned int periphid)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, ~0ULL);
+ periphid, ~0ULL, &iomem_resource);
}
EXPORT_SYMBOL_GPL(amba_ahb_device_add);
+struct amba_device *
+amba_apb_device_add_res(struct device *parent, const char *name,
+ resource_size_t base, size_t size, int irq1,
+ int irq2, void *pdata, unsigned int periphid,
+ struct resource *resbase)
+{
+ return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
+ periphid, 0, resbase);
+}
+EXPORT_SYMBOL_GPL(amba_apb_device_add_res);
+
+struct amba_device *
+amba_ahb_device_add_res(struct device *parent, const char *name,
+ resource_size_t base, size_t size, int irq1,
+ int irq2, void *pdata, unsigned int periphid,
+ struct resource *resbase)
+{
+ return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
+ periphid, ~0ULL, resbase);
+}
+EXPORT_SYMBOL_GPL(amba_ahb_device_add_res);
+
+
static void amba_device_initialize(struct amba_device *dev, const char *name)
{
device_initialize(&dev->dev);
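
The new *_add_res() variants differ from amba_apb_device_add()/amba_ahb_device_add() only in letting the caller choose the parent resource instead of the global iomem_resource. A hedged usage sketch; the bus window, base address, IRQ and peripheral ID below are placeholders:

#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/ioport.h>

static struct resource example_bus_resource = {
	.name	= "example-bus",
	.start	= 0x10000000,
	.end	= 0x1fffffff,
	.flags	= IORESOURCE_MEM,
};

static int example_add_uart(struct device *parent, int irq)
{
	struct amba_device *adev;

	/* register an APB peripheral under a bus-local resource tree */
	adev = amba_apb_device_add_res(parent, "example-uart", 0x10009000,
				       0x1000, irq, 0, NULL, 0x00041011,
				       &example_bus_resource);
	return IS_ERR(adev) ? PTR_ERR(adev) : 0;
}
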
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 0b6f0b28a487..536c166f4253 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/tegra-ahb.h>
#define DRV_NAME "tegra-ahb"
@@ -156,6 +157,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
+#ifdef CONFIG_PM_SLEEP
static int tegra_ahb_suspend(struct device *dev)
{
int i;
@@ -175,6 +177,7 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
+#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
@@ -240,7 +243,7 @@ static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
}
-static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+static int tegra_ahb_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_ahb *ahb;
@@ -264,7 +267,7 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+static const struct of_device_id tegra_ahb_of_match[] = {
{ .compatible = "nvidia,tegra30-ahb", },
{ .compatible = "nvidia,tegra20-ahb", },
{},
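
The new CONFIG_PM_SLEEP guard here follows the usual pattern: the suspend/resume callbacks are compiled out when sleep support is disabled, and the guarded names are only referenced when CONFIG_PM_SLEEP is set. A generic sketch using SIMPLE_DEV_PM_OPS (also used by ahci_platform later in this series), with SET_SYSTEM_SLEEP_PM_OPS dropping the references on !CONFIG_PM_SLEEP builds; all names are illustrative.

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* save controller context, gate clocks, etc. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore controller context */
	return 0;
}
#endif

/* expands to empty sleep ops when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
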
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7862d17976b7..497912732566 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
enum {
AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_ENMOTUS = 2,
AHCI_PCI_BAR_STANDARD = 5,
};
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1098,9 +1102,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
- /* The Connext uses non-standard BAR */
+ /* Both Connext and Enmotus devices use non-standard BARs */
if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
/* acquire resources */
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index b7078afddb74..7a8a2841fe64 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -25,6 +25,8 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static void ahci_host_stop(struct ata_host *host);
+
enum ahci_type {
AHCI, /* standard platform ahci */
IMX53_AHCI, /* ahci on i.mx53 */
@@ -47,6 +49,15 @@ static struct platform_device_id ahci_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, ahci_devtype);
+static struct ata_port_operations ahci_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = ahci_host_stop,
+};
+
+static struct ata_port_operations ahci_platform_retry_srst_ops = {
+ .inherits = &ahci_pmp_retry_srst_ops,
+ .host_stop = ahci_host_stop,
+};
static const struct ata_port_info ahci_port_info[] = {
/* by features */
@@ -54,20 +65,20 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
[IMX53_AHCI] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_pmp_retry_srst_ops,
+ .port_ops = &ahci_platform_retry_srst_ops,
},
[STRICT_AHCI] = {
AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_platform_ops,
},
};
@@ -75,7 +86,7 @@ static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
-static int __init ahci_probe(struct platform_device *pdev)
+static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
@@ -218,15 +229,12 @@ free_clk:
return rc;
}
-static int __devexit ahci_remove(struct platform_device *pdev)
+static void ahci_host_stop(struct ata_host *host)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = host->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
- struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
- ata_host_detach(host);
-
if (pdata && pdata->exit)
pdata->exit(dev);
@@ -234,8 +242,6 @@ static int __devexit ahci_remove(struct platform_device *pdev)
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -317,7 +323,7 @@ disable_unprepare_clk:
}
#endif
-SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "snps,spear-ahci", },
@@ -326,7 +332,8 @@ static const struct of_device_id ahci_of_match[] = {
MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver ahci_driver = {
- .remove = __devexit_p(ahci_remove),
+ .probe = ahci_probe,
+ .remove = ata_platform_remove_one,
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
@@ -335,18 +342,7 @@ static struct platform_driver ahci_driver = {
},
.id_table = ahci_devtype,
};
-
-static int __init ahci_init(void)
-{
- return platform_driver_probe(&ahci_driver, ahci_probe);
-}
-module_init(ahci_init);
-
-static void __exit ahci_exit(void)
-{
- platform_driver_unregister(&ahci_driver);
-}
-module_exit(ahci_exit);
+module_platform_driver(ahci_driver);
MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
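
module_platform_driver() is shorthand for exactly the init/exit pair removed above: it emits module_init()/module_exit() stubs that register and unregister the driver. Because registration now goes through the ordinary .probe path rather than platform_driver_probe(), the probe callback must be listed in the platform_driver, which is what the hunk above adds. A minimal sketch for a hypothetical driver:

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

/* equivalent to module_init()/module_exit() wrappers that call
 * platform_driver_register()/platform_driver_unregister()
 */
module_platform_driver(example_driver);
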
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ef773e12af79..174eca609b42 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -164,28 +164,6 @@ struct piix_host_priv {
void __iomem *sidpr;
};
-static int piix_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void piix_remove_one(struct pci_dev *pdev);
-static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
-static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
-static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
-static int ich_pata_cable_detect(struct ata_port *ap);
-static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-static int piix_sidpr_scr_read(struct ata_link *link,
- unsigned int reg, u32 *val);
-static int piix_sidpr_scr_write(struct ata_link *link,
- unsigned int reg, u32 val);
-static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- unsigned hints);
-static bool piix_irq_check(struct ata_port *ap);
-static int piix_port_start(struct ata_port *ap);
-#ifdef CONFIG_PM
-static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int piix_pci_device_resume(struct pci_dev *pdev);
-#endif
-
static unsigned int in_module_init = 1;
static const struct pci_device_id piix_pci_tbl[] = {
@@ -342,64 +320,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ } /* terminate list */
};
-static struct pci_driver piix_pci_driver = {
- .name = DRV_NAME,
- .id_table = piix_pci_tbl,
- .probe = piix_init_one,
- .remove = piix_remove_one,
-#ifdef CONFIG_PM
- .suspend = piix_pci_device_suspend,
- .resume = piix_pci_device_resume,
-#endif
-};
-
-static struct scsi_host_template piix_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations piix_sata_ops = {
- .inherits = &ata_bmdma32_port_ops,
- .sff_irq_check = piix_irq_check,
- .port_start = piix_port_start,
-};
-
-static struct ata_port_operations piix_pata_ops = {
- .inherits = &piix_sata_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = piix_set_piomode,
- .set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
-};
-
-static struct ata_port_operations piix_vmw_ops = {
- .inherits = &piix_pata_ops,
- .bmdma_status = piix_vmw_bmdma_status,
-};
-
-static struct ata_port_operations ich_pata_ops = {
- .inherits = &piix_pata_ops,
- .cable_detect = ich_pata_cable_detect,
- .set_dmamode = ich_set_dmamode,
-};
-
-static struct device_attribute *piix_sidpr_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- NULL
-};
-
-static struct scsi_host_template piix_sidpr_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
- .shost_attrs = piix_sidpr_shost_attrs,
-};
-
-static struct ata_port_operations piix_sidpr_sata_ops = {
- .inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
- .scr_read = piix_sidpr_scr_read,
- .scr_write = piix_sidpr_scr_write,
- .set_lpm = piix_sidpr_set_lpm,
-};
-
static const struct piix_map_db ich5_map_db = {
.mask = 0x7,
.port_enable = 0x3,
@@ -504,147 +424,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_sata_snb] = &ich8_map_db,
};
-static struct ata_port_info piix_port_info[] = {
- [piix_pata_mwdma] = /* PIIX3 MWDMA only */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .port_ops = &piix_pata_ops,
- },
-
- [piix_pata_33] = /* PIIX4 at 33MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_pata_ops,
- },
-
- [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
- .udma_mask = ATA_UDMA2,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_66] = /* ICH controllers up to 66MHz */
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
- .udma_mask = ATA_UDMA4,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich_pata_100_nomwdma1] =
- {
- .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2_ONLY,
- .udma_mask = ATA_UDMA5,
- .port_ops = &ich_pata_ops,
- },
-
- [ich5_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich6m_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8_2port_sata] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [tolapai_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [ich8m_apple_sata] =
- {
- .flags = PIIX_SATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
- [piix_pata_vmw] =
- {
- .flags = PIIX_PATA_FLAGS,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
- .udma_mask = ATA_UDMA2,
- .port_ops = &piix_vmw_ops,
- },
-
- /*
- * some Sandybridge chipsets have broken 32 mode up to now,
- * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
- */
- [ich8_sata_snb] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
-
-};
-
static struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
@@ -1261,6 +1040,193 @@ static u8 piix_vmw_bmdma_status(struct ata_port *ap)
return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
}
+static struct scsi_host_template piix_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_irq_check = piix_irq_check,
+ .port_start = piix_port_start,
+};
+
+static struct ata_port_operations piix_pata_ops = {
+ .inherits = &piix_sata_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+ .set_dmamode = piix_set_dmamode,
+ .prereset = piix_pata_prereset,
+};
+
+static struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+};
+
+static struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+};
+
+static struct device_attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ NULL
+};
+
+static struct scsi_host_template piix_sidpr_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .shost_attrs = piix_sidpr_shost_attrs,
+};
+
+static struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+ .scr_write = piix_sidpr_scr_write,
+ .set_lpm = piix_sidpr_set_lpm,
+};
+
+static struct ata_port_info piix_port_info[] = {
+ [piix_pata_mwdma] = /* PIIX3 MWDMA only */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .port_ops = &piix_pata_ops,
+ },
+
+ [piix_pata_33] = /* PIIX4 at 33MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_pata_ops,
+ },
+
+ [ich_pata_33] = /* ICH0 - ICH at 33Mhz*/
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_66] = /* ICH controllers up to 66MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100_nomwdma1] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2_ONLY,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich5_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6m_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_2port_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [tolapai_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8m_apple_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [piix_pata_vmw] =
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &piix_vmw_ops,
+ },
+
+ /*
+ * some Sandybridge chipsets have broken 32 mode up to now,
+ * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
+ */
+ [ich8_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+};
+
#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)
@@ -1304,7 +1270,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
* they are found return an error code so we can turn off DMA
*/
-static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
+static int piix_check_450nx_errata(struct pci_dev *ata_dev)
{
struct pci_dev *pdev = NULL;
u16 cfg;
@@ -1330,8 +1296,8 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
return no_piix_dma;
}
-static void __devinit piix_init_pcs(struct ata_host *host,
- const struct piix_map_db *map_db)
+static void piix_init_pcs(struct ata_host *host,
+ const struct piix_map_db *map_db)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
u16 pcs, new_pcs;
@@ -1347,9 +1313,9 @@ static void __devinit piix_init_pcs(struct ata_host *host,
}
}
-static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
- struct ata_port_info *pinfo,
- const struct piix_map_db *map_db)
+static const int *piix_init_sata_map(struct pci_dev *pdev,
+ struct ata_port_info *pinfo,
+ const struct piix_map_db *map_db)
{
const int *map;
int i, invalid_map = 0;
@@ -1426,7 +1392,7 @@ static bool piix_no_sidpr(struct ata_host *host)
return false;
}
-static int __devinit piix_init_sidpr(struct ata_host *host)
+static int piix_init_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
@@ -1585,12 +1551,31 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
},
{ } /* terminate list */
};
- const struct dmi_system_id *dmi = dmi_first_match(ignore_hyperv);
+ static const struct dmi_system_id allow_virtual_pc[] = {
+ {
+ /* In MS Virtual PC guests the DMI ident is nearly
+ * identical to that of a Hyper-V guest. One difference is the
+ * product version, which is used here to identify
+ * a Virtual PC guest. This entry allows ata_piix to
+ * drive the emulated hardware.
+ */
+ .ident = "MS Virtual PC 2007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
+ },
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
+ const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);
- if (dmi && prefer_ms_hyperv) {
+ if (ignore && !allow && prefer_ms_hyperv) {
host->flags |= ATA_HOST_IGNORE_ATA;
dev_info(host->dev, "%s detected, ATA device ignore set\n",
- dmi->ident);
+ ignore->ident);
}
#endif
}
@@ -1610,8 +1595,7 @@ static void piix_ignore_devices_quirk(struct ata_host *host)
* Zero on success, or -ERRNO value.
*/
-static int __devinit piix_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
@@ -1727,6 +1711,17 @@ static void piix_remove_one(struct pci_dev *pdev)
ata_pci_remove_one(pdev);
}
+static struct pci_driver piix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = piix_remove_one,
+#ifdef CONFIG_PM
+ .suspend = piix_pci_device_suspend,
+ .resume = piix_pci_device_resume,
+#endif
+};
+
static int __init piix_init(void)
{
int rc;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4201e535a8c8..6cd7805e47ca 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1384,7 +1384,7 @@ int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"applying PMP SRST workaround "
"and retrying\n");
rc = ahci_do_softreset(link, class, 0, deadline,
@@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
- if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+ if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
ATA_LOG_DEVSLP_VALID_MASK) {
- mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+ mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
ATA_LOG_DEVSLP_MDAT_MASK;
if (!mdat)
mdat = 10;
- deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+ deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
if (!deto)
deto = 20;
} else {
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 5b0ba3f20edc..ef01ac07502e 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -76,6 +76,9 @@ acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
acpi_integer adr;
struct ata_port *ap = dev->link->ap;
+ if (dev->flags & ATA_DFLAG_ACPI_DISABLED)
+ return NULL;
+
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
@@ -945,6 +948,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
return rc;
}
+ dev->flags |= ATA_DFLAG_ACPI_DISABLED;
ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
/* We can safely continue if no _GTF command has been executed
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f46fbd3bd3fb..46cd3f4c6aaa 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -67,6 +67,7 @@
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
#include "libata.h"
#include "libata-transport.h"
@@ -2324,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- /* check and mark DevSlp capability */
- if (ata_id_has_devslp(dev->id))
- dev->flags |= ATA_DFLAG_DEVSLP;
-
- /* Obtain SATA Settings page from Identify Device Data Log,
- * which contains DevSlp timing variables etc.
- * Exclude old devices with ata_id_has_ncq()
+ /* Check and mark DevSlp capability. Get DevSlp timing variables
+ * from the SATA Settings page of the Identify Device Data Log.
*/
- if (ata_id_has_ncq(dev->id)) {
+ if (ata_id_has_devslp(dev->id)) {
+ u8 sata_setting[ATA_SECT_SIZE];
+ int i, j;
+
+ dev->flags |= ATA_DFLAG_DEVSLP;
err_mask = ata_read_log_page(dev,
ATA_LOG_SATA_ID_DEV_DATA,
ATA_LOG_SATA_SETTINGS,
- dev->sata_settings,
+ sata_setting,
1);
if (err_mask)
ata_dev_dbg(dev,
"failed to get Identify Device Data, Emask 0x%x\n",
err_mask);
+ else
+ for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+ j = ATA_LOG_DEVSLP_OFFSET + i;
+ dev->devslp_timing[i] = sata_setting[j];
+ }
}
dev->cdb_len = 16;
@@ -2560,6 +2565,7 @@ int ata_bus_probe(struct ata_port *ap)
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
@@ -6286,8 +6292,7 @@ void ata_host_detach(struct ata_host *host)
*/
void ata_pci_remove_one(struct pci_dev *pdev)
{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
ata_host_detach(host);
}
@@ -6356,7 +6361,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc = 0;
rc = ata_host_suspend(host, mesg);
@@ -6370,7 +6375,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
int ata_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
@@ -6382,6 +6387,26 @@ int ata_pci_device_resume(struct pci_dev *pdev)
#endif /* CONFIG_PCI */
+/**
+ * ata_platform_remove_one - Platform layer callback for device removal
+ * @pdev: Platform device that was removed
+ *
+ * Platform layer indicates to libata via this hook that a hot-unplug or
+ * module unload event has occurred. Detach all ports. Resource
+ * release is handled via devres.
+ *
+ * LOCKING:
+ * Inherited from platform layer (may sleep).
+ */
+int ata_platform_remove_one(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
static int __init ata_parse_force_one(char **cur,
struct ata_force_ent *force_ent,
const char **reason)
@@ -6877,6 +6902,8 @@ EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
+
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
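
A usage sketch for the new ata_platform_remove_one() export: a platform ATA driver can point its .remove hook straight at it, as ahci_platform and pata_ixp4xx do elsewhere in this patch; libata stores the ata_host as driver data during host activation, which is what the helper fetches back. The example_ names below are hypothetical.

static struct platform_driver example_sata_driver = {
	.probe	= example_sata_probe,	/* activates an ata_host */
	.remove	= ata_platform_remove_one,
	.driver	= {
		.name	= "example-sata",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_sata_driver);
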
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e60437cd0d19..bcf4437214f5 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
*/
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
- if (qc->flags & AC_ERR_MEDIA)
+ if (qc->err_mask & AC_ERR_MEDIA)
return 0; /* don't retry media errors */
if (qc->flags & ATA_QCFLAG_IO)
return 1; /* otherwise retry anything from fs stack */
@@ -2657,6 +2657,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
* bus as we may be talking too fast.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = 0xff;
/* If the controller has a pio mode setup function
* then use it to set the chipset to rights. Don't
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a6df6a351d6e..7c337e754dab 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ if (atadev && ap->ops->sw_activity_show &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY))
return ap->ops->sw_activity_show(atadev, buf);
return -EINVAL;
}
@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
enum sw_activity val;
int rc;
- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ if (atadev && ap->ops->sw_activity_store &&
+ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
val = simple_strtoul(buf, NULL, 0);
switch (val) {
case OFF: case BLINK_ON: case BLINK_OFF:
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 371fd2c698b7..405022d302c3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -674,13 +674,16 @@ void arasan_cf_error_handler(struct ata_port *ap)
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
+ struct ata_queued_cmd *qc = acdev->qc;
+ struct ata_port *ap = qc->ap;
+ struct ata_taskfile *tf = &qc->tf;
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
- u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 write = tf->flags & ATA_TFLAG_WRITE;
xfer_ctr |= write ? XFER_WRITE : XFER_READ;
writel(xfer_ctr, acdev->vbase + XFER_CTR);
- acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
+ ap->ops->sff_exec_command(ap, tf);
ata_sff_queue_work(&acdev->work);
}
@@ -788,7 +791,7 @@ static struct ata_port_operations arasan_cf_ops = {
.set_dmamode = arasan_cf_set_dmamode,
};
-static int __devinit arasan_cf_probe(struct platform_device *pdev)
+static int arasan_cf_probe(struct platform_device *pdev)
{
struct arasan_cf_dev *acdev;
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
@@ -902,7 +905,7 @@ free_clk:
return ret;
}
-static int __devexit arasan_cf_remove(struct platform_device *pdev)
+static int arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
@@ -952,7 +955,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove = __devexit_p(arasan_cf_remove),
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
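
The repeated __devinit/__devexit/__devexit_p() removals in the pata drivers all stem from the CONFIG_HOTPLUG removal: those annotations used to allow probe/remove code to be discarded on non-hotplug kernels, with __devexit_p() turning the remove pointer into NULL in that configuration. A hedged before/after sketch with a hypothetical driver:

/* before: remove() could be discarded, so its pointer was wrapped */
static int __devexit example_remove_old(struct platform_device *pdev);

static struct platform_driver example_driver_old = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove_old), /* NULL if !CONFIG_HOTPLUG */
};

/* after: the annotations are gone and the pointer is used directly */
static int example_remove(struct platform_device *pdev);

static struct platform_driver example_driver_new = {
	.probe	= example_probe,
	.remove	= example_remove,
};
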
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 53d3770a0b1b..033f3f4c20ad 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -27,9 +27,9 @@
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
+#include <linux/platform_data/atmel.h>
#include <mach/at91sam9_smc.h>
-#include <mach/board.h>
#include <asm/gpio.h>
#define DRV_NAME "pata_at91"
@@ -313,7 +313,7 @@ static struct ata_port_operations pata_at91_port_ops = {
.cable_detect = ata_cable_40wire,
};
-static int __devinit pata_at91_probe(struct platform_device *pdev)
+static int pata_at91_probe(struct platform_device *pdev)
{
struct at91_cf_data *board = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -420,7 +420,7 @@ err_put:
return ret;
}
-static int __devexit pata_at91_remove(struct platform_device *pdev)
+static int pata_at91_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct at91_ide_info *info;
@@ -441,7 +441,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
static struct platform_driver pata_at91_driver = {
.probe = pata_at91_probe,
- .remove = __devexit_p(pata_at91_remove),
+ .remove = pata_at91_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 1e65842e2ca7..8d43510c6bec 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1538,7 +1538,7 @@ static unsigned short atapi_io_port[] = {
* - IRQ (IORESOURCE_IRQ)
*
*/
-static int __devinit bfin_atapi_probe(struct platform_device *pdev)
+static int bfin_atapi_probe(struct platform_device *pdev)
{
int board_idx = 0;
struct resource *res;
@@ -1608,7 +1608,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
* A bfin atapi device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int __devexit bfin_atapi_remove(struct platform_device *pdev)
+static int bfin_atapi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
@@ -1654,7 +1654,7 @@ static int bfin_atapi_resume(struct platform_device *pdev)
static struct platform_driver bfin_atapi_driver = {
.probe = bfin_atapi_probe,
- .remove = __devexit_p(bfin_atapi_remove),
+ .remove = bfin_atapi_remove,
.suspend = bfin_atapi_suspend,
.resume = bfin_atapi_resume,
.driver = {
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7ba01415b676..2949cfc2dd31 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -474,14 +474,14 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, &reg);
if (!port_ok)
- dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
+ dev_notice(&pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
+ dev_notice(&pdev->dev, "Primary port is disabled\n");
ppi[0] = &ata_dummy_port_info;
}
if (port_ok && !(reg & CNTRL_CH1)) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
+ dev_notice(&pdev->dev, "Secondary port is disabled\n");
ppi[1] = &ata_dummy_port_info;
}
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index de74d804f031..bfcf377e8f77 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -115,7 +115,7 @@ static struct ata_port_operations cs5520_port_ops = {
.set_piomode = cs5520_set_piomode,
};
-static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index dec1b6c4b351..0448860a2077 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -38,6 +38,7 @@
#include <linux/delay.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86_32
#include <asm/msr.h>
@@ -80,6 +81,21 @@ enum {
IDE_ETC_UDMA_MASK = 0xc0,
};
+/* Some Bachmann OT200 devices have non-working UDMA support due to a
+ * missing resistor.
+ */
+static const struct dmi_system_id udma_quirk_dmi_table[] = {
+ {
+ .ident = "Bachmann electronic OT200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Bachmann electronic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OT200"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1")
+ },
+ },
+ { }
+};
+
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
@@ -242,9 +258,23 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &cs5536_port_ops,
};
- const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ static const struct ata_port_info no_udma_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &cs5536_port_ops,
+ };
+
+
+ const struct ata_port_info *ppi[2];
u32 cfg;
+ if (dmi_check_system(udma_quirk_dmi_table))
+ ppi[0] = &no_udma_info;
+ else
+ ppi[0] = &info;
+
+ ppi[1] = &ata_dummy_port_info;
+
if (use_msr)
printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index e056406d6a11..556222f04731 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -822,8 +822,7 @@ static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
 /* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
- ata_link_printk(al, KERN_ERR, "SRST failed (errno=%d)\n",
- rc);
+ ata_link_err(al, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -857,8 +856,7 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
/* Can become DEBUG later */
if (count)
- ata_port_printk(ap, KERN_DEBUG,
- "drained %d bytes to clear DRQ.\n", count);
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
}
@@ -912,7 +910,7 @@ static struct ata_port_operations ep93xx_pata_port_ops = {
.port_start = ep93xx_pata_port_start,
};
-static int __devinit ep93xx_pata_probe(struct platform_device *pdev)
+static int ep93xx_pata_probe(struct platform_device *pdev)
{
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
@@ -1013,7 +1011,7 @@ err_rel_gpio:
return err;
}
-static int __devexit ep93xx_pata_remove(struct platform_device *pdev)
+static int ep93xx_pata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ep93xx_pata_data *drv_data = host->private_data;
@@ -1031,7 +1029,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ep93xx_pata_probe,
- .remove = __devexit_p(ep93xx_pata_remove),
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 52e7e7b8c74f..d7c732042a4f 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -337,10 +337,9 @@ static struct ata_port_operations pata_icside_port_ops = {
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
-static void __devinit
-pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
- struct pata_icside_info *info,
- const struct portinfo *port)
+static void pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
+ struct pata_icside_info *info,
+ const struct portinfo *port)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
void __iomem *cmd = base + port->dataoffset;
@@ -368,7 +367,7 @@ pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
}
-static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
+static int pata_icside_register_v5(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
void __iomem *base;
@@ -391,7 +390,7 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
return 0;
}
-static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
+static int pata_icside_register_v6(struct pata_icside_info *info)
{
struct pata_icside_state *state = info->state;
struct expansion_card *ec = info->ec;
@@ -434,7 +433,7 @@ static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
return icside_dma_init(info);
}
-static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
+static int pata_icside_add_ports(struct pata_icside_info *info)
{
struct expansion_card *ec = info->ec;
struct ata_host *host;
@@ -474,8 +473,8 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
&pata_icside_sht);
}
-static int __devinit
-pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int pata_icside_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct pata_icside_state *state;
struct pata_icside_info info;
@@ -575,7 +574,7 @@ static void pata_icside_shutdown(struct expansion_card *ec)
}
}
-static void __devexit pata_icside_remove(struct expansion_card *ec)
+static void pata_icside_remove(struct expansion_card *ec)
{
struct ata_host *host = ecard_get_drvdata(ec);
struct pata_icside_state *state = host->private_data;
@@ -602,7 +601,7 @@ static const struct ecard_id pata_icside_ids[] = {
static struct ecard_driver pata_icside_driver = {
.probe = pata_icside_probe,
- .remove = __devexit_p(pata_icside_remove),
+ .remove = pata_icside_remove,
.shutdown = pata_icside_shutdown,
.id_table = pata_icside_ids,
.drv = {
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 87bb05b3cafc..40849445a9dc 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -60,7 +60,7 @@ static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
@@ -91,7 +91,7 @@ static void pata_imx_setup_port(struct ata_ioports *ioaddr)
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
}
-static int __devinit pata_imx_probe(struct platform_device *pdev)
+static int pata_imx_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -167,7 +167,7 @@ free_priv:
return -ENOMEM;
}
-static int __devexit pata_imx_remove(struct platform_device *pdev)
+static int pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct pata_imx_priv *priv = host->private_data;
@@ -225,7 +225,7 @@ static const struct dev_pm_ops pata_imx_pm_ops = {
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove = __devexit_p(pata_imx_remove),
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index badb1789a918..dcc6b243e525 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -137,7 +137,7 @@ static void ixp4xx_setup_port(struct ata_port *ap,
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
}
-static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
+static int ixp4xx_pata_probe(struct platform_device *pdev)
{
unsigned int irq;
struct resource *cs0, *cs1;
@@ -187,22 +187,13 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
}
-static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
-{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ixp4xx_pata_probe,
- .remove = __devexit_p(ixp4xx_pata_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index b057e3fa44bc..e5725edcf515 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -935,7 +935,7 @@ static struct ata_port_operations pata_macio_ops = {
.sff_irq_clear = pata_macio_irq_clear,
};
-static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
+static void pata_macio_invariants(struct pata_macio_priv *priv)
{
const int *bidp;
@@ -976,9 +976,8 @@ static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
priv->aapl_bus_id = 1;
}
-static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
- void __iomem * base,
- void __iomem * dma)
+static void pata_macio_setup_ios(struct ata_ioports *ioaddr,
+ void __iomem * base, void __iomem * dma)
{
/* cmd_addr is the base of regs for that port */
ioaddr->cmd_addr = base;
@@ -999,8 +998,8 @@ static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
ioaddr->bmdma_addr = dma;
}
-static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
- struct ata_port_info *pinfo)
+static void pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
+ struct ata_port_info *pinfo)
{
int i = 0;
@@ -1027,11 +1026,11 @@ static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
-static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
- resource_size_t tfregs,
- resource_size_t dmaregs,
- resource_size_t fcregs,
- unsigned long irq)
+static int pata_macio_common_init(struct pata_macio_priv *priv,
+ resource_size_t tfregs,
+ resource_size_t dmaregs,
+ resource_size_t fcregs,
+ unsigned long irq)
{
struct ata_port_info pinfo;
const struct ata_port_info *ppi[] = { &pinfo, NULL };
@@ -1113,8 +1112,8 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
&pata_macio_sht);
}
-static int __devinit pata_macio_attach(struct macio_dev *mdev,
- const struct of_device_id *match)
+static int pata_macio_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct pata_macio_priv *priv;
resource_size_t tfregs, dmaregs = 0;
@@ -1190,7 +1189,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
return rc;
}
-static int __devexit pata_macio_detach(struct macio_dev *mdev)
+static int pata_macio_detach(struct macio_dev *mdev)
{
struct ata_host *host = macio_get_drvdata(mdev);
struct pata_macio_priv *priv = host->private_data;
@@ -1257,8 +1256,8 @@ static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
#endif /* CONFIG_PMAC_MEDIABAY */
-static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pata_macio_pci_attach(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct pata_macio_priv *priv;
struct device_node *np;
@@ -1310,7 +1309,7 @@ static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
return 0;
}
-static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
+static void pata_macio_pci_detach(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index d2c102fd4330..652f57e83484 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -621,9 +621,10 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.qc_prep = ata_noop_qc_prep,
};
-static int __devinit
-mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
- unsigned long raw_ata_regs, int mwdma_mask, int udma_mask)
+static int mpc52xx_ata_init_one(struct device *dev,
+ struct mpc52xx_ata_priv *priv,
+ unsigned long raw_ata_regs,
+ int mwdma_mask, int udma_mask)
{
struct ata_host *host;
struct ata_port *ap;
@@ -663,24 +664,11 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
&mpc52xx_ata_sht);
}
-static struct mpc52xx_ata_priv *
-mpc52xx_ata_remove_one(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
- struct mpc52xx_ata_priv *priv = host->private_data;
-
- ata_host_detach(host);
-
- return priv;
-}
-
-
/* ======================================================================== */
/* OF Platform driver */
/* ======================================================================== */
-static int __devinit
-mpc52xx_ata_probe(struct platform_device *op)
+static int mpc52xx_ata_probe(struct platform_device *op)
{
unsigned int ipb_freq;
struct resource res_mem;
@@ -815,11 +803,12 @@ mpc52xx_ata_probe(struct platform_device *op)
static int
mpc52xx_ata_remove(struct platform_device *op)
{
- struct mpc52xx_ata_priv *priv;
+ struct ata_host *host = platform_get_drvdata(op);
+ struct mpc52xx_ata_priv *priv = host->private_data;
int task_irq;
/* Deregister the ATA interface */
- priv = mpc52xx_ata_remove_one(&op->dev);
+ ata_platform_remove_one(op);
/* Clean up DMA */
task_irq = bcom_get_task_irq(priv->dmatsk);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1d61d5d278fa..ff2e57f3b597 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -5,19 +5,22 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2005 - 2009 Cavium Networks
+ * Copyright (C) 2005 - 2012 Cavium Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
-#include <linux/irq.h>
+#include <linux/hrtimer.h>
#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
+#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
/*
@@ -34,20 +37,36 @@
*/
#define DRV_NAME "pata_octeon_cf"
-#define DRV_VERSION "2.1"
+#define DRV_VERSION "2.2"
+
+/* Poll interval in ns. */
+#define OCTEON_CF_BUSY_POLL_INTERVAL 500000
+#define DMA_CFG 0
+#define DMA_TIM 0x20
+#define DMA_INT 0x38
+#define DMA_INT_EN 0x50
struct octeon_cf_port {
- struct workqueue_struct *wq;
- struct delayed_work delayed_finish;
+ struct hrtimer delayed_finish;
struct ata_port *ap;
int dma_finished;
+ void *c0;
+ unsigned int cs0;
+ unsigned int cs1;
+ bool is_true_ide;
+ u64 dma_base;
};
static struct scsi_host_template octeon_cf_sht = {
ATA_PIO_SHT(DRV_NAME),
};
+static int enable_dma;
+module_param(enable_dma, int, 0444);
+MODULE_PARM_DESC(enable_dma,
+ "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
+
/**
* Convert nanosecond based time to setting used in the
* boot bus timing register, based on timing multiple
@@ -66,12 +85,29 @@ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
return val;
}
-static void octeon_cf_set_boot_reg_cfg(int cs)
+static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
union cvmx_mio_boot_reg_cfgx reg_cfg;
+ unsigned int tim_mult;
+
+ switch (multiplier) {
+ case 8:
+ tim_mult = 3;
+ break;
+ case 4:
+ tim_mult = 0;
+ break;
+ case 2:
+ tim_mult = 2;
+ break;
+ default:
+ tim_mult = 1;
+ break;
+ }
+
reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
- reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
+	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
reg_cfg.s.sam = 0; /* Don't combine write and output enable */
reg_cfg.s.we_ext = 0; /* No write enable extension */
@@ -92,12 +128,12 @@ static void octeon_cf_set_boot_reg_cfg(int cs)
*/
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_reg_timx reg_tim;
- int cs = ocd->base_region;
int T;
struct ata_timing timing;
+ unsigned int div;
int use_iordy;
int trh;
int pause;
@@ -106,7 +142,15 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
int t2;
int t2i;
- T = (int)(2000000000000LL / octeon_get_clock_rate());
+ /*
+ * A divisor value of four will overflow the timing fields at
+ * clock rates greater than 800MHz
+ */
+ if (octeon_get_io_clock_rate() <= 800000000)
+ div = 4;
+ else
+ div = 8;
+ T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
BUG();
@@ -121,23 +165,26 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
if (t2i)
t2i--;
- trh = ns_to_tim_reg(2, 20);
+ trh = ns_to_tim_reg(div, 20);
if (trh)
trh--;
- pause = timing.cycle - timing.active - timing.setup - trh;
+ pause = (int)timing.cycle - (int)timing.active -
+ (int)timing.setup - trh;
+ if (pause < 0)
+ pause = 0;
if (pause)
pause--;
- octeon_cf_set_boot_reg_cfg(cs);
- if (ocd->dma_engine >= 0)
+ octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- octeon_cf_set_boot_reg_cfg(cs + 1);
+ octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);
use_iordy = ata_pio_need_iordy(dev);
- reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
+ reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
/* Disable page mode */
reg_tim.s.pagem = 0;
/* Enable dynamic timing */
@@ -161,20 +208,22 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
/* How long read enable is asserted */
reg_tim.s.oe = t2;
/* Time after CE that read/write starts */
- reg_tim.s.ce = ns_to_tim_reg(2, 5);
+ reg_tim.s.ce = ns_to_tim_reg(div, 5);
/* Time before CE that address is valid */
reg_tim.s.adr = 0;
/* Program the bootbus region timing for the data port chip select. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
- if (ocd->dma_engine >= 0)
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
+ if (cf_port->is_true_ide)
/* True IDE mode, program both chip selects. */
- cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
+ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
+ reg_tim.u64);
}
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
- struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
+ union cvmx_mio_boot_pin_defs pin_defs;
union cvmx_mio_boot_dma_timx dma_tim;
unsigned int oe_a;
unsigned int oe_n;
@@ -183,6 +232,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
unsigned int pause;
unsigned int T0, Tkr, Td;
unsigned int tim_mult;
+ int c;
const struct ata_timing *timing;
@@ -199,13 +249,19 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
/* not spec'ed, value in eclocks, not affected by tim_mult */
dma_arq = 8;
pause = 25 - dma_arq * 1000 /
- (octeon_get_clock_rate() / 1000000); /* Tz */
+ (octeon_get_io_clock_rate() / 1000000); /* Tz */
oe_a = Td;
/* Tkr from cf spec, lengthened to meet T0 */
oe_n = max(T0 - oe_a, Tkr);
- dma_tim.s.dmack_pi = 1;
+ pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);
+
+ /* DMA channel number. */
+ c = (cf_port->dma_base & 8) >> 3;
+
+	/* Invert the polarity if the default is 0 */
+ dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;
dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
@@ -228,14 +284,11 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
- pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
- "%d, dmarq: %d, pause: %d\n",
+ pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
- dma_tim.u64);
-
+ cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
/**
@@ -489,15 +542,10 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
ata_wait_idle(ap);
}
-static void octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
-static void octeon_cf_irq_clear(struct ata_port *ap)
-{
- return;
-}
-
static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -519,7 +567,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
*/
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
- struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = qc->ap->private_data;
union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
@@ -535,15 +583,16 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
*/
mio_boot_dma_int.u64 = 0;
mio_boot_dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);
/* Enable the interrupt. */
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
- mio_boot_dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);
/* Set the direction of the DMA */
mio_boot_dma_cfg.u64 = 0;
+#ifdef __LITTLE_ENDIAN
+ mio_boot_dma_cfg.s.endian = 1;
+#endif
mio_boot_dma_cfg.s.en = 1;
mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
@@ -569,8 +618,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
(void *)(unsigned long)mio_boot_dma_cfg.s.adr);
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
- mio_boot_dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
/**
@@ -583,10 +631,9 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
struct ata_queued_cmd *qc)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
- struct octeon_cf_data *ocd = ap->dev->platform_data;
+ struct octeon_cf_port *cf_port = ap->private_data;
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
- struct octeon_cf_port *cf_port;
u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -596,9 +643,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
- cf_port = ap->private_data;
-
- dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
if (dma_cfg.s.size != 0xfffff) {
/* Error, the transfer was not complete. */
qc->err_mask |= AC_ERR_HOST_BUS;
@@ -608,15 +653,15 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
/* Stop and clear the dma engine. */
dma_cfg.u64 = 0;
dma_cfg.s.size = -1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
/* Disable the interrupt. */
dma_int.u64 = 0;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
/* Clear the DMA complete status */
dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
status = ap->ops->sff_check_status(ap);
@@ -649,69 +694,68 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
struct ata_queued_cmd *qc;
union cvmx_mio_boot_dma_intx dma_int;
union cvmx_mio_boot_dma_cfgx dma_cfg;
- struct octeon_cf_data *ocd;
ap = host->ports[i];
- ocd = ap->dev->platform_data;
cf_port = ap->private_data;
- dma_int.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
- dma_cfg.u64 =
- cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+
+ dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
+ dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
- if (dma_int.s.done && !dma_cfg.s.en) {
- if (!sg_is_last(qc->cursg)) {
- qc->cursg = sg_next(qc->cursg);
- handled = 1;
- octeon_cf_dma_start(qc);
- continue;
- } else {
- cf_port->dma_finished = 1;
- }
- }
- if (!cf_port->dma_finished)
- continue;
- status = ioread8(ap->ioaddr.altstatus_addr);
- if (status & (ATA_BUSY | ATA_DRQ)) {
- /*
- * We are busy, try to handle it
- * later. This is the DMA finished
- * interrupt, and it could take a
- * little while for the card to be
- * ready for more commands.
- */
- /* Clear DMA irq. */
- dma_int.u64 = 0;
- dma_int.s.done = 1;
- cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
- dma_int.u64);
-
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
+ continue;
+
+ if (dma_int.s.done && !dma_cfg.s.en) {
+ if (!sg_is_last(qc->cursg)) {
+ qc->cursg = sg_next(qc->cursg);
handled = 1;
+ octeon_cf_dma_start(qc);
+ continue;
} else {
- handled |= octeon_cf_dma_finished(ap, qc);
+ cf_port->dma_finished = 1;
}
}
+ if (!cf_port->dma_finished)
+ continue;
+ status = ioread8(ap->ioaddr.altstatus_addr);
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ /*
+ * We are busy, try to handle it later. This
+ * is the DMA finished interrupt, and it could
+ * take a little while for the card to be
+ * ready for more commands.
+ */
+ /* Clear DMA irq. */
+ dma_int.u64 = 0;
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT,
+ dma_int.u64);
+ hrtimer_start_range_ns(&cf_port->delayed_finish,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
+ OCTEON_CF_BUSY_POLL_INTERVAL / 5,
+ HRTIMER_MODE_REL);
+ handled = 1;
+ } else {
+ handled |= octeon_cf_dma_finished(ap, qc);
+ }
}
spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
-static void octeon_cf_delayed_finish(struct work_struct *work)
+static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
- struct octeon_cf_port *cf_port = container_of(work,
+ struct octeon_cf_port *cf_port = container_of(hrt,
struct octeon_cf_port,
- delayed_finish.work);
+ delayed_finish);
struct ata_port *ap = cf_port->ap;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
u8 status;
+ enum hrtimer_restart rv = HRTIMER_NORESTART;
spin_lock_irqsave(&host->lock, flags);
@@ -726,15 +770,17 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
status = ioread8(ap->ioaddr.altstatus_addr);
if (status & (ATA_BUSY | ATA_DRQ)) {
/* Still busy, try again. */
- queue_delayed_work(cf_port->wq,
- &cf_port->delayed_finish, 1);
+ hrtimer_forward_now(hrt,
+ ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
+ rv = HRTIMER_RESTART;
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
+ return rv;
}
static void octeon_cf_dev_config(struct ata_device *dev)
@@ -786,58 +832,125 @@ static struct ata_port_operations octeon_cf_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = octeon_cf_qc_issue,
.sff_dev_select = octeon_cf_dev_select,
- .sff_irq_on = octeon_cf_irq_on,
- .sff_irq_clear = octeon_cf_irq_clear,
+ .sff_irq_on = octeon_cf_ata_port_noaction,
+ .sff_irq_clear = octeon_cf_ata_port_noaction,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
.dev_config = octeon_cf_dev_config,
};
-static int __devinit octeon_cf_probe(struct platform_device *pdev)
+static int octeon_cf_probe(struct platform_device *pdev)
{
struct resource *res_cs0, *res_cs1;
+ bool is_16bit;
+ const __be32 *cs_num;
+ struct property *reg_prop;
+ int n_addr, n_size, reg_len;
+ struct device_node *node;
+ const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
struct ata_port *ap;
- struct octeon_cf_data *ocd;
int irq = 0;
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
- char version[32];
+ int rv = -ENOMEM;
- res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_cs0)
+ node = pdev->dev.of_node;
+ if (node == NULL)
return -EINVAL;
- ocd = pdev->dev.platform_data;
+ cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+ if (!cf_port)
+ return -ENOMEM;
- cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
- resource_size(res_cs0));
+ cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
- if (!cs0)
- return -ENOMEM;
+ prop = of_get_property(node, "cavium,bus-width", NULL);
+ if (prop)
+ is_16bit = (be32_to_cpup(prop) == 16);
+ else
+ is_16bit = false;
- /* Determine from availability of DMA if True IDE mode or not */
- if (ocd->dma_engine >= 0) {
- res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_cs1)
- return -EINVAL;
+ n_addr = of_n_addr_cells(node);
+ n_size = of_n_size_cells(node);
+ reg_prop = of_find_property(node, "reg", &reg_len);
+ if (!reg_prop || reg_len < sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num = reg_prop->value;
+ cf_port->cs0 = be32_to_cpup(cs_num);
+
+ if (cf_port->is_true_ide) {
+ struct device_node *dma_node;
+ dma_node = of_parse_phandle(node,
+ "cavium,dma-engine-handle", 0);
+ if (dma_node) {
+ struct platform_device *dma_dev;
+ dma_dev = of_find_device_by_node(dma_node);
+ if (dma_dev) {
+ struct resource *res_dma;
+ int i;
+ res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
+ if (!res_dma) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
+ resource_size(res_dma));
+
+ if (!cf_port->dma_base) {
+ of_node_put(dma_node);
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ irq_handler = octeon_cf_interrupt;
+ i = platform_get_irq(dma_dev, 0);
+ if (i > 0)
+ irq = i;
+ }
+ of_node_put(dma_node);
+ }
+ res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_cs1) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
- resource_size(res_cs1));
+ res_cs1->end - res_cs1->start + 1);
if (!cs1)
- return -ENOMEM;
+ goto free_cf_port;
+
+ if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+ cs_num += n_addr + n_size;
+ cf_port->cs1 = be32_to_cpup(cs_num);
}
- cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
- if (!cf_port)
- return -ENOMEM;
+ res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res_cs0) {
+ rv = -EINVAL;
+ goto free_cf_port;
+ }
+
+ cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+ resource_size(res_cs0));
+
+ if (!cs0)
+ goto free_cf_port;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -846,21 +959,22 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->private_data = cf_port;
+ pdev->dev.platform_data = cf_port;
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
- base = cs0 + ocd->base_region_bias;
- if (!ocd->is16bit) {
+ if (!is_16bit) {
+ base = cs0 + 0x800;
ap->ioaddr.cmd_addr = base;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.altstatus_addr = base + 0xe;
ap->ioaddr.ctl_addr = base + 0xe;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
- } else if (cs1) {
- /* Presence of cs1 indicates True IDE mode. */
+ } else if (cf_port->is_true_ide) {
+ base = cs0;
ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
@@ -876,19 +990,15 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- ap->mwdma_mask = ATA_MWDMA4;
- irq = platform_get_irq(pdev, 0);
- irq_handler = octeon_cf_interrupt;
-
- /* True IDE mode needs delayed work to poll for not-busy. */
- cf_port->wq = create_singlethread_workqueue(DRV_NAME);
- if (!cf_port->wq)
- goto free_cf_port;
- INIT_DELAYED_WORK(&cf_port->delayed_finish,
- octeon_cf_delayed_finish);
+ ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
+ /* True IDE mode needs a timer to poll for not-busy. */
+ hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ cf_port->delayed_finish.function = octeon_cf_delayed_finish;
} else {
/* 16 bit but not True IDE */
+ base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
octeon_cf_ops.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
@@ -902,28 +1012,71 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ap->ioaddr.ctl_addr = base + 0xe;
ap->ioaddr.altstatus_addr = base + 0xe;
}
+ cf_port->c0 = ap->ioaddr.ctl_addr;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
- snprintf(version, sizeof(version), "%s %d bit%s",
- DRV_VERSION,
- (ocd->is16bit) ? 16 : 8,
- (cs1) ? ", True IDE" : "");
- ata_print_version_once(&pdev->dev, version);
+	dev_info(&pdev->dev, "version " DRV_VERSION " %d bit%s.\n",
+ is_16bit ? 16 : 8,
+ cf_port->is_true_ide ? ", True IDE" : "");
- return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
+ return ata_host_activate(host, irq, irq_handler,
+ IRQF_SHARED, &octeon_cf_sht);
free_cf_port:
kfree(cf_port);
- return -ENOMEM;
+ return rv;
+}
+
+static void octeon_cf_shutdown(struct device *dev)
+{
+ union cvmx_mio_boot_dma_cfgx dma_cfg;
+ union cvmx_mio_boot_dma_intx dma_int;
+
+ struct octeon_cf_port *cf_port = dev->platform_data;
+
+ if (cf_port->dma_base) {
+ /* Stop and clear the dma engine. */
+ dma_cfg.u64 = 0;
+ dma_cfg.s.size = -1;
+ cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);
+
+ /* Disable the interrupt. */
+ dma_int.u64 = 0;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);
+
+ /* Clear the DMA complete status */
+ dma_int.s.done = 1;
+ cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);
+
+ __raw_writeb(0, cf_port->c0);
+ udelay(20);
+ __raw_writeb(ATA_SRST, cf_port->c0);
+ udelay(20);
+ __raw_writeb(0, cf_port->c0);
+ mdelay(100);
+ }
}
+static struct of_device_id octeon_cf_match[] = {
+ {
+ .compatible = "cavium,ebt3000-compact-flash",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = octeon_cf_match,
+ .shutdown = octeon_cf_shutdown
},
};
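The pata_octeon_cf changes above replace the delayed-workqueue busy poll with an hrtimer that re-arms itself while the card stays busy. A minimal, self-contained sketch of that pattern follows; demo_port, demo_poll and demo_port_idle are illustrative stand-ins rather than driver symbols, and demo_port_idle stands in for the driver's ATA_BUSY/ATA_DRQ altstatus check.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define POLL_NS 500000		/* mirrors OCTEON_CF_BUSY_POLL_INTERVAL */

struct demo_port {
	struct hrtimer poll_timer;
};

/* Stand-in for reading altstatus and testing ATA_BUSY | ATA_DRQ. */
static bool demo_port_idle(struct demo_port *p)
{
	return true;
}

static enum hrtimer_restart demo_poll(struct hrtimer *hrt)
{
	struct demo_port *p = container_of(hrt, struct demo_port, poll_timer);

	if (!demo_port_idle(p)) {
		/* Still busy: push the expiry forward and re-arm. */
		hrtimer_forward_now(hrt, ns_to_ktime(POLL_NS));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

static void demo_port_init(struct demo_port *p)
{
	hrtimer_init(&p->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	p->poll_timer.function = demo_poll;
	/* First shot, with some slack, as octeon_cf_interrupt() does. */
	hrtimer_start_range_ns(&p->poll_timer, ns_to_ktime(POLL_NS),
			       POLL_NS / 5, HRTIMER_MODE_REL);
}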
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1654dc27e7f8..a7e95a54c782 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -14,8 +14,9 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
+#include <linux/libata.h>
-static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
+static int pata_of_platform_probe(struct platform_device *ofdev)
{
int ret;
struct device_node *dn = ofdev->dev.of_node;
@@ -76,11 +77,6 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
reg_shift, pio_mask);
}
-static int __devexit pata_of_platform_remove(struct platform_device *ofdev)
-{
- return __pata_platform_remove(&ofdev->dev);
-}
-
static struct of_device_id pata_of_platform_match[] = {
{ .compatible = "ata-generic", },
{ .compatible = "electra-ide", },
@@ -95,7 +91,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove = __devexit_p(pata_of_platform_remove),
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 5ff31b68135c..df2bb7504fc8 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -48,7 +48,7 @@ static struct ata_port_operations palmld_port_ops = {
.cable_detect = ata_cable_40wire,
};
-static __devinit int palmld_pata_probe(struct platform_device *pdev)
+static int palmld_pata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -109,11 +109,9 @@ err1:
return ret;
}
-static __devexit int palmld_pata_remove(struct platform_device *dev)
+static int palmld_pata_remove(struct platform_device *dev)
{
- struct ata_host *host = platform_get_drvdata(dev);
-
- ata_host_detach(host);
+ ata_platform_remove_one(dev);
/* power down the HDD */
gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
@@ -129,7 +127,7 @@ static struct platform_driver palmld_pata_platform_driver = {
.owner = THIS_MODULE,
},
.probe = palmld_pata_probe,
- .remove = __devexit_p(palmld_pata_remove),
+ .remove = palmld_pata_remove,
};
module_platform_driver(palmld_pata_platform_driver);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index c9399c8688c5..3f94a886bb35 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -700,7 +700,8 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
* @pdev: instance of pci_dev found
* @ent: matching entry in the id_tbl[]
*/
-static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc2027x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index f1848aeda783..71e093767f4e 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -98,12 +98,9 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
*
* If no IRQ resource is present, PIO polling mode is used instead.
*/
-int __devinit __pata_platform_probe(struct device *dev,
- struct resource *io_res,
- struct resource *ctl_res,
- struct resource *irq_res,
- unsigned int ioport_shift,
- int __pio_mask)
+int __pata_platform_probe(struct device *dev, struct resource *io_res,
+ struct resource *ctl_res, struct resource *irq_res,
+ unsigned int ioport_shift, int __pio_mask)
{
struct ata_host *host;
struct ata_port *ap;
@@ -178,24 +175,7 @@ int __devinit __pata_platform_probe(struct device *dev,
}
EXPORT_SYMBOL_GPL(__pata_platform_probe);
-/**
- * __pata_platform_remove - unplug a platform interface
- * @dev: device
- *
- * A platform bus ATA device has been unplugged. Perform the needed
- * cleanup. Also called on module unload for any active devices.
- */
-int __pata_platform_remove(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pata_platform_remove);
-
-static int __devinit pata_platform_probe(struct platform_device *pdev)
+static int pata_platform_probe(struct platform_device *pdev)
{
struct resource *io_res;
struct resource *ctl_res;
@@ -242,14 +222,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
pio_mask);
}
-static int __devexit pata_platform_remove(struct platform_device *pdev)
-{
- return __pata_platform_remove(&pdev->dev);
-}
-
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove = __devexit_p(pata_platform_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
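Several of the conversions above (pata_ixp4xx_cf, pata_mpc52xx, pata_of_platform, pata_platform, sata_highbank) drop a per-driver remove callback in favour of the shared ata_platform_remove_one() helper. The deleted callbacks were all equivalent to the sketch below, which is why they could be replaced wholesale.

#include <linux/platform_device.h>
#include <linux/libata.h>

/* What each removed callback did: detach the host and return 0; this is
 * exactly the body of the common ata_platform_remove_one() helper. */
static int legacy_pata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	ata_host_detach(host);
	return 0;
}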
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 4b8ba559fe24..b0ac9e0c5e01 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -229,7 +229,7 @@ static void pxa_ata_dma_irq(int dma, void *port)
complete(&pd->dma_done);
}
-static int __devinit pxa_ata_probe(struct platform_device *pdev)
+static int pxa_ata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
@@ -369,7 +369,7 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit pxa_ata_remove(struct platform_device *pdev)
+static int pxa_ata_remove(struct platform_device *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct pata_pxa_data *data = host->ports[0]->private_data;
@@ -383,7 +383,7 @@ static int __devexit pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove = __devexit_p(pxa_ata_remove),
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 9417101bd5ca..3c5eb8fa6bd1 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -102,7 +102,7 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
}
-static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
+static int rb532_pata_driver_probe(struct platform_device *pdev)
{
int irq;
int gpio;
@@ -177,7 +177,7 @@ err_free_gpio:
return ret;
}
-static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
+static int rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
struct rb532_cf_info *info = ah->private_data;
@@ -190,7 +190,7 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove = __devexit_p(rb532_pata_driver_remove),
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 32a3499e83e7..6a8665574fee 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -321,13 +321,11 @@ static struct scsi_host_template rdc_sht = {
* Zero on success, or -ERRNO value.
*/
-static int __devinit rdc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
- unsigned long port_flags;
struct ata_host *host;
struct rdc_host_priv *hpriv;
int rc;
@@ -337,8 +335,6 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
- port_flags = port_info[0].flags;
-
/* enable device and prepare host */
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index db0d18cf1c2a..d3830c45a369 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -169,8 +169,7 @@ static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* Zero on success, or -ERRNO value.
*/
-static int __devinit sch_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 5cfdf94823d0..64c5f0d0f812 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -323,8 +323,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
return tmpbyte & 0x30;
}
-static int __devinit sil680_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 937aeb34b310..2e391730e8be 100755..100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -43,6 +43,7 @@
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
+
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 400bf1c3e982..5dba77ccaa0b 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -213,7 +213,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
- tf.command = 0x80;
+ tf.command = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
do {
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
-static int __devinit ahci_highbank_probe(struct platform_device *pdev)
+static int ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
@@ -368,16 +368,6 @@ err0:
return rc;
}
-static int __devexit ahci_highbank_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_detach(host);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
@@ -432,7 +422,7 @@ SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove = __devexit_p(ahci_highbank_remove),
+ .remove = ata_platform_remove_one,
.driver = {
.name = "highbank-ahci",
.owner = THIS_MODULE,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index dc35f4d42b8b..1e6827c89429 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -273,12 +273,10 @@ static void inic_reset_port(void __iomem *port_base)
static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
- void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
- addr = scr_addr + scr_map[sc_reg] * 4;
*val = readl(scr_addr + scr_map[sc_reg] * 4);
/* this controller has stuck DIAG.N, ignore it */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 68f4fb54d627..35c6b6d09c27 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4148,7 +4148,7 @@ err:
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int __devexit mv_platform_remove(struct platform_device *pdev)
+static int mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
@@ -4215,7 +4215,7 @@ static int mv_platform_resume(struct platform_device *pdev)
#endif
#ifdef CONFIG_OF
-static struct of_device_id mv_sata_dt_ids[] __devinitdata = {
+static struct of_device_id mv_sata_dt_ids[] = {
{ .compatible = "marvell,orion-sata", },
{},
};
@@ -4224,7 +4224,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove = __devexit_p(mv_platform_remove),
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
@@ -4429,7 +4429,7 @@ static int mv_pci_device_resume(struct pci_dev *pdev)
#endif
static int mv_platform_probe(struct platform_device *pdev);
-static int __devexit mv_platform_remove(struct platform_device *pdev);
+static int mv_platform_remove(struct platform_device *pdev);
static int __init mv_init(void)
{
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 489c81768321..fb0dd87f8893 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -147,6 +147,10 @@ struct pdc_port_priv {
dma_addr_t pkt_dma;
};
+struct pdc_host_priv {
+ spinlock_t hard_reset_lock;
+};
+
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
+ struct pdc_host_priv *hpriv = ap->host->private_data;
u8 tmp;
- spin_lock(&ap->host->lock);
+ spin_lock(&hpriv->hard_reset_lock);
tmp = readb(pcictl_b1_mmio);
tmp &= ~(0x10 << ata_no);
@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
writeb(tmp, pcictl_b1_mmio);
readb(pcictl_b1_mmio); /* flush */
- spin_unlock(&ap->host->lock);
+ spin_unlock(&hpriv->hard_reset_lock);
}
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
+ struct pdc_host_priv *hpriv;
void __iomem *host_mmio;
int n_ports, i, rc;
int is_sataii_tx4;
@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
+ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ spin_lock_init(&hpriv->hard_reset_lock);
+ host->private_data = hpriv;
host->iomap = pcim_iomap_table(pdev);
is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
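The sata_promise hunks stop taking the global host lock around the hard-reset register toggling and instead serialise on a dedicated spinlock kept in the newly added per-host private data. A minimal sketch of that pattern follows, with illustrative names (demo_hpriv, demo_hard_reset) mirroring the pdc_host_priv/pdc_hard_reset_port pair; the real driver allocates the structure with devm_kzalloc() and calls spin_lock_init() in pdc_ata_init_one(), as shown in the hunk above.

#include <linux/spinlock.h>

struct demo_hpriv {
	spinlock_t hard_reset_lock;	/* init once with spin_lock_init() */
};

static void demo_hard_reset(struct demo_hpriv *hpriv)
{
	/* Only the per-port reset paths of this host contend here, instead
	 * of everything that takes the global host lock. */
	spin_lock(&hpriv->hard_reset_lock);
	/* read-modify-write of the control byte shared by all ports */
	spin_unlock(&hpriv->hard_reset_lock);
}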
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index a5f2a563a26a..59f0d630d634 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -506,8 +506,6 @@ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
@@ -519,8 +517,6 @@ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
- void __iomem *addr;
- addr = scr_addr + sil24_scr_map[sc_reg] * 4;
writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
return 0;
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 122605593166..7b7127a58f51 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -315,9 +315,8 @@ static int pdc_port_start(struct ata_port *ap)
return 0;
}
-static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
@@ -337,9 +336,8 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
buf32[dw], buf32[dw + 1]);
}
-static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
- unsigned int total_len)
+static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
+ unsigned int total_len)
{
u32 addr;
unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
@@ -486,10 +484,10 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
/*
* Build ATA, host DMA packets
*/
- pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
- pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
if (qc->tf.flags & ATA_TFLAG_LBA48)
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index e8cf88ba145d..44f304b3de63 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -312,8 +312,7 @@ static struct ata_port_operations vsc_sata_ops = {
.scr_write = vsc_sata_scr_write,
};
-static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
- void __iomem *base)
+static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
@@ -335,8 +334,8 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
}
-static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int vsc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
.flags = ATA_FLAG_SATA,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ff7bb8a42ed6..77a7480dc4d1 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1507,9 +1507,9 @@ static void do_housekeeping (unsigned long arg) {
/********** creation of communication queues **********/
-static int __devinit create_queues (amb_dev * dev, unsigned int cmds,
- unsigned int txs, unsigned int * rxs,
- unsigned int * rx_buffer_sizes) {
+static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs,
+ unsigned int *rxs, unsigned int *rx_buffer_sizes)
+{
unsigned char pool;
size_t total = 0;
void * memory;
@@ -1737,8 +1737,9 @@ static int decode_loader_result (loader_command cmd, u32 result)
return res;
}
-static int __devinit do_loader_command (volatile loader_block * lb,
- const amb_dev * dev, loader_command cmd) {
+static int do_loader_command(volatile loader_block *lb, const amb_dev *dev,
+ loader_command cmd)
+{
unsigned long timeout;
@@ -1793,8 +1794,9 @@ static int __devinit do_loader_command (volatile loader_block * lb,
/* loader: determine loader version */
-static int __devinit get_loader_version (loader_block * lb,
- const amb_dev * dev, u32 * version) {
+static int get_loader_version(loader_block *lb, const amb_dev *dev,
+ u32 *version)
+{
int res;
PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
@@ -1809,9 +1811,9 @@ static int __devinit get_loader_version (loader_block * lb,
/* loader: write memory data blocks */
-static int __devinit loader_write (loader_block* lb,
- const amb_dev *dev,
- const struct ihex_binrec *rec) {
+static int loader_write(loader_block *lb, const amb_dev *dev,
+ const struct ihex_binrec *rec)
+{
transfer_block * tb = &lb->payload.transfer;
PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
@@ -1824,9 +1826,9 @@ static int __devinit loader_write (loader_block* lb,
/* loader: verify memory data blocks */
-static int __devinit loader_verify (loader_block * lb,
- const amb_dev *dev,
- const struct ihex_binrec *rec) {
+static int loader_verify(loader_block *lb, const amb_dev *dev,
+ const struct ihex_binrec *rec)
+{
transfer_block * tb = &lb->payload.transfer;
int res;
@@ -1842,8 +1844,8 @@ static int __devinit loader_verify (loader_block * lb,
/* loader: start microcode */
-static int __devinit loader_start (loader_block * lb,
- const amb_dev * dev, u32 address) {
+static int loader_start(loader_block *lb, const amb_dev *dev, u32 address)
+{
PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
lb->payload.start = cpu_to_be32 (address);
@@ -1918,7 +1920,8 @@ static int amb_reset (amb_dev * dev, int diags) {
/********** transfer and start the microcode **********/
-static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
+static int ucode_init(loader_block *lb, amb_dev *dev)
+{
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
@@ -1980,7 +1983,8 @@ static inline __be32 bus_addr(void * addr) {
return cpu_to_be32 (virt_to_bus (addr));
}
-static int __devinit amb_talk (amb_dev * dev) {
+static int amb_talk(amb_dev *dev)
+{
adap_talk_block a;
unsigned char pool;
unsigned long timeout;
@@ -2027,7 +2031,8 @@ static int __devinit amb_talk (amb_dev * dev) {
}
// get microcode version
-static void __devinit amb_ucode_version (amb_dev * dev) {
+static void amb_ucode_version(amb_dev *dev)
+{
u32 major;
u32 minor;
command cmd;
@@ -2042,7 +2047,8 @@ static void __devinit amb_ucode_version (amb_dev * dev) {
}
// get end station address
-static void __devinit amb_esi (amb_dev * dev, u8 * esi) {
+static void amb_esi(amb_dev *dev, u8 *esi)
+{
u32 lower4;
u16 upper2;
command cmd;
@@ -2088,7 +2094,7 @@ static void fixup_plx_window (amb_dev *dev, loader_block *lb)
return;
}
-static int __devinit amb_init (amb_dev * dev)
+static int amb_init(amb_dev *dev)
{
loader_block lb;
@@ -2184,7 +2190,8 @@ static void setup_pci_dev(struct pci_dev *pci_dev)
}
}
-static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int amb_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
amb_dev * dev;
int err;
@@ -2285,7 +2292,7 @@ out_disable:
}
-static void __devexit amb_remove_one(struct pci_dev *pci_dev)
+static void amb_remove_one(struct pci_dev *pci_dev)
{
struct amb_dev *dev;
@@ -2379,7 +2386,7 @@ MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
static struct pci_driver amb_driver = {
.name = "amb",
.probe = amb_probe,
- .remove = __devexit_p(amb_remove_one),
+ .remove = amb_remove_one,
.id_table = amb_pci_tbl,
};
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 81e44f7b0ab6..c1eb6fa8ac35 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1567,7 +1567,7 @@ tx_complete++;
/*--------------------------------- entries ---------------------------------*/
-static char * const media_name[] __devinitconst = {
+static char * const media_name[] = {
"MMF", "SMF", "MMF", "03?", /* 0- 3 */
"UTP", "05?", "06?", "07?", /* 4- 7 */
"TAXI","09?", "10?", "11?", /* 8-11 */
@@ -1591,7 +1591,7 @@ static char * const media_name[] __devinitconst = {
} })
-static int __devinit get_esi_asic(struct atm_dev *dev)
+static int get_esi_asic(struct atm_dev *dev)
{
struct eni_dev *eni_dev;
unsigned char tonga;
@@ -1683,7 +1683,7 @@ static int __devinit get_esi_asic(struct atm_dev *dev)
#undef GET_SEPROM
-static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base)
+static int get_esi_fpga(struct atm_dev *dev, void __iomem *base)
{
void __iomem *mac_base;
int i;
@@ -1694,7 +1694,7 @@ static int __devinit get_esi_fpga(struct atm_dev *dev, void __iomem *base)
}
-static int __devinit eni_do_init(struct atm_dev *dev)
+static int eni_do_init(struct atm_dev *dev)
{
struct midway_eprom __iomem *eprom;
struct eni_dev *eni_dev;
@@ -1797,7 +1797,7 @@ static void eni_do_release(struct atm_dev *dev)
iounmap(ed->ioaddr);
}
-static int __devinit eni_start(struct atm_dev *dev)
+static int eni_start(struct atm_dev *dev)
{
struct eni_dev *eni_dev;
@@ -2226,8 +2226,8 @@ static const struct atmdev_ops ops = {
};
-static int __devinit eni_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int eni_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *dev;
struct eni_dev *eni_dev;
@@ -2292,7 +2292,7 @@ static struct pci_device_id eni_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci,eni_pci_tbl);
-static void __devexit eni_remove_one(struct pci_dev *pdev)
+static void eni_remove_one(struct pci_dev *pdev)
{
struct atm_dev *dev = pci_get_drvdata(pdev);
struct eni_dev *ed = ENI_DEV(dev);
@@ -2310,7 +2310,7 @@ static struct pci_driver eni_driver = {
.name = DEV_LABEL,
.id_table = eni_pci_tbl,
.probe = eni_init_one,
- .remove = __devexit_p(eni_remove_one),
+ .remove = eni_remove_one,
};
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 86fed1b91695..b41c9481b67b 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -252,7 +252,7 @@ struct reginit_item {
};
-static struct reginit_item PHY_NTC_INIT[] __devinitdata = {
+static struct reginit_item PHY_NTC_INIT[] = {
{ PHY_CLEARALL, 0x40 },
{ 0x12, 0x0001 },
{ 0x13, 0x7605 },
@@ -1295,7 +1295,7 @@ static const struct atmdev_ops ops = {
};
-static void __devinit undocumented_pci_fix (struct pci_dev *pdev)
+static void undocumented_pci_fix(struct pci_dev *pdev)
{
u32 tint;
@@ -1319,13 +1319,13 @@ static void __devinit undocumented_pci_fix (struct pci_dev *pdev)
* PHY routines *
**************************************************************************/
-static void __devinit write_phy (struct fs_dev *dev, int regnum, int val)
+static void write_phy(struct fs_dev *dev, int regnum, int val)
{
submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ,
regnum, val, 0);
}
-static int __devinit init_phy (struct fs_dev *dev, struct reginit_item *reginit)
+static int init_phy(struct fs_dev *dev, struct reginit_item *reginit)
{
int i;
@@ -1381,7 +1381,7 @@ static void reset_chip (struct fs_dev *dev)
}
}
-static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
+static void *aligned_kmalloc(int size, gfp_t flags, int alignment)
{
void *t;
@@ -1398,8 +1398,8 @@ static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
return NULL;
}
-static int __devinit init_q (struct fs_dev *dev,
- struct queue *txq, int queue, int nentries, int is_rq)
+static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
+ int nentries, int is_rq)
{
int sz = nentries * sizeof (struct FS_QENTRY);
struct FS_QENTRY *p;
@@ -1434,8 +1434,8 @@ static int __devinit init_q (struct fs_dev *dev,
}
-static int __devinit init_fp (struct fs_dev *dev,
- struct freepool *fp, int queue, int bufsize, int nr_buffers)
+static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
+ int bufsize, int nr_buffers)
{
func_enter ();
@@ -1528,7 +1528,7 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n);
}
-static void __devexit free_queue (struct fs_dev *dev, struct queue *txq)
+static void free_queue(struct fs_dev *dev, struct queue *txq)
{
func_enter ();
@@ -1544,7 +1544,7 @@ static void __devexit free_queue (struct fs_dev *dev, struct queue *txq)
func_exit ();
}
-static void __devexit free_freepool (struct fs_dev *dev, struct freepool *fp)
+static void free_freepool(struct fs_dev *dev, struct freepool *fp)
{
func_enter ();
@@ -1662,7 +1662,7 @@ static void fs_poll (unsigned long data)
}
#endif
-static int __devinit fs_init (struct fs_dev *dev)
+static int fs_init(struct fs_dev *dev)
{
struct pci_dev *pci_dev;
int isr, to;
@@ -1897,8 +1897,8 @@ unmap:
return 1;
}
-static int __devinit firestream_init_one (struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int firestream_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *atm_dev;
struct fs_dev *fs_dev;
@@ -1934,7 +1934,7 @@ static int __devinit firestream_init_one (struct pci_dev *pci_dev,
return -ENODEV;
}
-static void __devexit firestream_remove_one (struct pci_dev *pdev)
+static void firestream_remove_one(struct pci_dev *pdev)
{
int i;
struct fs_dev *dev, *nxtdev;
@@ -2038,7 +2038,7 @@ static struct pci_driver firestream_driver = {
.name = "firestream",
.id_table = firestream_pci_tbl,
.probe = firestream_init_one,
- .remove = __devexit_p(firestream_remove_one),
+ .remove = firestream_remove_one,
};
static int __init firestream_init_module (void)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 361f5aee3be1..204814e88e46 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -527,8 +527,7 @@ fore200e_pca_reset(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_pca_map(struct fore200e* fore200e)
+static int fore200e_pca_map(struct fore200e* fore200e)
{
DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
@@ -561,8 +560,7 @@ fore200e_pca_unmap(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_pca_configure(struct fore200e* fore200e)
+static int fore200e_pca_configure(struct fore200e *fore200e)
{
struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
u8 master_ctrl, latency;
@@ -2028,8 +2026,7 @@ fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
}
-static int __devinit
-fore200e_irq_request(struct fore200e* fore200e)
+static int fore200e_irq_request(struct fore200e *fore200e)
{
if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
@@ -2051,8 +2048,7 @@ fore200e_irq_request(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_get_esi(struct fore200e* fore200e)
+static int fore200e_get_esi(struct fore200e *fore200e)
{
struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
int ok, i;
@@ -2081,8 +2077,7 @@ fore200e_get_esi(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_alloc_rx_buf(struct fore200e* fore200e)
+static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
int scheme, magn, nbr, size, i;
@@ -2146,8 +2141,7 @@ fore200e_alloc_rx_buf(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_bs_queue(struct fore200e* fore200e)
+static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
int scheme, magn, i;
@@ -2209,8 +2203,7 @@ fore200e_init_bs_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_rx_queue(struct fore200e* fore200e)
+static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
struct host_rxq* rxq = &fore200e->host_rxq;
struct cp_rxq_entry __iomem * cp_entry;
@@ -2269,8 +2262,7 @@ fore200e_init_rx_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_tx_queue(struct fore200e* fore200e)
+static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
struct host_txq* txq = &fore200e->host_txq;
struct cp_txq_entry __iomem * cp_entry;
@@ -2332,8 +2324,7 @@ fore200e_init_tx_queue(struct fore200e* fore200e)
}
-static int __devinit
-fore200e_init_cmd_queue(struct fore200e* fore200e)
+static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
struct host_cmdq* cmdq = &fore200e->host_cmdq;
struct cp_cmdq_entry __iomem * cp_entry;
@@ -2374,10 +2365,10 @@ fore200e_init_cmd_queue(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_param_bs_queue(struct fore200e* fore200e,
- enum buffer_scheme scheme, enum buffer_magn magn,
- int queue_length, int pool_size, int supply_blksize)
+static void fore200e_param_bs_queue(struct fore200e *fore200e,
+ enum buffer_scheme scheme,
+ enum buffer_magn magn, int queue_length,
+ int pool_size, int supply_blksize)
{
struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
@@ -2388,8 +2379,7 @@ fore200e_param_bs_queue(struct fore200e* fore200e,
}
-static int __devinit
-fore200e_initialize(struct fore200e* fore200e)
+static int fore200e_initialize(struct fore200e *fore200e)
{
struct cp_queues __iomem * cpq;
int ok, scheme, magn;
@@ -2440,8 +2430,7 @@ fore200e_initialize(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_monitor_putc(struct fore200e* fore200e, char c)
+static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
@@ -2452,8 +2441,7 @@ fore200e_monitor_putc(struct fore200e* fore200e, char c)
}
-static int __devinit
-fore200e_monitor_getc(struct fore200e* fore200e)
+static int fore200e_monitor_getc(struct fore200e *fore200e)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
unsigned long timeout = jiffies + msecs_to_jiffies(50);
@@ -2477,8 +2465,7 @@ fore200e_monitor_getc(struct fore200e* fore200e)
}
-static void __devinit
-fore200e_monitor_puts(struct fore200e* fore200e, char* str)
+static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
{
while (*str) {
@@ -2497,8 +2484,7 @@ fore200e_monitor_puts(struct fore200e* fore200e, char* str)
#define FW_EXT "_ecd.bin2"
#endif
-static int __devinit
-fore200e_load_and_start_fw(struct fore200e* fore200e)
+static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
const struct firmware *firmware;
struct device *device;
@@ -2566,8 +2552,7 @@ release:
}
-static int __devinit
-fore200e_register(struct fore200e* fore200e, struct device *parent)
+static int fore200e_register(struct fore200e *fore200e, struct device *parent)
{
struct atm_dev* atm_dev;
@@ -2593,8 +2578,7 @@ fore200e_register(struct fore200e* fore200e, struct device *parent)
}
-static int __devinit
-fore200e_init(struct fore200e* fore200e, struct device *parent)
+static int fore200e_init(struct fore200e *fore200e, struct device *parent)
{
if (fore200e_register(fore200e, parent) < 0)
return -ENODEV;
@@ -2644,7 +2628,7 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
#ifdef CONFIG_SBUS
static const struct of_device_id fore200e_sba_match[];
-static int __devinit fore200e_sba_probe(struct platform_device *op)
+static int fore200e_sba_probe(struct platform_device *op)
{
const struct of_device_id *match;
const struct fore200e_bus *bus;
@@ -2681,7 +2665,7 @@ static int __devinit fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int __devexit fore200e_sba_remove(struct platform_device *op)
+static int fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
@@ -2707,13 +2691,13 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = __devexit_p(fore200e_sba_remove),
+ .remove = fore200e_sba_remove,
};
#endif
#ifdef CONFIG_PCI
-static int __devinit
-fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int fore200e_pca_detect(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
struct fore200e* fore200e;
@@ -2766,7 +2750,7 @@ out_disable:
}
-static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
+static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
{
struct fore200e *fore200e;
@@ -2789,7 +2773,7 @@ MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
static struct pci_driver fore200e_pca_driver = {
.name = "fore_200e",
.probe = fore200e_pca_detect,
- .remove = __devexit_p(fore200e_pca_remove_one),
+ .remove = fore200e_pca_remove_one,
.id_table = fore200e_pca_tbl,
};
#endif
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index b182c2f7d777..72b6960fa95f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -349,8 +349,8 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
return NULL;
}
-static int __devinit
-he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int he_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
struct atm_dev *atm_dev = NULL;
struct he_dev *he_dev = NULL;
@@ -406,8 +406,7 @@ init_one_failure:
return err;
}
-static void __devexit
-he_remove_one (struct pci_dev *pci_dev)
+static void he_remove_one(struct pci_dev *pci_dev)
{
struct atm_dev *atm_dev;
struct he_dev *he_dev;
@@ -445,8 +444,7 @@ rate_to_atmf(unsigned rate) /* cps to atm forum format */
return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
-static void __devinit
-he_init_rx_lbfp0(struct he_dev *he_dev)
+static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -476,8 +474,7 @@ he_init_rx_lbfp0(struct he_dev *he_dev)
he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
-static void __devinit
-he_init_rx_lbfp1(struct he_dev *he_dev)
+static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -507,8 +504,7 @@ he_init_rx_lbfp1(struct he_dev *he_dev)
he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
-static void __devinit
-he_init_tx_lbfp(struct he_dev *he_dev)
+static void he_init_tx_lbfp(struct he_dev *he_dev)
{
unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
@@ -537,8 +533,7 @@ he_init_tx_lbfp(struct he_dev *he_dev)
he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
-static int __devinit
-he_init_tpdrq(struct he_dev *he_dev)
+static int he_init_tpdrq(struct he_dev *he_dev)
{
he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
@@ -559,8 +554,7 @@ he_init_tpdrq(struct he_dev *he_dev)
return 0;
}
-static void __devinit
-he_init_cs_block(struct he_dev *he_dev)
+static void he_init_cs_block(struct he_dev *he_dev)
{
unsigned clock, rate, delta;
int reg;
@@ -655,8 +649,7 @@ he_init_cs_block(struct he_dev *he_dev)
}
-static int __devinit
-he_init_cs_block_rcm(struct he_dev *he_dev)
+static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
unsigned (*rategrid)[16][16];
unsigned rate, delta;
@@ -776,8 +769,7 @@ he_init_cs_block_rcm(struct he_dev *he_dev)
return 0;
}
-static int __devinit
-he_init_group(struct he_dev *he_dev, int group)
+static int he_init_group(struct he_dev *he_dev, int group)
{
struct he_buff *heb, *next;
dma_addr_t mapping;
@@ -915,8 +907,7 @@ out_free_rbpl_table:
return -ENOMEM;
}
-static int __devinit
-he_init_irq(struct he_dev *he_dev)
+static int he_init_irq(struct he_dev *he_dev)
{
int i;
@@ -978,8 +969,7 @@ he_init_irq(struct he_dev *he_dev)
return 0;
}
-static int __devinit
-he_start(struct atm_dev *dev)
+static int he_start(struct atm_dev *dev)
{
struct he_dev *he_dev;
struct pci_dev *pci_dev;
@@ -2879,7 +2869,7 @@ MODULE_DEVICE_TABLE(pci, he_pci_tbl);
static struct pci_driver he_driver = {
.name = "he",
.probe = he_init_one,
- .remove = __devexit_p(he_remove_one),
+ .remove = he_remove_one,
.id_table = he_pci_tbl,
};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 7d01c2a75256..1dc0519333f2 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1789,7 +1789,7 @@ static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
}
-static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
+static u16 read_bia(const hrz_dev *dev, u16 addr)
{
u32 ctrl = rd_regl (dev, CONTROL_0_REG);
@@ -1845,7 +1845,8 @@ static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
/********** initialise a card **********/
-static int __devinit hrz_init (hrz_dev * dev) {
+static int hrz_init(hrz_dev *dev)
+{
int onefivefive;
u16 chan;
@@ -2681,7 +2682,8 @@ static const struct atmdev_ops hrz_ops = {
.owner = THIS_MODULE,
};
-static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
+static int hrz_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_ent)
{
hrz_dev * dev;
int err = 0;
@@ -2836,7 +2838,7 @@ out_disable:
goto out;
}
-static void __devexit hrz_remove_one(struct pci_dev *pci_dev)
+static void hrz_remove_one(struct pci_dev *pci_dev)
{
hrz_dev *dev;
@@ -2901,7 +2903,7 @@ MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
static struct pci_driver hrz_driver = {
.name = "horizon",
.probe = hrz_probe,
- .remove = __devexit_p(hrz_remove_one),
+ .remove = hrz_remove_one,
.id_table = hrz_pci_tbl,
};
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 8974bd2b961e..272f00927761 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3109,8 +3109,7 @@ deinit_card(struct idt77252_dev *card)
}
-static void __devinit
-init_sram(struct idt77252_dev *card)
+static void init_sram(struct idt77252_dev *card)
{
int i;
@@ -3257,8 +3256,7 @@ init_sram(struct idt77252_dev *card)
IPRINTK("%s: SRAM initialization complete.\n", card->name);
}
-static int __devinit
-init_card(struct atm_dev *dev)
+static int init_card(struct atm_dev *dev)
{
struct idt77252_dev *card = dev->dev_data;
struct pci_dev *pcidev = card->pcidev;
@@ -3537,8 +3535,7 @@ init_card(struct atm_dev *dev)
/*****************************************************************************/
-static int __devinit
-idt77252_preset(struct idt77252_dev *card)
+static int idt77252_preset(struct idt77252_dev *card)
{
u16 pci_command;
@@ -3579,8 +3576,7 @@ idt77252_preset(struct idt77252_dev *card)
}
-static unsigned long __devinit
-probe_sram(struct idt77252_dev *card)
+static unsigned long probe_sram(struct idt77252_dev *card)
{
u32 data, addr;
@@ -3601,8 +3597,8 @@ probe_sram(struct idt77252_dev *card)
return addr * sizeof(u32);
}
-static int __devinit
-idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
+static int idt77252_init_one(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
{
static struct idt77252_dev **last = &idt77252_chain;
static int index = 0;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 96cce6d53195..4217f29a85e0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2299,7 +2299,7 @@ static int reset_sar(struct atm_dev *dev)
}
-static int __devinit ia_init(struct atm_dev *dev)
+static int ia_init(struct atm_dev *dev)
{
IADEV *iadev;
unsigned long real_base;
@@ -2492,7 +2492,7 @@ static void ia_free_rx(IADEV *iadev)
iadev->rx_dle_dma);
}
-static int __devinit ia_start(struct atm_dev *dev)
+static int ia_start(struct atm_dev *dev)
{
IADEV *iadev;
int error;
@@ -3168,8 +3168,7 @@ static const struct atmdev_ops ops = {
.owner = THIS_MODULE,
};
-static int __devinit ia_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct atm_dev *dev;
IADEV *iadev;
@@ -3229,7 +3228,7 @@ err_out:
return ret;
}
-static void __devexit ia_remove_one(struct pci_dev *pdev)
+static void ia_remove_one(struct pci_dev *pdev)
{
struct atm_dev *dev = pci_get_drvdata(pdev);
IADEV *iadev = INPH_IA_DEV(dev);
@@ -3270,7 +3269,7 @@ static struct pci_driver ia_driver = {
.name = DEV_LABEL,
.id_table = ia_pci_tbl,
.probe = ia_init_one,
- .remove = __devexit_p(ia_remove_one),
+ .remove = ia_remove_one,
};
static int __init ia_module_init(void)
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e6d4fc..53ecac5a2161 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
typedef u_int rreg_t;
typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;
typedef struct _rfredn_t {
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 68c758871812..fa7d701933ba 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -551,8 +551,8 @@ static inline void sram_write(const struct lanai_dev *lanai,
writel(val, sram_addr(lanai, offset));
}
-static int __devinit sram_test_word(const struct lanai_dev *lanai,
- int offset, u32 pattern)
+static int sram_test_word(const struct lanai_dev *lanai, int offset,
+ u32 pattern)
{
u32 readback;
sram_write(lanai, pattern, offset);
@@ -566,7 +566,7 @@ static int __devinit sram_test_word(const struct lanai_dev *lanai,
return -EIO;
}
-static int __devinit sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
+static int sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
{
int offset, result = 0;
for (offset = 0; offset < SRAM_BYTES && result == 0; offset += 4)
@@ -574,7 +574,7 @@ static int __devinit sram_test_pass(const struct lanai_dev *lanai, u32 pattern)
return result;
}
-static int __devinit sram_test_and_clear(const struct lanai_dev *lanai)
+static int sram_test_and_clear(const struct lanai_dev *lanai)
{
#ifdef FULL_MEMORY_TEST
int result;
@@ -860,7 +860,7 @@ static inline void aal0_buffer_free(struct lanai_dev *lanai)
#ifndef READ_EEPROM
/* Stub functions to use if EEPROM reading is disabled */
-static int __devinit eeprom_read(struct lanai_dev *lanai)
+static int eeprom_read(struct lanai_dev *lanai)
{
printk(KERN_INFO DEV_LABEL "(itf %d): *NOT* reading EEPROM\n",
lanai->number);
@@ -868,7 +868,7 @@ static int __devinit eeprom_read(struct lanai_dev *lanai)
return 0;
}
-static int __devinit eeprom_validate(struct lanai_dev *lanai)
+static int eeprom_validate(struct lanai_dev *lanai)
{
lanai->serialno = 0;
lanai->magicno = EEPROM_MAGIC_VALUE;
@@ -877,7 +877,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
#else /* READ_EEPROM */
-static int __devinit eeprom_read(struct lanai_dev *lanai)
+static int eeprom_read(struct lanai_dev *lanai)
{
int i, address;
u8 data;
@@ -953,7 +953,7 @@ static inline u32 eeprom_be4(const struct lanai_dev *lanai, int address)
}
/* Checksum/validate EEPROM contents */
-static int __devinit eeprom_validate(struct lanai_dev *lanai)
+static int eeprom_validate(struct lanai_dev *lanai)
{
int i, s;
u32 v;
@@ -1448,7 +1448,7 @@ static void vcc_rx_aal0(struct lanai_dev *lanai)
#include <linux/vmalloc.h>
#endif
-static int __devinit vcc_table_allocate(struct lanai_dev *lanai)
+static int vcc_table_allocate(struct lanai_dev *lanai)
{
#ifdef VCCTABLE_GETFREEPAGE
APRINTK((lanai->num_vci) * sizeof(struct lanai_vcc *) <= PAGE_SIZE,
@@ -1588,7 +1588,7 @@ static void lanai_reset(struct lanai_dev *lanai)
/*
* Allocate service buffer and tell card about it
*/
-static int __devinit service_buffer_allocate(struct lanai_dev *lanai)
+static int service_buffer_allocate(struct lanai_dev *lanai)
{
lanai_buf_allocate(&lanai->service, SERVICE_ENTRIES * 4, 8,
lanai->pci);
@@ -1942,7 +1942,7 @@ static int check_board_id_and_rev(const char *name, u32 val, int *revp)
/* -------------------- PCI INITIALIZATION/SHUTDOWN: */
-static int __devinit lanai_pci_start(struct lanai_dev *lanai)
+static int lanai_pci_start(struct lanai_dev *lanai)
{
struct pci_dev *pci = lanai->pci;
int result;
@@ -2123,7 +2123,7 @@ static inline void lanai_cbr_shutdown(struct lanai_dev *lanai)
/* -------------------- OPERATIONS: */
/* setup a newly detected device */
-static int __devinit lanai_dev_open(struct atm_dev *atmdev)
+static int lanai_dev_open(struct atm_dev *atmdev)
{
struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
unsigned long raw_base;
@@ -2566,8 +2566,8 @@ static const struct atmdev_ops ops = {
};
/* initialize one probed card */
-static int __devinit lanai_init_one(struct pci_dev *pci,
- const struct pci_device_id *ident)
+static int lanai_init_one(struct pci_dev *pci,
+ const struct pci_device_id *ident)
{
struct lanai_dev *lanai;
struct atm_dev *atmdev;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 1c70c45fa044..ed1d2b7f923b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -121,8 +121,8 @@
static u32 ns_read_sram(ns_dev * card, u32 sram_address);
static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
int count);
-static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
-static void __devinit ns_init_card_error(ns_dev * card, int error);
+static int ns_init_card(int i, struct pci_dev *pcidev);
+static void ns_init_card_error(ns_dev * card, int error);
static scq_info *get_scq(ns_dev *card, int size, u32 scd);
static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
static void push_rxbufs(ns_dev *, struct sk_buff *);
@@ -180,8 +180,8 @@ MODULE_LICENSE("GPL");
/* Functions */
-static int __devinit nicstar_init_one(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int nicstar_init_one(struct pci_dev *pcidev,
+ const struct pci_device_id *ent)
{
static int index = -1;
unsigned int error;
@@ -200,7 +200,7 @@ err_out:
return -ENODEV;
}
-static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
+static void nicstar_remove_one(struct pci_dev *pcidev)
{
int i, j;
ns_dev *card = pci_get_drvdata(pcidev);
@@ -262,7 +262,7 @@ static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
kfree(card);
}
-static struct pci_device_id nicstar_pci_tbl[] __devinitdata = {
+static struct pci_device_id nicstar_pci_tbl[] = {
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
{0,} /* terminate list */
};
@@ -273,7 +273,7 @@ static struct pci_driver nicstar_driver = {
.name = "nicstar",
.id_table = nicstar_pci_tbl,
.probe = nicstar_init_one,
- .remove = __devexit_p(nicstar_remove_one),
+ .remove = nicstar_remove_one,
};
static int __init nicstar_init(void)
@@ -351,7 +351,7 @@ static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
spin_unlock_irqrestore(&card->res_lock, flags);
}
-static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
+static int ns_init_card(int i, struct pci_dev *pcidev)
{
int j;
struct ns_dev *card = NULL;
@@ -821,7 +821,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
-static void __devinit ns_init_card_error(ns_dev * card, int error)
+static void ns_init_card_error(ns_dev *card, int error)
{
if (error >= 17) {
writel(0x00000000, card->membase + CFG);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 98510931c815..0474a89170b9 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -42,7 +42,8 @@
#include <linux/swab.h>
#include <linux/slab.h>
-#define VERSION "0.07"
+#define VERSION "1.04"
+#define DRIVER_VERSION 0x01
#define PTAG "solos-pci"
#define CONFIG_RAM_SIZE 128
@@ -56,16 +57,21 @@
#define FLASH_BUSY 0x60
#define FPGA_MODE 0x5C
#define FLASH_MODE 0x58
+#define GPIO_STATUS 0x54
+#define DRIVER_VER 0x50
#define TX_DMA_ADDR(port) (0x40 + (4 * (port)))
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
#define BUF_SIZE 2048
#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
-#define FPGA_PAGE 528 /* FPGA flash page size*/
-#define SOLOS_PAGE 512 /* Solos flash page size*/
-#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
-#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
+/* Old boards use ATMEL AT45DB161D flash */
+#define ATMEL_FPGA_PAGE 528 /* FPGA flash page size*/
+#define ATMEL_SOLOS_PAGE 512 /* Solos flash page size*/
+#define ATMEL_FPGA_BLOCK (ATMEL_FPGA_PAGE * 8) /* FPGA block size*/
+#define ATMEL_SOLOS_BLOCK (ATMEL_SOLOS_PAGE * 8) /* Solos block size*/
+/* Current boards use M25P/M25PE SPI flash */
+#define SPI_FLASH_BLOCK (256 * 64)
#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
@@ -122,11 +128,14 @@ struct solos_card {
struct sk_buff_head cli_queue[4];
struct sk_buff *tx_skb[4];
struct sk_buff *rx_skb[4];
+ unsigned char *dma_bounce;
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
+ int dma_alignment;
int fpga_version;
int buffer_size;
+ int atmel_flash;
};
@@ -164,7 +173,6 @@ static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
static uint32_t fpga_tx(struct solos_card *);
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
-static int list_vccs(int vci);
static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
@@ -452,7 +460,6 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
len = skb->len;
memcpy(buf, skb->data, len);
- dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
return len;
@@ -499,6 +506,78 @@ static ssize_t console_store(struct device *dev, struct device_attribute *attr,
return err?:count;
}
+struct geos_gpio_attr {
+ struct device_attribute attr;
+ int offset;
+};
+
+#define SOLOS_GPIO_ATTR(_name, _mode, _show, _store, _offset) \
+ struct geos_gpio_attr gpio_attr_##_name = { \
+ .attr = __ATTR(_name, _mode, _show, _store), \
+ .offset = _offset }
+
+static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ if (count != 1 && (count != 2 || buf[1] != '\n'))
+ return -EINVAL;
+
+ spin_lock_irq(&card->param_queue_lock);
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ if (buf[0] == '1') {
+ data32 |= 1 << gattr->offset;
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else if (buf[0] == '0') {
+ data32 &= ~(1 << gattr->offset);
+ iowrite32(data32, card->config_regs + GPIO_STATUS);
+ } else {
+ count = -EINVAL;
+ }
+ spin_unlock_irq(&card->param_queue_lock);
+ return count;
+}
+
+static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ data32 = (data32 >> gattr->offset) & 1;
+
+ return sprintf(buf, "%d\n", data32);
+}
+
+static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
+ struct solos_card *card = pci_get_drvdata(pdev);
+ uint32_t data32;
+
+ data32 = ioread32(card->config_regs + GPIO_STATUS);
+ switch (gattr->offset) {
+ case 0:
+ /* HardwareVersion */
+ data32 = data32 & 0x1F;
+ break;
+ case 1:
+ /* HardwareVariant */
+ data32 = (data32 >> 5) & 0x0F;
+ break;
+ }
+ return sprintf(buf, "%d\n", data32);
+}
+
static DEVICE_ATTR(console, 0644, console_show, console_store);
@@ -507,6 +586,14 @@ static DEVICE_ATTR(console, 0644, console_show, console_store);
#include "solos-attrlist.c"
+static SOLOS_GPIO_ATTR(GPIO1, 0644, geos_gpio_show, geos_gpio_store, 9);
+static SOLOS_GPIO_ATTR(GPIO2, 0644, geos_gpio_show, geos_gpio_store, 10);
+static SOLOS_GPIO_ATTR(GPIO3, 0644, geos_gpio_show, geos_gpio_store, 11);
+static SOLOS_GPIO_ATTR(GPIO4, 0644, geos_gpio_show, geos_gpio_store, 12);
+static SOLOS_GPIO_ATTR(GPIO5, 0644, geos_gpio_show, geos_gpio_store, 13);
+static SOLOS_GPIO_ATTR(PushButton, 0444, geos_gpio_show, NULL, 14);
+static SOLOS_GPIO_ATTR(HardwareVersion, 0444, hardware_show, NULL, 0);
+static SOLOS_GPIO_ATTR(HardwareVariant, 0444, hardware_show, NULL, 1);
#undef SOLOS_ATTR_RO
#undef SOLOS_ATTR_RW
@@ -523,6 +610,23 @@ static struct attribute_group solos_attr_group = {
.name = "parameters",
};
+static struct attribute *gpio_attrs[] = {
+ &gpio_attr_GPIO1.attr.attr,
+ &gpio_attr_GPIO2.attr.attr,
+ &gpio_attr_GPIO3.attr.attr,
+ &gpio_attr_GPIO4.attr.attr,
+ &gpio_attr_GPIO5.attr.attr,
+ &gpio_attr_PushButton.attr.attr,
+ &gpio_attr_HardwareVersion.attr.attr,
+ &gpio_attr_HardwareVariant.attr.attr,
+ NULL
+};
+
+static struct attribute_group gpio_attr_group = {
+ .attrs = gpio_attrs,
+ .name = "gpio",
+};
+
static int flash_upgrade(struct solos_card *card, int chip)
{
const struct firmware *fw;
@@ -534,16 +638,25 @@ static int flash_upgrade(struct solos_card *card, int chip)
switch (chip) {
case 0:
fw_name = "solos-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 1:
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
break;
case 2:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-db-FPGA.bin";
- blocksize = FPGA_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_FPGA_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -553,7 +666,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
case 3:
if (card->fpga_version > LEGACY_BUFFERS){
fw_name = "solos-Firmware.bin";
- blocksize = SOLOS_BLOCK;
+ if (card->atmel_flash)
+ blocksize = ATMEL_SOLOS_BLOCK;
+ else
+ blocksize = SPI_FLASH_BLOCK;
} else {
dev_info(&card->dev->dev, "FPGA version doesn't support"
" daughter board upgrades\n");
@@ -569,6 +685,9 @@ static int flash_upgrade(struct solos_card *card, int chip)
dev_info(&card->dev->dev, "Flash upgrade starting\n");
+ /* New FPGAs require the driver version to be written before permitting flash upgrades */
+ iowrite32(DRIVER_VERSION, card->config_regs + DRIVER_VER);
+
numblocks = fw->size / blocksize;
dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size);
dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks);
@@ -598,9 +717,13 @@ static int flash_upgrade(struct solos_card *card, int chip)
/* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */
iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE);
- /* Copy block to buffer, swapping each 16 bits */
+ /* Copy block to buffer, swapping each 16 bits for Atmel flash */
for(i = 0; i < blocksize; i += 4) {
- uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
+ uint32_t word;
+ if (card->atmel_flash)
+ word = swahb32p((uint32_t *)(fw->data + offset + i));
+ else
+ word = *(uint32_t *)(fw->data + offset + i);
if(card->fpga_version > LEGACY_BUFFERS)
iowrite32(word, FLASH_BUF + i);
else
@@ -710,7 +833,8 @@ void solos_bh(unsigned long card_arg)
dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
port);
- continue;
+ dev_kfree_skb_any(skb);
+ break;
}
atm_charge(vcc, skb->truesize);
vcc->push(vcc, skb);
@@ -790,44 +914,6 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return vcc;
}
-static int list_vccs(int vci)
-{
- struct hlist_head *head;
- struct atm_vcc *vcc;
- struct hlist_node *node;
- struct sock *s;
- int num_found = 0;
- int i;
-
- read_lock(&vcc_sklist_lock);
- if (vci != 0){
- head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
- sk_for_each(s, node, head) {
- num_found ++;
- vcc = atm_sk(s);
- printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
- vcc->dev->number,
- vcc->vpi,
- vcc->vci);
- }
- } else {
- for(i = 0; i < VCC_HTABLE_SIZE; i++){
- head = &vcc_hash[i];
- sk_for_each(s, node, head) {
- num_found ++;
- vcc = atm_sk(s);
- printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
- vcc->dev->number,
- vcc->vpi,
- vcc->vci);
- }
- }
- }
- read_unlock(&vcc_sklist_lock);
- return num_found;
-}
-
-
static int popen(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
@@ -840,7 +926,7 @@ static int popen(struct atm_vcc *vcc)
return -EINVAL;
}
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
@@ -857,8 +943,6 @@ static int popen(struct atm_vcc *vcc)
set_bit(ATM_VF_ADDR, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
- list_vccs(0);
-
return 0;
}
@@ -866,10 +950,21 @@ static int popen(struct atm_vcc *vcc)
static void pclose(struct atm_vcc *vcc)
{
struct solos_card *card = vcc->dev->dev_data;
- struct sk_buff *skb;
+ unsigned char port = SOLOS_CHAN(vcc->dev);
+ struct sk_buff *skb, *tmpskb;
struct pkt_hdr *header;
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ /* Remove any yet-to-be-transmitted packets from the pending queue */
+ spin_lock(&card->tx_queue_lock);
+ skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
+ if (SKB_CB(skb)->vcc == vcc) {
+ skb_unlink(skb, &card->tx_queue[port]);
+ solos_pop(vcc, skb);
+ }
+ }
+ spin_unlock(&card->tx_queue_lock);
+
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
return;
@@ -881,15 +976,22 @@ static void pclose(struct atm_vcc *vcc)
header->vci = cpu_to_le16(vcc->vci);
header->type = cpu_to_le16(PKT_PCLOSE);
- fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL);
+ skb_get(skb);
+ fpga_queue(card, port, skb, NULL);
- clear_bit(ATM_VF_ADDR, &vcc->flags);
- clear_bit(ATM_VF_READY, &vcc->flags);
+ if (!wait_event_timeout(card->param_wq, !skb_shared(skb), 5 * HZ))
+ dev_warn(&card->dev->dev,
+ "Timeout waiting for VCC close on port %d\n", port);
+
+ dev_kfree_skb(skb);
/* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
tasklet has finished processing any incoming packets (and, more to
the point, using the vcc pointer). */
tasklet_unlock_wait(&card->tlet);
+
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+
return;
}
@@ -967,10 +1069,11 @@ static uint32_t fpga_tx(struct solos_card *card)
for (port = 0; tx_pending; tx_pending >>= 1, port++) {
if (tx_pending & 1) {
struct sk_buff *oldskb = card->tx_skb[port];
- if (oldskb)
+ if (oldskb) {
pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
oldskb->len, PCI_DMA_TODEVICE);
-
+ card->tx_skb[port] = NULL;
+ }
spin_lock(&card->tx_queue_lock);
skb = skb_dequeue(&card->tx_queue[port]);
if (!skb)
@@ -982,7 +1085,12 @@ static uint32_t fpga_tx(struct solos_card *card)
tx_started |= 1 << port;
oldskb = skb; /* We're done with this skb already */
} else if (skb && card->using_dma) {
- SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ unsigned char *data = skb->data;
+ if ((unsigned long)data & card->dma_alignment) {
+ data = card->dma_bounce + (BUF_SIZE * port);
+ memcpy(data, skb->data, skb->len);
+ }
+ SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
skb->len, PCI_DMA_TODEVICE);
card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
@@ -1011,9 +1119,10 @@ static uint32_t fpga_tx(struct solos_card *card)
if (vcc) {
atomic_inc(&vcc->stats->tx);
solos_pop(vcc, oldskb);
- } else
+ } else {
dev_kfree_skb_irq(oldskb);
-
+ wake_up(&card->param_wq);
+ }
}
}
/* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */
@@ -1153,18 +1262,33 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
+ /* Stopped using Atmel flash after 0.03-38 */
+ if (fpga_ver < 39)
+ card->atmel_flash = 1;
+ else
+ card->atmel_flash = 0;
+
+ data32 = ioread32(card->config_regs + PORTS);
+ card->nr_ports = (data32 & 0x000000FF);
+
if (card->fpga_version >= DMA_SUPPORTED) {
pci_set_master(dev);
card->using_dma = 1;
+ if (1) { /* All known FPGA versions so far */
+ card->dma_alignment = 3;
+ card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
+ if (!card->dma_bounce) {
+ dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
+ /* Fallback to MMIO doesn't work */
+ goto out_unmap_both;
+ }
+ }
} else {
card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
- data32 = ioread32(card->config_regs + PORTS);
- card->nr_ports = (data32 & 0x000000FF);
-
pci_set_drvdata(dev, card);
tasklet_init(&card->tlet, solos_bh, (unsigned long)card);
@@ -1199,6 +1323,10 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto out_free_irq;
+ if (card->fpga_version >= DMA_SUPPORTED &&
+ sysfs_create_group(&card->dev->dev.kobj, &gpio_attr_group))
+ dev_err(&card->dev->dev, "Could not register parameter group for GPIOs\n");
+
return 0;
out_free_irq:
@@ -1207,6 +1335,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
tasklet_kill(&card->tlet);
out_unmap_both:
+ kfree(card->dma_bounce);
pci_set_drvdata(dev, NULL);
pci_iounmap(dev, card->buffers);
out_unmap_config:
@@ -1248,7 +1377,7 @@ static int atm_init(struct solos_card *card, struct device *parent)
card->atmdev[i]->phy_data = (void *)(unsigned long)i;
atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
- skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*header), GFP_KERNEL);
if (!skb) {
dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n");
continue;
@@ -1309,11 +1438,16 @@ static void fpga_remove(struct pci_dev *dev)
iowrite32(1, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
+ if (card->fpga_version >= DMA_SUPPORTED)
+ sysfs_remove_group(&card->dev->dev.kobj, &gpio_attr_group);
+
atm_remove(card);
free_irq(dev->irq, card);
tasklet_kill(&card->tlet);
+ kfree(card->dma_bounce);
+
/* Release device from reset */
iowrite32(0, card->config_regs + FPGA_MODE);
(void)ioread32(card->config_regs + FPGA_MODE);
@@ -1328,7 +1462,7 @@ static void fpga_remove(struct pci_dev *dev)
kfree(card);
}
-static struct pci_device_id fpga_pci_tbl[] __devinitdata = {
+static struct pci_device_id fpga_pci_tbl[] = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
@@ -1345,6 +1479,8 @@ static struct pci_driver fpga_driver = {
static int __init solos_pci_init(void)
{
+ BUILD_BUG_ON(sizeof(struct solos_skb_cb) > sizeof(((struct sk_buff *)0)->cb));
+
printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION);
return pci_register_driver(&fpga_driver);
}
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index abe4e20b0766..969c3c29000c 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1094,8 +1094,8 @@ static irqreturn_t zatm_int(int irq,void *dev_id)
/*----------------------------- (E)EPROM access -----------------------------*/
-static void __devinit eprom_set(struct zatm_dev *zatm_dev,unsigned long value,
- unsigned short cmd)
+static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value,
+ unsigned short cmd)
{
int error;
@@ -1105,8 +1105,7 @@ static void __devinit eprom_set(struct zatm_dev *zatm_dev,unsigned long value,
}
-static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev,
- unsigned short cmd)
+static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd)
{
unsigned int value;
int error;
@@ -1118,8 +1117,8 @@ static unsigned long __devinit eprom_get(struct zatm_dev *zatm_dev,
}
-static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev,
- unsigned long data,int bits,unsigned short cmd)
+static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data,
+ int bits, unsigned short cmd)
{
unsigned long value;
int i;
@@ -1133,8 +1132,8 @@ static void __devinit eprom_put_bits(struct zatm_dev *zatm_dev,
}
-static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev,
- unsigned char *byte,unsigned short cmd)
+static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
+ unsigned short cmd)
{
int i;
@@ -1149,8 +1148,8 @@ static void __devinit eprom_get_byte(struct zatm_dev *zatm_dev,
}
-static unsigned char __devinit eprom_try_esi(struct atm_dev *dev,
- unsigned short cmd,int offset,int swap)
+static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
+ int offset, int swap)
{
unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev;
@@ -1170,7 +1169,7 @@ static unsigned char __devinit eprom_try_esi(struct atm_dev *dev,
}
-static void __devinit eprom_get_esi(struct atm_dev *dev)
+static void eprom_get_esi(struct atm_dev *dev)
{
if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return;
(void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0);
@@ -1180,7 +1179,7 @@ static void __devinit eprom_get_esi(struct atm_dev *dev)
/*--------------------------------- entries ---------------------------------*/
-static int __devinit zatm_init(struct atm_dev *dev)
+static int zatm_init(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev;
struct pci_dev *pci_dev;
@@ -1257,7 +1256,7 @@ static int __devinit zatm_init(struct atm_dev *dev)
}
-static int __devinit zatm_start(struct atm_dev *dev)
+static int zatm_start(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev = ZATM_DEV(dev);
struct pci_dev *pdev = zatm_dev->pci_dev;
@@ -1584,8 +1583,8 @@ static const struct atmdev_ops ops = {
.change_qos = zatm_change_qos,
};
-static int __devinit zatm_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *ent)
+static int zatm_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *ent)
{
struct atm_dev *dev;
struct zatm_dev *zatm_dev;
@@ -1636,7 +1635,7 @@ out_free:
MODULE_LICENSE("GPL");
-static struct pci_device_id zatm_pci_tbl[] __devinitdata = {
+static struct pci_device_id zatm_pci_tbl[] = {
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
{ 0, }
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 5ad3bad2b0a5..d585735430dd 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
+static struct fb_var_screeninfo cfag12864bfb_var = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
@@ -80,7 +80,7 @@ static struct fb_ops cfag12864bfb_ops = {
.fb_mmap = cfag12864bfb_mmap,
};
-static int __devinit cfag12864bfb_probe(struct platform_device *device)
+static int cfag12864bfb_probe(struct platform_device *device)
{
int ret = -EINVAL;
struct fb_info *info = framebuffer_alloc(0, &device->dev);
@@ -114,7 +114,7 @@ none:
return ret;
}
-static int __devexit cfag12864bfb_remove(struct platform_device *device)
+static int cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -128,7 +128,7 @@ static int __devexit cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = __devexit_p(cfag12864bfb_remove),
+ .remove = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index b34b5cda5ae1..c8b453939da2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -57,7 +57,7 @@ config DEVTMPFS_MOUNT
on the rootfs is completely empty.
config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+ bool "Select only drivers that don't need compile-time external firmware"
default y
help
Select this option if you don't have magic firmware for drivers that
@@ -185,7 +185,6 @@ config DMA_SHARED_BUFFER
bool
default n
select ANON_INODES
- depends on EXPERIMENTAL
help
This option enables the framework for buffer-sharing between
multiple drivers. A buffer is associated with a file using driver
@@ -193,8 +192,8 @@ config DMA_SHARED_BUFFER
driver.
config CMA
- bool "Contiguous Memory Allocator (EXPERIMENTAL)"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ bool "Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
select MIGRATION
select MEMORY_ISOLATION
help
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 8fc200b2e2c0..d78b204e65c1 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -158,7 +158,7 @@ attribute_container_add_device(struct device *dev,
ic = kzalloc(sizeof(*ic), GFP_KERNEL);
if (!ic) {
- dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
+ dev_err(dev, "failed to allocate class container\n");
continue;
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 181ed2660b33..24eb07868344 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -164,8 +164,6 @@ static const struct kset_uevent_ops bus_uevent_ops = {
static struct kset *bus_kset;
-
-#ifdef CONFIG_HOTPLUG
/* Manually detach a device from its associated driver. */
static ssize_t driver_unbind(struct device_driver *drv,
const char *buf, size_t count)
@@ -252,7 +250,6 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
return -EINVAL;
return count;
}
-#endif
static struct device *next_device(struct klist_iter *i)
{
@@ -618,11 +615,6 @@ static void driver_remove_attrs(struct bus_type *bus,
}
}
-#ifdef CONFIG_HOTPLUG
-/*
- * Thanks to drivers making their tables __devinit, we can't allow manual
- * bind and unbind from userspace unless CONFIG_HOTPLUG is enabled.
- */
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
@@ -666,12 +658,6 @@ static void remove_probe_files(struct bus_type *bus)
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
}
-#else
-static inline int add_bind_files(struct device_driver *drv) { return 0; }
-static inline void remove_bind_files(struct device_driver *drv) {}
-static inline int add_probe_files(struct bus_type *bus) { return 0; }
-static inline void remove_probe_files(struct bus_type *bus) {}
-#endif
static ssize_t driver_uevent_store(struct device_driver *drv,
const char *buf, size_t count)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index abea76c36a4b..a235085e343c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -171,6 +171,27 @@ ssize_t device_show_int(struct device *dev,
}
EXPORT_SYMBOL_GPL(device_show_int);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ if (strtobool(buf, ea->var) < 0)
+ return -EINVAL;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
+
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -1180,7 +1201,6 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_pm_remove(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1205,6 +1225,7 @@ void device_del(struct device *dev)
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
+ device_pm_remove(dev);
driver_deferred_probe_del(dev);
/* Notify the platform of the removal, in case they
@@ -1399,7 +1420,7 @@ struct root_device {
struct module *owner;
};
-inline struct root_device *to_root_device(struct device *d)
+static inline struct root_device *to_root_device(struct device *d)
{
return container_of(d, struct root_device, dev);
}
@@ -1840,10 +1861,12 @@ void device_shutdown(void)
pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
- dev_dbg(dev, "shutdown\n");
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
} else if (dev->driver && dev->driver->shutdown) {
- dev_dbg(dev, "shutdown\n");
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 63452943abd1..fb10728f6372 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
- * on the linux-kerenl list, you have been warned.
+ * on the linux-kernel list, you have been warned.
*/
}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8731979d668a..668390664764 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -50,8 +50,8 @@ static void devres_log(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
- dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
- op, node, node->name, (unsigned long)node->size);
+ dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
+ op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s) do {} while (0)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 147d1a4dd269..17cf7cad601e 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -148,7 +148,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 460e22dee36d..a3f79c495a41 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -298,6 +298,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
enum dma_data_direction direction)
{
+ might_sleep();
+
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
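[Editor's note] The new might_sleep() documents that dma_buf_unmap_attachment() may block, so importers must call it from sleepable context. A hedged sketch of the importer flow (error handling trimmed, names hypothetical):

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	/* ... perform DMA using sgt ... */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);	/* may sleep */
	dma_buf_detach(dmabuf, attach);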
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3fbedc75e7c5..0ce39a33b3c2 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -218,6 +218,8 @@ void dmam_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dmam_release_declared_memory);
+#endif
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -236,8 +238,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
}
EXPORT_SYMBOL(dma_common_get_sgtable);
-#endif
-
/*
* Create userspace mapping for the DMA-coherent memory.
*/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8945f4e489ed..b392b353be39 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -143,7 +143,7 @@ struct fw_cache_entry {
};
struct firmware_priv {
- struct timer_list timeout;
+ struct delayed_work timeout_work;
bool nowait;
struct device dev;
struct firmware_buf *buf;
@@ -246,7 +246,6 @@ static void __fw_free_buf(struct kref *ref)
__func__, buf->fw_id, buf, buf->data,
(unsigned int)buf->size);
- spin_lock(&fwc->lock);
list_del(&buf->list);
spin_unlock(&fwc->lock);
@@ -263,19 +262,32 @@ static void __fw_free_buf(struct kref *ref)
static void fw_free_buf(struct firmware_buf *buf)
{
- kref_put(&buf->ref, __fw_free_buf);
+ struct firmware_cache *fwc = buf->fwc;
+ spin_lock(&fwc->lock);
+ if (!kref_put(&buf->ref, __fw_free_buf))
+ spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
-static const char *fw_path[] = {
+static char fw_path_para[256];
+static const char * const fw_path[] = {
+ fw_path_para,
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
"/lib/firmware"
};
+/*
+ * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
+ * from kernel command line because firmware_class is generally built in
+ * kernel instead of module.
+ */
+module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
+MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
+
/* Don't inline this: 'struct kstat' is biggish */
-static noinline long fw_file_size(struct file *file)
+static noinline_for_stack long fw_file_size(struct file *file)
{
struct kstat st;
if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
@@ -293,7 +305,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
- if (size < 0)
+ if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)
@@ -315,6 +327,11 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
struct file *file;
+
+ /* skip the unset customized path */
+ if (!fw_path[i][0])
+ continue;
+
snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
file = filp_open(path, O_RDONLY, 0);
@@ -667,11 +684,18 @@ static struct bin_attribute firmware_attr_data = {
.write = firmware_data_write,
};
-static void firmware_class_timeout(u_long data)
+static void firmware_class_timeout_work(struct work_struct *work)
{
- struct firmware_priv *fw_priv = (struct firmware_priv *) data;
+ struct firmware_priv *fw_priv = container_of(work,
+ struct firmware_priv, timeout_work.work);
+ mutex_lock(&fw_lock);
+ if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
+ mutex_unlock(&fw_lock);
+ return;
+ }
fw_load_abort(fw_priv);
+ mutex_unlock(&fw_lock);
}
static struct firmware_priv *
@@ -690,8 +714,8 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv->nowait = nowait;
fw_priv->fw = firmware;
- setup_timer(&fw_priv->timeout,
- firmware_class_timeout, (u_long) fw_priv);
+ INIT_DELAYED_WORK(&fw_priv->timeout_work,
+ firmware_class_timeout_work);
f_dev = &fw_priv->dev;
@@ -858,7 +882,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
dev_dbg(f_dev->parent, "firmware: direct-loading"
" firmware %s\n", buf->fw_id);
+ mutex_lock(&fw_lock);
set_bit(FW_STATUS_DONE, &buf->status);
+ mutex_unlock(&fw_lock);
complete_all(&buf->completion);
direct_load = 1;
goto handle_fw;
@@ -894,15 +920,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
- mod_timer(&fw_priv->timeout,
- round_jiffies_up(jiffies + timeout));
+ schedule_delayed_work(&fw_priv->timeout_work, timeout);
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
wait_for_completion(&buf->completion);
- del_timer_sync(&fw_priv->timeout);
+ cancel_delayed_work_sync(&fw_priv->timeout_work);
handle_fw:
mutex_lock(&fw_lock);
@@ -963,6 +988,9 @@ err_put_dev:
* firmware image for this or any other device.
*
* Caller must hold the reference count of @device.
+ *
+ * The function can be called safely inside device's suspend and
+ * resume callback.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
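[Editor's note] With the new module parameter, a customized search directory can be given at boot (e.g. firmware_class.path=/lib/firmware/custom) and is tried before the built-in paths. The request side is unchanged; a hedged sketch with a hypothetical blob name:

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "foo/foo-fw.bin", &pdev->dev);
	if (err)
		return err;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);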
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 86c88216a503..987604d56c83 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,6 +70,13 @@ void unregister_memory_isolate_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+static void memory_block_release(struct device *dev)
+{
+ struct memory_block *mem = container_of(dev, struct memory_block, dev);
+
+ kfree(mem);
+}
+
/*
* register_memory - Setup a sysfs device for a memory block
*/
@@ -80,6 +87,7 @@ int register_memory(struct memory_block *memory)
memory->dev.bus = &memory_subsys;
memory->dev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.release = memory_block_release;
error = device_register(&memory->dev);
return error;
@@ -246,7 +254,7 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long phys_index, unsigned long action)
+memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -261,7 +269,7 @@ memory_block_action(unsigned long phys_index, unsigned long action)
if (!pages_correctly_reserved(start_pfn, nr_pages))
return -EBUSY;
- ret = online_pages(start_pfn, nr_pages);
+ ret = online_pages(start_pfn, nr_pages, online_type);
break;
case MEM_OFFLINE:
ret = offline_pages(start_pfn, nr_pages);
@@ -276,7 +284,8 @@ memory_block_action(unsigned long phys_index, unsigned long action)
}
static int __memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req)
+ unsigned long to_state, unsigned long from_state_req,
+ int online_type)
{
int ret = 0;
@@ -288,7 +297,7 @@ static int __memory_block_change_state(struct memory_block *mem,
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
- ret = memory_block_action(mem->start_section_nr, to_state);
+ ret = memory_block_action(mem->start_section_nr, to_state, online_type);
if (ret) {
mem->state = from_state_req;
@@ -311,12 +320,14 @@ out:
}
static int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req)
+ unsigned long to_state, unsigned long from_state_req,
+ int online_type)
{
int ret;
mutex_lock(&mem->state_mutex);
- ret = __memory_block_change_state(mem, to_state, from_state_req);
+ ret = __memory_block_change_state(mem, to_state, from_state_req,
+ online_type);
mutex_unlock(&mem->state_mutex);
return ret;
@@ -330,10 +341,18 @@ store_mem_state(struct device *dev,
mem = container_of(dev, struct memory_block, dev);
- if (!strncmp(buf, "online", min((int)count, 6)))
- ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
- else if(!strncmp(buf, "offline", min((int)count, 7)))
- ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_KERNEL);
+ else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_MOVABLE);
+ else if (!strncmp(buf, "online", min_t(int, count, 6)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_KEEP);
+ else if(!strncmp(buf, "offline", min_t(int, count, 7)))
+ ret = memory_block_change_state(mem, MEM_OFFLINE,
+ MEM_ONLINE, -1);
if (ret)
return ret;
@@ -635,7 +654,6 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
mem_remove_simple_file(mem, phys_device);
mem_remove_simple_file(mem, removable);
unregister_memory(mem);
- kfree(mem);
} else
kobject_put(&mem->dev.kobj);
@@ -669,7 +687,7 @@ int offline_memory_block(struct memory_block *mem)
mutex_lock(&mem->state_mutex);
if (mem->state != MEM_OFFLINE)
- ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
mutex_unlock(&mem->state_mutex);
return ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index af1a177216f1..fac124a7e1c5 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -227,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->dev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -252,6 +252,24 @@ static inline void hugetlb_register_node(struct node *node) {}
static inline void hugetlb_unregister_node(struct node *node) {}
#endif
+static void node_device_release(struct device *dev)
+{
+ struct node *node = to_node(dev);
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ /*
+ * We schedule the work only when a memory section is
+ * onlined/offlined on this node. When we come here,
+ * all the memory on this node has been offlined,
+ * so we won't enqueue new work to this work.
+ *
+ * The work is using node->node_work, so we should
+ * flush work before freeing the memory.
+ */
+ flush_work(&node->node_work);
+#endif
+ kfree(node);
+}
/*
* register_node - Setup a sysfs device for a node.
@@ -259,12 +277,13 @@ static inline void hugetlb_unregister_node(struct node *node) {}
*
* Initialize and register the node device.
*/
-int register_node(struct node *node, int num, struct node *parent)
+static int register_node(struct node *node, int num, struct node *parent)
{
int error;
node->dev.id = num;
node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
error = device_register(&node->dev);
if (!error){
@@ -306,7 +325,7 @@ void unregister_node(struct node *node)
device_unregister(&node->dev);
}
-struct node node_devices[MAX_NUMNODES];
+struct node *node_devices[MAX_NUMNODES];
/*
* register cpu under node
@@ -323,15 +342,15 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
if (!obj)
return 0;
- ret = sysfs_create_link(&node_devices[nid].dev.kobj,
+ ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
@@ -345,10 +364,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
if (!obj)
return 0;
- sysfs_remove_link(&node_devices[nid].dev.kobj,
+ sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ kobject_name(&node_devices[nid]->dev.kobj));
return 0;
}
@@ -390,15 +409,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -431,10 +450,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
- sysfs_remove_link(&node_devices[nid].dev.kobj,
+ sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ kobject_name(&node_devices[nid]->dev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
@@ -500,7 +519,7 @@ static void node_hugetlb_work(struct work_struct *work)
static void init_node_hugetlb_work(int nid)
{
- INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+ INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}
static int node_memory_callback(struct notifier_block *self,
@@ -517,7 +536,7 @@ static int node_memory_callback(struct notifier_block *self,
* when transitioning to/from memoryless state.
*/
if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid].node_work);
+ schedule_work(&node_devices[nid]->node_work);
break;
case MEM_GOING_ONLINE:
@@ -558,9 +577,13 @@ int register_one_node(int nid)
struct node *parent = NULL;
if (p_node != nid)
- parent = &node_devices[p_node];
+ parent = node_devices[p_node];
+
+ node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+ if (!node_devices[nid])
+ return -ENOMEM;
- error = register_node(&node_devices[nid], nid, parent);
+ error = register_node(node_devices[nid], nid, parent);
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -581,7 +604,8 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
- unregister_node(&node_devices[nid]);
+ unregister_node(node_devices[nid]);
+ node_devices[nid] = NULL;
}
/*
@@ -614,23 +638,29 @@ static ssize_t show_node_state(struct device *dev,
{ __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
- _NODE_ATTR(possible, N_POSSIBLE),
- _NODE_ATTR(online, N_ONLINE),
- _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
- _NODE_ATTR(has_cpu, N_CPU),
+ [N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
+ [N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
+ [N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
- _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
+ [N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+#endif
+ [N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};
static struct attribute *node_state_attrs[] = {
- &node_state_attr[0].attr.attr,
- &node_state_attr[1].attr.attr,
- &node_state_attr[2].attr.attr,
- &node_state_attr[3].attr.attr,
+ &node_state_attr[N_POSSIBLE].attr.attr,
+ &node_state_attr[N_ONLINE].attr.attr,
+ &node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr.attr,
+ &node_state_attr[N_HIGH_MEMORY].attr.attr,
+#endif
+#ifdef CONFIG_MOVABLE_NODE
+ &node_state_attr[N_MEMORY].attr.attr,
#endif
+ &node_state_attr[N_CPU].attr.attr,
NULL
};
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 72c776f2a1f5..c0b8df38402b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
+#include <linux/acpi.h>
#include "base.h"
#include "power/power.h"
@@ -44,7 +45,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
* be setup before the platform_notifier is called. So if a user needs to
* manipulate any relevant information in the pdev_archdata they can do:
*
- * platform_devic_alloc()
+ * platform_device_alloc()
* ... manipulate ...
* platform_device_add()
*
@@ -122,7 +123,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
/**
- * platform_get_irq - get an IRQ for a device
+ * platform_get_irq_byname - get an IRQ for a device by name
* @dev: platform device
* @name: IRQ name
*/
@@ -436,6 +437,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
+ ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
if (pdevinfo->dma_mask) {
/*
@@ -466,6 +468,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
+ ACPI_HANDLE_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
@@ -481,8 +484,16 @@ static int platform_drv_probe(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_attach(_dev, true);
- return drv->probe(dev);
+ ret = drv->probe(dev);
+ if (ret && ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
+
+ return ret;
}
static int platform_drv_probe_fail(struct device *_dev)
@@ -494,8 +505,13 @@ static int platform_drv_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
- return drv->remove(dev);
+ ret = drv->remove(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
+
+ return ret;
}
static void platform_drv_shutdown(struct device *_dev)
@@ -504,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
}
/**
@@ -709,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then try ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
/* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
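[Editor's note] Since platform_match() now also consults acpi_driver_match_device(), a platform driver can bind against ACPI-enumerated devices through its match table. A sketch under the assumption that the driver core's acpi_match_table field from this series is available; the _HID string is a placeholder:

	static const struct acpi_device_id foo_acpi_match[] = {
		{ "INTXYZ0", 0 },	/* placeholder ACPI _HID */
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.driver	= {
			.name			= "foo",
			.acpi_match_table	= ACPI_PTR(foo_acpi_match),
		},
	};
	module_platform_driver(foo_driver);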
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eb78e9640c4a..9d8fde709390 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
+ clk_disable_unprepare(ce->clk);
if (ce->status >= PCE_STATUS_ACQUIRED)
clk_put(ce->clk);
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_enable(clk);
+ clk_prepare_enable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_disable(clk);
+ clk_disable_unprepare(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96b71b6536d6..acc3a8ded29d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
return -EBUSY;
not_suspended = 0;
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(pdd->dev,
+ PM_QOS_FLAG_NO_POWER_OFF
+ | PM_QOS_FLAG_REMOTE_WAKEUP);
+ if (stat > PM_QOS_FLAGS_NONE)
+ return -EBUSY;
+
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
|| pdd->dev->power.irq_safe))
not_suspended++;
+ }
if (not_suspended > genpd->in_progress)
return -EBUSY;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a3c1404c7933..2b7f77d3fcb0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -513,6 +513,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
Out:
TRACE_RESUME(error);
+
+ pm_runtime_enable(dev);
return error;
}
@@ -589,8 +591,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Unlock;
- pm_runtime_enable(dev);
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -930,6 +930,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ __pm_runtime_disable(dev, false);
+
if (dev->power.syscore)
return 0;
@@ -1133,11 +1135,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
-
if (error)
async_error = error;
- else if (dev->power.is_suspended)
- __pm_runtime_disable(dev, false);
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d9468642fc41..50b2831e027d 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -23,6 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>
+#include <linux/export.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -65,6 +66,7 @@ struct opp {
unsigned long u_volt;
struct device_opp *dev_opp;
+ struct rcu_head head;
};
/**
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
+EXPORT_SYMBOL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
+EXPORT_SYMBOL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
+EXPORT_SYMBOL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev)
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
+ * using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Note: available is a modifier for the search. if available=true, then the
* match is for exact matching frequency and is available in the stored OPP
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available) {
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
- synchronize_rcu();
+ kfree_rcu(opp, head);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);
- /* clean up old opp */
- new_opp = opp;
- goto out;
+ return 0;
unlock:
mutex_unlock(&dev_opp_list_lock);
-out:
kfree(new_opp);
return r;
}
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
+EXPORT_SYMBOL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
+EXPORT_SYMBOL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
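[Editor's note] The added EXPORT_SYMBOLs make the OPP lookup helpers usable from modular cpufreq/devfreq drivers. A hedged sketch of the usual lookup pattern, reflecting the new -ERANGE vs. -ENODEV distinction (all names local to the example):

	unsigned long freq = target_freq;
	unsigned long volt;
	struct opp *opp;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, &freq);	/* -ERANGE: no match, -ENODEV: no table */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = opp_get_voltage(opp);
	rcu_read_unlock();
	/* program the regulator to volt and the clock to freq */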
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 0dbfdf4419af..b16686a0a5a2 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add(struct device *dev);
-extern void pm_qos_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
#else /* CONFIG_PM */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fbbd4ed2edf2..d21349544ce5 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -40,6 +40,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/pm_runtime.h>
#include "power.h"
@@ -48,6 +49,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ struct pm_qos_flags *pqf;
+ s32 val;
+
+ if (!qos)
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ pqf = &qos->flags;
+ if (list_empty(&pqf->list))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ val = pqf->effective_flags & mask;
+ if (val)
+ return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+ return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ unsigned long irqflags;
+ enum pm_qos_flags_status ret;
+
+ spin_lock_irqsave(&dev->power.lock, irqflags);
+ ret = __dev_pm_qos_flags(dev, mask);
+ spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+ return ret;
+}
+
+/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
@@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c = dev->power.constraints;
-
- return c ? pm_qos_read_value(c) : 0;
+ return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
}
/**
@@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev)
return ret;
}
-/*
- * apply_constraint
- * @req: constraint request to apply
- * @action: action to perform add/update/remove, of type enum pm_qos_req_action
- * @value: defines the qos request
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
* code and if needed call the per-device and the global notification
* callbacks
*/
static int apply_constraint(struct dev_pm_qos_request *req,
- enum pm_qos_req_action action, int value)
+ enum pm_qos_req_action action, s32 value)
{
- int ret, curr_value;
-
- ret = pm_qos_update_target(req->dev->power.constraints,
- &req->node, action, value);
+ struct dev_pm_qos *qos = req->dev->power.qos;
+ int ret;
- if (ret) {
- /* Call the global callbacks if needed */
- curr_value = pm_qos_read_value(req->dev->power.constraints);
- blocking_notifier_call_chain(&dev_pm_notifiers,
- (unsigned long)curr_value,
- req);
+ switch(req->type) {
+ case DEV_PM_QOS_LATENCY:
+ ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
+ action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)value,
+ req);
+ }
+ break;
+ case DEV_PM_QOS_FLAGS:
+ ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
}
return ret;
@@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req,
*/
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct pm_qos_constraints *c;
struct blocking_notifier_head *n;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
+ qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+ if (!qos)
return -ENOMEM;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
- kfree(c);
+ kfree(qos);
return -ENOMEM;
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
+ c = &qos->latency;
plist_head_init(&c->list);
c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ INIT_LIST_HEAD(&qos->flags.list);
+
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = c;
+ dev->power.qos = qos;
spin_unlock_irq(&dev->power.lock);
return 0;
@@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
void dev_pm_qos_constraints_init(struct device *dev)
{
mutex_lock(&dev_pm_qos_mtx);
- dev->power.constraints = NULL;
+ dev->power.qos = NULL;
dev->power.power_state = PMSG_ON;
mutex_unlock(&dev_pm_qos_mtx);
}
@@ -164,24 +220,28 @@ void dev_pm_qos_constraints_init(struct device *dev)
*/
void dev_pm_qos_constraints_destroy(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
+ struct pm_qos_flags *f;
/*
- * If the device's PM QoS resume latency limit has been exposed to user
- * space, it has to be hidden at this point.
+ * If the device's PM QoS resume latency limit or PM QoS flags have been
+ * exposed to user space, they have to be hidden at this point.
*/
dev_pm_qos_hide_latency_limit(dev);
+ dev_pm_qos_hide_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
- c = dev->power.constraints;
- if (!c)
+ qos = dev->power.qos;
+ if (!qos)
goto out;
- /* Flush the constraints list for the device */
- plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /* Flush the constraints lists for the device. */
+ c = &qos->latency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
* callbacks if needed
@@ -189,13 +249,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ f = &qos->flags;
+ list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = NULL;
+ dev->power.qos = NULL;
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
- kfree(c);
+ kfree(qos);
out:
mutex_unlock(&dev_pm_qos_mtx);
@@ -205,6 +270,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
* @req: pointer to a preallocated handle
+ * @type: type of the request
* @value: defines the qos request
*
* This function inserts a new entry in the device constraints list of
@@ -218,9 +284,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
* to allocate for data structures, -ENODEV if the device has just been removed
* from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
- s32 value)
+ enum dev_pm_qos_req_type type, s32 value)
{
int ret = 0;
@@ -235,7 +304,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints) {
+ if (!dev->power.qos) {
if (dev->power.power_state.event == PM_EVENT_INVALID) {
/* The device has been removed from the system. */
req->dev = NULL;
@@ -251,8 +320,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
}
}
- if (!ret)
+ if (!ret) {
+ req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
out:
mutex_unlock(&dev_pm_qos_mtx);
@@ -262,6 +333,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ s32 curr_value;
+ int ret = 0;
+
+ if (!req->dev->power.qos)
+ return -ENODEV;
+
+ switch(req->type) {
+ case DEV_PM_QOS_LATENCY:
+ curr_value = req->data.pnode.prio;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ curr_value = req->data.flr.flags;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (curr_value != new_value)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+ return ret;
+}
+
+/**
* dev_pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a dev_pm_qos request to use
* @new_value: defines the qos request
@@ -275,11 +377,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
-int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
- s32 new_value)
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
- int ret = 0;
+ int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
@@ -289,17 +393,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -EINVAL;
mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- if (new_value != req->node.prio)
- ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
- new_value);
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
+ ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
+
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
@@ -315,6 +411,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
@@ -329,7 +428,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
mutex_lock(&dev_pm_qos_mtx);
- if (req->dev->power.constraints) {
+ if (req->dev->power.qos) {
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
@@ -362,13 +461,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints)
+ if (!dev->power.qos)
ret = dev->power.power_state.event != PM_EVENT_INVALID ?
dev_pm_qos_constraints_allocate(dev) : -ENODEV;
if (!ret)
ret = blocking_notifier_chain_register(
- dev->power.constraints->notifiers, notifier);
+ dev->power.qos->latency.notifiers, notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -393,9 +492,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
+ if (dev->power.qos)
retval = blocking_notifier_chain_unregister(
- dev->power.constraints->notifiers,
+ dev->power.qos->latency.notifiers,
notifier);
mutex_unlock(&dev_pm_qos_mtx);
@@ -443,26 +542,36 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req, s32 value)
{
struct device *ancestor = dev->parent;
- int error = -ENODEV;
+ int ret = -ENODEV;
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
if (ancestor)
- error = dev_pm_qos_add_request(ancestor, req, value);
+ ret = dev_pm_qos_add_request(ancestor, req,
+ DEV_PM_QOS_LATENCY, value);
- if (error < 0)
+ if (ret < 0)
req->dev = NULL;
- return error;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
#ifdef CONFIG_PM_RUNTIME
-static void __dev_pm_qos_drop_user_request(struct device *dev)
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
{
- dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = NULL;
+ switch(type) {
+ case DEV_PM_QOS_LATENCY:
+ dev_pm_qos_remove_request(dev->power.qos->latency_req);
+ dev->power.qos->latency_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ dev_pm_qos_remove_request(dev->power.qos->flags_req);
+ dev->power.qos->flags_req = NULL;
+ break;
+ }
}
/**
@@ -478,21 +587,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
- if (dev->power.pq_req)
+ if (dev->power.qos && dev->power.qos->latency_req)
return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
if (ret < 0)
return ret;
- dev->power.pq_req = req;
- ret = pm_qos_sysfs_add(dev);
+ dev->power.qos->latency_req = req;
+ ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
return ret;
}
@@ -504,10 +613,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (dev->power.pq_req) {
- pm_qos_sysfs_remove(dev);
- __dev_pm_qos_drop_user_request(dev);
+ if (dev->power.qos && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev))
+ return -EINVAL;
+
+ if (dev->power.qos && dev->power.qos->flags_req)
+ return -EEXIST;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(dev);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+ if (ret < 0)
+ goto fail;
+
+ dev->power.qos->flags_req = req;
+ ret = pm_qos_sysfs_add_flags(dev);
+ if (ret)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+fail:
+ pm_runtime_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (dev->power.qos && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
+ pm_runtime_get_sync(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ pm_runtime_put(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+ s32 value;
+ int ret;
+
+ if (!dev->power.qos || !dev->power.qos->flags_req)
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ value = dev_pm_qos_requested_flags(dev);
+ if (set)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+
+ return ret;
+}
#endif /* CONFIG_PM_RUNTIME */
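[Editor's note] A hedged sketch of a kernel-side user of the new request type, matching the dev_pm_qos_add_request() signature introduced above (the device pointer and where the request lives are up to the caller):

	struct dev_pm_qos_request req;
	int ret;

	ret = dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_FLAGS,
				     PM_QOS_FLAG_NO_POWER_OFF);
	if (ret < 0)
		return ret;
	/* ... genpd will refuse to power the domain off while this is held ... */
	dev_pm_qos_remove_request(&req);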
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index b91dc6f1e914..50d16e3cb0a9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
}
static ssize_t pm_qos_latency_store(struct device *dev,
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_no_power_off, 0644,
+ pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+
+static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_REMOTE_WAKEUP));
+}
+
+static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
+ pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_attrs[] = {
+static struct attribute *pm_qos_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_attr_group = {
+static struct attribute_group pm_qos_latency_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_attrs,
+ .attrs = pm_qos_latency_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_no_power_off.attr,
+ &dev_attr_pm_qos_remote_wakeup.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_flags_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_flags_attrs,
};
int dpm_sysfs_add(struct device *dev)
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add(struct device *dev)
+int pm_qos_sysfs_add_latency(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
-void pm_qos_sysfs_remove(struct device *dev)
+void pm_qos_sysfs_remove_flags(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 80f9ab9c3aa4..401d1919635a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -15,10 +15,18 @@
#include <linux/regmap.h>
#include <linux/fs.h>
+#include <linux/list.h>
struct regmap;
struct regcache_ops;
+struct regmap_debugfs_off_cache {
+ struct list_head list;
+ off_t min;
+ off_t max;
+ unsigned int base_reg;
+};
+
struct regmap_format {
size_t buf_size;
size_t reg_bytes;
@@ -31,14 +39,12 @@ struct regmap_format {
unsigned int (*parse_val)(void *buf);
};
-typedef void (*regmap_lock)(struct regmap *map);
-typedef void (*regmap_unlock)(struct regmap *map);
-
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
regmap_lock lock;
regmap_unlock unlock;
+ void *lock_arg; /* This is passed to lock/unlock functions */
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
@@ -50,6 +56,12 @@ struct regmap {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
const char *debugfs_name;
+
+ unsigned int debugfs_reg_len;
+ unsigned int debugfs_val_len;
+ unsigned int debugfs_tot_len;
+
+ struct list_head debugfs_off_cache;
#endif
unsigned int max_register;
@@ -57,6 +69,10 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
u8 read_flag_mask;
u8 write_flag_mask;
@@ -120,6 +136,8 @@ int _regmap_write(struct regmap *map, unsigned int reg,
struct regmap_range_node {
struct rb_node node;
+ const char *name;
+ struct regmap *map;
unsigned int range_min;
unsigned int range_max;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bb1ff175b962..d9a6c94ce423 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,17 +56,107 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
-static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
- int reg_len, val_len, tot_len;
- size_t buf_pos = 0;
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
+/*
+ * Work out where the start offset maps into register numbers, bearing
+ * in mind that we suppress hidden registers.
+ */
+static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
+ unsigned int base,
+ loff_t from,
+ loff_t *pos)
+{
+ struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
+ unsigned int i, ret;
+
+ /*
+ * If we don't have a cache build one so we don't have to do a
+ * linear scan each time.
+ */
+ if (list_empty(&map->debugfs_off_cache)) {
+ for (i = base; i <= map->max_register; i += map->reg_stride) {
+ /* Skip unprinted registers, closing off cache entry */
+ if (!regmap_readable(map, i) ||
+ regmap_precious(map, i)) {
+ if (c) {
+ c->max = p - 1;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ c = NULL;
+ }
+
+ continue;
+ }
+
+ /* No cache entry? Start a new one */
+ if (!c) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ return base;
+ }
+ c->min = p;
+ c->base_reg = i;
+ }
+
+ p += map->debugfs_tot_len;
+ }
+ }
+
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ if (list_empty(&map->debugfs_off_cache)) {
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ return base;
+ }
+
+ /* Find the relevant block */
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (from >= c->min && from <= c->max) {
+ *pos = c->min;
+ return c->base_reg;
+ }
+
+ *pos = c->min;
+ ret = c->base_reg;
+ }
+
+ return ret;
+}
+
+static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ unsigned int to, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ size_t buf_pos = 0;
+ loff_t p = *ppos;
ssize_t ret;
int i;
- struct regmap *map = file->private_data;
char *buf;
- unsigned int val;
+ unsigned int val, start_reg;
if (*ppos < 0 || !count)
return -EINVAL;
@@ -76,11 +166,18 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
return -ENOMEM;
/* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
- val_len = 2 * map->format.val_bytes;
- tot_len = reg_len + val_len + 3; /* : \n */
+ if (!map->debugfs_tot_len) {
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+ buf, count);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+ }
- for (i = 0; i <= map->max_register; i += map->reg_stride) {
+ /* Work out which register we're starting at */
+ start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
+
+ for (i = start_reg; i <= to; i += map->reg_stride) {
if (!regmap_readable(map, i))
continue;
@@ -90,26 +187,27 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + 1 + map->debugfs_tot_len >= count)
break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
- reg_len, i);
- buf_pos += reg_len + 2;
+ map->debugfs_reg_len, i - from);
+ buf_pos += map->debugfs_reg_len + 2;
/* Format the value, write all X if we can't read */
ret = regmap_read(map, i, &val);
if (ret == 0)
snprintf(buf + buf_pos, count - buf_pos,
- "%.*x", val_len, val);
+ "%.*x", map->debugfs_val_len, val);
else
- memset(buf + buf_pos, 'X', val_len);
+ memset(buf + buf_pos, 'X',
+ map->debugfs_val_len);
buf_pos += 2 * map->format.val_bytes;
buf[buf_pos++] = '\n';
}
- p += tot_len;
+ p += map->debugfs_tot_len;
}
ret = buf_pos;
@@ -126,6 +224,15 @@ out:
return ret;
}
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+
+ return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+ count, ppos);
+}
+
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
@@ -174,6 +281,22 @@ static const struct file_operations regmap_map_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap_range_node *range = file->private_data;
+ struct regmap *map = range->map;
+
+ return regmap_read_debugfs(map, range->range_min, range->range_max,
+ user_buf, count, ppos);
+}
+
+static const struct file_operations regmap_range_fops = {
+ .open = simple_open,
+ .read = regmap_range_read_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_access_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
@@ -244,6 +367,11 @@ static const struct file_operations regmap_access_fops = {
void regmap_debugfs_init(struct regmap *map, const char *name)
{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+
+ INIT_LIST_HEAD(&map->debugfs_off_cache);
+
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
dev_name(map->dev), name);
@@ -276,11 +404,24 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_bool("cache_bypass", 0400, map->debugfs,
&map->cache_bypass);
}
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+
+ if (range_node->name)
+ debugfs_create_file(range_node->name, 0400,
+ map->debugfs, range_node,
+ &regmap_range_fops);
+
+ next = rb_next(&range_node->node);
+ }
}
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
+ regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}
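[Editor's note] The per-range debugfs files are created from range_node->name, so a named indirect-access window shows up as its own register dump. A sketch, assuming the regmap_range_cfg layout used by this series (all values invented for illustration):

	static const struct regmap_range_cfg foo_ranges[] = {
		{
			.name		= "dsp",	/* becomes a debugfs file */
			.range_min	= 0x100,
			.range_max	= 0x1ff,
			.selector_reg	= 0x00,
			.selector_mask	= 0x0f,
			.selector_shift	= 0,
			.window_start	= 0x10,
			.window_len	= 0x10,
		},
	};

	static const struct regmap_config foo_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0x1ff,
		.ranges		= foo_ranges,
		.num_ranges	= ARRAY_SIZE(foo_ranges),
	};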
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5b6b1d8e6cc0..5972ad958544 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -458,3 +458,22 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
+
+/**
+ * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
+ *
+ * Useful for drivers to request their own IRQs and for integration
+ * with subsystems. For ease of integration NULL is accepted as a
+ * domain, allowing devices to just call this even if no domain is
+ * allocated.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
+{
+ if (data)
+ return data->domain;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
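The new export lets a client driver translate one of the chip's hardware interrupt lines into a Linux virq itself instead of going through regmap_irq_get_virq(). A minimal sketch of such a call site, assuming the matching prototype is available from <linux/regmap.h>; the chip data pointer and the hwirq number 3 are placeholders, not anything defined by this patch:

#include <linux/irqdomain.h>
#include <linux/regmap.h>

/* Sketch only: map hardware IRQ 3 of the parent chip to a Linux virq. */
static int example_request_child_irq(struct regmap_irq_chip_data *chip_irq_data)
{
	struct irq_domain *domain = regmap_irq_get_domain(chip_irq_data);
	int virq;

	if (!domain)
		return -ENODEV;		/* no IRQ support on this board */

	virq = irq_create_mapping(domain, 3);
	if (!virq)
		return -EINVAL;

	return virq;			/* pass to request_threaded_irq() etc. */
}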
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 52069d29ff12..f00b059c057a 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -34,6 +34,36 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges)
+{
+ const struct regmap_range *r;
+ int i;
+
+ for (i = 0, r = ranges; i < nranges; i++, r++)
+ if (regmap_reg_in_range(reg, r))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
+
+static bool _regmap_check_range_table(struct regmap *map,
+ unsigned int reg,
+ const struct regmap_access_table *table)
+{
+ /* Check "no ranges" first */
+ if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
+ return false;
+
+ /* In case zero "yes ranges" are supplied, any reg is OK */
+ if (!table->n_yes_ranges)
+ return true;
+
+ return regmap_reg_in_ranges(reg, table->yes_ranges,
+ table->n_yes_ranges);
+}
+
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
if (map->max_register && reg > map->max_register)
@@ -42,6 +72,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
if (map->writeable_reg)
return map->writeable_reg(map->dev, reg);
+ if (map->wr_table)
+ return _regmap_check_range_table(map, reg, map->wr_table);
+
return true;
}
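Besides the existing per-register callbacks, access rights can now be described declaratively through regmap_access_table. A hedged sketch of what a driver-side write table might look like, with invented register addresses and assuming struct regmap_range carries range_min/range_max as in the mainline header; note that no_ranges wins over yes_ranges because it is checked first:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Illustrative only: registers 0x00-0x0f and 0x20-0x2f are writeable,
 * except 0x24, which the no_ranges entry excludes. */
static const struct regmap_range example_wr_yes_ranges[] = {
	{ .range_min = 0x00, .range_max = 0x0f },
	{ .range_min = 0x20, .range_max = 0x2f },
};

static const struct regmap_range example_wr_no_ranges[] = {
	{ .range_min = 0x24, .range_max = 0x24 },
};

static const struct regmap_access_table example_wr_table = {
	.yes_ranges   = example_wr_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(example_wr_yes_ranges),
	.no_ranges    = example_wr_no_ranges,
	.n_no_ranges  = ARRAY_SIZE(example_wr_no_ranges),
};

/* In the regmap_config this would be wired up as .wr_table = &example_wr_table */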
@@ -56,6 +89,9 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (map->readable_reg)
return map->readable_reg(map->dev, reg);
+ if (map->rd_table)
+ return _regmap_check_range_table(map, reg, map->rd_table);
+
return true;
}
@@ -67,6 +103,9 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
if (map->volatile_reg)
return map->volatile_reg(map->dev, reg);
+ if (map->volatile_table)
+ return _regmap_check_range_table(map, reg, map->volatile_table);
+
return true;
}
@@ -78,11 +117,14 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
if (map->precious_reg)
return map->precious_reg(map->dev, reg);
+ if (map->precious_table)
+ return _regmap_check_range_table(map, reg, map->precious_table);
+
return false;
}
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
- unsigned int num)
+ size_t num)
{
unsigned int i;
@@ -214,23 +256,27 @@ static unsigned int regmap_parse_32_native(void *buf)
return *(u32 *)buf;
}
-static void regmap_lock_mutex(struct regmap *map)
+static void regmap_lock_mutex(void *__map)
{
+ struct regmap *map = __map;
mutex_lock(&map->mutex);
}
-static void regmap_unlock_mutex(struct regmap *map)
+static void regmap_unlock_mutex(void *__map)
{
+ struct regmap *map = __map;
mutex_unlock(&map->mutex);
}
-static void regmap_lock_spinlock(struct regmap *map)
+static void regmap_lock_spinlock(void *__map)
{
+ struct regmap *map = __map;
spin_lock(&map->spinlock);
}
-static void regmap_unlock_spinlock(struct regmap *map)
+static void regmap_unlock_spinlock(void *__map)
{
+ struct regmap *map = __map;
spin_unlock(&map->spinlock);
}
@@ -335,14 +381,21 @@ struct regmap *regmap_init(struct device *dev,
goto err;
}
- if (bus->fast_io) {
- spin_lock_init(&map->spinlock);
- map->lock = regmap_lock_spinlock;
- map->unlock = regmap_unlock_spinlock;
+ if (config->lock && config->unlock) {
+ map->lock = config->lock;
+ map->unlock = config->unlock;
+ map->lock_arg = config->lock_arg;
} else {
- mutex_init(&map->mutex);
- map->lock = regmap_lock_mutex;
- map->unlock = regmap_unlock_mutex;
+ if (bus->fast_io) {
+ spin_lock_init(&map->spinlock);
+ map->lock = regmap_lock_spinlock;
+ map->unlock = regmap_unlock_spinlock;
+ } else {
+ mutex_init(&map->mutex);
+ map->lock = regmap_lock_mutex;
+ map->unlock = regmap_unlock_mutex;
+ }
+ map->lock_arg = map;
}
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
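With config->lock/unlock honoured ahead of the built-in spinlock/mutex choice, a driver that already serialises access to the bus elsewhere can plug its own primitives in. A rough sketch under the assumption that the caller owns an existing mutex; all names here are invented:

#include <linux/mutex.h>
#include <linux/regmap.h>

struct my_chip {			/* hypothetical client driver state */
	struct mutex io_mutex;
	struct regmap *map;
};

/* Reuse the driver's own mutex as the regmap lock. */
static void example_regmap_lock(void *arg)
{
	struct my_chip *chip = arg;

	mutex_lock(&chip->io_mutex);
}

static void example_regmap_unlock(void *arg)
{
	struct my_chip *chip = arg;

	mutex_unlock(&chip->io_mutex);
}

static const struct regmap_config example_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock     = example_regmap_lock,
	.unlock   = example_regmap_unlock,
	/* .lock_arg would be set to the struct my_chip instance at runtime */
};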
@@ -359,6 +412,10 @@ struct regmap *regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->wr_table = config->wr_table;
+ map->rd_table = config->rd_table;
+ map->volatile_table = config->volatile_table;
+ map->precious_table = config->precious_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -519,20 +576,38 @@ struct regmap *regmap_init(struct device *dev,
}
map->range_tree = RB_ROOT;
- for (i = 0; i < config->n_ranges; i++) {
+ for (i = 0; i < config->num_ranges; i++) {
const struct regmap_range_cfg *range_cfg = &config->ranges[i];
struct regmap_range_node *new;
/* Sanity check */
- if (range_cfg->range_max < range_cfg->range_min ||
- range_cfg->range_max > map->max_register ||
- range_cfg->selector_reg > map->max_register ||
- range_cfg->window_len == 0)
+ if (range_cfg->range_max < range_cfg->range_min) {
+ dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ range_cfg->range_max, range_cfg->range_min);
+ goto err_range;
+ }
+
+ if (range_cfg->range_max > map->max_register) {
+ dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ range_cfg->range_max, map->max_register);
+ goto err_range;
+ }
+
+ if (range_cfg->selector_reg > map->max_register) {
+ dev_err(map->dev,
+ "Invalid range %d: selector out of map\n", i);
+ goto err_range;
+ }
+
+ if (range_cfg->window_len == 0) {
+ dev_err(map->dev, "Invalid range %d: window_len 0\n",
+ i);
goto err_range;
+ }
		/* Make sure that this register range has no selector
		   or data window within its boundary */

- for (j = 0; j < config->n_ranges; j++) {
+ for (j = 0; j < config->num_ranges; j++) {
unsigned sel_reg = config->ranges[j].selector_reg;
unsigned win_min = config->ranges[j].window_start;
unsigned win_max = win_min +
@@ -540,11 +615,17 @@ struct regmap *regmap_init(struct device *dev,
if (range_cfg->range_min <= sel_reg &&
sel_reg <= range_cfg->range_max) {
+ dev_err(map->dev,
+ "Range %d: selector for %d in window\n",
+ i, j);
goto err_range;
}
if (!(win_max < range_cfg->range_min ||
win_min > range_cfg->range_max)) {
+ dev_err(map->dev,
+ "Range %d: window for %d in window\n",
+ i, j);
goto err_range;
}
}
@@ -555,6 +636,8 @@ struct regmap *regmap_init(struct device *dev,
goto err_range;
}
+ new->map = map;
+ new->name = range_cfg->name;
new->range_min = range_cfg->range_min;
new->range_max = range_cfg->range_max;
new->selector_reg = range_cfg->selector_reg;
@@ -564,6 +647,7 @@ struct regmap *regmap_init(struct device *dev,
new->window_len = range_cfg->window_len;
if (_regmap_range_add(map, new) == false) {
+ dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
}
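The stricter checks above spell out what a valid paged range looks like: the virtual range must fit below max_register, the selector register must itself be addressable, and neither the selector nor the data window may fall inside any range. A made-up single-window configuration that satisfies all of them:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Invented numbers: a 0x100-register virtual range at 0x100-0x1ff reached
 * through a 0x20-register window at 0x40, with the page selected via
 * bits [3:0] of register 0x3f. */
static const struct regmap_range_cfg example_ranges[] = {
	{
		.name           = "page-a",
		.range_min      = 0x100,
		.range_max      = 0x1ff,
		.selector_reg   = 0x3f,
		.selector_mask  = 0x0f,
		.selector_shift = 0,
		.window_start   = 0x40,
		.window_len     = 0x20,
	},
};

/* In the regmap_config:
 *	.ranges       = example_ranges,
 *	.num_ranges   = ARRAY_SIZE(example_ranges),
 *	.max_register = 0x1ff,
 */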
@@ -579,7 +663,7 @@ struct regmap *regmap_init(struct device *dev,
}
ret = regcache_init(map, config);
- if (ret < 0)
+ if (ret != 0)
goto err_range;
regmap_debugfs_init(map, config->name);
@@ -738,59 +822,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(dev_get_regmap);
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+ struct regmap_range_node *range,
unsigned int val_num)
{
- struct regmap_range_node *range;
void *orig_work_buf;
unsigned int win_offset;
unsigned int win_page;
bool page_chg;
int ret;
- range = _regmap_range_lookup(map, *reg);
- if (range) {
- win_offset = (*reg - range->range_min) % range->window_len;
- win_page = (*reg - range->range_min) / range->window_len;
-
- if (val_num > 1) {
- /* Bulk write shouldn't cross range boundary */
- if (*reg + val_num - 1 > range->range_max)
- return -EINVAL;
+ win_offset = (*reg - range->range_min) % range->window_len;
+ win_page = (*reg - range->range_min) / range->window_len;
- /* ... or single page boundary */
- if (val_num > range->window_len - win_offset)
- return -EINVAL;
- }
+ if (val_num > 1) {
+ /* Bulk write shouldn't cross range boundary */
+ if (*reg + val_num - 1 > range->range_max)
+ return -EINVAL;
- /* It is possible to have selector register inside data window.
- In that case, selector register is located on every page and
- it needs no page switching, when accessed alone. */
- if (val_num > 1 ||
- range->window_start + win_offset != range->selector_reg) {
- /* Use separate work_buf during page switching */
- orig_work_buf = map->work_buf;
- map->work_buf = map->selector_work_buf;
+ /* ... or single page boundary */
+ if (val_num > range->window_len - win_offset)
+ return -EINVAL;
+ }
- ret = _regmap_update_bits(map, range->selector_reg,
- range->selector_mask,
- win_page << range->selector_shift,
- &page_chg);
+	/* It is possible to have the selector register inside the data
+	   window. In that case, the selector register is present on every
+	   page and needs no page switching when accessed alone. */
+ if (val_num > 1 ||
+ range->window_start + win_offset != range->selector_reg) {
+ /* Use separate work_buf during page switching */
+ orig_work_buf = map->work_buf;
+ map->work_buf = map->selector_work_buf;
- map->work_buf = orig_work_buf;
+ ret = _regmap_update_bits(map, range->selector_reg,
+ range->selector_mask,
+ win_page << range->selector_shift,
+ &page_chg);
- if (ret < 0)
- return ret;
- }
+ map->work_buf = orig_work_buf;
- *reg = range->window_start + win_offset;
+ if (ret != 0)
+ return ret;
}
+ *reg = range->window_start + win_offset;
+
return 0;
}
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
+ struct regmap_range_node *range;
u8 *u8 = map->work_buf;
void *buf;
int ret = -ENOTSUPP;
@@ -814,7 +896,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
ival);
if (ret) {
dev_err(map->dev,
- "Error in caching of register: %u ret: %d\n",
+ "Error in caching of register: %x ret: %d\n",
reg + i, ret);
return ret;
}
@@ -825,9 +907,35 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
}
- ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
- if (ret < 0)
- return ret;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ int val_num = val_len / map->format.val_bytes;
+ int win_offset = (reg - range->range_min) % range->window_len;
+ int win_residue = range->window_len - win_offset;
+
+ /* If the write goes beyond the end of the window split it */
+ while (val_num > win_residue) {
+ dev_dbg(map->dev, "Writing window %d/%zu\n",
+ win_residue, val_len / map->format.val_bytes);
+ ret = _regmap_raw_write(map, reg, val, win_residue *
+ map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+
+ reg += win_residue;
+ val_num -= win_residue;
+ val += win_residue * map->format.val_bytes;
+ val_len -= win_residue * map->format.val_bytes;
+
+ win_offset = (reg - range->range_min) %
+ range->window_len;
+ win_residue = range->window_len - win_offset;
+ }
+
+ ret = _regmap_select_page(map, &reg, range, val_num);
+ if (ret != 0)
+ return ret;
+ }
map->format.format_reg(map->work_buf, reg, map->reg_shift);
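To make the splitting above concrete: with a window of 0x20 registers (as in the example range configuration earlier), a raw write of 0x30 values starting 0x10 registers into a page is issued as a 0x10-value chunk and then a final 0x20-value chunk through the normal single-page path. A throwaway userspace C illustration of the same arithmetic, nothing regmap-specific:

#include <stdio.h>

int main(void)
{
	unsigned int window_len = 0x20;	/* registers per page (invented) */
	unsigned int offset     = 0x10;	/* start offset within the page */
	unsigned int val_num    = 0x30;	/* registers to write */

	/* Mirror the while (val_num > win_residue) loop above. */
	while (val_num > window_len - offset) {
		unsigned int chunk = window_len - offset;

		printf("write chunk of %#x values\n", chunk);
		val_num -= chunk;
		offset = 0;	/* next chunk starts on a page boundary */
	}
	printf("write final %#x values via the normal path\n", val_num);
	return 0;
}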
@@ -876,6 +984,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
+ struct regmap_range_node *range;
int ret;
BUG_ON(!map->format.format_write && !map->format.format_val);
@@ -897,9 +1006,12 @@ int _regmap_write(struct regmap *map, unsigned int reg,
trace_regmap_reg_write(map->dev, reg, val);
if (map->format.format_write) {
- ret = _regmap_select_page(map, &reg, 1);
- if (ret < 0)
- return ret;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
map->format.format_write(map, reg, val);
@@ -939,11 +1051,11 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_write(map, reg, val);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -975,11 +1087,11 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_raw_write(map, reg, val, val_len);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -994,7 +1106,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
* @val_count: Number of registers to write
*
* This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in a single transfer or multiple transfers.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
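A minimal, hypothetical call site for this API (the register address and values are invented); val must already be an array of the device's native value width, here 16-bit:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Sketch: write four consecutive 16-bit registers starting at 0x10. */
static int example_bulk_write(struct regmap *map)
{
	const u16 vals[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };

	return regmap_bulk_write(map, 0x10, vals, ARRAY_SIZE(vals));
}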
@@ -1011,7 +1123,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
	/* No formatting is required if val_bytes is 1 */
if (val_bytes == 1) {
@@ -1047,7 +1159,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
kfree(wval);
out:
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -1055,12 +1167,17 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
+ struct regmap_range_node *range;
u8 *u8 = map->work_buf;
int ret;
- ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
- if (ret < 0)
- return ret;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range,
+ val_len / map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+ }
map->format.format_reg(map->work_buf, reg, map->reg_shift);
@@ -1137,11 +1254,11 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_read(map, reg, val);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -1171,7 +1288,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
@@ -1193,7 +1310,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
}
out:
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -1300,9 +1417,9 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
bool change;
int ret;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_update_bits(map, reg, mask, val, &change);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -1326,9 +1443,9 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
{
int ret;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_update_bits(map, reg, mask, val, change);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
@@ -1357,7 +1474,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
if (map->patch)
return -EBUSY;
- map->lock(map);
+ map->lock(map->lock_arg);
bypass = map->cache_bypass;
@@ -1385,7 +1502,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
out:
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index a533af218368..8b4221cfd118 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -65,6 +65,14 @@ config BCMA_DRIVER_GMAC_CMN
If unsure, say N
+config BCMA_DRIVER_GPIO
+ bool "BCMA GPIO driver"
+ depends on BCMA && GPIOLIB
+ help
+ Driver to provide access to the GPIO pins of the bcma bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 8ad42d41b2f2..734b32f09c0a 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -6,6 +6,7 @@ bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
+bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 169fc58427d3..cb0c45488572 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -22,7 +22,7 @@
struct bcma_bus;
/* main.c */
-int __devinit bcma_bus_register(struct bcma_bus *bus);
+int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
@@ -48,8 +48,8 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_pmu.c */
-u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
-u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_SFLASH
/* driver_chipcommon_sflash.c */
@@ -84,9 +84,26 @@ extern void __exit bcma_host_pci_exit(void);
/* driver_pci.c */
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
+
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
-bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
-void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+/* driver_gpio.c */
+int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ return -ENOTSUPP;
+}
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return 0;
+}
+#endif /* CONFIG_BCMA_DRIVER_GPIO */
+
#endif
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index a4c3ebcc4c86..e461ad25fda4 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -4,12 +4,15 @@
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
+#include <linux/bcm47xx_wdt.h>
#include <linux/export.h>
+#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
@@ -22,20 +25,119 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
return value;
}
-void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+static u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
{
- u32 leddc_on = 10;
- u32 leddc_off = 90;
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ return bcma_pmu_get_alp_clock(cc);
- if (cc->setup_done)
+ return 20000000;
+}
+
+static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+ u32 nb;
+
+ if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ nb = 32;
+ else if (cc->core->id.rev < 26)
+ nb = 16;
+ else
+ nb = (cc->core->id.rev >= 37) ? 32 : 24;
+ } else {
+ nb = 28;
+ }
+ if (nb == 32)
+ return 0xffffffff;
+ else
+ return (1 << nb) - 1;
+}
+
+static u32 bcma_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
+ u32 ticks)
+{
+ struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
+
+ return bcma_chipco_watchdog_timer_set(cc, ticks);
+}
+
+static u32 bcma_chipco_watchdog_timer_set_ms_wdt(struct bcm47xx_wdt *wdt,
+ u32 ms)
+{
+ struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
+ u32 ticks;
+
+ ticks = bcma_chipco_watchdog_timer_set(cc, cc->ticks_per_ms * ms);
+ return ticks / cc->ticks_per_ms;
+}
+
+static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ /* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP clock */
+ return bcma_chipco_get_alp_clock(cc) / 4000;
+ else
+ /* based on 32KHz ILP clock */
+ return 32;
+ } else {
+ return bcma_chipco_get_alp_clock(cc) / 1000;
+ }
+}
+
+int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
+{
+ struct bcm47xx_wdt wdt = {};
+ struct platform_device *pdev;
+
+ wdt.driver_data = cc;
+ wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
+ wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
+ wdt.max_timer_ms = bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
+
+ pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
+ cc->core->bus->num, &wdt,
+ sizeof(wdt));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ cc->watchdog = pdev;
+
+ return 0;
+}
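As a worked example of the numbers involved, assuming the comments above are accurate: a PMU chipcommon with core rev 26..36 gets nb = 24, i.e. at most 2^24 - 1 ticks, and its watchdog runs from the 32 kHz ILP clock, so ticks_per_ms = 32 and max_timer_ms comes out at roughly 524287 ms. A throwaway userspace C check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int nb = 24;				/* PMU, core rev 26..36 */
	unsigned int max_ticks = (1u << nb) - 1;	/* 16777215 */
	unsigned int ticks_per_ms = 32;			/* 32 kHz ILP clock */

	printf("max watchdog timeout: %u ms\n", max_ticks / ticks_per_ms);
	return 0;
}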
+
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
+{
+ if (cc->early_setup_done)
return;
+ spin_lock_init(&cc->gpio_lock);
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
if (cc->core->id.rev >= 35)
cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_early_init(cc);
+
+ cc->early_setup_done = true;
+}
+
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+{
+ u32 leddc_on = 10;
+ u32 leddc_off = 90;
+
+ if (cc->setup_done)
+ return;
+
+ bcma_core_chipcommon_early_init(cc);
+
if (cc->core->id.rev >= 20) {
bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
@@ -56,15 +158,33 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
(leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
}
+ cc->ticks_per_ms = bcma_chipco_watchdog_ticks_per_ms(cc);
cc->setup_done = true;
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
-void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
+u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{
- /* instant NMI */
- bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
+ u32 maxt;
+ enum bcma_clkmode clkmode;
+
+ maxt = bcma_chipco_watchdog_get_max_timer(cc);
+ if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+ bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+ } else {
+ clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC;
+ bcma_core_set_clockmode(cc->core, clkmode);
+ if (ticks > maxt)
+ ticks = maxt;
+ /* instant NMI */
+ bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
+ }
+ return ticks;
}
void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
@@ -84,28 +204,97 @@ u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
+/*
+ * If the bit is set to 0, chipcommon controls this GPIO;
+ * if it is set to 1, the pin is used by some other part of the chip and not by our code.
+ */
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
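Putting that comment into practice, a driver that wants to drive, say, GPIO 5 would first hand the pin to chipcommon and then set its direction and level. A sketch using only the accessors in this file; the pin number is arbitrary:

#include <linux/bcma/bcma.h>

/* Sketch: claim GPIO 5 for chipcommon and drive it high. */
static void example_drive_gpio5(struct bcma_drv_cc *cc)
{
	u32 bit = 1 << 5;

	bcma_chipco_gpio_control(cc, bit, 0);	/* 0: chipcommon owns the pin */
	bcma_chipco_gpio_outen(cc, bit, bit);	/* make it an output */
	bcma_chipco_gpio_out(cc, bit, bit);	/* drive it high */
}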
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
- return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ unsigned long flags;
+ u32 res;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
+}
+
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 res;
+
+ if (cc->core->id.rev < 20)
+ return 0;
+
+ spin_lock_irqsave(&cc->gpio_lock, flags);
+ res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
+ spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+ return res;
}
#ifdef CONFIG_BCMA_DRIVER_MIPS
@@ -118,8 +307,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
struct bcma_serial_port *ports = cc->serial_ports;
if (ccrev >= 11 && ccrev != 15) {
- /* Fixed ALP clock */
- baud_base = bcma_pmu_alp_clock(cc);
+ baud_base = bcma_chipco_get_alp_clock(cc);
if (ccrev >= 21) {
/* Turn off UART clock before switching clocksource. */
bcma_cc_write32(cc, BCMA_CC_CORECTL,
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 9042781edec3..1f0b83e18f68 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
- cc->core->id.rev != 0x38) {
+ cc->core->id.rev != 38) {
bcma_err(bus, "NAND flash on unsupported board!\n");
return -ENOTSUPP;
}
@@ -32,6 +32,9 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
}
cc->nflash.present = true;
+ if (cc->core->id.rev == 38 &&
+ (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT))
+ cc->nflash.boot = true;
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 201faf106b3f..c62c788b3289 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -13,12 +13,13 @@
#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
+u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
@@ -144,7 +145,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
}
}
-void bcma_pmu_init(struct bcma_drv_cc *cc)
+void bcma_pmu_early_init(struct bcma_drv_cc *cc)
{
u32 pmucap;
@@ -153,7 +154,10 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
+}
+void bcma_pmu_init(struct bcma_drv_cc *cc)
+{
if (cc->pmu.rev == 1)
bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
~BCMA_CC_PMU_CTL_NOILPONW);
@@ -165,7 +169,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_pmu_workarounds(cc);
}
-u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
+u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -193,7 +197,7 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
/* Find the output of the "m" pll divider given pll controls that start with
* pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
*/
-static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+static u32 bcma_pmu_pll_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
u32 tmp, div, ndiv, p1, p2, fc;
struct bcma_bus *bus = cc->core->bus;
@@ -222,14 +226,14 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
/* Do calculation in Mhz */
- fc = bcma_pmu_alp_clock(cc) / 1000000;
+ fc = bcma_pmu_get_alp_clock(cc) / 1000000;
fc = (p1 * ndiv * fc) / p2;
/* Return clock in Hertz */
return (fc / div) * 1000000;
}
-static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
u32 tmp, ndiv, p1div, p2div;
u32 clock;
@@ -260,7 +264,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
}
/* query bus clock frequency for PMU-enabled chipcommon */
-static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -268,40 +272,42 @@ static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4716:
case BCMA_CHIP_ID_BCM4748:
case BCMA_CHIP_ID_BCM47162:
- return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
+ return bcma_pmu_pll_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
case BCMA_CHIP_ID_BCM5356:
- return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
+ return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM4749:
- return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
+ return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
case BCMA_CHIP_ID_BCM4706:
- return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
+ return bcma_pmu_pll_clock_bcm4706(cc,
+ BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
case BCMA_CHIP_ID_BCM53572:
return 75000000;
default:
- bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+ bcma_warn(bus, "No bus clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
}
return BCMA_CC_PMU_HT_CLOCK;
}
/* query cpu clock frequency for PMU-enabled chipcommon */
-u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
+u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
return 300000000;
+ /* New PMUs can have different clock for bus and CPU */
if (cc->pmu.rev >= 5) {
u32 pll;
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4706:
- return bcma_pmu_clock_bcm4706(cc,
+ return bcma_pmu_pll_clock_bcm4706(cc,
BCMA_CC_PMU4706_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_CPU);
case BCMA_CHIP_ID_BCM5356:
@@ -316,10 +322,11 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
break;
}
- return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
+ return bcma_pmu_pll_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
}
- return bcma_pmu_get_clockcontrol(cc);
+ /* On old PMUs CPU has the same clock as the bus */
+ return bcma_pmu_get_bus_clock(cc);
}
static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 2c4eec2ca5a0..1e694db4532d 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -12,7 +12,7 @@
static struct resource bcma_sflash_resource = {
.name = "bcma_sflash",
- .start = BCMA_SFLASH,
+ .start = BCMA_SOC_FLASH2,
.end = 0,
.flags = IORESOURCE_MEM | IORESOURCE_READONLY,
};
@@ -31,15 +31,42 @@ struct bcma_sflash_tbl_e {
};
static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
- { "", 0x14, 0x10000, 32, },
+ { "M25P20", 0x11, 0x10000, 4, },
+ { "M25P40", 0x12, 0x10000, 8, },
+
+ { "M25P16", 0x14, 0x10000, 32, },
+ { "M25P32", 0x15, 0x10000, 64, },
+ { "M25P64", 0x16, 0x10000, 128, },
+ { "M25FL128", 0x17, 0x10000, 256, },
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+ { "SST25WF512", 1, 0x1000, 16, },
+ { "SST25VF512", 0x48, 0x1000, 16, },
+ { "SST25WF010", 2, 0x1000, 32, },
+ { "SST25VF010", 0x49, 0x1000, 32, },
+ { "SST25WF020", 3, 0x1000, 64, },
+ { "SST25VF020", 0x43, 0x1000, 64, },
+ { "SST25WF040", 4, 0x1000, 128, },
+ { "SST25VF040", 0x44, 0x1000, 128, },
+ { "SST25VF040B", 0x8d, 0x1000, 128, },
+ { "SST25WF080", 5, 0x1000, 256, },
+ { "SST25VF080B", 0x8e, 0x1000, 256, },
+ { "SST25VF016", 0x41, 0x1000, 512, },
+ { "SST25VF032", 0x4a, 0x1000, 1024, },
+ { "SST25VF064", 0x4b, 0x1000, 2048, },
{ 0 },
};
static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+ { "AT45DB011", 0xc, 256, 512, },
+ { "AT45DB021", 0x14, 256, 1024, },
+ { "AT45DB041", 0x1c, 256, 2048, },
+ { "AT45DB081", 0x24, 256, 4096, },
+ { "AT45DB161", 0x2c, 512, 4096, },
+ { "AT45DB321", 0x34, 512, 8192, },
+ { "AT45DB642", 0x3c, 1024, 8192, },
{ 0 },
};
@@ -84,6 +111,8 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
break;
}
break;
+ case 0x13:
+ return -ENOTSUPP;
default:
for (e = bcma_sflash_st_tbl; e->name; e++) {
if (e->id == id)
@@ -116,7 +145,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
return -ENOTSUPP;
}
- sflash->window = BCMA_SFLASH;
+ sflash->window = BCMA_SOC_FLASH2;
sflash->blocksize = e->blocksize;
sflash->numblocks = e->numblocks;
sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
index 834225f65e8f..dcb137926d31 100644
--- a/drivers/bcma/driver_gmac_cmn.c
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -8,7 +8,7 @@
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
-void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
{
mutex_init(&gc->phy_mutex);
}
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
new file mode 100644
index 000000000000..71f755c06fc6
--- /dev/null
+++ b/drivers/bcma/driver_gpio.c
@@ -0,0 +1,103 @@
+/*
+ * Broadcom specific AMBA
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcma_private.h"
+
+static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
+{
+ return container_of(chip, struct bcma_drv_cc, gpio);
+}
+
+static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ return !!bcma_chipco_gpio_in(cc, 1 << gpio);
+}
+
+static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
+ return 0;
+}
+
+static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
+ bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+ return 0;
+}
+
+static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ bcma_chipco_gpio_control(cc, 1 << gpio, 0);
+ /* clear pulldown */
+ bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
+ /* Set pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);
+
+ return 0;
+}
+
+static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ /* clear pullup */
+ bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
+}
+
+int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+ struct gpio_chip *chip = &cc->gpio;
+
+ chip->label = "bcma_gpio";
+ chip->owner = THIS_MODULE;
+ chip->request = bcma_gpio_request;
+ chip->free = bcma_gpio_free;
+ chip->get = bcma_gpio_get_value;
+ chip->set = bcma_gpio_set_value;
+ chip->direction_input = bcma_gpio_direction_input;
+ chip->direction_output = bcma_gpio_direction_output;
+ chip->ngpio = 16;
+	/* There is just one SoC per device, so give its GPIOs deterministic
+	 * numbers to make them easier to address. Buses on other host types
+	 * may get a dynamically assigned base. */
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ chip->base = 0;
+ else
+ chip->base = -1;
+
+ return gpiochip_add(chip);
+}
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return gpiochip_remove(&cc->gpio);
+}
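Once gpiochip_add() has run, the pins are ordinary gpiolib GPIOs, so consumers no longer need the bcma-specific accessors at all. A hedged sketch using the legacy integer-based gpiolib API; GPIO number 0 assumes the SoC host case above, where the chip registers with base 0:

#include <linux/gpio.h>

/* Sketch: toggle the first bcma GPIO through gpiolib. */
static int example_blink_bcma_gpio0(void)
{
	int err;

	err = gpio_request(0, "example-led");
	if (err)
		return err;

	gpio_direction_output(0, 1);	/* drive high */
	gpio_set_value(0, 0);		/* then low */
	gpio_free(0);

	return 0;
}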
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index cc65b45b4368..792daad28cbc 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
~(1 << irqflag));
else
- bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
+ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
/* assign the new one */
if (irq == 0) {
@@ -171,7 +171,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
struct bcma_bus *bus = mcore->core->bus;
if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
- return bcma_pmu_get_clockcpu(&bus->drv_cc);
+ return bcma_pmu_get_cpu_clock(&bus->drv_cc);
bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
return 0;
@@ -181,47 +181,66 @@ EXPORT_SYMBOL(bcma_cpu_clock);
static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
+ struct bcma_drv_cc *cc = &bus->drv_cc;
- switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
+ switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
case BCMA_CC_FLASHT_ATSER:
bcma_debug(bus, "Found serial flash\n");
- bcma_sflash_init(&bus->drv_cc);
+ bcma_sflash_init(cc);
break;
case BCMA_CC_FLASHT_PARA:
bcma_debug(bus, "Found parallel flash\n");
- bus->drv_cc.pflash.window = 0x1c000000;
- bus->drv_cc.pflash.window_size = 0x02000000;
+ cc->pflash.present = true;
+ cc->pflash.window = BCMA_SOC_FLASH2;
+ cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
- if ((bcma_read32(bus->drv_cc.core, BCMA_CC_FLASH_CFG) &
+ if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
BCMA_CC_FLASH_CFG_DS) == 0)
- bus->drv_cc.pflash.buswidth = 1;
+ cc->pflash.buswidth = 1;
else
- bus->drv_cc.pflash.buswidth = 2;
+ cc->pflash.buswidth = 2;
break;
default:
bcma_err(bus, "Flash type not supported\n");
}
- if (bus->drv_cc.core->id.rev == 38 ||
+ if (cc->core->id.rev == 38 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
- if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
+ if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
bcma_debug(bus, "Found NAND flash\n");
- bcma_nflash_init(&bus->drv_cc);
+ bcma_nflash_init(cc);
}
}
}
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+ struct bcma_bus *bus = mcore->core->bus;
+
+ if (mcore->early_setup_done)
+ return;
+
+ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_flash_detect(mcore);
+
+ mcore->early_setup_done = true;
+}
+
void bcma_core_mips_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus;
struct bcma_device *core;
bus = mcore->core->bus;
+ if (mcore->setup_done)
+ return;
+
bcma_info(bus, "Initializing MIPS core...\n");
- if (!mcore->setup_done)
- mcore->assigned_irqs = 1;
+ bcma_core_mips_early_init(mcore);
+
+ mcore->assigned_irqs = 1;
/* Assign IRQs to all cores on the bus */
list_for_each_entry(core, &bus->cores, list) {
@@ -256,10 +275,5 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
bcma_info(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
- if (mcore->setup_done)
- return;
-
- bcma_chipco_serial_init(&bus->drv_cc);
- bcma_core_mips_flash_detect(mcore);
mcore->setup_done = true;
}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c39ee6d45850..cf7a476a519f 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -207,14 +207,14 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
* Init.
**************************************************/
-static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
+static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
bcma_core_pci_config_fixup(pc);
}
-void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
+void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
if (pc->setup_done)
return;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 9baf886e82df..af0c9fabee54 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -24,7 +24,7 @@
#define BCMA_PCI_SLOT_MAX 16
#define PCI_CONFIG_SPACE_SIZE 256
-bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u16 chipid_top;
@@ -35,11 +35,6 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
chipid_top != 0x5300)
return false;
- if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
- bcma_info(bus, "This PCI core is disabled and not working\n");
- return false;
- }
-
bcma_core_enable(pc->core, 0);
return !mips_busprobe32(tmp, pc->core->io_addr);
@@ -269,10 +264,9 @@ static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
}
/* return cap_offset if requested capability exists in the PCI config space */
-static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
- unsigned int dev,
- unsigned int func, u8 req_cap_id,
- unsigned char *buf, u32 *buflen)
+static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
+ unsigned int func, u8 req_cap_id,
+ unsigned char *buf, u32 *buflen)
{
u8 cap_id;
u8 cap_ptr = 0;
@@ -339,7 +333,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
* Retry Status (CRS) Completion Status to software then
* enable the feature.
*/
-static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
+static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u8 cap_ptr, root_ctrl, root_cap, dev;
@@ -386,7 +380,7 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
}
}
-void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
struct bcma_drv_pci_host *pc_host;
@@ -396,6 +390,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
bcma_info(bus, "PCIEcore in host mode found\n");
+ if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
+ bcma_info(bus, "This PCIE core is disabled and not working\n");
+ return;
+ }
+
pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
bcma_err(bus, "can not allocate memory");
@@ -452,6 +451,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
+ pc_host->io_resource.start = 0x100;
+ pc_host->io_resource.end = 0x47F;
pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
tmp | BCMA_SOC_PCI_MEM);
@@ -459,6 +460,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
+ pc_host->io_resource.start = 0x480;
+ pc_host->io_resource.end = 0x7FF;
pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
@@ -534,7 +537,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
struct resource *res;
- int pos;
+ int pos, err;
if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
@@ -547,8 +550,12 @@ static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
for (pos = 0; pos < 6; pos++) {
res = &dev->resource[pos];
- if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM))
- pci_assign_resource(dev, pos);
+ if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
+ err = pci_assign_resource(dev, pos);
+ if (err)
+ pr_err("PCI: Problem fixing up the addresses on %s\n",
+ pci_name(dev));
+ }
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index b6b4b5ebd4c2..fbf2759e7e4e 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -155,8 +155,8 @@ static const struct bcma_host_ops bcma_host_pci_ops = {
.awrite32 = bcma_host_pci_awrite32,
};
-static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int bcma_host_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct bcma_bus *bus;
int err = -ENOMEM;
@@ -226,7 +226,7 @@ err_kfree_bus:
return err;
}
-static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
+static void bcma_host_pci_remove(struct pci_dev *dev)
{
struct bcma_bus *bus = pci_get_drvdata(dev);
@@ -238,7 +238,7 @@ static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int bcma_host_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -261,11 +261,11 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
bcma_host_pci_resume);
#define BCMA_PM_OPS (&bcma_pm_ops)
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
#define BCMA_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
@@ -284,7 +284,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.name = "bcma-pci-bridge",
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
- .remove = __devexit_p(bcma_host_pci_remove),
+ .remove = bcma_host_pci_remove,
.driver.pm = BCMA_PM_OPS,
};
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d865470bc951..324f9debda88 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,6 +81,18 @@ struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
}
EXPORT_SYMBOL_GPL(bcma_find_core);
+static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry(core, &bus->cores, list) {
+ if (core->id.id == coreid && core->core_unit == unit)
+ return core;
+ }
+ return NULL;
+}
+
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
@@ -152,6 +164,17 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus, "Error registering NAND flash\n");
}
#endif
+ err = bcma_gpio_init(&bus->drv_cc);
+ if (err == -ENOTSUPP)
+ bcma_debug(bus, "GPIO driver not activated\n");
+ else if (err)
+ bcma_err(bus, "Error registering GPIO driver: %i\n", err);
+
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ err = bcma_chipco_watchdog_register(&bus->drv_cc);
+ if (err)
+ bcma_err(bus, "Error registering watchdog driver\n");
+ }
return 0;
}
@@ -165,9 +188,11 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
if (core->dev_registered)
device_unregister(&core->dev);
}
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ platform_device_unregister(bus->drv_cc.watchdog);
}
-int __devinit bcma_bus_register(struct bcma_bus *bus)
+int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
@@ -183,6 +208,20 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
return -1;
}
+ /* Early init CC core */
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_early_init(&bus->drv_cc);
+ }
+
+ /* Try to get SPROM */
+ err = bcma_sprom_get(bus);
+ if (err == -ENOENT) {
+ bcma_err(bus, "No SPROM available\n");
+ } else if (err)
+ bcma_err(bus, "Failed to get SPROM: %d\n", err);
+
/* Init CC core */
core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
@@ -198,10 +237,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
}
/* Init PCIE core */
- core = bcma_find_core(bus, BCMA_CORE_PCIE);
+ core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
+ if (core) {
+ bus->drv_pci[0].core = core;
+ bcma_core_pci_init(&bus->drv_pci[0]);
+ }
+
+ /* Init PCIE core */
+ core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
if (core) {
- bus->drv_pci.core = core;
- bcma_core_pci_init(&bus->drv_pci);
+ bus->drv_pci[1].core = core;
+ bcma_core_pci_init(&bus->drv_pci[1]);
}
/* Init GBIT MAC COMMON core */
@@ -211,13 +257,6 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
}
- /* Try to get SPROM */
- err = bcma_sprom_get(bus);
- if (err == -ENOENT) {
- bcma_err(bus, "No SPROM available\n");
- } else if (err)
- bcma_err(bus, "Failed to get SPROM: %d\n", err);
-
/* Register found cores */
bcma_register_cores(bus);
@@ -229,6 +268,13 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
+ int err;
+
+ err = bcma_gpio_unregister(&bus->drv_cc);
+ if (err == -EBUSY)
+ bcma_err(bus, "Some GPIOs are still in use.\n");
+ else if (err)
+ bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
@@ -275,18 +321,18 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return -1;
}
- /* Init CC core */
+ /* Early init CC core */
core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
- bcma_core_chipcommon_init(&bus->drv_cc);
+ bcma_core_chipcommon_early_init(&bus->drv_cc);
}
- /* Init MIPS core */
+ /* Early init MIPS core */
core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
if (core) {
bus->drv_mips.core = core;
- bcma_core_mips_init(&bus->drv_mips);
+ bcma_core_mips_early_init(&bus->drv_mips);
}
bcma_info(bus, "Early bus registered\n");
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 0d546b64be34..4adf9ef9a113 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -595,8 +595,11 @@ int bcma_sprom_get(struct bcma_bus *bus)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);
- if (err)
+ if (err) {
+ bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n");
+ err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
goto out;
+ }
bcma_sprom_extract_r8(bus, sprom);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index d2ed7f18d1ac..175649468c95 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "50"
+#define VERSION "81"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -10,7 +10,7 @@
#define AOE_PARTITIONS (16)
#endif
-#define WHITESPACE " \t\v\f\n"
+#define WHITESPACE " \t\v\f\n,"
enum {
AOECMD_ATA,
@@ -73,21 +73,29 @@ enum {
DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */
DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
DEVFL_GDALLOC = (1<<3), /* need to alloc gendisk */
- DEVFL_KICKME = (1<<4), /* slow polling network card catch */
- DEVFL_NEWSIZE = (1<<5), /* need to update dev size in block layer */
+ DEVFL_GD_NOW = (1<<4), /* allocating gendisk */
+ DEVFL_KICKME = (1<<5), /* slow polling network card catch */
+ DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
+ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
+ DEVFL_FREED = (1<<8), /* device has been cleaned up */
};
enum {
DEFAULTBCNT = 2 * 512, /* 2 sectors */
MIN_BUFS = 16,
- NTARGETS = 8,
+ NTARGETS = 4,
NAOEIFS = 8,
NSKBPOOLMAX = 256,
NFACTIVE = 61,
TIMERTICK = HZ / 10,
- MINTIMER = HZ >> 2,
- MAXTIMER = HZ << 1,
+ RTTSCALE = 8,
+ RTTDSCALE = 3,
+ RTTAVG_INIT = USEC_PER_SEC / 4 << RTTSCALE,
+ RTTDEV_INIT = RTTAVG_INIT / 4,
+
+ HARD_SCORN_SECS = 10, /* try another remote port after this */
+ MAX_TAINT = 1000, /* cap on aoetgt taint */
};
struct buf {
@@ -100,10 +108,17 @@ struct buf {
struct request *rq;
};
+enum frame_flags {
+ FFL_PROBE = 1,
+};
+
struct frame {
struct list_head head;
u32 tag;
+ struct timeval sent; /* high-res time packet was sent */
+ u32 sent_jiffs; /* low-res jiffies-based sent time */
ulong waited;
+ ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
sector_t lba;
struct sk_buff *skb; /* command skb freed on module exit */
@@ -112,6 +127,7 @@ struct frame {
struct bio_vec *bv;
ulong bcnt;
ulong bv_off;
+ char flags;
};
struct aoeif {
@@ -122,28 +138,31 @@ struct aoeif {
struct aoetgt {
unsigned char addr[6];
- ushort nframes;
+ ushort nframes; /* cap on frames to use */
struct aoedev *d; /* parent device I belong to */
struct list_head ffree; /* list of free frames */
struct aoeif ifs[NAOEIFS];
struct aoeif *ifp; /* current aoeif in use */
- ushort nout;
- ushort maxout;
- ulong falloc;
- ulong lastwadj; /* last window adjustment */
+ ushort nout; /* number of AoE commands outstanding */
+ ushort maxout; /* current value for max outstanding */
+ ushort next_cwnd; /* incr maxout after decrementing to zero */
+ ushort ssthresh; /* slow start threshold */
+ ulong falloc; /* number of allocated frames */
+ int taint; /* how much we want to avoid this aoetgt */
int minbcnt;
int wpkts, rpkts;
+ char nout_probes;
};
struct aoedev {
struct aoedev *next;
ulong sysminor;
ulong aoemajor;
+ u32 rttavg; /* scaled AoE round trip time average */
+ u32 rttdev; /* scaled round trip time mean deviation */
u16 aoeminor;
u16 flags;
u16 nopen; /* (bd_openers isn't available without sleeping) */
- u16 rttavg; /* round trip average of requests/responses */
- u16 mintimer;
u16 fw_ver; /* version of blade's firmware */
u16 lasttag; /* last tag sent */
u16 useme;
@@ -151,7 +170,7 @@ struct aoedev {
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
struct request_queue *blkq;
- struct hd_geometry geo;
+ struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
spinlock_t lock;
@@ -164,11 +183,12 @@ struct aoedev {
} ip;
ulong maxbcnt;
struct list_head factive[NFACTIVE]; /* hash of active frames */
- struct aoetgt *targets[NTARGETS];
+ struct list_head rexmitq; /* deferred retransmissions */
+ struct aoetgt **targets;
+ ulong ntargets; /* number of allocated aoetgt pointers */
struct aoetgt **tgt; /* target in use when working */
- struct aoetgt *htgt; /* target needing rexmit assistance */
- ulong ntargets;
ulong kicked;
+ char ident[512];
};
/* kthread tracking */
@@ -195,6 +215,7 @@ void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
struct sk_buff *aoecmd_ata_rsp(struct sk_buff *);
void aoecmd_cfg_rsp(struct sk_buff *);
void aoecmd_sleepwork(struct work_struct *);
+void aoecmd_wreset(struct aoetgt *t);
void aoecmd_cleanslate(struct aoedev *);
void aoecmd_exit(void);
int aoecmd_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00dfc5008ad4..a129f8c8073d 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -16,11 +16,19 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
+/* GPFS needs a larger value than the default. */
+static int aoe_maxsectors;
+module_param(aoe_maxsectors, int, 0644);
+MODULE_PARM_DESC(aoe_maxsectors,
+ "When nonzero, set the maximum number of sectors per I/O request");
+
static ssize_t aoedisk_show_state(struct device *dev,
struct device_attribute *attr, char *page)
{
@@ -59,7 +67,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
nd = nds;
ne = nd + ARRAY_SIZE(nds);
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++) {
ifp = (*t)->ifs;
e = ifp + NAOEIFS;
@@ -91,6 +99,14 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
+static ssize_t aoedisk_show_payload(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct aoedev *d = disk->private_data;
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+}
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
@@ -99,12 +115,14 @@ static struct device_attribute dev_attr_firmware_version = {
.attr = { .name = "firmware-version", .mode = S_IRUGO },
.show = aoedisk_show_fwver,
};
+static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
&dev_attr_state.attr,
&dev_attr_mac.attr,
&dev_attr_netif.attr,
&dev_attr_firmware_version.attr,
+ &dev_attr_payload.attr,
NULL,
};
@@ -129,9 +147,18 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
+ if (!virt_addr_valid(d)) {
+ pr_crit("aoe: invalid device pointer in %s\n",
+ __func__);
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
+ return -ENODEV;
+
mutex_lock(&aoeblk_mutex);
spin_lock_irqsave(&d->lock, flags);
- if (d->flags & DEVFL_UP) {
+ if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
d->nopen++;
spin_unlock_irqrestore(&d->lock, flags);
mutex_unlock(&aoeblk_mutex);
@@ -195,9 +222,38 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static int
+aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
+{
+ struct aoedev *d;
+
+ if (!arg)
+ return -EINVAL;
+
+ d = bdev->bd_disk->private_data;
+ if ((d->flags & DEVFL_UP) == 0) {
+ pr_err("aoe: disk not up\n");
+ return -ENODEV;
+ }
+
+ if (cmd == HDIO_GET_IDENTITY) {
+ if (!copy_to_user((void __user *) arg, &d->ident,
+ sizeof(d->ident)))
+ return 0;
+ return -EFAULT;
+ }
+
+ /* udev calls scsi_id, which uses SG_IO, resulting in noise */
+ if (cmd != SG_IO)
+ pr_info("aoe: unknown ioctl 0x%x\n", cmd);
+
+ return -ENOTTY;
+}
+
static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
+ .ioctl = aoeblk_ioctl,
.getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
@@ -212,6 +268,18 @@ aoeblk_gdalloc(void *vp)
struct request_queue *q;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
+ int late = 0;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_GDALLOC
+ && !(d->flags & DEVFL_TKILL)
+ && !(d->flags & DEVFL_GD_NOW))
+ d->flags |= DEVFL_GD_NOW;
+ else
+ late = 1;
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (late)
+ return;
gd = alloc_disk(AOE_PARTITIONS);
if (gd == NULL) {
@@ -231,23 +299,24 @@ aoeblk_gdalloc(void *vp)
if (q == NULL) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
- mempool_destroy(mp);
- goto err_disk;
+ goto err_mempool;
}
- d->blkq = blk_alloc_queue(GFP_KERNEL);
- if (!d->blkq)
- goto err_mempool;
- d->blkq->backing_dev_info.name = "aoe";
- if (bdi_init(&d->blkq->backing_dev_info))
- goto err_blkq;
spin_lock_irqsave(&d->lock, flags);
- blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ WARN_ON(!(d->flags & DEVFL_GDALLOC));
+ WARN_ON(d->flags & DEVFL_TKILL);
+ WARN_ON(d->gd);
+ WARN_ON(d->flags & DEVFL_UP);
+ blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
+ q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
d->gd = gd;
+ if (aoe_maxsectors)
+ blk_queue_max_hw_sectors(q, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->fops = &aoe_bdops;
@@ -263,18 +332,21 @@ aoeblk_gdalloc(void *vp)
add_disk(gd);
aoedisk_add_sysfs(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ WARN_ON(!(d->flags & DEVFL_GD_NOW));
+ d->flags &= ~DEVFL_GD_NOW;
+ spin_unlock_irqrestore(&d->lock, flags);
return;
-err_blkq:
- blk_cleanup_queue(d->blkq);
- d->blkq = NULL;
err_mempool:
- mempool_destroy(d->bufpool);
+ mempool_destroy(mp);
err_disk:
put_disk(gd);
err:
spin_lock_irqsave(&d->lock, flags);
- d->flags &= ~DEVFL_GDALLOC;
+ d->flags &= ~DEVFL_GD_NOW;
+ schedule_work(&d->work);
spin_unlock_irqrestore(&d->lock, flags);
}
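The aoeblk.c changes above add a block-device ioctl handler whose only supported command is HDIO_GET_IDENTITY, copying out the 512 bytes of ATA IDENTIFY data cached in d->ident. A minimal user-space sketch follows; the device path is only an example and depends on which AoE targets are present on the system.

/* User-space sketch: fetch the cached ATA IDENTIFY data through the new
 * HDIO_GET_IDENTITY handler.  The device path is an example only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	unsigned short id[256];	/* 512 bytes, matching d->ident */
	int fd = open("/dev/etherd/e0.0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_GET_IDENTITY, id) < 0) {
		perror("HDIO_GET_IDENTITY");
		close(fd);
		return 1;
	}
	/* Words 27-46 hold the model string, byte-swapped by the driver. */
	printf("identify word 0: 0x%04x\n", id[0]);
	close(fd);
	return 0;
}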
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index ed57a890c643..42e67ad6bd20 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -39,6 +39,11 @@ struct ErrMsg {
};
static DEFINE_MUTEX(aoechr_mutex);
+
+/* A ring buffer of error messages, to be read through
+ * "/dev/etherd/err". When no messages are present,
+ * readers will block waiting for messages to appear.
+ */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
@@ -282,7 +287,7 @@ aoechr_init(void)
int n, i;
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
- if (n < 0) {
+ if (n < 0) {
printk(KERN_ERR "aoe: can't register char device\n");
return n;
}
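As the new comment in aoechr.c says, emsgs[] is a ring buffer drained through the "err" character device, and a read blocks until a message arrives. A minimal reader could look like the sketch below; /dev/etherd/err is the conventional udev node name and is assumed here.

/* User-space sketch: drain the aoe error-message ring buffer.  A read
 * blocks until a message is available, as the comment above describes.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/dev/etherd/err", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}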
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 9fe4f1865558..25ef5c014fca 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -22,6 +22,7 @@
#define MAXIOC (8192) /* default meant to avoid most soft lockups */
static void ktcomplete(struct frame *, struct sk_buff *);
+static int count_targets(struct aoedev *d, int *untainted);
static struct buf *nextbuf(struct aoedev *);
@@ -29,7 +30,7 @@ static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
-static int aoe_maxout = 16;
+static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
@@ -43,6 +44,8 @@ static struct {
spinlock_t lock;
} iocq;
+static struct page *empty_page;
+
static struct sk_buff *
new_skb(ulong len)
{
@@ -59,6 +62,23 @@ new_skb(ulong len)
}
static struct frame *
+getframe_deferred(struct aoedev *d, u32 tag)
+{
+ struct list_head *head, *pos, *nx;
+ struct frame *f;
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ if (f->tag == tag) {
+ list_del(pos);
+ return f;
+ }
+ }
+ return NULL;
+}
+
+static struct frame *
getframe(struct aoedev *d, u32 tag)
{
struct frame *f;
@@ -162,8 +182,10 @@ aoe_freetframe(struct frame *f)
t = f->t;
f->buf = NULL;
+ f->lba = 0;
f->bv = NULL;
f->r_skb = NULL;
+ f->flags = 0;
list_add(&f->head, &t->ffree);
}
@@ -217,20 +239,25 @@ newframe(struct aoedev *d)
struct frame *f;
struct aoetgt *t, **tt;
int totout = 0;
+ int use_tainted;
+ int has_untainted;
- if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */
+ if (!d->targets || !d->targets[0]) {
printk(KERN_ERR "aoe: NULL TARGETS!\n");
return NULL;
}
tt = d->tgt; /* last used target */
- for (;;) {
+ for (use_tainted = 0, has_untainted = 0;;) {
tt++;
- if (tt >= &d->targets[NTARGETS] || !*tt)
+ if (tt >= &d->targets[d->ntargets] || !*tt)
tt = d->targets;
t = *tt;
- totout += t->nout;
+ if (!t->taint) {
+ has_untainted = 1;
+ totout += t->nout;
+ }
if (t->nout < t->maxout
- && t != d->htgt
+ && (use_tainted || !t->taint)
&& t->ifp->nd) {
f = newtframe(d, t);
if (f) {
@@ -239,8 +266,12 @@ newframe(struct aoedev *d)
return f;
}
}
- if (tt == d->tgt) /* we've looped and found nada */
- break;
+ if (tt == d->tgt) { /* we've looped and found nada */
+ if (!use_tainted && !has_untainted)
+ use_tainted = 1;
+ else
+ break;
+ }
}
if (totout == 0) {
d->kicked++;
@@ -277,21 +308,68 @@ fhash(struct frame *f)
list_add_tail(&f->head, &d->factive[n]);
}
+static void
+ata_rw_frameinit(struct frame *f)
+{
+ struct aoetgt *t;
+ struct aoe_hdr *h;
+ struct aoe_atahdr *ah;
+ struct sk_buff *skb;
+ char writebit, extbit;
+
+ skb = f->skb;
+ h = (struct aoe_hdr *) skb_mac_header(skb);
+ ah = (struct aoe_atahdr *) (h + 1);
+ skb_put(skb, sizeof(*h) + sizeof(*ah));
+ memset(h, 0, skb->len);
+
+ writebit = 0x10;
+ extbit = 0x4;
+
+ t = f->t;
+ f->tag = aoehdr_atainit(t->d, t, h);
+ fhash(f);
+ t->nout++;
+ f->waited = 0;
+ f->waited_total = 0;
+ if (f->buf)
+ f->lba = f->buf->sector;
+
+ /* set up ata header */
+ ah->scnt = f->bcnt >> 9;
+ put_lba(ah, f->lba);
+ if (t->d->flags & DEVFL_EXT) {
+ ah->aflags |= AOEAFL_EXT;
+ } else {
+ extbit = 0;
+ ah->lba3 &= 0x0f;
+ ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
+ }
+ if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
+ skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+ ah->aflags |= AOEAFL_WRITE;
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+ t->wpkts++;
+ } else {
+ t->rpkts++;
+ writebit = 0;
+ }
+
+ ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ skb->dev = t->ifp->nd;
+}
+
static int
aoecmd_ata_rw(struct aoedev *d)
{
struct frame *f;
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
struct buf *buf;
struct aoetgt *t;
struct sk_buff *skb;
struct sk_buff_head queue;
ulong bcnt, fbcnt;
- char writebit, extbit;
-
- writebit = 0x10;
- extbit = 0x4;
buf = nextbuf(d);
if (buf == NULL)
@@ -326,50 +404,18 @@ aoecmd_ata_rw(struct aoedev *d)
} while (fbcnt);
/* initialize the headers & frame */
- skb = f->skb;
- h = (struct aoe_hdr *) skb_mac_header(skb);
- ah = (struct aoe_atahdr *) (h+1);
- skb_put(skb, sizeof *h + sizeof *ah);
- memset(h, 0, skb->len);
- f->tag = aoehdr_atainit(d, t, h);
- fhash(f);
- t->nout++;
- f->waited = 0;
f->buf = buf;
f->bcnt = bcnt;
- f->lba = buf->sector;
-
- /* set up ata header */
- ah->scnt = bcnt >> 9;
- put_lba(ah, buf->sector);
- if (d->flags & DEVFL_EXT) {
- ah->aflags |= AOEAFL_EXT;
- } else {
- extbit = 0;
- ah->lba3 &= 0x0f;
- ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
- }
- if (bio_data_dir(buf->bio) == WRITE) {
- skb_fillup(skb, f->bv, f->bv_off, bcnt);
- ah->aflags |= AOEAFL_WRITE;
- skb->len += bcnt;
- skb->data_len = bcnt;
- skb->truesize += bcnt;
- t->wpkts++;
- } else {
- t->rpkts++;
- writebit = 0;
- }
-
- ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
+ ata_rw_frameinit(f);
/* mark all tracking fields and load out */
buf->nframesout += 1;
buf->sector += bcnt >> 9;
- skb->dev = t->ifp->nd;
- skb = skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -442,11 +488,14 @@ resend(struct aoedev *d, struct frame *f)
h = (struct aoe_hdr *) skb_mac_header(skb);
ah = (struct aoe_atahdr *) (h+1);
- snprintf(buf, sizeof buf,
- "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
- "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
- h->src, h->dst, t->nout);
- aoechr_error(buf);
+ if (!(f->flags & FFL_PROBE)) {
+ snprintf(buf, sizeof(buf),
+ "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
+ "retransmit", d->aoemajor, d->aoeminor,
+ f->tag, jiffies, n,
+ h->src, h->dst, t->nout);
+ aoechr_error(buf);
+ }
f->tag = n;
fhash(f);
@@ -458,12 +507,46 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
}
static int
+tsince_hr(struct frame *f)
+{
+ struct timeval now;
+ int n;
+
+ do_gettimeofday(&now);
+ n = now.tv_usec - f->sent.tv_usec;
+ n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+
+ if (n < 0)
+ n = -n;
+
+ /* For relatively long periods, use jiffies to avoid
+ * discrepancies caused by updates to the system time.
+ *
+ * On a system with HZ of 1000, 32 bits is over 49 days'
+ * worth of jiffies, or over 71 minutes' worth of usecs.
+ *
+ * Jiffies overflow is handled by subtraction of unsigned ints:
+ * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
+ * $3 = 4
+ * (gdb)
+ */
+ if (n > USEC_PER_SEC / 4) {
+ n = ((u32) jiffies) - f->sent_jiffs;
+ n *= USEC_PER_SEC / HZ;
+ }
+
+ return n;
+}
+
+static int
tsince(u32 tag)
{
int n;
@@ -472,7 +555,7 @@ tsince(u32 tag)
n -= tag & 0xffff;
if (n < 0)
n += 1<<16;
- return n;
+ return jiffies_to_usecs(n + 1);
}
static struct aoeif *
@@ -503,70 +586,189 @@ ejectif(struct aoetgt *t, struct aoeif *ifp)
dev_put(nd);
}
-static int
-sthtith(struct aoedev *d)
+static struct frame *
+reassign_frame(struct frame *f)
{
- struct frame *f, *nf;
- struct list_head *nx, *pos, *head;
+ struct frame *nf;
struct sk_buff *skb;
- struct aoetgt *ht = d->htgt;
- int i;
- for (i = 0; i < NFACTIVE; i++) {
- head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- if (f->t != ht)
- continue;
+ nf = newframe(f->t->d);
+ if (!nf)
+ return NULL;
+ if (nf->t == f->t) {
+ aoe_freetframe(nf);
+ return NULL;
+ }
- nf = newframe(d);
- if (!nf)
- return 0;
+ skb = nf->skb;
+ nf->skb = f->skb;
+ nf->buf = f->buf;
+ nf->bcnt = f->bcnt;
+ nf->lba = f->lba;
+ nf->bv = f->bv;
+ nf->bv_off = f->bv_off;
+ nf->waited = 0;
+ nf->waited_total = f->waited_total;
+ nf->sent = f->sent;
+ nf->sent_jiffs = f->sent_jiffs;
+ f->skb = skb;
+
+ return nf;
+}
- /* remove frame from active list */
- list_del(pos);
+static void
+probe(struct aoetgt *t)
+{
+ struct aoedev *d;
+ struct frame *f;
+ struct sk_buff *skb;
+ struct sk_buff_head queue;
+ size_t n, m;
+ int frag;
- /* reassign all pertinent bits to new outbound frame */
- skb = nf->skb;
- nf->skb = f->skb;
- nf->buf = f->buf;
- nf->bcnt = f->bcnt;
- nf->lba = f->lba;
- nf->bv = f->bv;
- nf->bv_off = f->bv_off;
- nf->waited = 0;
- f->skb = skb;
+ d = t->d;
+ f = newtframe(d, t);
+ if (!f) {
+ pr_err("%s %pm for e%ld.%d: %s\n",
+ "aoe: cannot probe remote address",
+ t->addr,
+ (long) d->aoemajor, d->aoeminor,
+ "no frame available");
+ return;
+ }
+ f->flags |= FFL_PROBE;
+ ifrotate(t);
+ f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+ ata_rw_frameinit(f);
+ skb = f->skb;
+ for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+ if (n < PAGE_SIZE)
+ m = n;
+ else
+ m = PAGE_SIZE;
+ skb_fill_page_desc(skb, frag, empty_page, 0, m);
+ }
+ skb->len += f->bcnt;
+ skb->data_len = f->bcnt;
+ skb->truesize += f->bcnt;
+
+ skb = skb_clone(f->skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ __skb_queue_head_init(&queue);
+ __skb_queue_tail(&queue, skb);
+ aoenet_xmit(&queue);
+ }
+}
+
+static long
+rto(struct aoedev *d)
+{
+ long t;
+
+ t = 2 * d->rttavg >> RTTSCALE;
+ t += 8 * d->rttdev >> RTTDSCALE;
+ if (t == 0)
+ t = 1;
+
+ return t;
+}
+
+static void
+rexmit_deferred(struct aoedev *d)
+{
+ struct aoetgt *t;
+ struct frame *f;
+ struct frame *nf;
+ struct list_head *pos, *nx, *head;
+ int since;
+ int untainted;
+
+ count_targets(d, &untainted);
+
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head) {
+ f = list_entry(pos, struct frame, head);
+ t = f->t;
+ if (t->taint) {
+ if (!(f->flags & FFL_PROBE)) {
+ nf = reassign_frame(f);
+ if (nf) {
+ if (t->nout_probes == 0
+ && untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ list_replace(&f->head, &nf->head);
+ pos = &nf->head;
+ aoe_freetframe(f);
+ f = nf;
+ t = f->t;
+ }
+ } else if (untainted < 1) {
+ /* don't probe w/o other untainted aoetgts */
+ goto stop_probe;
+ } else if (tsince_hr(f) < t->taint * rto(d)) {
+ /* reprobe slowly when taint is high */
+ continue;
+ }
+ } else if (f->flags & FFL_PROBE) {
+stop_probe: /* don't probe untainted aoetgts */
+ list_del(pos);
aoe_freetframe(f);
- ht->nout--;
- nf->t->nout++;
- resend(d, nf);
+ /* leaving d->kicked, because this is routine */
+ f->t->d->flags |= DEVFL_KICKME;
+ continue;
}
+ if (t->nout >= t->maxout)
+ continue;
+ list_del(pos);
+ t->nout++;
+ if (f->flags & FFL_PROBE)
+ t->nout_probes++;
+ since = tsince_hr(f);
+ f->waited += since;
+ f->waited_total += since;
+ resend(d, f);
}
- /* We've cleaned up the outstanding so take away his
- * interfaces so he won't be used. We should remove him from
- * the target array here, but cleaning up a target is
- * involved. PUNT!
- */
- memset(ht->ifs, 0, sizeof ht->ifs);
- d->htgt = NULL;
- return 1;
}
-static inline unsigned char
-ata_scnt(unsigned char *packet) {
- struct aoe_hdr *h;
- struct aoe_atahdr *ah;
+/* An aoetgt accumulates demerits quickly, and successful
+ * probing redeems the aoetgt slowly.
+ */
+static void
+scorn(struct aoetgt *t)
+{
+ int n;
- h = (struct aoe_hdr *) packet;
- ah = (struct aoe_atahdr *) (h+1);
- return ah->scnt;
+ n = t->taint++;
+ t->taint += t->taint * 2;
+ if (n > t->taint)
+ t->taint = n;
+ if (t->taint > MAX_TAINT)
+ t->taint = MAX_TAINT;
+}
+
+static int
+count_targets(struct aoedev *d, int *untainted)
+{
+ int i, good;
+
+ for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
+ if (d->targets[i]->taint == 0)
+ good++;
+
+ if (untainted)
+ *untainted = good;
+ return i;
}
static void
rexmit_timer(ulong vp)
{
struct aoedev *d;
- struct aoetgt *t, **tt, **te;
+ struct aoetgt *t;
struct aoeif *ifp;
struct frame *f;
struct list_head *head, *pos, *nx;
@@ -574,15 +776,18 @@ rexmit_timer(ulong vp)
register long timeout;
ulong flags, n;
int i;
+ int utgts; /* number of aoetgt descriptors (not slots) */
+ int since;
d = (struct aoedev *) vp;
- /* timeout is always ~150% of the moving average */
- timeout = d->rttavg;
- timeout += timeout >> 1;
-
spin_lock_irqsave(&d->lock, flags);
+ /* timeout based on observed timings and variations */
+ timeout = rto(d);
+
+ utgts = count_targets(d, NULL);
+
if (d->flags & DEVFL_TKILL) {
spin_unlock_irqrestore(&d->lock, flags);
return;
@@ -593,67 +798,61 @@ rexmit_timer(ulong vp)
head = &d->factive[i];
list_for_each_safe(pos, nx, head) {
f = list_entry(pos, struct frame, head);
- if (tsince(f->tag) < timeout)
+ if (tsince_hr(f) < timeout)
break; /* end of expired frames */
/* move to flist for later processing */
list_move_tail(pos, &flist);
}
}
- /* window check */
- tt = d->targets;
- te = tt + d->ntargets;
- for (; tt < te && (t = *tt); tt++) {
- if (t->nout == t->maxout
- && t->maxout < t->nframes
- && (jiffies - t->lastwadj)/HZ > 10) {
- t->maxout++;
- t->lastwadj = jiffies;
- }
- }
-
- if (!list_empty(&flist)) { /* retransmissions necessary */
- n = d->rttavg <<= 1;
- if (n > MAXTIMER)
- d->rttavg = MAXTIMER;
- }
/* process expired frames */
while (!list_empty(&flist)) {
pos = flist.next;
f = list_entry(pos, struct frame, head);
- n = f->waited += timeout;
- n /= HZ;
- if (n > aoe_deadsecs) {
+ since = tsince_hr(f);
+ n = f->waited_total + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs
+ && n > aoe_deadsecs
+ && !(f->flags & FFL_PROBE)) {
/* Waited too long. Device failure.
* Hang all frames on first hash bucket for downdev
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
aoedev_downdev(d);
- break;
+ goto out;
}
- list_del(pos);
t = f->t;
- if (n > aoe_deadsecs/2)
- d->htgt = t; /* see if another target can help */
-
- if (t->nout == t->maxout) {
- if (t->maxout > 1)
- t->maxout--;
- t->lastwadj = jiffies;
+ n = f->waited + since;
+ n /= USEC_PER_SEC;
+ if (aoe_deadsecs && utgts > 0
+ && (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
+ scorn(t); /* avoid this target */
+
+ if (t->maxout != 1) {
+ t->ssthresh = t->maxout / 2;
+ t->maxout = 1;
}
- ifp = getif(t, f->skb->dev);
- if (ifp && ++ifp->lost > (t->nframes << 1)
- && (ifp != t->ifs || t->ifs[1].nd)) {
- ejectif(t, ifp);
- ifp = NULL;
+ if (f->flags & FFL_PROBE) {
+ t->nout_probes--;
+ } else {
+ ifp = getif(t, f->skb->dev);
+ if (ifp && ++ifp->lost > (t->nframes << 1)
+ && (ifp != t->ifs || t->ifs[1].nd)) {
+ ejectif(t, ifp);
+ ifp = NULL;
+ }
}
- resend(d, f);
+ list_move_tail(pos, &d->rexmitq);
+ t->nout--;
}
+ rexmit_deferred(d);
- if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
+out:
+ if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
d->blkq->request_fn(d->blkq);
}
@@ -774,8 +973,7 @@ nextbuf(struct aoedev *d)
void
aoecmd_work(struct aoedev *d)
{
- if (d->htgt && !sthtith(d))
- return;
+ rexmit_deferred(d);
while (aoecmd_ata_rw(d))
;
}
@@ -809,6 +1007,17 @@ aoecmd_sleepwork(struct work_struct *work)
}
static void
+ata_ident_fixstring(u16 *id, int ns)
+{
+ u16 s;
+
+ while (ns-- > 0) {
+ s = *id;
+ *id++ = s >> 8 | s << 8;
+ }
+}
+
+static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
u64 ssize;
@@ -843,6 +1052,11 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
}
+ ata_ident_fixstring((u16 *) &id[10<<1], 10); /* serial */
+ ata_ident_fixstring((u16 *) &id[23<<1], 4); /* firmware */
+ ata_ident_fixstring((u16 *) &id[27<<1], 20); /* model */
+ memcpy(d->ident, id, sizeof(d->ident));
+
if (d->ssize != ssize)
printk(KERN_INFO
"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
@@ -862,26 +1076,28 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
}
static void
-calc_rttavg(struct aoedev *d, int rtt)
+calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
register long n;
n = rtt;
- if (n < 0) {
- n = -rtt;
- if (n < MINTIMER)
- n = MINTIMER;
- else if (n > MAXTIMER)
- n = MAXTIMER;
- d->mintimer += (n - d->mintimer) >> 1;
- } else if (n < d->mintimer)
- n = d->mintimer;
- else if (n > MAXTIMER)
- n = MAXTIMER;
-
- /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
- n -= d->rttavg;
- d->rttavg += n >> 2;
+
+ /* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
+ n -= d->rttavg >> RTTSCALE;
+ d->rttavg += n;
+ if (n < 0)
+ n = -n;
+ n -= d->rttdev >> RTTDSCALE;
+ d->rttdev += n;
+
+ if (!t || t->maxout >= t->nframes)
+ return;
+ if (t->maxout < t->ssthresh)
+ t->maxout += 1;
+ else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
+ t->maxout += 1;
+ t->next_cwnd = t->maxout;
+ }
}
static struct aoetgt *
@@ -890,7 +1106,7 @@ gettgt(struct aoedev *d, char *addr)
struct aoetgt **t, **e;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
return *t;
@@ -966,19 +1182,22 @@ ktiocomplete(struct frame *f)
struct aoeif *ifp;
struct aoedev *d;
long n;
+ int untainted;
if (f == NULL)
return;
t = f->t;
d = t->d;
+ skb = f->r_skb;
+ buf = f->buf;
+ if (f->flags & FFL_PROBE)
+ goto out;
+ if (!skb) /* just fail the buf. */
+ goto noskb;
hout = (struct aoe_hdr *) skb_mac_header(f->skb);
ahout = (struct aoe_atahdr *) (hout+1);
- buf = f->buf;
- skb = f->r_skb;
- if (skb == NULL)
- goto noskb; /* just fail the buf. */
hin = (struct aoe_hdr *) skb->data;
skb_pull(skb, sizeof(*hin));
@@ -988,9 +1207,9 @@ ktiocomplete(struct frame *f)
pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
-noskb: if (buf)
+noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
- goto badrsp;
+ goto out;
}
n = ahout->scnt << 9;
@@ -998,8 +1217,10 @@ noskb: if (buf)
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
if (skb->len < n) {
- pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
- skb->len, n);
+ pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
+ "aoe: runt data size in read from",
+ (long) d->aoemajor, d->aoeminor,
+ skb->len, n);
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
break;
}
@@ -1010,13 +1231,13 @@ noskb: if (buf)
ifp = getif(t, skb->dev);
if (ifp)
ifp->lost = 0;
- if (d->htgt == t) /* I'll help myself, thank you. */
- d->htgt = NULL;
spin_unlock_irq(&d->lock);
break;
case ATA_CMD_ID_ATA:
if (skb->len < 512) {
- pr_info("aoe: runt data size in ataid. skb->len=%d\n",
+ pr_info("%s e%ld.%d. skb->len=%d need=512\n",
+ "aoe: runt data size in ataid from",
+ (long) d->aoemajor, d->aoeminor,
skb->len);
break;
}
@@ -1032,16 +1253,23 @@ noskb: if (buf)
be16_to_cpu(get_unaligned(&hin->major)),
hin->minor);
}
-badrsp:
+out:
spin_lock_irq(&d->lock);
+ if (t->taint > 0
+ && --t->taint > 0
+ && t->nout_probes == 0) {
+ count_targets(d, &untainted);
+ if (untainted > 0) {
+ probe(t);
+ t->nout_probes++;
+ }
+ }
aoe_freetframe(f);
if (buf && --buf->nframesout == 0 && buf->resid == 0)
aoe_end_buf(d, buf);
- aoecmd_work(d);
-
spin_unlock_irq(&d->lock);
aoedev_put(d);
dev_kfree_skb(skb);
@@ -1141,7 +1369,6 @@ aoecmd_ata_rsp(struct sk_buff *skb)
struct aoedev *d;
struct aoe_hdr *h;
struct frame *f;
- struct aoetgt *t;
u32 n;
ulong flags;
char ebuf[128];
@@ -1162,23 +1389,32 @@ aoecmd_ata_rsp(struct sk_buff *skb)
n = be32_to_cpu(get_unaligned(&h->tag));
f = getframe(d, n);
- if (f == NULL) {
- calc_rttavg(d, -tsince(n));
- spin_unlock_irqrestore(&d->lock, flags);
- aoedev_put(d);
- snprintf(ebuf, sizeof ebuf,
- "%15s e%d.%d tag=%08x@%08lx\n",
- "unexpected rsp",
- get_unaligned_be16(&h->major),
- h->minor,
- get_unaligned_be32(&h->tag),
- jiffies);
- aoechr_error(ebuf);
- return skb;
+ if (f) {
+ calc_rttavg(d, f->t, tsince_hr(f));
+ f->t->nout--;
+ if (f->flags & FFL_PROBE)
+ f->t->nout_probes--;
+ } else {
+ f = getframe_deferred(d, n);
+ if (f) {
+ calc_rttavg(d, NULL, tsince_hr(f));
+ } else {
+ calc_rttavg(d, NULL, tsince(n));
+ spin_unlock_irqrestore(&d->lock, flags);
+ aoedev_put(d);
+ snprintf(ebuf, sizeof(ebuf),
+ "%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
+ "unexpected rsp",
+ get_unaligned_be16(&h->major),
+ h->minor,
+ get_unaligned_be32(&h->tag),
+ jiffies,
+ h->src,
+ h->dst);
+ aoechr_error(ebuf);
+ return skb;
+ }
}
- t = f->t;
- calc_rttavg(d, tsince(f->tag));
- t->nout--;
aoecmd_work(d);
spin_unlock_irqrestore(&d->lock, flags);
@@ -1201,7 +1437,7 @@ aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
aoenet_xmit(&queue);
}
-
+
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
@@ -1227,6 +1463,7 @@ aoecmd_ata_id(struct aoedev *d)
fhash(f);
t->nout++;
f->waited = 0;
+ f->waited_total = 0;
/* set up ata header */
ah->scnt = 1;
@@ -1235,41 +1472,69 @@ aoecmd_ata_id(struct aoedev *d)
skb->dev = t->ifp->nd;
- d->rttavg = MAXTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->timer.function = rexmit_timer;
- return skb_clone(skb, GFP_ATOMIC);
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ do_gettimeofday(&f->sent);
+ f->sent_jiffs = (u32) jiffies;
+ }
+
+ return skb;
}
-
+
+static struct aoetgt **
+grow_targets(struct aoedev *d)
+{
+ ulong oldn, newn;
+ struct aoetgt **tt;
+
+ oldn = d->ntargets;
+ newn = oldn * 2;
+ tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
+ if (!tt)
+ return NULL;
+ memmove(tt, d->targets, sizeof(*d->targets) * oldn);
+ d->tgt = tt + (d->tgt - d->targets);
+ kfree(d->targets);
+ d->targets = tt;
+ d->ntargets = newn;
+
+ return &d->targets[oldn];
+}
+
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
struct aoetgt *t, **tt, **te;
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && *tt; tt++)
;
if (tt == te) {
- printk(KERN_INFO
- "aoe: device addtgt failure; too many targets\n");
- return NULL;
+ tt = grow_targets(d);
+ if (!tt)
+ goto nomem;
}
t = kzalloc(sizeof(*t), GFP_ATOMIC);
- if (!t) {
- printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
- return NULL;
- }
-
- d->ntargets++;
+ if (!t)
+ goto nomem;
t->nframes = nframes;
t->d = d;
memcpy(t->addr, addr, sizeof t->addr);
t->ifp = t->ifs;
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
+ t->maxout = t->nframes / 2;
INIT_LIST_HEAD(&t->ffree);
return *tt = t;
+
+ nomem:
+ pr_info("aoe: cannot allocate memory to add target\n");
+ return NULL;
}
static void
@@ -1279,7 +1544,7 @@ setdbcnt(struct aoedev *d)
int bcnt = 0;
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
if (bcnt == 0 || bcnt > (*t)->minbcnt)
bcnt = (*t)->minbcnt;
@@ -1373,7 +1638,11 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
spin_lock_irqsave(&d->lock, flags);
t = gettgt(d, h->src);
- if (!t) {
+ if (t) {
+ t->nframes = n;
+ if (n < t->maxout)
+ aoecmd_wreset(t);
+ } else {
t = addtgt(d, h->src, n);
if (!t)
goto bail;
@@ -1402,17 +1671,26 @@ bail:
}
void
+aoecmd_wreset(struct aoetgt *t)
+{
+ t->maxout = 1;
+ t->ssthresh = t->nframes / 2;
+ t->next_cwnd = t->nframes;
+}
+
+void
aoecmd_cleanslate(struct aoedev *d)
{
struct aoetgt **t, **te;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->maxbcnt = 0;
t = d->targets;
- te = t + NTARGETS;
+ te = t + d->ntargets;
for (; t < te && *t; t++)
- (*t)->maxout = (*t)->nframes;
+ aoecmd_wreset(*t);
}
void
@@ -1460,6 +1738,14 @@ aoe_flush_iocq(void)
int __init
aoecmd_init(void)
{
+ void *p;
+
+ /* get_zeroed_page returns page with ref count 1 */
+ p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ if (!p)
+ return -ENOMEM;
+ empty_page = virt_to_page(p);
+
INIT_LIST_HEAD(&iocq.head);
spin_lock_init(&iocq.lock);
init_waitqueue_head(&ktiowq);
@@ -1475,4 +1761,7 @@ aoecmd_exit(void)
{
aoe_ktstop(&kts);
aoe_flush_iocq();
+
+ free_page((unsigned long) page_address(empty_page));
+ empty_page = NULL;
}
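The retransmit logic above replaces the old coarse timer with a Jacobson/Karels-style estimator: calc_rttavg() keeps a scaled smoothed RTT and mean deviation per device, rto() turns them into a timeout of twice the smoothed RTT plus eight times the deviation, and the tail of calc_rttavg() grows each target's window like TCP slow start up to ssthresh and then roughly once per window via next_cwnd. The sketch below mirrors only the fixed-point RTT arithmetic in isolation; the scale values (8 and 3) and the RTT samples are assumptions.

/* Stand-alone mirror of calc_rttavg()/rto() so the fixed-point scaling
 * is easier to follow.  RTTSCALE = 8 and RTTDSCALE = 3 are assumed
 * (they live in aoe.h); the RTT samples below are invented.
 */
#include <stdio.h>

enum { RTTSCALE = 8, RTTDSCALE = 3 };	/* assumed */

struct est {
	long rttavg;	/* microseconds << RTTSCALE */
	long rttdev;	/* microseconds << RTTDSCALE */
};

static void sample(struct est *e, long rtt_us)
{
	long n = rtt_us;

	/* cf. Jacobson & Karels, Congestion Avoidance and Control, 1988 */
	n -= e->rttavg >> RTTSCALE;	/* error against current average */
	e->rttavg += n;			/* unscaled gain of 1/2^RTTSCALE */
	if (n < 0)
		n = -n;
	n -= e->rttdev >> RTTDSCALE;
	e->rttdev += n;			/* unscaled gain of 1/2^RTTDSCALE */
}

static long rto_us(struct est *e)
{
	/* twice the smoothed RTT plus eight times the mean deviation */
	long t = 2 * e->rttavg >> RTTSCALE;

	t += 8 * e->rttdev >> RTTDSCALE;
	return t ? t : 1;
}

int main(void)
{
	/* start from the same initial values the driver uses */
	struct est e = { 250000L << RTTSCALE, (250000L << RTTSCALE) / 4 };
	long rtts[] = { 800, 900, 1000, 5000, 700 };	/* invented, in usecs */
	unsigned i;

	for (i = 0; i < sizeof(rtts) / sizeof(rtts[0]); i++) {
		sample(&e, rtts[i]);
		printf("rtt %5ld us -> rto %ld us\n", rtts[i], rto_us(&e));
	}
	return 0;
}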
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 90e5b537f94b..98f2965778b9 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include "aoe.h"
static void dummy_timer(ulong);
-static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -69,25 +68,34 @@ minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
NPERSHELF = 16,
};
+ if (aoemin >= NPERSHELF) {
+ pr_err("aoe: %s %d slots per shelf\n",
+ "static minor device numbers support only",
+ NPERSHELF);
+ error = -1;
+ goto out;
+ }
+
n = aoemaj * NPERSHELF + aoemin;
- if (aoemin >= NPERSHELF || n >= N_DEVS) {
+ if (n >= N_DEVS) {
pr_err("aoe: %s with e%ld.%d\n",
"cannot use static minor device numbers",
aoemaj, aoemin);
error = -1;
- } else {
- spin_lock_irqsave(&used_minors_lock, flags);
- if (test_bit(n, used_minors)) {
- pr_err("aoe: %s %lu\n",
- "existing device already has static minor number",
- n);
- error = -1;
- } else
- set_bit(n, used_minors);
- spin_unlock_irqrestore(&used_minors_lock, flags);
+ goto out;
}
- *sysminor = n;
+ spin_lock_irqsave(&used_minors_lock, flags);
+ if (test_bit(n, used_minors)) {
+ pr_err("aoe: %s %lu\n",
+ "existing device already has static minor number",
+ n);
+ error = -1;
+ } else
+ set_bit(n, used_minors);
+ spin_unlock_irqrestore(&used_minors_lock, flags);
+ *sysminor = n * AOE_PARTITIONS;
+out:
return error;
}
@@ -170,41 +178,50 @@ aoe_failip(struct aoedev *d)
aoe_end_request(d, rq, 0);
}
+static void
+downdev_frame(struct list_head *pos)
+{
+ struct frame *f;
+
+ f = list_entry(pos, struct frame, head);
+ list_del(pos);
+ if (f->buf) {
+ f->buf->nframesout--;
+ aoe_failbuf(f->t->d, f->buf);
+ }
+ aoe_freetframe(f);
+}
+
void
aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
- struct frame *f;
struct list_head *head, *pos, *nx;
struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
- /* clean out active buffers */
+ /* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
head = &d->factive[i];
- list_for_each_safe(pos, nx, head) {
- f = list_entry(pos, struct frame, head);
- list_del(pos);
- if (f->buf) {
- f->buf->nframesout--;
- aoe_failbuf(d, f->buf);
- }
- aoe_freetframe(f);
- }
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
}
+ head = &d->rexmitq;
+ list_for_each_safe(pos, nx, head)
+ downdev_frame(pos);
+
/* reset window dressings */
tt = d->targets;
- te = tt + NTARGETS;
+ te = tt + d->ntargets;
for (; tt < te && (t = *tt); tt++) {
- t->maxout = t->nframes;
+ aoecmd_wreset(t);
t->nout = 0;
}
/* clean out the in-process request (if any) */
aoe_failip(d);
- d->htgt = NULL;
/* fast fail all pending I/O */
if (d->blkq) {
@@ -218,12 +235,48 @@ aoedev_downdev(struct aoedev *d)
set_capacity(d->gd, 0);
}
+/* return whether the user asked for this particular
+ * device to be flushed
+ */
+static int
+user_req(char *s, size_t slen, struct aoedev *d)
+{
+ char *p;
+ size_t lim;
+
+ if (!d->gd)
+ return 0;
+ p = strrchr(d->gd->disk_name, '/');
+ if (!p)
+ p = d->gd->disk_name;
+ else
+ p += 1;
+ lim = sizeof(d->gd->disk_name);
+ lim -= p - d->gd->disk_name;
+ if (slen < lim)
+ lim = slen;
+
+ return !strncmp(s, p, lim);
+}
+
static void
-aoedev_freedev(struct aoedev *d)
+freedev(struct aoedev *d)
{
struct aoetgt **t, **e;
+ int freeing = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ d->flags |= DEVFL_FREEING;
+ freeing = 1;
+ }
+ spin_unlock_irqrestore(&d->lock, flags);
+ if (!freeing)
+ return;
- cancel_work_sync(&d->work);
+ del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
@@ -231,61 +284,113 @@ aoedev_freedev(struct aoedev *d)
blk_cleanup_queue(d->blkq);
}
t = d->targets;
- e = t + NTARGETS;
+ e = t + d->ntargets;
for (; t < e && *t; t++)
freetgt(d, *t);
if (d->bufpool)
mempool_destroy(d->bufpool);
skbpoolfree(d);
minor_free(d->sysminor);
- kfree(d);
+
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags |= DEVFL_FREED;
+ spin_unlock_irqrestore(&d->lock, flags);
}
-int
-aoedev_flush(const char __user *str, size_t cnt)
+enum flush_parms {
+ NOT_EXITING = 0,
+ EXITING = 1,
+};
+
+static int
+flush(const char __user *str, size_t cnt, int exiting)
{
ulong flags;
struct aoedev *d, **dd;
- struct aoedev *rmd = NULL;
char buf[16];
int all = 0;
+ int specified = 0; /* flush a specific device */
+ unsigned int skipflags;
+
+ skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;
- if (cnt >= 3) {
+ if (!exiting && cnt >= 3) {
if (cnt > sizeof buf)
cnt = sizeof buf;
if (copy_from_user(buf, str, cnt))
return -EFAULT;
all = !strncmp(buf, "all", 3);
+ if (!all)
+ specified = 1;
}
+ flush_scheduled_work();
+ /* pass one: without sleeping, do aoedev_downdev */
spin_lock_irqsave(&devlist_lock, flags);
- dd = &devlist;
- while ((d = *dd)) {
+ for (d = devlist; d; d = d->next) {
spin_lock(&d->lock);
- if ((!all && (d->flags & DEVFL_UP))
- || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
+ if (exiting) {
+ /* unconditionally take each device down */
+ } else if (specified) {
+ if (!user_req(buf, cnt, d))
+ goto cont;
+ } else if ((!all && (d->flags & DEVFL_UP))
+ || d->flags & skipflags
|| d->nopen
- || d->ref) {
- spin_unlock(&d->lock);
- dd = &d->next;
- continue;
- }
- *dd = d->next;
+ || d->ref)
+ goto cont;
+
aoedev_downdev(d);
d->flags |= DEVFL_TKILL;
+cont:
spin_unlock(&d->lock);
- d->next = rmd;
- rmd = d;
}
spin_unlock_irqrestore(&devlist_lock, flags);
- while ((d = rmd)) {
- rmd = d->next;
- del_timer_sync(&d->timer);
- aoedev_freedev(d); /* must be able to sleep */
+
+ /* pass two: call freedev, which might sleep,
+ * for aoedevs marked with DEVFL_TKILL
+ */
+restart:
+ spin_lock_irqsave(&devlist_lock, flags);
+ for (d = devlist; d; d = d->next) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL
+ && !(d->flags & DEVFL_FREEING)) {
+ spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&devlist_lock, flags);
+ freedev(d);
+ goto restart;
+ }
+ spin_unlock(&d->lock);
}
+
+ /* pass three: remove aoedevs marked with DEVFL_FREED */
+ for (dd = &devlist, d = *dd; d; d = *dd) {
+ struct aoedev *doomed = NULL;
+
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_FREED) {
+ *dd = d->next;
+ doomed = d;
+ } else {
+ dd = &d->next;
+ }
+ spin_unlock(&d->lock);
+ if (doomed)
+ kfree(doomed->targets);
+ kfree(doomed);
+ }
+ spin_unlock_irqrestore(&devlist_lock, flags);
+
return 0;
}
+int
+aoedev_flush(const char __user *str, size_t cnt)
+{
+ return flush(str, cnt, NOT_EXITING);
+}
+
/* This has been confirmed to occur once with Tms=3*1000 due to the
* driver changing link and not processing its transmit ring. The
* problem is hard enough to solve by returning an error that I'm
@@ -332,13 +437,20 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
struct aoedev *d;
int i;
ulong flags;
- ulong sysminor;
+ ulong sysminor = 0;
spin_lock_irqsave(&devlist_lock, flags);
for (d=devlist; d; d=d->next)
if (d->aoemajor == maj && d->aoeminor == min) {
+ spin_lock(&d->lock);
+ if (d->flags & DEVFL_TKILL) {
+ spin_unlock(&d->lock);
+ d = NULL;
+ goto out;
+ }
d->ref++;
+ spin_unlock(&d->lock);
break;
}
if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
@@ -346,6 +458,13 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
if (!d)
goto out;
+ d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
+ if (!d->targets) {
+ kfree(d);
+ d = NULL;
+ goto out;
+ }
+ d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
@@ -359,10 +478,12 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ref = 1;
for (i = 0; i < NFACTIVE; i++)
INIT_LIST_HEAD(&d->factive[i]);
+ INIT_LIST_HEAD(&d->rexmitq);
d->sysminor = sysminor;
d->aoemajor = maj;
d->aoeminor = min;
- d->mintimer = MINTIMER;
+ d->rttavg = RTTAVG_INIT;
+ d->rttdev = RTTDEV_INIT;
d->next = devlist;
devlist = d;
out:
@@ -396,21 +517,9 @@ freetgt(struct aoedev *d, struct aoetgt *t)
void
aoedev_exit(void)
{
- struct aoedev *d;
- ulong flags;
-
+ flush_scheduled_work();
aoe_flush_iocq();
- while ((d = devlist)) {
- devlist = d->next;
-
- spin_lock_irqsave(&d->lock, flags);
- aoedev_downdev(d);
- d->flags |= DEVFL_TKILL;
- spin_unlock_irqrestore(&d->lock, flags);
-
- del_timer_sync(&d->timer);
- aoedev_freedev(d);
- }
+ flush(NULL, 0, EXITING);
}
int __init
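One subtle change in minor_get_static() above: the static minor returned is now n * AOE_PARTITIONS rather than n, so each shelf/slot pair reserves a full run of partition minors. The arithmetic works out as in the sketch below, with AOE_PARTITIONS assumed to be 16 (its usual value in aoe.h).

/* Sketch of the static minor-number mapping after this patch.  Each
 * shelf gets NPERSHELF slots and each slot now reserves AOE_PARTITIONS
 * minors.  AOE_PARTITIONS = 16 is an assumed value from aoe.h.
 */
#include <stdio.h>

enum {
	NPERSHELF = 16,		/* from minor_get_static() above */
	AOE_PARTITIONS = 16,	/* assumed */
};

static long sysminor(long shelf, int slot)
{
	long n = shelf * NPERSHELF + slot;

	return n * AOE_PARTITIONS;	/* matches "*sysminor = n * AOE_PARTITIONS" */
}

int main(void)
{
	printf("e0.0 -> sysminor %ld\n", sysminor(0, 0));	/* 0 */
	printf("e1.2 -> sysminor %ld\n", sysminor(1, 2));	/* 288 */
	return 0;
}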
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 04793c2c701b..4b987c2fefbe 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -105,7 +105,7 @@ aoe_init(void)
aoechr_exit();
chr_fail:
aoedev_exit();
-
+
printk(KERN_INFO "aoe: initialisation failure.\n");
return ret;
}
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 162c6471275c..71d3ea8d3006 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -31,7 +31,7 @@ enum {
static char aoe_iflist[IFLISTSZ];
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
-MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"");
+MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");
static wait_queue_head_t txwq;
static struct ktstate kts;
@@ -52,13 +52,18 @@ static struct sk_buff_head skbtxq;
/* enters with txlock held */
static int
-tx(void)
+tx(void) __must_hold(&txlock)
{
struct sk_buff *skb;
+ struct net_device *ifp;
while ((skb = skb_dequeue(&skbtxq))) {
spin_unlock_irq(&txlock);
- dev_queue_xmit(skb);
+ ifp = skb->dev;
+ if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
+ pr_warn("aoe: packet could not be sent on %s. %s\n",
+ ifp ? ifp->name : "netif",
+ "consider increasing tx_queue_len");
spin_lock_irq(&txlock);
}
return 0;
@@ -119,8 +124,8 @@ aoenet_xmit(struct sk_buff_head *queue)
}
}
-/*
- * (1) len doesn't include the header by default. I want this.
+/*
+ * (1) len doesn't include the header by default. I want this.
*/
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index ca83f96756ad..ade58bc8f3c4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -41,8 +41,9 @@
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
@@ -180,8 +181,8 @@ static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
sector_t total_size,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *);
-static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
+static void cciss_interrupt_mode(ctlr_info_t *);
+static int cciss_enter_simple_mode(struct ctlr_info *h);
static void start_io(ctlr_info_t *h);
static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
__u8 page_code, unsigned char scsi3addr[],
@@ -198,14 +199,13 @@ static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
static inline u32 next_command(ctlr_info_t *h);
-static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
+static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
-static __devinit int write_driver_ver_to_cfgtable(
- CfgTable_struct __iomem *cfgtable);
+static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -549,7 +549,7 @@ static const struct file_operations cciss_proc_fops = {
.write = cciss_proc_write,
};
-static void __devinit cciss_procinit(ctlr_info_t *h)
+static void cciss_procinit(ctlr_info_t *h)
{
struct proc_dir_entry *pde;
@@ -978,8 +978,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h)
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
if (i == h->nr_cmds)
return NULL;
- } while (test_and_set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ } while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
c = h->cmd_pool + i;
memset(c, 0, sizeof(CommandList_struct));
cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
@@ -1046,8 +1045,7 @@ static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
h->nr_frees++;
}
@@ -2664,8 +2662,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
return status;
}
-static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
- u8 reset_type)
+static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
CommandList_struct *c;
int return_status;
@@ -3920,7 +3918,7 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
+static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
{
int i;
@@ -3934,8 +3932,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
}
}
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
- u32 use_short_tags)
+static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
{
/* This is a bit complicated. There are 8 registers on
* the controller which we write to to tell it 8 different
@@ -4001,7 +3998,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
" performant mode\n");
}
-static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
{
__u32 trans_support;
@@ -4063,7 +4060,7 @@ clean_up:
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
+static void cciss_interrupt_mode(ctlr_info_t *h)
{
#ifdef CONFIG_PCI_MSI
int err;
@@ -4109,7 +4106,7 @@ default_int_mode:
return;
}
-static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
@@ -4135,8 +4132,8 @@ static inline bool cciss_board_disabled(ctlr_info_t *h)
return ((command & PCI_COMMAND_MEMORY) == 0);
}
-static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
+static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
{
int i;
@@ -4152,8 +4149,8 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready)
+static int cciss_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
#define BOARD_READY 1
#define BOARD_NOT_READY 0
{
@@ -4180,9 +4177,9 @@ static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
+static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
@@ -4196,7 +4193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
return 0;
}
-static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
+static int cciss_find_cfgtables(ctlr_info_t *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4225,7 +4222,7 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
return 0;
}
-static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
+static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
@@ -4246,7 +4243,7 @@ static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
-static void __devinit cciss_find_board_params(ctlr_info_t *h)
+static void cciss_find_board_params(ctlr_info_t *h)
{
cciss_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
@@ -4268,10 +4265,7 @@ static void __devinit cciss_find_board_params(ctlr_info_t *h)
static inline bool CISS_signature_present(ctlr_info_t *h)
{
- if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
- (readb(&h->cfgtable->Signature[1]) != 'I') ||
- (readb(&h->cfgtable->Signature[2]) != 'S') ||
- (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
@@ -4308,7 +4302,7 @@ static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
}
-static int __devinit cciss_pci_init(ctlr_info_t *h)
+static int cciss_pci_init(ctlr_info_t *h)
{
int prod_index, err;
@@ -4428,7 +4422,8 @@ static void free_hba(ctlr_info_t *h)
}
/* Send a message CDB to the firmware. */
-static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type)
+static int cciss_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
{
typedef struct {
CommandListHeader_struct CommandHeader;
@@ -4575,14 +4570,13 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
return 0;
}
-static __devinit void init_driver_version(char *driver_version, int len)
+static void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
}
-static __devinit int write_driver_ver_to_cfgtable(
- CfgTable_struct __iomem *cfgtable)
+static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
@@ -4598,8 +4592,8 @@ static __devinit int write_driver_ver_to_cfgtable(
return 0;
}
-static __devinit void read_driver_ver_from_cfgtable(
- CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
+static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
+ unsigned char *driver_ver)
{
int i;
@@ -4607,8 +4601,7 @@ static __devinit void read_driver_ver_from_cfgtable(
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
-static __devinit int controller_reset_failed(
- CfgTable_struct __iomem *cfgtable)
+static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
@@ -4631,7 +4624,7 @@ static __devinit int controller_reset_failed(
/* This does a hard reset of the controller using PCI power management
* states or using the doorbell register. */
-static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4776,7 +4769,7 @@ unmap_vaddr:
return rc;
}
-static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
+static int cciss_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
@@ -4810,10 +4803,9 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
return 0;
}
-static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
+static int cciss_allocate_cmd_pool(ctlr_info_t *h)
{
- h->cmd_pool_bits = kmalloc(
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
+ h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
sizeof(unsigned long), GFP_KERNEL);
h->cmd_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(CommandList_struct),
@@ -4830,7 +4822,7 @@ static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
return 0;
}
-static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
+static int cciss_allocate_scatterlists(ctlr_info_t *h)
{
int i;
@@ -4898,7 +4890,7 @@ static int cciss_request_irq(ctlr_info_t *h,
return -1;
}
-static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
+static int cciss_kdump_soft_reset(ctlr_info_t *h)
{
if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
@@ -4957,8 +4949,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
* stealing all these major device numbers.
* returns the number of block devices registered.
*/
-static int __devinit cciss_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int i;
int j = 0;
@@ -5068,9 +5059,7 @@ reinit_after_soft_reset:
pci_set_drvdata(pdev, h);
/* command and error info recs zeroed out before
they are used */
- memset(h->cmd_pool_bits, 0,
- DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
- * sizeof(unsigned long));
+ bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
h->num_luns = 0;
h->highest_lun = -1;
@@ -5214,7 +5203,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
free_irq(h->intr[h->intr_mode], h);
}
-static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
+static int cciss_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
@@ -5236,7 +5225,7 @@ static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
}
-static void __devexit cciss_remove_one(struct pci_dev *pdev)
+static void cciss_remove_one(struct pci_dev *pdev)
{
ctlr_info_t *h;
int i, j;
@@ -5315,7 +5304,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
static struct pci_driver cciss_pci_driver = {
.name = "cciss",
.probe = cciss_init_one,
- .remove = __devexit_p(cciss_remove_one),
+ .remove = cciss_remove_one,
.id_table = cciss_pci_device_id, /* id_table */
.shutdown = cciss_shutdown,
};
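The cciss portion is mostly __devinit/__devexit removal, but cmd_alloc()/cmd_free() also switch from hand-rolled word-and-mask arithmetic to the bitmap helpers (BITS_TO_LONGS, bitmap_zero, test_and_set_bit, clear_bit). The user-space sketch below reproduces that slot-allocator pattern with plain C equivalents so it compiles on its own; unlike the kernel helpers, these stand-ins are not atomic.

/* User-space analog of the cciss command-slot bitmap after the cleanup
 * above.  Plain C stand-ins replace find_first_zero_bit(),
 * test_and_set_bit() and clear_bit(); none of this is atomic here.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bits;
static size_t nr_cmds = 100;

static int slot_alloc(void)
{
	size_t i;

	for (i = 0; i < nr_cmds; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(bits[i / BITS_PER_LONG] & mask)) {	/* find_first_zero_bit */
			bits[i / BITS_PER_LONG] |= mask;	/* test_and_set_bit */
			return (int) i;
		}
	}
	return -1;	/* pool exhausted */
}

static void slot_free(int i)
{
	bits[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));	/* clear_bit */
}

int main(void)
{
	bits = calloc(BITS_TO_LONGS(nr_cmds), sizeof(unsigned long)); /* bitmap_zero */
	if (!bits)
		return 1;
	int a = slot_alloc(), b = slot_alloc();
	printf("allocated slots %d and %d\n", a, b);
	slot_free(a);
	printf("slot %d again: %d\n", a, slot_alloc());
	free(bits);
	return 0;
}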
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 9125bbeacd4d..3f087133a25a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -320,7 +320,7 @@ static void release_io_mem(ctlr_info_t *c)
c->io_mem_length = 0;
}
-static void __devexit cpqarray_remove_one(int i)
+static void cpqarray_remove_one(int i)
{
int j;
char buff[4];
@@ -352,7 +352,7 @@ static void __devexit cpqarray_remove_one(int i)
free_hba(i);
}
-static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
+static void cpqarray_remove_one_pci(struct pci_dev *pdev)
{
int i;
ctlr_info_t *tmp_ptr;
@@ -377,7 +377,7 @@ static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
/* removing an instance that was not removed automatically..
* must be an eisa card.
*/
-static void __devexit cpqarray_remove_one_eisa (int i)
+static void cpqarray_remove_one_eisa(int i)
{
if (hba[i] == NULL) {
printk(KERN_ERR "cpqarray: controller %d appears to have"
@@ -388,7 +388,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
}
/* pdev is NULL for eisa */
-static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
{
struct request_queue *q;
int j;
@@ -505,8 +505,8 @@ Enomem4:
return -1;
}
-static int __devinit cpqarray_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cpqarray_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int i;
@@ -536,7 +536,7 @@ static int __devinit cpqarray_init_one( struct pci_dev *pdev,
static struct pci_driver cpqarray_pci_driver = {
.name = "cpqarray",
.probe = cpqarray_init_one,
- .remove = __devexit_p(cpqarray_remove_one_pci),
+ .remove = cpqarray_remove_one_pci,
.id_table = cpqarray_pci_device_id,
};
@@ -742,7 +742,7 @@ __setup("smart2=", cpqarray_setup);
/*
* Find an EISA controller's signature. Set up an hba if we find it.
*/
-static int __devinit cpqarray_eisa_detect(void)
+static int cpqarray_eisa_detect(void)
{
int i=0, j;
__u32 board_id;
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index df0983787390..7845bd6ee414 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -2,13 +2,14 @@
# DRBD device driver configuration
#
-comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
- depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
+comment "DRBD disabled because PROC_FS or INET not selected"
+ depends on PROC_FS='n' || INET='n'
config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
- depends on PROC_FS && INET && CONNECTOR
+ depends on PROC_FS && INET
select LRU_CACHE
+ select LIBCRC32C
default n
help
@@ -58,7 +59,8 @@ config DRBD_FAULT_INJECTION
32 data read
64 read ahead
128 kmalloc of bitmap
- 256 allocation of EE (epoch_entries)
+ 256 allocation of peer_requests
+ 512 insert data corruption on receiving side
fault_devs: bitmask of minor numbers
fault_rate: frequency in percent
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index 0d3f337ff5ff..8b450338075e 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,5 +1,7 @@
drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
+drbd-y += drbd_interval.o drbd_state.o
+drbd-y += drbd_nla.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 3fbef018ce55..92510f8ad013 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -24,21 +24,73 @@
*/
#include <linux/slab.h>
+#include <linux/crc32c.h>
#include <linux/drbd.h>
+#include <linux/drbd_limits.h>
+#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
-/* We maintain a trivial checksum in our on disk activity log.
- * With that we can ensure correct operation even when the storage
- * device might do a partial (last) sector write while losing power.
- */
-struct __packed al_transaction {
- u32 magic;
- u32 tr_number;
- struct __packed {
- u32 pos;
- u32 extent; } updates[1 + AL_EXTENTS_PT];
- u32 xor_sum;
+
+enum al_transaction_types {
+ AL_TR_UPDATE = 0,
+ AL_TR_INITIALIZED = 0xffff
+};
+/* all fields on disc in big endian */
+struct __packed al_transaction_on_disk {
+ /* don't we all like magic */
+ __be32 magic;
+
+ /* to identify the most recent transaction block
+ * in the on disk ring buffer */
+ __be32 tr_number;
+
+ /* checksum on the full 4k block, with this field set to 0. */
+ __be32 crc32c;
+
+ /* type of transaction, special transaction types like:
+ * purge-all, set-all-idle, set-all-active, ... to-be-defined
+ * see also enum al_transaction_types */
+ __be16 transaction_type;
+
+ /* we currently allow only a few thousand extents,
+ * so 16bit will be enough for the slot number. */
+
+ /* how many updates in this transaction */
+ __be16 n_updates;
+
+ /* maximum slot number, "al-extents" in drbd.conf speak.
+ * Having this in each transaction should make reconfiguration
+ * of that parameter easier. */
+ __be16 context_size;
+
+ /* slot number the context starts with */
+ __be16 context_start_slot_nr;
+
+ /* Some reserved bytes. Expected usage is a 64bit counter of
+ * sectors-written since device creation, and other data supporting
+ * generation-tag usage */
+ __be32 __reserved[4];
+
+ /* --- 36 bytes used --- */
+
+ /* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
+ * in one transaction, then use the remaining bytes in the 4k block for
+ * context information. "Flexible" number of updates per transaction
+ * does not help, as we have to account for the case when all update
+ * slots are used anyways, so it would only complicate code without
+ * additional benefit.
+ */
+ __be16 update_slot_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* but the extent number is 32bit, which at an extent size of 4 MiB
+ * allows covering device sizes of up to 2**54 bytes (16 PiB) */
+ __be32 update_extent_nr[AL_UPDATES_PER_TRANSACTION];
+
+ /* --- 420 bytes used (36 + 64*6) --- */
+
+ /* 4096 - 420 = 3676 = 919 * 4 */
+ __be32 context[AL_CONTEXT_PER_TRANSACTION];
};
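/*
 * A minimal sketch of how the crc32c field above is meant to be checked,
 * assuming (as _al_write_transaction() below does) that the checksum covers
 * the full 4 KiB block with the crc32c field itself set to zero.  The helper
 * name is hypothetical and only for illustration.
 */
static bool al_transaction_valid(struct al_transaction_on_disk *b)
{
	u32 expected = be32_to_cpu(b->crc32c);
	u32 crc;

	b->crc32c = 0;				/* field is zero while checksumming */
	crc = crc32c(0, b, 4096);
	b->crc32c = cpu_to_be32(expected);	/* restore for the caller */

	return b->magic == cpu_to_be32(DRBD_AL_MAGIC) && crc == expected;
}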
struct update_odbm_work {
@@ -48,22 +100,11 @@ struct update_odbm_work {
struct update_al_work {
struct drbd_work w;
- struct lc_element *al_ext;
struct completion event;
- unsigned int enr;
- /* if old_enr != LC_FREE, write corresponding bitmap sector, too */
- unsigned int old_enr;
-};
-
-struct drbd_atodb_wait {
- atomic_t count;
- struct completion io_done;
- struct drbd_conf *mdev;
- int error;
+ int err;
};
-
-int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+static int al_write_transaction(struct drbd_conf *mdev);
void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
@@ -82,22 +123,24 @@ void drbd_md_put_buffer(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
-static bool md_io_allowed(struct drbd_conf *mdev)
-{
- enum drbd_disk_state ds = mdev->state.disk;
- return ds >= D_NEGOTIATING || ds == D_ATTACHING;
-}
-
-void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
unsigned int *done)
{
- long dt = bdev->dc.disk_timeout * HZ / 10;
+ long dt;
+
+ rcu_read_lock();
+ dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
+ rcu_read_unlock();
+ dt = dt * HZ / 10;
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
- if (dt == 0)
+ dt = wait_event_timeout(mdev->misc_wait,
+ *done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+ if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ }
}
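/*
 * The dt * HZ / 10 conversion above implies that disk_timeout is stored in
 * tenths of a second, with zero meaning "wait forever".  A small worked
 * sketch of that conversion; the configured value is an assumption.
 */
void example_disk_timeout_conversion(void)
{
	long disk_timeout = 30;			/* assumed: 3.0 seconds */
	long dt = disk_timeout * HZ / 10;	/* 3000 jiffies when HZ == 1000 */

	if (dt == 0)				/* 0 is treated as "no timeout" */
		dt = MAX_SCHEDULE_TIMEOUT;
}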
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
@@ -106,7 +149,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
int rw, int size)
{
struct bio *bio;
- int ok;
+ int err;
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
@@ -118,8 +161,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
- ok = (bio_add_page(bio, page, size, 0) == size);
- if (!ok)
+ err = -EIO;
+ if (bio_add_page(bio, page, size, 0) != size)
goto out;
bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
@@ -127,7 +170,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
- ok = 0;
+ err = -ENODEV;
goto out;
}
@@ -137,86 +180,47 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
- ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
+ wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
+ if (bio_flagged(bio, BIO_UPTODATE))
+ err = mdev->md_io.error;
out:
bio_put(bio);
- return ok;
+ return err;
}
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
- int logical_block_size, mask, ok;
- int offset = 0;
+ int err;
struct page *iop = mdev->md_io_page;
D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
- logical_block_size = bdev_logical_block_size(bdev->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- /* in case logical_block_size != 512 [ s390 only? ] */
- if (logical_block_size != MD_SECTOR_SIZE) {
- mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
- D_ASSERT(mask == 1 || mask == 3 || mask == 7);
- D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
- offset = sector & mask;
- sector = sector & ~mask;
- iop = mdev->md_io_tmpp;
-
- if (rw & WRITE) {
- /* these are GFP_KERNEL pages, pre-allocated
- * on device initialization */
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
- READ, logical_block_size);
-
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
- "READ [logical_block_size!=512]) failed!\n",
- (unsigned long long)sector);
- return 0;
- }
-
- memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
- }
- }
+ dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+ current->comm, current->pid, __func__,
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
if (sector < drbd_md_first_sector(bdev) ||
- sector > drbd_md_last_sector(bdev))
+ sector + 7 > drbd_md_last_sector(bdev))
dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
- if (unlikely(!ok)) {
- dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- return 0;
- }
-
- if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
- void *p = page_address(mdev->md_io_page);
- void *hp = page_address(mdev->md_io_tmpp);
-
- memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
+ err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+ if (err) {
+ dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
}
-
- return ok;
+ return err;
}
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
struct lc_element *tmp;
- unsigned long al_flags = 0;
int wake;
spin_lock_irq(&mdev->al_lock);
@@ -231,76 +235,92 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
return NULL;
}
}
- al_ext = lc_get(mdev->act_log, enr);
- al_flags = mdev->act_log->flags;
+ al_ext = lc_get(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
-
- /*
- if (!al_ext) {
- if (al_flags & LC_STARVING)
- dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
- if (al_flags & LC_DIRTY)
- dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
- }
- */
-
return al_ext;
}
-void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
- struct lc_element *al_ext;
- struct update_al_work al_work;
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
+ bool locked = false;
+
+ D_ASSERT(first <= last);
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
- wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
+ for (enr = first; enr <= last; enr++)
+ wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+
+ /* Serialize multiple transactions.
+ * This uses test_and_set_bit, memory barrier is implicit.
+ */
+ wait_event(mdev->al_wait,
+ mdev->act_log->pending_changes == 0 ||
+ (locked = lc_try_lock_for_transaction(mdev->act_log)));
- if (al_ext->lc_number != enr) {
+ if (locked) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
* recurses into generic_make_request(), which
* disallows recursion, bios being serialized on the
* current->bio_tail list now.
* we have to delegate updates to the activity log
* to the worker thread. */
- init_completion(&al_work.event);
- al_work.al_ext = al_ext;
- al_work.enr = enr;
- al_work.old_enr = al_ext->lc_number;
- al_work.w.cb = w_al_write_transaction;
- drbd_queue_work_front(&mdev->data.work, &al_work.w);
- wait_for_completion(&al_work.event);
-
- mdev->al_writ_cnt++;
-
- spin_lock_irq(&mdev->al_lock);
- lc_changed(mdev->act_log, al_ext);
- spin_unlock_irq(&mdev->al_lock);
+
+ /* Double check: it may have been committed by someone else,
+ * while we have been waiting for the lock. */
+ if (mdev->act_log->pending_changes) {
+ bool write_al_updates;
+
+ rcu_read_lock();
+ write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+ rcu_read_unlock();
+
+ if (write_al_updates) {
+ al_write_transaction(mdev);
+ mdev->al_writ_cnt++;
+ }
+
+ spin_lock_irq(&mdev->al_lock);
+ /* FIXME
+ if (err)
+ we need an "lc_cancel" here;
+ */
+ lc_committed(mdev->act_log);
+ spin_unlock_irq(&mdev->al_lock);
+ }
+ lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
}
}
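/*
 * Worked sketch of the first/last computation in drbd_al_begin_io() above,
 * assuming AL_EXTENT_SHIFT == 22 (the 4 MiB extent size mentioned in the
 * on-disk transaction struct comment), i.e. a shift of 13 from 512-byte
 * sectors to extent numbers.  Values are chosen for illustration only.
 */
void example_extent_range(void)
{
	sector_t sector = 8184;		/* 4 KiB below the first extent boundary */
	unsigned int size = 16384;	/* 16 KiB bio == 32 sectors */

	unsigned first = sector >> (22 - 9);				/* 8184 >> 13 == 0 */
	unsigned last = (sector + (size >> 9) - 1) >> (22 - 9);		/* 8215 >> 13 == 1 */

	/* first == 0, last == 1: the bio straddles an extent boundary,
	 * so drbd_al_begin_io() activates two extents in one go. */
	(void)first; (void)last;
}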
-void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
- unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+ /* for bios crossing activity log extent boundaries,
+ * we may need to activate two extents in one go */
+ unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+ unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+ unsigned enr;
struct lc_element *extent;
unsigned long flags;
+ D_ASSERT(first <= last);
spin_lock_irqsave(&mdev->al_lock, flags);
- extent = lc_find(mdev->act_log, enr);
-
- if (!extent) {
- spin_unlock_irqrestore(&mdev->al_lock, flags);
- dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
- return;
+ for (enr = first; enr <= last; enr++) {
+ extent = lc_find(mdev->act_log, enr);
+ if (!extent) {
+ dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+ continue;
+ }
+ lc_put(mdev->act_log, extent);
}
-
- if (lc_put(mdev->act_log, extent) == 0)
- wake_up(&mdev->al_wait);
-
spin_unlock_irqrestore(&mdev->al_lock, flags);
+ wake_up(&mdev->al_wait);
}
#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
@@ -326,296 +346,148 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
return rs_enr >>
/* bit to page */
((PAGE_SHIFT + 3) -
- /* al extent number to bit */
+ /* resync extent number to bit */
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
-int
-w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int
+_al_write_transaction(struct drbd_conf *mdev)
{
- struct update_al_work *aw = container_of(w, struct update_al_work, w);
- struct lc_element *updated = aw->al_ext;
- const unsigned int new_enr = aw->enr;
- const unsigned int evicted = aw->old_enr;
- struct al_transaction *buffer;
+ struct al_transaction_on_disk *buffer;
+ struct lc_element *e;
sector_t sector;
- int i, n, mx;
- unsigned int extent_nr;
- u32 xor_sum = 0;
+ int i, mx;
+ unsigned extent_nr;
+ unsigned crc = 0;
+ int err = 0;
if (!get_ldev(mdev)) {
- dev_err(DEV,
- "disk is %s, cannot start al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
- return 1;
+ dev_err(DEV, "disk is %s, cannot start al transaction\n",
+ drbd_disk_str(mdev->state.disk));
+ return -EIO;
}
- /* do we have to do a bitmap write, first?
- * TODO reduce maximum latency:
- * submit both bios, then wait for both,
- * instead of doing two synchronous sector writes.
- * For now, we must not write the transaction,
- * if we cannot write out the bitmap of the evicted extent. */
- if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
- drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
/* The bitmap write may have failed, causing a state change. */
if (mdev->state.disk < D_INCONSISTENT) {
dev_err(DEV,
- "disk is %s, cannot write al transaction (-%d +%d)\n",
- drbd_disk_str(mdev->state.disk), evicted, new_enr);
- complete(&((struct update_al_work *)w)->event);
+ "disk is %s, cannot write al transaction\n",
+ drbd_disk_str(mdev->state.disk));
put_ldev(mdev);
- return 1;
+ return -EIO;
}
buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
if (!buffer) {
dev_err(DEV, "disk failed while waiting for md_io buffer\n");
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return -ENODEV;
}
- buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
- n = lc_index_of(mdev->act_log, updated);
+ i = 0;
+
+ /* Even though no one can start to change this list
+ * once we set the LC_LOCKED -- from drbd_al_begin_io(),
+ * lc_try_lock_for_transaction() --, someone may still
+ * be in the process of changing it. */
+ spin_lock_irq(&mdev->al_lock);
+ list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
+ if (i == AL_UPDATES_PER_TRANSACTION) {
+ i++;
+ break;
+ }
+ buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
+ buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
+ if (e->lc_number != LC_FREE)
+ drbd_bm_mark_for_writeout(mdev,
+ al_extent_to_bm_page(e->lc_number));
+ i++;
+ }
+ spin_unlock_irq(&mdev->al_lock);
+ BUG_ON(i > AL_UPDATES_PER_TRANSACTION);
- buffer->updates[0].pos = cpu_to_be32(n);
- buffer->updates[0].extent = cpu_to_be32(new_enr);
+ buffer->n_updates = cpu_to_be16(i);
+ for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
+ buffer->update_slot_nr[i] = cpu_to_be16(-1);
+ buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
+ }
- xor_sum ^= new_enr;
+ buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
+ buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);
- mx = min_t(int, AL_EXTENTS_PT,
+ mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
mdev->act_log->nr_elements - mdev->al_tr_cycle);
for (i = 0; i < mx; i++) {
unsigned idx = mdev->al_tr_cycle + i;
extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
- buffer->updates[i+1].pos = cpu_to_be32(idx);
- buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
- xor_sum ^= extent_nr;
- }
- for (; i < AL_EXTENTS_PT; i++) {
- buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
- buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
- xor_sum ^= LC_FREE;
+ buffer->context[i] = cpu_to_be32(extent_nr);
}
- mdev->al_tr_cycle += AL_EXTENTS_PT;
+ for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
+ buffer->context[i] = cpu_to_be32(LC_FREE);
+
+ mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
mdev->al_tr_cycle = 0;
- buffer->xor_sum = cpu_to_be32(xor_sum);
-
sector = mdev->ldev->md.md_offset
- + mdev->ldev->md.al_offset + mdev->al_tr_pos;
-
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
- drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ + mdev->ldev->md.al_offset
+ + mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
+ crc = crc32c(0, buffer, 4096);
+ buffer->crc32c = cpu_to_be32(crc);
- D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
- mdev->al_tr_number++;
+ if (drbd_bm_write_hinted(mdev))
+ err = -EIO;
+ /* drbd_chk_io_error done already */
+ else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ err = -EIO;
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+ } else {
+ /* advance ringbuffer position and transaction counter */
+ mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
+ mdev->al_tr_number++;
+ }
drbd_md_put_buffer(mdev);
-
- complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
- return 1;
+ return err;
}
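/*
 * Besides the up-to-64 update slots, each transaction above also carries a
 * sliding window of up to AL_CONTEXT_PER_TRANSACTION (919, per the
 * "4096 - 420 = 3676 = 919 * 4" comment) context slots starting at
 * al_tr_cycle, so the whole activity log is rewritten over a few consecutive
 * transactions.  Sketch of that bookkeeping; nr_elements (the "al-extents"
 * setting) is an assumed value for illustration.
 */
void example_al_context_window(void)
{
	const unsigned nr_elements = 1024;	/* assumption */
	const unsigned per_tr = 919;		/* AL_CONTEXT_PER_TRANSACTION */
	unsigned al_tr_cycle = 512;

	/* this transaction carries context for slots 512..1023,
	 * mirroring the min_t() window computation above */
	unsigned mx = min(per_tr, nr_elements - al_tr_cycle);

	al_tr_cycle += per_tr;
	if (al_tr_cycle >= nr_elements)
		al_tr_cycle = 0;	/* the next transaction starts over at slot 0 */
	(void)mx;
}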
-/**
- * drbd_al_read_tr() - Read a single transaction from the on disk activity log
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- * @b: pointer to an al_transaction.
- * @index: On disk slot of the transaction to read.
- *
- * Returns -1 on IO error, 0 on checksum error and 1 upon success.
- */
-static int drbd_al_read_tr(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev,
- struct al_transaction *b,
- int index)
-{
- sector_t sector;
- int rv, i;
- u32 xor_sum = 0;
-
- sector = bdev->md.md_offset + bdev->md.al_offset + index;
-
- /* Dont process error normally,
- * as this is done before disk is attached! */
- if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
- return -1;
-
- rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);
-
- for (i = 0; i < AL_EXTENTS_PT + 1; i++)
- xor_sum ^= be32_to_cpu(b->updates[i].extent);
- rv &= (xor_sum == be32_to_cpu(b->xor_sum));
- return rv;
-}
-
-/**
- * drbd_al_read_log() - Restores the activity log from its on disk representation.
- * @mdev: DRBD device.
- * @bdev: Block device to read form.
- *
- * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
- */
-int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+static int w_al_write_transaction(struct drbd_work *w, int unused)
{
- struct al_transaction *buffer;
- int i;
- int rv;
- int mx;
- int active_extents = 0;
- int transactions = 0;
- int found_valid = 0;
- int from = 0;
- int to = 0;
- u32 from_tnr = 0;
- u32 to_tnr = 0;
- u32 cnr;
-
- mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
-
- /* lock out all other meta data io for now,
- * and make sure the page is mapped.
- */
- buffer = drbd_md_get_buffer(mdev);
- if (!buffer)
- return 0;
-
- /* Find the valid transaction in the log */
- for (i = 0; i <= mx; i++) {
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- if (rv == 0)
- continue;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
- cnr = be32_to_cpu(buffer->tr_number);
-
- if (++found_valid == 1) {
- from = i;
- to = i;
- from_tnr = cnr;
- to_tnr = cnr;
- continue;
- }
- if ((int)cnr - (int)from_tnr < 0) {
- D_ASSERT(from_tnr - cnr + i - from == mx+1);
- from = i;
- from_tnr = cnr;
- }
- if ((int)cnr - (int)to_tnr > 0) {
- D_ASSERT(cnr - to_tnr == i - to);
- to = i;
- to_tnr = cnr;
- }
- }
-
- if (!found_valid) {
- dev_warn(DEV, "No usable activity log found.\n");
- drbd_md_put_buffer(mdev);
- return 1;
- }
-
- /* Read the valid transactions.
- * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
- i = from;
- while (1) {
- int j, pos;
- unsigned int extent_nr;
- unsigned int trn;
-
- rv = drbd_al_read_tr(mdev, bdev, buffer, i);
- ERR_IF(rv == 0) goto cancel;
- if (rv == -1) {
- drbd_md_put_buffer(mdev);
- return 0;
- }
-
- trn = be32_to_cpu(buffer->tr_number);
-
- spin_lock_irq(&mdev->al_lock);
-
- /* This loop runs backwards because in the cyclic
- elements there might be an old version of the
- updated element (in slot 0). So the element in slot 0
- can overwrite old versions. */
- for (j = AL_EXTENTS_PT; j >= 0; j--) {
- pos = be32_to_cpu(buffer->updates[j].pos);
- extent_nr = be32_to_cpu(buffer->updates[j].extent);
-
- if (extent_nr == LC_FREE)
- continue;
-
- lc_set(mdev->act_log, extent_nr, pos);
- active_extents++;
- }
- spin_unlock_irq(&mdev->al_lock);
-
- transactions++;
-
-cancel:
- if (i == to)
- break;
- i++;
- if (i > mx)
- i = 0;
- }
-
- mdev->al_tr_number = to_tnr+1;
- mdev->al_tr_pos = to;
- if (++mdev->al_tr_pos >
- div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
- mdev->al_tr_pos = 0;
-
- /* ok, we are done with it */
- drbd_md_put_buffer(mdev);
+ struct update_al_work *aw = container_of(w, struct update_al_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
- dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
- transactions, active_extents);
+ err = _al_write_transaction(mdev);
+ aw->err = err;
+ complete(&aw->event);
- return 1;
+ return err != -EIO ? err : 0;
}
-/**
- * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
- * @mdev: DRBD device.
- */
-void drbd_al_apply_to_bm(struct drbd_conf *mdev)
+/* Calls from worker context (see w_restart_disk_io()) need to write the
+ transaction directly. Others came through generic_make_request(),
+ those need to delegate it to the worker. */
+static int al_write_transaction(struct drbd_conf *mdev)
{
- unsigned int enr;
- unsigned long add = 0;
- char ppb[10];
- int i, tmp;
-
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ struct update_al_work al_work;
- for (i = 0; i < mdev->act_log->nr_elements; i++) {
- enr = lc_element_by_index(mdev->act_log, i)->lc_number;
- if (enr == LC_FREE)
- continue;
- tmp = drbd_bm_ALe_set_all(mdev, enr);
- dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
- add += tmp;
- }
+ if (current == mdev->tconn->worker.task)
+ return _al_write_transaction(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
+ init_completion(&al_work.event);
+ al_work.w.cb = w_al_write_transaction;
+ al_work.w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+ wait_for_completion(&al_work.event);
- dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
- ppsize(ppb, Bit2KB(add)));
+ return al_work.err;
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
@@ -645,7 +517,7 @@ void drbd_al_shrink(struct drbd_conf *mdev)
struct lc_element *al_ext;
int i;
- D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));
+ D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(mdev->act_log, i);
@@ -657,15 +529,17 @@ void drbd_al_shrink(struct drbd_conf *mdev)
wake_up(&mdev->al_wait);
}
-static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
+ struct drbd_conf *mdev = w->mdev;
+ struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
if (!get_ldev(mdev)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
kfree(udw);
- return 1;
+ return 0;
}
drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
@@ -683,9 +557,9 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
break;
}
}
- drbd_bcast_sync_progress(mdev);
+ drbd_bcast_event(mdev, &sib);
- return 1;
+ return 0;
}
@@ -755,7 +629,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
}
ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count;
- lc_changed(mdev->resync, &ext->lce);
+ /* we don't keep a persistent log of the resync lru,
+ * we can commit any change right away. */
+ lc_committed(mdev->resync);
}
lc_put(mdev->resync, &ext->lce);
/* no race, we are within the al_lock! */
@@ -767,7 +643,8 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
if (udw) {
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
- drbd_queue_work_front(&mdev->data.work, &udw->w);
+ udw->w.mdev = mdev;
+ drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
@@ -813,16 +690,22 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int wake_up = 0;
unsigned long flags;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
}
+
+ if (!get_ldev(mdev))
+ return; /* no disk, no metadata, no bitmap to clear bits in */
+
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ goto out;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
@@ -830,7 +713,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* round up start sector, round down end sector. we make sure we only
* clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
if (unlikely(esector < BM_SECT_PER_BIT-1))
- return;
+ goto out;
if (unlikely(esector == (nr_sectors-1)))
ebnr = lbnr;
else
@@ -838,14 +721,14 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
if (sbnr > ebnr)
- return;
+ goto out;
/*
* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors.
*/
count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
- if (count && get_ldev(mdev)) {
+ if (count) {
drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
spin_lock_irqsave(&mdev->al_lock, flags);
drbd_try_clear_on_disk_bm(mdev, sector, count, true);
@@ -854,8 +737,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 /* just wake_up unconditionally now, various lc_changed(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
wake_up = 1;
- put_ldev(mdev);
}
+out:
+ put_ldev(mdev);
if (wake_up)
wake_up(&mdev->al_wait);
}
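/*
 * Worked example of the "round up start sector, round down end sector" rule
 * above, assuming BM_SECT_PER_BIT == 8 (4 KiB bitmap blocks, 512-byte
 * sectors) and BM_SECT_TO_BIT(x) == ((x) >> 3); the end-of-range rounding is
 * our reading of that comment, written out explicitly here.
 */
void example_in_sync_rounding(void)
{
	sector_t sector = 13;				/* starts mid bitmap block */
	int size = 9216;				/* 18 sectors */
	sector_t esector = sector + (size >> 9) - 1;	/* == 30 */

	unsigned long sbnr = (sector + 8 - 1) >> 3;	/* round start up  -> bit 2 */
	unsigned long ebnr = (esector - (8 - 1)) >> 3;	/* round end down  -> bit 2 */

	/* only bit 2 (sectors 16..23) is fully covered and may be cleared;
	 * the partially covered 4 KiB blocks at either end stay out of sync. */
	(void)sbnr; (void)ebnr;
}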
@@ -871,7 +755,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
- unsigned long sbnr, ebnr, lbnr, flags;
+ unsigned long sbnr, ebnr, flags;
sector_t esector, nr_sectors;
unsigned int enr, count = 0;
struct lc_element *e;
@@ -880,7 +764,7 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
if (size == 0)
return 0;
- if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
@@ -892,12 +776,10 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors)
+ if (!expect(sector < nr_sectors))
goto out;
- ERR_IF(esector >= nr_sectors)
- esector = (nr_sectors-1);
-
- lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
/* we set it out of sync,
* we do not need to round anything here */
@@ -940,7 +822,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wakeup = 1;
}
if (bm_ext->lce.refcnt == 1)
@@ -956,7 +838,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
}
return bm_ext;
@@ -964,26 +846,12 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
- struct lc_element *al_ext;
- int rv = 0;
+ int rv;
spin_lock_irq(&mdev->al_lock);
- if (unlikely(enr == mdev->act_log->new_number))
- rv = 1;
- else {
- al_ext = lc_find(mdev->act_log, enr);
- if (al_ext) {
- if (al_ext->refcnt)
- rv = 1;
- }
- }
+ rv = lc_is_used(mdev->act_log, enr);
spin_unlock_irq(&mdev->al_lock);
- /*
- if (unlikely(rv)) {
- dev_info(DEV, "Delaying sync read until app's write is done\n");
- }
- */
return rv;
}
@@ -1113,13 +981,13 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n");
- BUG_ON(rs_flags & LC_DIRTY);
+ BUG_ON(rs_flags & LC_LOCKED);
goto try_again;
}
if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0;
- lc_changed(mdev->resync, &bm_ext->lce);
+ lc_committed(mdev->resync);
wake_up(&mdev->al_wait);
D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
}
@@ -1130,8 +998,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
}
check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
- if (unlikely(al_enr+i == mdev->act_log->new_number))
- goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i))
goto try_again;
}
@@ -1266,7 +1132,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
return;
@@ -1274,8 +1140,10 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
nr_sectors = drbd_get_capacity(mdev->this_bdev);
esector = sector + (size >> 9) - 1;
- ERR_IF(sector >= nr_sectors) return;
- ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+ if (!expect(sector < nr_sectors))
+ return;
+ if (!expect(esector < nr_sectors))
+ esector = nr_sectors - 1;
lbnr = BM_SECT_TO_BIT(nr_sectors-1);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d84566496746..8dc29502dc08 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -119,13 +119,9 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
if (!__ratelimit(&drbd_ratelimit_state))
return;
dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- func, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ func, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
@@ -142,13 +138,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
if (trylock_failed) {
dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
- current == mdev->receiver.task ? "receiver" :
- current == mdev->asender.task ? "asender" :
- current == mdev->worker.task ? "worker" : current->comm,
- why, b->bm_why ?: "?",
- b->bm_task == mdev->receiver.task ? "receiver" :
- b->bm_task == mdev->asender.task ? "asender" :
- b->bm_task == mdev->worker.task ? "worker" : "?");
+ drbd_task_to_thread_name(mdev->tconn, current),
+ why, b->bm_why ?: "?",
+ drbd_task_to_thread_name(mdev->tconn, b->bm_task));
mutex_lock(&b->bm_change);
}
if (BM_LOCKED_MASK & b->bm_flags)
@@ -196,6 +188,9 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * we check whether bits have been cleared since the last IO. */
#define BM_PAGE_LAZY_WRITEOUT 28
+/* pages marked with this "HINT" will be considered for writeout
+ * on activity log transactions */
+#define BM_PAGE_HINT_WRITEOUT 27
/* store_page_idx uses non-atomic assignment. It is only used directly after
* allocating the page. All other bm_set_page_* and bm_clear_page_* need to
@@ -227,8 +222,7 @@ static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
- clear_bit(BM_PAGE_IO_LOCK, addr);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
wake_up(&mdev->bitmap->bm_io_wait);
}
@@ -246,6 +240,27 @@ static void bm_set_page_need_writeout(struct page *page)
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
+/**
+ * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
+ * @mdev: DRBD device.
+ * @page_nr: the bitmap page to mark with the "hint" flag
+ *
+ * From within an activity log transaction, we mark a few pages with these
+ * hints, then call drbd_bm_write_hinted(), which will only write out changed
+ * pages which are flagged with this mark.
+ */
+void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
+{
+ struct page *page;
+ if (page_nr >= mdev->bitmap->bm_number_of_pages) {
+ dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
+ page_nr, (int)mdev->bitmap->bm_number_of_pages);
+ return;
+ }
+ page = mdev->bitmap->bm_pages[page_nr];
+ set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
+}
+
static int bm_test_page_unchanged(struct page *page)
{
volatile const unsigned long *addr = &page_private(page);
@@ -373,14 +388,16 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
return old_pages;
/* Trying kmalloc first, falling back to vmalloc.
- * GFP_KERNEL is ok, as this is done when a lower level disk is
- * "attached" to the drbd. Context is receiver thread or cqueue
- * thread. As we have no disk yet, we are not in the IO path,
- * not even the IO path of the peer. */
+ * GFP_NOIO, as this is called while drbd IO is "suspended",
+ * and during resize or attach on diskless Primary,
+ * we must not block on IO to ourselves.
+ * Context is receiver thread or dmsetup. */
bytes = sizeof(struct page *)*want;
- new_pages = kzalloc(bytes, GFP_KERNEL);
+ new_pages = kzalloc(bytes, GFP_NOIO);
if (!new_pages) {
- new_pages = vzalloc(bytes);
+ new_pages = __vmalloc(bytes,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
if (!new_pages)
return NULL;
vmalloced = 1;
@@ -390,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
for (; i < want; i++) {
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
bm_vk_free(new_pages, vmalloced);
@@ -439,7 +456,8 @@ int drbd_bm_init(struct drbd_conf *mdev)
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
- ERR_IF(!mdev->bitmap) return 0;
+ if (!expect(mdev->bitmap))
+ return 0;
return mdev->bitmap->bm_dev_capacity;
}
@@ -447,7 +465,8 @@ sector_t drbd_bm_capacity(struct drbd_conf *mdev)
*/
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
- ERR_IF (!mdev->bitmap) return;
+ if (!expect(mdev->bitmap))
+ return;
bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
kfree(mdev->bitmap);
@@ -610,7 +629,8 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
int err = 0, growing;
int opages_vmalloced;
- ERR_IF(!b) return -ENOMEM;
+ if (!expect(b))
+ return -ENOMEM;
drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
@@ -732,8 +752,10 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long s;
unsigned long flags;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
s = b->bm_set;
@@ -756,8 +778,10 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
size_t drbd_bm_words(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
return b->bm_words;
}
@@ -765,7 +789,8 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return 0;
+ if (!expect(b))
+ return 0;
return b->bm_bits;
}
@@ -786,8 +811,10 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
if (number == 0)
return;
WARN_ON(offset >= b->bm_words);
@@ -831,8 +858,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
end = offset + number;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
if ((offset >= b->bm_words) ||
@@ -860,8 +889,10 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
void drbd_bm_set_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0xff, b->bm_words);
@@ -874,8 +905,10 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
- ERR_IF(!b) return;
- ERR_IF(!b->bm_pages) return;
+ if (!expect(b))
+ return;
+ if (!expect(b->bm_pages))
+ return;
spin_lock_irq(&b->bm_lock);
bm_memset(b, 0, 0, b->bm_words);
@@ -889,7 +922,8 @@ struct bm_aio_ctx {
unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
-#define BM_WRITE_ALL_PAGES 2
+#define BM_AIO_WRITE_HINTED 2
+#define BM_WRITE_ALL_PAGES 4
int error;
struct kref kref;
};
@@ -977,17 +1011,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- void *src, *dest;
page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
- dest = kmap_atomic(page);
- src = kmap_atomic(b->bm_pages[page_nr]);
- memcpy(dest, src, PAGE_SIZE);
- kunmap_atomic(src);
- kunmap_atomic(dest);
+ copy_highpage(page, b->bm_pages[page_nr]);
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
-
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
@@ -1060,6 +1088,11 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
break;
if (rw & WRITE) {
+ if ((flags & BM_AIO_WRITE_HINTED) &&
+ !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
+ &page_private(b->bm_pages[i])))
+ continue;
+
if (!(flags & BM_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
@@ -1088,13 +1121,15 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
- dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
- rw == WRITE ? "WRITE" : "READ",
- count, jiffies - now);
+ /* summary for global bitmap IO */
+ if (flags == 0)
+ dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+ rw == WRITE ? "WRITE" : "READ",
+ count, jiffies - now);
if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
@@ -1103,7 +1138,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
if (atomic_read(&ctx->in_flight))
- err = -EIO; /* Disk failed during IO... */
+ err = -EIO; /* Disk timeout/force-detach during IO... */
now = jiffies;
if (rw == WRITE) {
@@ -1115,8 +1150,9 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
now = b->bm_set;
- dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
- ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ if (flags == 0)
+ dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+ ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
@@ -1179,9 +1215,17 @@ int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
+/**
+ * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
+ * @mdev: DRBD device.
+ */
+int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
+}
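/*
 * Usage sketch of the hint mechanism: mark the few bitmap pages touched by
 * an activity-log transaction with drbd_bm_mark_for_writeout(), then let
 * drbd_bm_write_hinted() submit only pages that carry the hint *and* have
 * actually changed.  The wrapper below is hypothetical, for illustration.
 */
static int example_write_hinted_pages(struct drbd_conf *mdev,
				      const int *page_nrs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		drbd_bm_mark_for_writeout(mdev, page_nrs[i]);

	/* hinted but unchanged pages are skipped by bm_rw() */
	return drbd_bm_write_hinted(mdev);
}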
/**
- * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
+ * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
* @mdev: DRBD device.
* @idx: bitmap page index
*
@@ -1222,11 +1266,11 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
if (ctx->error)
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- /* that should force detach, so the in memory bitmap will be
+ /* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
@@ -1289,8 +1333,10 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
struct drbd_bitmap *b = mdev->bitmap;
unsigned long i = DRBD_END_OF_BITMAP;
- ERR_IF(!b) return i;
- ERR_IF(!b->bm_pages) return i;
+ if (!expect(b))
+ return i;
+ if (!expect(b->bm_pages))
+ return i;
spin_lock_irq(&b->bm_lock);
if (BM_DONT_TEST & b->bm_flags)
@@ -1391,8 +1437,10 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
struct drbd_bitmap *b = mdev->bitmap;
int c = 0;
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
@@ -1423,13 +1471,21 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
+ int changed = 0;
unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
- b->bm_set += BITS_PER_LONG - bits;
+ changed += BITS_PER_LONG - bits;
}
kunmap_atomic(paddr);
+ if (changed) {
+ /* We only need lazy writeout, the information is still in the
+ * remote bitmap as well, and is reconstructed during the next
+ * bitmap exchange, if lost locally due to a crash. */
+ bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
+ b->bm_set += changed;
+ }
}
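/*
 * Worked example of the delta accounting in the loop above, assuming
 * 64-bit longs: the number of newly set bits per word is BITS_PER_LONG
 * minus the word's previous population count.
 */
void example_full_word_delta(void)
{
	unsigned long word = 0xffUL;		/* 8 bits already set */
	int before = hweight_long(word);	/* 8 */
	int delta;

	word = ~0UL;				/* set the full word */
	delta = 64 - before;			/* bm_set grows by 56 */
	(void)word; (void)delta;
}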
/* Same thing as drbd_bm_set_bits,
@@ -1524,8 +1580,10 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
unsigned long *p_addr;
int i;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1559,8 +1617,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* robust in case we screwed up elsewhere, in that case pretend there
* was one dirty bit in the requested area, so we won't try to do a
* local read there (no bitmap probably implies no disk) */
- ERR_IF(!b) return 1;
- ERR_IF(!b->bm_pages) return 1;
+ if (!expect(b))
+ return 1;
+ if (!expect(b->bm_pages))
+ return 1;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1573,11 +1633,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
bm_unmap(p_addr);
p_addr = bm_map_pidx(b, idx);
}
- ERR_IF (bitnr >= b->bm_bits) {
- dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
- } else {
+ if (expect(bitnr < b->bm_bits))
c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
- }
+ else
+ dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
}
if (p_addr)
bm_unmap(p_addr);
@@ -1607,8 +1666,10 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
unsigned long flags;
unsigned long *p_addr, *bm;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
+ if (!expect(b))
+ return 0;
+ if (!expect(b->bm_pages))
+ return 0;
spin_lock_irqsave(&b->bm_lock, flags);
if (BM_DONT_TEST & b->bm_flags)
@@ -1630,47 +1691,3 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
spin_unlock_irqrestore(&b->bm_lock, flags);
return count;
}
-
-/* Set all bits covered by the AL-extent al_enr.
- * Returns number of bits changed. */
-unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
-{
- struct drbd_bitmap *b = mdev->bitmap;
- unsigned long *p_addr, *bm;
- unsigned long weight;
- unsigned long s, e;
- int count, i, do_now;
- ERR_IF(!b) return 0;
- ERR_IF(!b->bm_pages) return 0;
-
- spin_lock_irq(&b->bm_lock);
- if (BM_DONT_SET & b->bm_flags)
- bm_print_lock_info(mdev);
- weight = b->bm_set;
-
- s = al_enr * BM_WORDS_PER_AL_EXT;
- e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
- /* assert that s and e are on the same page */
- D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
- == s >> (PAGE_SHIFT - LN2_BPL + 3));
- count = 0;
- if (s < b->bm_words) {
- i = do_now = e-s;
- p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
- bm = p_addr + MLPP(s);
- while (i--) {
- count += hweight_long(*bm);
- *bm = -1UL;
- bm++;
- }
- bm_unmap(p_addr);
- b->bm_set += do_now*BITS_PER_LONG - count;
- if (e == b->bm_words)
- b->bm_set -= bm_clear_surplus(b);
- } else {
- dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
- }
- weight = b->bm_set - weight;
- spin_unlock_irq(&b->bm_lock);
- return weight;
-}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b953cc7c9c00..6b51afa1aae1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -39,9 +39,13 @@
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
+#include <linux/drbd_genl_api.h>
+#include <linux/drbd.h>
+#include "drbd_state.h"
#ifdef __CHECKER__
# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
@@ -61,7 +65,6 @@
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
-extern unsigned int cn_idx;
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
@@ -86,34 +89,44 @@ extern char usermode_helper[];
*/
#define DRBD_SIGKILL SIGHUP
-/* All EEs on the free list should have ID_VACANT (== 0)
- * freshly allocated EEs get !ID_VACANT (== 1)
- * so if it says "cannot dereference null pointer at address 0x00000001",
- * it is most likely one of these :( */
-
#define ID_IN_SYNC (4711ULL)
#define ID_OUT_OF_SYNC (4712ULL)
-
#define ID_SYNCER (-1ULL)
-#define ID_VACANT 0
-#define is_syncer_block_id(id) ((id) == ID_SYNCER)
+
#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
struct drbd_conf;
+struct drbd_tconn;
/* to shorten dev_warn(DEV, "msg"); and relatives statements */
#define DEV (disk_to_dev(mdev->vdisk))
+#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
+ printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
+#define conn_alert(TCONN, FMT, ARGS...) conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
+#define conn_crit(TCONN, FMT, ARGS...) conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
+#define conn_err(TCONN, FMT, ARGS...) conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
+#define conn_warn(TCONN, FMT, ARGS...) conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
+#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
+#define conn_info(TCONN, FMT, ARGS...) conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
+#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
+
#define D_ASSERT(exp) if (!(exp)) \
dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
-#define ERR_IF(exp) if (({ \
- int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
- __func__, #exp, __FILE__, __LINE__); \
- _b; \
- }))
+/**
+ * expect - Make an assertion
+ *
+ * Unlike the assert macro, this macro returns a boolean result.
+ */
+#define expect(exp) ({ \
+ bool _bool = (exp); \
+ if (!_bool) \
+ dev_err(DEV, "ASSERTION %s FAILED in %s\n", \
+ #exp, __func__); \
+ _bool; \
+ })
/* Defines to control fault insertion */
enum {
@@ -150,15 +163,12 @@ drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
-/* drbd_meta-data.c (still in drbd_main.c) */
-/* 4th incarnation of the disk layout. */
-#define DRBD_MD_MAGIC (DRBD_MAGIC+4)
-
-extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;
+extern struct idr minors; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
/* on the wire */
-enum drbd_packets {
+enum drbd_packet {
/* receiver (data socket) */
P_DATA = 0x00,
P_DATA_REPLY = 0x01, /* Response to P_DATA_REQUEST */
@@ -186,7 +196,7 @@ enum drbd_packets {
P_RECV_ACK = 0x15, /* Used in protocol B */
P_WRITE_ACK = 0x16, /* Used in protocol C */
P_RS_WRITE_ACK = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
- P_DISCARD_ACK = 0x18, /* Used in proto C, two-primaries conflict detection */
+ P_SUPERSEDED = 0x18, /* Used in proto C, two-primaries conflict detection */
P_NEG_ACK = 0x19, /* Sent if local disk is unusable */
P_NEG_DREPLY = 0x1a, /* Local disk is broken... */
P_NEG_RS_DREPLY = 0x1b, /* Local disk is broken... */
@@ -207,77 +217,23 @@ enum drbd_packets {
P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
P_OUT_OF_SYNC = 0x28, /* Mark as out of sync (Outrunning), data socket */
P_RS_CANCEL = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
+ P_CONN_ST_CHG_REQ = 0x2a, /* data sock: Connection wide state request */
+ P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */
+ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */
+ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
- P_MAX_CMD = 0x2A,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
/* special command ids for handshake */
- P_HAND_SHAKE_M = 0xfff1, /* First Packet on the MetaSock */
- P_HAND_SHAKE_S = 0xfff2, /* First Packet on the Socket */
+ P_INITIAL_META = 0xfff1, /* First Packet on the MetaSock */
+ P_INITIAL_DATA = 0xfff2, /* First Packet on the Socket */
- P_HAND_SHAKE = 0xfffe /* FIXED for the next century! */
+ P_CONNECTION_FEATURES = 0xfffe /* FIXED for the next century! */
};
-static inline const char *cmdname(enum drbd_packets cmd)
-{
- /* THINK may need to become several global tables
- * when we want to support more than
- * one PRO_VERSION */
- static const char *cmdnames[] = {
- [P_DATA] = "Data",
- [P_DATA_REPLY] = "DataReply",
- [P_RS_DATA_REPLY] = "RSDataReply",
- [P_BARRIER] = "Barrier",
- [P_BITMAP] = "ReportBitMap",
- [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
- [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
- [P_UNPLUG_REMOTE] = "UnplugRemote",
- [P_DATA_REQUEST] = "DataRequest",
- [P_RS_DATA_REQUEST] = "RSDataRequest",
- [P_SYNC_PARAM] = "SyncParam",
- [P_SYNC_PARAM89] = "SyncParam89",
- [P_PROTOCOL] = "ReportProtocol",
- [P_UUIDS] = "ReportUUIDs",
- [P_SIZES] = "ReportSizes",
- [P_STATE] = "ReportState",
- [P_SYNC_UUID] = "ReportSyncUUID",
- [P_AUTH_CHALLENGE] = "AuthChallenge",
- [P_AUTH_RESPONSE] = "AuthResponse",
- [P_PING] = "Ping",
- [P_PING_ACK] = "PingAck",
- [P_RECV_ACK] = "RecvAck",
- [P_WRITE_ACK] = "WriteAck",
- [P_RS_WRITE_ACK] = "RSWriteAck",
- [P_DISCARD_ACK] = "DiscardAck",
- [P_NEG_ACK] = "NegAck",
- [P_NEG_DREPLY] = "NegDReply",
- [P_NEG_RS_DREPLY] = "NegRSDReply",
- [P_BARRIER_ACK] = "BarrierAck",
- [P_STATE_CHG_REQ] = "StateChgRequest",
- [P_STATE_CHG_REPLY] = "StateChgReply",
- [P_OV_REQUEST] = "OVRequest",
- [P_OV_REPLY] = "OVReply",
- [P_OV_RESULT] = "OVResult",
- [P_CSUM_RS_REQUEST] = "CsumRSRequest",
- [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
- [P_COMPRESSED_BITMAP] = "CBitmap",
- [P_DELAY_PROBE] = "DelayProbe",
- [P_OUT_OF_SYNC] = "OutOfSync",
- [P_MAX_CMD] = NULL,
- };
-
- if (cmd == P_HAND_SHAKE_M)
- return "HandShakeM";
- if (cmd == P_HAND_SHAKE_S)
- return "HandShakeS";
- if (cmd == P_HAND_SHAKE)
- return "HandShake";
- if (cmd >= P_MAX_CMD)
- return "Unknown";
- return cmdnames[cmd];
-}
+extern const char *cmdname(enum drbd_packet cmd);
/* for sending/receiving the bitmap,
* possibly in some encoding scheme */
@@ -337,37 +293,24 @@ struct p_header80 {
u32 magic;
u16 command;
u16 length; /* bytes of data after this header */
- u8 payload[0];
} __packed;
/* Header for big packets, Used for data packets exceeding 64kB */
struct p_header95 {
u16 magic; /* use DRBD_MAGIC_BIG here */
u16 command;
- u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */
- u8 payload[0];
+ u32 length;
} __packed;
-union p_header {
- struct p_header80 h80;
- struct p_header95 h95;
-};
-
-/*
- * short commands, packets without payload, plain p_header:
- * P_PING
- * P_PING_ACK
- * P_BECOME_SYNC_TARGET
- * P_BECOME_SYNC_SOURCE
- * P_UNPLUG_REMOTE
- */
+struct p_header100 {
+ u32 magic;
+ u16 volume;
+ u16 command;
+ u32 length;
+ u32 pad;
+} __packed;
-/*
- * commands with out-of-struct payload:
- * P_BITMAP (no additional fields)
- * P_DATA, P_DATA_REPLY (see p_data)
- * P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
- */
+extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
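/*
 * The three header layouts above are 8, 8 and 16 bytes respectively;
 * drbd_header_size() presumably selects between them based on the
 * negotiated protocol version.  A plausible sketch under that assumption;
 * the version threshold is not taken from this patch.
 */
unsigned int example_header_size(int agreed_pro_version)
{
	if (agreed_pro_version >= 100)
		return sizeof(struct p_header100);	/* 16 bytes */
	return sizeof(struct p_header80);		/* 8 bytes, like p_header95 */
}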
/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER 1 /* deprecated */
@@ -377,9 +320,10 @@ union p_header {
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
+#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
struct p_data {
- union p_header head;
u64 sector; /* 64 bits sector number */
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
@@ -390,21 +334,18 @@ struct p_data {
* commands which share a struct:
* p_block_ack:
* P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
- * P_DISCARD_ACK (proto C, two-primaries conflict detection)
+ * P_SUPERSEDED (proto C, two-primaries conflict detection)
* p_block_req:
* P_DATA_REQUEST, P_RS_DATA_REQUEST
*/
struct p_block_ack {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
u32 seq_num;
} __packed;
-
struct p_block_req {
- struct p_header80 head;
u64 sector;
u64 block_id;
u32 blksize;
@@ -413,59 +354,52 @@ struct p_block_req {
/*
* commands with their own struct for additional fields:
- * P_HAND_SHAKE
+ * P_CONNECTION_FEATURES
* P_BARRIER
* P_BARRIER_ACK
* P_SYNC_PARAM
* ReportParams
*/
-struct p_handshake {
- struct p_header80 head; /* 8 bytes */
+struct p_connection_features {
u32 protocol_min;
u32 feature_flags;
u32 protocol_max;
/* should be more than enough for future enhancements
- * for now, feature_flags and the reserverd array shall be zero.
+ * for now, feature_flags and the reserved array shall be zero.
*/
u32 _pad;
- u64 reserverd[7];
+ u64 reserved[7];
} __packed;
-/* 80 bytes, FIXED for the next century */
struct p_barrier {
- struct p_header80 head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
} __packed;
struct p_barrier_ack {
- struct p_header80 head;
u32 barrier;
u32 set_size;
} __packed;
struct p_rs_param {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* Since protocol version 88 and higher. */
char verify_alg[0];
} __packed;
struct p_rs_param_89 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
} __packed;
struct p_rs_param_95 {
- struct p_header80 head;
- u32 rate;
+ u32 resync_rate;
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
u32 c_plan_ahead;
@@ -475,12 +409,11 @@ struct p_rs_param_95 {
} __packed;
enum drbd_conn_flags {
- CF_WANT_LOSE = 1,
+ CF_DISCARD_MY_DATA = 1,
CF_DRY_RUN = 2,
};
struct p_protocol {
- struct p_header80 head;
u32 protocol;
u32 after_sb_0p;
u32 after_sb_1p;
@@ -494,17 +427,14 @@ struct p_protocol {
} __packed;
struct p_uuids {
- struct p_header80 head;
u64 uuid[UI_EXTENDED_SIZE];
} __packed;
struct p_rs_uuid {
- struct p_header80 head;
u64 uuid;
} __packed;
struct p_sizes {
- struct p_header80 head;
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
@@ -514,18 +444,15 @@ struct p_sizes {
} __packed;
struct p_state {
- struct p_header80 head;
u32 state;
} __packed;
struct p_req_state {
- struct p_header80 head;
u32 mask;
u32 val;
} __packed;
struct p_req_state_reply {
- struct p_header80 head;
u32 retcode;
} __packed;
@@ -539,15 +466,7 @@ struct p_drbd06_param {
u32 bit_map_gen[5];
} __packed;
-struct p_discard {
- struct p_header80 head;
- u64 block_id;
- u32 seq_num;
- u32 pad;
-} __packed;
-
struct p_block_desc {
- struct p_header80 head;
u64 sector;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
@@ -563,7 +482,6 @@ enum drbd_bitmap_code {
};
struct p_compressed_bm {
- struct p_header80 head;
/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
* (encoding & 0x80): polarity (set/unset) of first runlength
* ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
@@ -575,90 +493,22 @@ struct p_compressed_bm {
} __packed;
struct p_delay_probe93 {
- struct p_header80 head;
u32 seq_num; /* sequence number to match the two probe packets */
u32 offset; /* usecs the probe got sent after the reference time point */
} __packed;
-/* DCBP: Drbd Compressed Bitmap Packet ... */
-static inline enum drbd_bitmap_code
-DCBP_get_code(struct p_compressed_bm *p)
-{
- return (enum drbd_bitmap_code)(p->encoding & 0x0f);
-}
-
-static inline void
-DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
-{
- BUG_ON(code & ~0xf);
- p->encoding = (p->encoding & ~0xf) | code;
-}
-
-static inline int
-DCBP_get_start(struct p_compressed_bm *p)
-{
- return (p->encoding & 0x80) != 0;
-}
-
-static inline void
-DCBP_set_start(struct p_compressed_bm *p, int set)
-{
- p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
-}
-
-static inline int
-DCBP_get_pad_bits(struct p_compressed_bm *p)
-{
- return (p->encoding >> 4) & 0x7;
-}
-
-static inline void
-DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
-{
- BUG_ON(n & ~0x7);
- p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
-}
-
-/* one bitmap packet, including the p_header,
- * should fit within one _architecture independend_ page.
- * so we need to use the fixed size 4KiB page size
- * most architectures have used for a long time.
+/*
+ * Bitmap packets need to fit within a single page on the sender and receiver,
+ * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
*/
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
-#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
-#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
-#if (PAGE_SIZE < 4096)
-/* drbd_send_bitmap / receive_bitmap would break horribly */
-#error "PAGE_SIZE too small"
-#endif
-
-union p_polymorph {
- union p_header header;
- struct p_handshake handshake;
- struct p_data data;
- struct p_block_ack block_ack;
- struct p_barrier barrier;
- struct p_barrier_ack barrier_ack;
- struct p_rs_param_89 rs_param_89;
- struct p_rs_param_95 rs_param_95;
- struct p_protocol protocol;
- struct p_sizes sizes;
- struct p_uuids uuids;
- struct p_state state;
- struct p_req_state req_state;
- struct p_req_state_reply req_state_reply;
- struct p_block_req block_req;
- struct p_delay_probe93 delay_probe93;
- struct p_rs_uuid rs_uuid;
- struct p_block_desc block_desc;
-} __packed;
+#define DRBD_SOCKET_BUFFER_SIZE 4096
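The removed BM_PACKET_* macros derived the usable bitmap payload from 4096 minus sizeof(struct p_header80); with only DRBD_SOCKET_BUFFER_SIZE left here, the same sizing is presumably done at the call sites against drbd_header_size(). A minimal sketch under that assumption (bm_words_per_packet is a hypothetical helper, not part of the patch):

	/* Illustrative only: bound the bitmap payload by the fixed 4 KiB buffer,
	 * using the variable header size declared further up in this header. */
	static unsigned int bm_words_per_packet(struct drbd_tconn *tconn)
	{
		unsigned int payload = DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn);

		return payload / sizeof(unsigned long);
	}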
/**********************************************************************/
enum drbd_thread_state {
- None,
- Running,
- Exiting,
- Restarting
+ NONE,
+ RUNNING,
+ EXITING,
+ RESTARTING
};
struct drbd_thread {
@@ -667,8 +517,9 @@ struct drbd_thread {
struct completion stop;
enum drbd_thread_state t_state;
int (*function) (struct drbd_thread *);
- struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
int reset_cpu_mask;
+ char name[9];
};
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
@@ -681,58 +532,54 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
return thi->t_state;
}
-struct drbd_work;
-typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
struct list_head list;
- drbd_work_cb cb;
+ int (*cb)(struct drbd_work *, int cancel);
+ union {
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+ };
};
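Since the callback now receives only the work item, a handler presumably picks the device or connection out of the embedded union instead of taking it as an extra argument. A hedged sketch of that convention (name and body are illustrative, not from this patch):

	/* Illustrative only: a per-device work callback in the new style. */
	static int example_work_cb(struct drbd_work *w, int cancel)
	{
		struct drbd_conf *mdev = w->mdev;	/* or w->tconn for connection-wide work */

		if (cancel)
			return 0;
		/* ... do the actual work against mdev ... */
		return 0;
	}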
-struct drbd_tl_epoch;
+#include "drbd_interval.h"
+
+extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
+
struct drbd_request {
struct drbd_work w;
- struct drbd_conf *mdev;
/* if local IO is not allowed, will be NULL.
* if local IO _is_ allowed, holds the locally submitted bio clone,
* or, after local IO completion, the ERR_PTR(error).
- * see drbd_endio_pri(). */
+ * see drbd_request_endio(). */
struct bio *private_bio;
- struct hlist_node collision;
- sector_t sector;
- unsigned int size;
- unsigned int epoch; /* barrier_nr */
+ struct drbd_interval i;
- /* barrier_nr: used to check on "completion" whether this req was in
+ /* epoch: used to check on "completion" whether this req was in
* the current epoch, and we therefore have to close it,
- * starting a new epoch...
+ * causing a p_barrier packet to be sent, starting a new epoch.
+ *
+ * This corresponds to "barrier" in struct p_barrier[_ack],
+ * and to "barrier_nr" in struct drbd_epoch (and various
+ * comments/function parameters/local variable names).
*/
+ unsigned int epoch;
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
- unsigned long rq_state; /* see comments above _req_mod() */
unsigned long start_time;
-};
-
-struct drbd_tl_epoch {
- struct drbd_work w;
- struct list_head requests; /* requests before */
- struct drbd_tl_epoch *next; /* pointer to the next barrier */
- unsigned int br_number; /* the barriers identifier. */
- int n_writes; /* number of requests attached before this barrier */
-};
-struct drbd_request;
+ /* once it hits 0, we may complete the master_bio */
+ atomic_t completion_ref;
+ /* once it hits 0, we may destroy this drbd_request object */
+ struct kref kref;
-/* These Tl_epoch_entries may be in one of 6 lists:
- active_ee .. data packet being written
- sync_ee .. syncer block being written
- done_ee .. block written, need to send P_WRITE_ACK
- read_ee .. [RS]P_DATA_REQUEST being read
-*/
+ unsigned rq_state; /* see comments above _req_mod() */
+};
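The two counters split the request's lifetime as the comments describe: completion_ref gates when master_bio may be completed, kref gates when the object itself may be freed. A hedged sketch of a put path under that reading (drbd_req_destroy is a hypothetical destructor name, not confirmed by this hunk):

	/* Sketch only: drop one completion reference and one object reference. */
	static void example_req_put(struct drbd_request *req)
	{
		if (atomic_dec_and_test(&req->completion_ref)) {
			/* now allowed to complete req->master_bio */
		}
		kref_put(&req->kref, drbd_req_destroy);	/* hypothetical release function */
	}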
struct drbd_epoch {
+ struct drbd_tconn *tconn;
struct list_head list;
unsigned int barrier_nr;
atomic_t epoch_size; /* increased on every request added. */
@@ -762,17 +609,14 @@ struct digest_info {
void *digest;
};
-struct drbd_epoch_entry {
+struct drbd_peer_request {
struct drbd_work w;
- struct hlist_node collision;
struct drbd_epoch *epoch; /* for writes */
- struct drbd_conf *mdev;
struct page *pages;
atomic_t pending_bios;
- unsigned int size;
+ struct drbd_interval i;
/* see comments on ee flag bits below */
unsigned long flags;
- sector_t sector;
union {
u64 block_id;
struct digest_info *digest;
@@ -793,31 +637,37 @@ enum {
* we need to resubmit without the barrier flag. */
__EE_RESUBMITTED,
- /* we may have several bios per epoch entry.
+ /* we may have several bios per peer request.
* if any of those fail, we set this flag atomically
* from the endio callback */
__EE_WAS_ERROR,
/* This ee has a pointer to a digest instead of a block id */
__EE_HAS_DIGEST,
+
+ /* Conflicting local requests need to be restarted after this request */
+ __EE_RESTART_REQUESTS,
+
+ /* The peer wants a write ACK for this (wire proto C) */
+ __EE_SEND_WRITE_ACK,
+
+ /* Is set when net_conf had two_primaries set while creating this peer_req */
+ __EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
+#define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
+#define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
+#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
-/* global flag bits */
+/* flag bits per mdev */
enum {
- CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
- SIGNAL_ASENDER, /* whether asender wants to be interrupted */
- SEND_PING, /* whether asender should send a ping asap */
-
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
- DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
USE_DEGR_WFC_T, /* degr-wfc-timeout instead of wfc-timeout. */
- CLUSTER_ST_CHANGE, /* Cluster wide state change going on... */
CL_ST_CHG_SUCCESS,
CL_ST_CHG_FAIL,
CRASHED_PRIMARY, /* This node was a crashed primary.
@@ -831,32 +681,18 @@ enum {
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
- WAS_IO_ERROR, /* Local disk failed returned IO error */
+ WAS_IO_ERROR, /* Local disk failed, returned IO error */
+ WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */
FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
- NET_CONGESTED, /* The data socket is congested */
-
- CONFIG_PENDING, /* serialization of (re)configuration requests.
- * if set, also prevents the device from dying */
- DEVICE_DYING, /* device became unconfigured,
- * but worker thread is still handling the cleanup.
- * reconfiguring (nl_disk_conf, nl_net_conf) is dissalowed,
- * while this is set. */
RESIZE_PENDING, /* Size change detected locally, waiting for the response from
* the peer, if it changed there as well. */
- CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
- GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
- STATE_SENT, /* Do not change state/UUIDs while this is set */
-
- CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
- * pending, from drbd worker context.
- * If set, bdi_write_congested() returns true,
- * so shrink_page_list() would not recurse into,
- * and potentially deadlock on, this drbd worker.
- */
+ B_RS_H_DONE, /* Before resync handler done (already executed) */
+ DISCARD_MY_DATA, /* discard_my_data flag per volume */
+ READ_BALANCE_RR,
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -894,24 +730,24 @@ enum bm_flag {
struct drbd_work_queue {
struct list_head q;
- struct semaphore s; /* producers up it, worker down()s it */
spinlock_t q_lock; /* to protect the list. */
+ wait_queue_head_t q_wait;
};
struct drbd_socket {
- struct drbd_work_queue work;
struct mutex mutex;
struct socket *socket;
/* this way we get our
* send/receive buffers off the stack */
- union p_polymorph sbuf;
- union p_polymorph rbuf;
+ void *sbuf;
+ void *rbuf;
};
struct drbd_md {
u64 md_offset; /* sector offset to 'super' block */
u64 la_size_sect; /* last agreed size, unit sectors */
+ spinlock_t uuid_lock;
u64 uuid[UI_SIZE];
u64 device_uuid;
u32 flags;
@@ -921,24 +757,16 @@ struct drbd_md {
s32 bm_offset; /* signed relative sector offset to bitmap */
/* u32 al_nr_extents; important for restoring the AL
- * is stored into sync_conf.al_extents, which in turn
+ * is stored into ldev->dc.al_extents, which in turn
* gets applied to act_log->nr_elements
*/
};
-/* for sync_conf and other types... */
-#define NL_PACKET(name, number, fields) struct name { fields };
-#define NL_INTEGER(pn,pr,member) int member;
-#define NL_INT64(pn,pr,member) __u64 member;
-#define NL_BIT(pn,pr,member) unsigned member:1;
-#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
-#include <linux/drbd_nl.h>
-
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct drbd_md md;
- struct disk_conf dc; /* The user provided config... */
+ struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
sector_t known_size; /* last known size of that backing device */
};
@@ -962,18 +790,116 @@ enum write_ordering_e {
};
struct fifo_buffer {
- int *values;
unsigned int head_index;
unsigned int size;
+ int total; /* sum of all values */
+ int values[0];
+};
+extern struct fifo_buffer *fifo_alloc(int fifo_size);
+
+/* flag bits per tconn */
+enum {
+ NET_CONGESTED, /* The data socket is congested */
+ RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
+ SEND_PING, /* whether asender should send a ping asap */
+ SIGNAL_ASENDER, /* whether asender wants to be interrupted */
+ GOT_PING_ACK, /* set when we receive a ping_ack packet, ping_wait gets woken */
+ CONN_WD_ST_CHG_REQ, /* A cluster wide state change on the connection is active */
+ CONN_WD_ST_CHG_OKAY,
+ CONN_WD_ST_CHG_FAIL,
+ CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
+ CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
+ CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+ * pending, from drbd worker context.
+ * If set, bdi_write_congested() returns true,
+ * so shrink_page_list() would not recurse into,
+ * and potentially deadlock on, this drbd worker.
+ */
+ DISCONNECT_SENT,
+};
+
+struct drbd_tconn { /* is a resource from the config file */
+ char *name; /* Resource name */
+ struct list_head all_tconn; /* linked on global drbd_tconns */
+ struct kref kref;
+ struct idr volumes; /* <tconn, vnr> to mdev mapping */
+ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+ unsigned susp:1; /* IO suspended by user */
+ unsigned susp_nod:1; /* IO suspended because no data */
+ unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
+ struct mutex cstate_mutex; /* Protects graceful disconnects */
+
+ unsigned long flags;
+ struct net_conf *net_conf; /* content protected by rcu */
+ struct mutex conf_update; /* mutex for read-copy-update of net_conf and disk_conf */
+ wait_queue_head_t ping_wait; /* Woken upon reception of a ping, and a state change */
+ struct res_opts res_opts;
+
+ struct sockaddr_storage my_addr;
+ int my_addr_len;
+ struct sockaddr_storage peer_addr;
+ int peer_addr_len;
+
+ struct drbd_socket data; /* data/barrier/cstate/parameter packets */
+ struct drbd_socket meta; /* ping/ack (metadata) packets */
+ int agreed_pro_version; /* actually used protocol version */
+ unsigned long last_received; /* in jiffies, either socket */
+ unsigned int ko_count;
+
+ spinlock_t req_lock;
+
+ struct list_head transfer_log; /* all requests not yet fully processed */
+
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */
+ struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *verify_tfm;
+ void *int_dig_in;
+ void *int_dig_vv;
+
+ /* receiver side */
+ struct drbd_epoch *current_epoch;
+ spinlock_t epoch_lock;
+ unsigned int epochs;
+ enum write_ordering_e write_ordering;
+ atomic_t current_tle_nr; /* transfer log epoch number */
+ unsigned current_tle_writes; /* writes seen within this tl epoch */
+
+ unsigned long last_reconnect_jif;
+ struct drbd_thread receiver;
+ struct drbd_thread worker;
+ struct drbd_thread asender;
+ cpumask_var_t cpu_mask;
+
+ /* sender side */
+ struct drbd_work_queue sender_work;
+
+ struct {
+ /* whether this sender thread
+ * has processed a single write yet. */
+ bool seen_any_write_yet;
+
+ /* Which barrier number to send with the next P_BARRIER */
+ int current_epoch_nr;
+
+ /* how many write requests have been sent
+ * with req->epoch == current_epoch_nr.
+ * If none, no P_BARRIER will be sent. */
+ unsigned current_epoch_writes;
+ } send;
};
struct drbd_conf {
+ struct drbd_tconn *tconn;
+ int vnr; /* volume number within the connection */
+ struct kref kref;
+
/* things that are stored as / read from meta data on disk */
unsigned long flags;
/* configured by drbdsetup */
- struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
- struct syncer_conf sync_conf;
struct drbd_backing_dev *ldev __protected_by(local);
sector_t p_size; /* partner's disk size */
@@ -981,11 +907,7 @@ struct drbd_conf {
struct block_device *this_bdev;
struct gendisk *vdisk;
- struct drbd_socket data; /* data/barrier/cstate/parameter packets */
- struct drbd_socket meta; /* ping/ack (metadata) packets */
- int agreed_pro_version; /* actually used protocol version */
- unsigned long last_received; /* in jiffies, either socket */
- unsigned int ko_count;
+ unsigned long last_reattach_jif;
struct drbd_work resync_work,
unplug_work,
go_diskless,
@@ -1005,10 +927,9 @@ struct drbd_conf {
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
- union drbd_state state;
+ union drbd_dev_state state;
wait_queue_head_t misc_wait;
wait_queue_head_t state_wait; /* upon each state change. */
- wait_queue_head_t net_cnt_wait;
unsigned int send_cnt;
unsigned int recv_cnt;
unsigned int read_cnt;
@@ -1018,17 +939,12 @@ struct drbd_conf {
atomic_t ap_bio_cnt; /* Requests we need to complete */
atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
- atomic_t unacked_cnt; /* Need to send replys for */
+ atomic_t unacked_cnt; /* Need to send replies for */
atomic_t local_cnt; /* Waiting for local completion */
- atomic_t net_cnt; /* Users of net_conf */
- spinlock_t req_lock;
- struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
- struct drbd_tl_epoch *newest_tle;
- struct drbd_tl_epoch *oldest_tle;
- struct list_head out_of_sequence_requests;
- struct list_head barrier_acked_requests;
- struct hlist_head *tl_hash;
- unsigned int tl_hash_s;
+
+ /* Interval tree of pending local requests */
+ struct rb_root read_requests;
+ struct rb_root write_requests;
/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
unsigned long rs_total;
@@ -1048,9 +964,11 @@ struct drbd_conf {
unsigned long rs_mark_time[DRBD_SYNC_MARKS];
/* current index into rs_mark_{left,time} */
int rs_last_mark;
+ unsigned long rs_last_bcast; /* [unit jiffies] */
/* where does the admin want us to start? (sector) */
sector_t ov_start_sector;
+ sector_t ov_stop_sector;
/* where are we now? (sector) */
sector_t ov_position;
/* Start sector of out of sync range (to merge printk reporting). */
@@ -1058,14 +976,7 @@ struct drbd_conf {
/* size of out-of-sync range in sectors. */
sector_t ov_last_oos_size;
unsigned long ov_left; /* in bits */
- struct crypto_hash *csums_tfm;
- struct crypto_hash *verify_tfm;
- unsigned long last_reattach_jif;
- unsigned long last_reconnect_jif;
- struct drbd_thread receiver;
- struct drbd_thread worker;
- struct drbd_thread asender;
struct drbd_bitmap *bitmap;
unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
@@ -1078,29 +989,19 @@ struct drbd_conf {
int open_cnt;
u64 *p_uuid;
- struct drbd_epoch *current_epoch;
- spinlock_t epoch_lock;
- unsigned int epochs;
- enum write_ordering_e write_ordering;
+
struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
- struct list_head done_ee; /* send ack */
- struct list_head read_ee; /* IO in progress (any read) */
+ struct list_head done_ee; /* need to send P_WRITE_ACK */
+ struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
struct list_head net_ee; /* zero-copy network send in progress */
- struct hlist_head *ee_hash; /* is proteced by req_lock! */
- unsigned int ee_hash_s;
-
- /* this one is protected by ee_lock, single thread */
- struct drbd_epoch_entry *last_write_w_barrier;
int next_barrier_nr;
- struct hlist_head *app_reads_hash; /* is proteced by req_lock */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
- struct page *md_io_tmpp; /* for logical_block_size != 512 */
struct drbd_md_io md_io;
atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
@@ -1109,22 +1010,16 @@ struct drbd_conf {
unsigned int al_tr_number;
int al_tr_cycle;
int al_tr_pos; /* position of the next transaction in the journal */
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
- struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
- void *int_dig_out;
- void *int_dig_in;
- void *int_dig_vv;
wait_queue_head_t seq_wait;
atomic_t packet_seq;
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
unsigned long comm_bm_set; /* communicated number of set bits. */
- cpumask_var_t cpu_mask;
struct bm_io_work bm_io_work;
u64 ed_uuid; /* UUID of the exposed data */
- struct mutex state_mutex;
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
char congestion_reason; /* Why we were congested... */
atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -1132,9 +1027,8 @@ struct drbd_conf {
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
int c_sync_rate; /* current resync rate after syncer throttle magic */
- struct fifo_buffer rs_plan_s; /* correction values of resync planer */
+ struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, tconn->conf_update) */
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
- int rs_planed; /* resync sectors already planned */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
@@ -1142,11 +1036,7 @@ struct drbd_conf {
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
- struct drbd_conf *mdev;
-
- mdev = minor < minor_count ? minor_table[minor] : NULL;
-
- return mdev;
+ return (struct drbd_conf *)idr_find(&minors, minor);
}
static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
@@ -1154,29 +1044,9 @@ static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
return mdev->minor;
}
-/* returns 1 if it was successful,
- * returns 0 if there was no data socket.
- * so wherever you are going to use the data.socket, e.g. do
- * if (!drbd_get_data_sock(mdev))
- * return 0;
- * CODE();
- * drbd_put_data_sock(mdev);
- */
-static inline int drbd_get_data_sock(struct drbd_conf *mdev)
-{
- mutex_lock(&mdev->data.mutex);
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (unlikely(mdev->data.socket == NULL)) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
- return 1;
-}
-
-static inline void drbd_put_data_sock(struct drbd_conf *mdev)
+static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
{
- mutex_unlock(&mdev->data.mutex);
+ return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
}
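Both lookups now resolve through an idr; iterating all volumes of a connection presumably uses the standard idr walker. A hedged usage sketch:

	/* Illustrative only: visit every volume (mdev) of a connection. */
	static void example_for_each_volume(struct drbd_tconn *tconn)
	{
		struct drbd_conf *mdev;
		int vnr;

		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			/* operate on mdev, the volume with number vnr */
		}
	}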
/*
@@ -1185,106 +1055,77 @@ static inline void drbd_put_data_sock(struct drbd_conf *mdev)
/* drbd_main.c */
-enum chg_state_flags {
- CS_HARD = 1,
- CS_VERBOSE = 2,
- CS_WAIT_COMPLETE = 4,
- CS_SERIALIZE = 8,
- CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
-};
-
enum dds_flags {
DDSF_FORCED = 1,
DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
-extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
- enum chg_state_flags f,
- union drbd_state mask,
- union drbd_state val);
-extern void drbd_force_state(struct drbd_conf *, union drbd_state,
- union drbd_state);
-extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
- union drbd_state,
- union drbd_state,
- enum chg_state_flags);
-extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
- enum chg_state_flags,
- struct completion *done);
-extern void print_st_err(struct drbd_conf *, union drbd_state,
- union drbd_state, int);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
+extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
#ifdef CONFIG_SMP
-extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
-extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
+extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
+extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
-extern void drbd_free_resources(struct drbd_conf *mdev);
-extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
unsigned int set_size);
-extern void tl_clear(struct drbd_conf *mdev);
-extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
-extern void drbd_free_sock(struct drbd_conf *mdev);
-extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_protocol(struct drbd_conf *mdev);
+extern void tl_clear(struct drbd_tconn *);
+extern void drbd_free_sock(struct drbd_tconn *tconn);
+extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+ void *buf, size_t size, unsigned msg_flags);
+extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+ unsigned);
+
+extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_tconn *tconn);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
+extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
extern int drbd_send_current_state(struct drbd_conf *mdev);
-extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags);
-#define USE_DATA_SOCKET 1
-#define USE_META_SOCKET 0
-extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size);
-extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
- char *data, size_t size);
-extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
-extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
- u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
-extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp);
-extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size);
-extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+extern int drbd_send_sync_param(struct drbd_conf *mdev);
+extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+ u32 set_size);
+extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
+extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp);
+extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size);
+extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id);
-extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e);
+extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
+extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
-extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector,int size,
- void *digest, int digest_size,
- enum drbd_packets cmd);
+extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
+ int size, void *digest, int digest_size,
+ enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
extern int drbd_send_bitmap(struct drbd_conf *mdev);
-extern int _drbd_send_bitmap(struct drbd_conf *mdev);
-extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
+extern void conn_md_sync(struct drbd_tconn *tconn);
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
@@ -1302,33 +1143,52 @@ extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
extern int drbd_bitmap_io(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
char *why, enum bm_flag flags);
+extern int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
-
/* Meta data layout
We reserve a 128MB Block (4k aligned)
* either at the end of the backing device
* or on a separate meta data device. */
-#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
/* The following numbers are sectors */
-#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
-#define MD_AL_MAX_SIZE 64 /* = 32 kb LOG ~ 3776 extents ~ 14 GB Storage */
-/* Allows up to about 3.8TB */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
-
-/* Since the smalles IO unit is usually 512 byte */
-#define MD_SECTOR_SHIFT 9
-#define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
-
-/* activity log */
-#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
-#define AL_EXTENT_SHIFT 22 /* One extent represents 4M Storage */
+/* Allows up to about 3.8TB, so if you want more,
+ * you need to use the "flexible" meta data format. */
+#define MD_RESERVED_SECT (128LU << 11) /* 128 MB, unit sectors */
+#define MD_AL_OFFSET 8 /* 8 Sectors after start of meta area */
+#define MD_AL_SECTORS 64 /* = 32 kB on disk activity log ring buffer */
+#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
+
+/* we do all meta data IO in 4k blocks */
+#define MD_BLOCK_SHIFT 12
+#define MD_BLOCK_SIZE (1<<MD_BLOCK_SHIFT)
+
+/* One activity log extent represents 4M of storage */
+#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
+/* We could make these currently hardcoded constants configurable
+ * variables at create-md time (or even re-configurable at runtime?).
+ * Which will require some more changes to the DRBD "super block"
+ * and attach code.
+ *
+ * updates per transaction:
+ * This many changes to the active set can be logged with one transaction.
+ * This number is arbitrary.
+ * context per transaction:
+ * This many context extent numbers are logged with each transaction.
+ * This number results from the transaction block size (4k), the layout
+ * of the transaction header, and the number of updates per transaction.
+ * See drbd_actlog.c:struct al_transaction_on_disk
+ * */
+#define AL_UPDATES_PER_TRANSACTION 64 // arbitrary
+#define AL_CONTEXT_PER_TRANSACTION 919 // (4096 - 36 - 6*64)/4
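The 919 follows directly from the layout hinted at in the comment: a 4k transaction block, a 36-byte header, and 6 bytes for each of the 64 update slots leave room for 919 32-bit context extent numbers:

	4096 - 36 - 6*64 = 4096 - 36 - 384 = 3676 bytes remaining
	3676 / 4         = 919 extent numbers per transaction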
+
#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
@@ -1364,11 +1224,14 @@ struct bm_extent {
#define SLEEP_TIME (HZ/10)
-#define BM_BLOCK_SHIFT 12 /* 4k per bit */
+/* We do bitmap IO in units of 4k blocks.
+ * We also still have a hardcoded 4k per bit relation. */
+#define BM_BLOCK_SHIFT 12 /* 4k per bit */
#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
-/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
- * per sector of on disk bitmap */
-#define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3) /* = 24 */
+/* mostly arbitrarily set the represented size of one bitmap extent,
+ * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
+ * at 4k per bit resolution) */
+#define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */
#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
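The shift of 24 matches the 512-byte figure in the comment above: at 4k per bit, 512 bytes of on-disk bitmap describe exactly one 16 MiB resync extent:

	512 bytes * 8 bits/byte   = 4096 bits of bitmap
	4096 bits * 4 KiB per bit = 16 MiB = 1 << 24 bytes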
#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
@@ -1436,17 +1299,20 @@ struct bm_extent {
#endif
#endif
-/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
- * With a value of 8 all IO in one 128K block make it to the same slot of the
- * hash table. */
-#define HT_SHIFT 8
-#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
+/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
+ * so for a typical PAGE_CACHE_SIZE of 4k, that is (1<<20) bytes.
+ * Since we may live in a mixed-platform cluster,
+ * we limit ourselves to a platform-agnostic constant here for now.
+ * A follow-up commit may allow even bigger BIO sizes,
+ * once we have thought that through. */
+#define DRBD_MAX_BIO_SIZE (1U << 20)
+#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
-
-/* Number of elements in the app_reads_hash */
-#define APP_R_HSIZE 15
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
+#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
extern int drbd_bm_init(struct drbd_conf *mdev);
extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
@@ -1468,11 +1334,11 @@ extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
+extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
-extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
- unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t drbd_bm_capacity(struct drbd_conf *mdev);
@@ -1497,7 +1363,7 @@ extern void drbd_bm_unlock(struct drbd_conf *mdev);
/* drbd_main.c */
extern struct kmem_cache *drbd_request_cache;
-extern struct kmem_cache *drbd_ee_cache; /* epoch entries */
+extern struct kmem_cache *drbd_ee_cache; /* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
@@ -1537,12 +1403,22 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock;
-extern struct drbd_conf *drbd_new_device(unsigned int minor);
-extern void drbd_free_mdev(struct drbd_conf *mdev);
+extern int conn_lowest_minor(struct drbd_tconn *tconn);
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+extern void drbd_minor_destroy(struct kref *kref);
+
+extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
+extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern void conn_destroy(struct kref *kref);
+struct drbd_tconn *conn_get_by_name(const char *name);
+extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len);
+extern void conn_free_crypto(struct drbd_tconn *tconn);
extern int proc_details;
/* drbd_req */
+extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
@@ -1550,10 +1426,11 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
/* drbd_nl.c */
+extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
@@ -1561,13 +1438,14 @@ extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
enum drbd_role new_role,
int force);
-extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
-extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
+extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
+extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
-extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
+void drbd_resync_after_changed(struct drbd_conf *mdev);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
@@ -1576,13 +1454,13 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
- unsigned int *done);
-extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
+extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+ struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
-static inline void ov_oos_print(struct drbd_conf *mdev)
+static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
{
if (mdev->ov_last_oos_size) {
dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
@@ -1594,97 +1472,102 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
+ struct drbd_peer_request *, void *);
/* worker callbacks */
-extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
-extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
-extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
-extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
-extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
-extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
-extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_data_req(struct drbd_work *, int);
+extern int w_e_end_rsdata_req(struct drbd_work *, int);
+extern int w_e_end_csum_rs_req(struct drbd_work *, int);
+extern int w_e_end_ov_reply(struct drbd_work *, int);
+extern int w_e_end_ov_req(struct drbd_work *, int);
+extern int w_ov_finished(struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_work *, int);
+extern int w_send_write_hint(struct drbd_work *, int);
+extern int w_make_resync_request(struct drbd_work *, int);
+extern int w_send_dblock(struct drbd_work *, int);
+extern int w_send_read_req(struct drbd_work *, int);
+extern int w_prev_work_done(struct drbd_work *, int);
+extern int w_e_reissue(struct drbd_work *, int);
+extern int w_restart_disk_io(struct drbd_work *, int);
+extern int w_send_out_of_sync(struct drbd_work *, int);
+extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type);
-extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
-extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- int is_net);
-#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
-#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
-extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
-extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
- struct list_head *head);
+extern int drbd_submit_peer_request(struct drbd_conf *,
+ struct drbd_peer_request *, const unsigned,
+ const int);
+extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
+extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
+ sector_t, unsigned int,
+ gfp_t) __must_hold(local);
+extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+ int);
+#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
+#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
-extern void drbd_flush_workqueue(struct drbd_conf *mdev);
-extern void drbd_free_tl_hash(struct drbd_conf *mdev);
+extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern int drbd_connected(struct drbd_conf *mdev);
+static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
+{
+ conn_flush_workqueue(mdev->tconn);
+}
-/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
- * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
+/* Yes, there is kernel_setsockopt, but only since 2.6.18.
+ * So we have our own copy of it here. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char *optval, int optlen)
{
+ mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
int err;
+
+ uoptval = (char __user __force *)optval;
+
+ set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, optval, optlen);
+ err = sock_setsockopt(sock, level, optname, uoptval, optlen);
else
- err = sock->ops->setsockopt(sock, level, optname, optval,
+ err = sock->ops->setsockopt(sock, level, optname, uoptval,
optlen);
+ set_fs(oldfs);
return err;
}
static inline void drbd_tcp_cork(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_uncork(struct socket *sock)
{
- int __user val = 0;
+ int val = 0;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_nodelay(struct socket *sock)
{
- int __user val = 1;
+ int val = 1;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
static inline void drbd_tcp_quickack(struct socket *sock)
{
- int __user val = 2;
+ int val = 2;
(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char __user *)&val, sizeof(val));
+ (char*)&val, sizeof(val));
}
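These helpers are typically paired around a burst of small sends so TCP coalesces them into fewer segments. A hedged usage sketch (the send loop is illustrative only):

	/* Illustrative only: cork, queue several small packets, then uncork. */
	static void example_send_burst(struct drbd_tconn *tconn)
	{
		struct socket *sock = tconn->data.socket;

		drbd_tcp_cork(sock);
		/* ... several drbd_send(tconn, sock, buf, size, 0) calls ... */
		drbd_tcp_uncork(sock);	/* let TCP push out the coalesced data */
	}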
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
@@ -1693,8 +1576,8 @@ extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
-extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1702,7 +1585,6 @@ extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
sector_t sector, int size);
-extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
@@ -1712,73 +1594,24 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);
-
/* drbd_nl.c */
-
-void drbd_nl_cleanup(void);
-int __init drbd_nl_init(void);
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
-void drbd_bcast_sync_progress(struct drbd_conf *mdev);
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e);
-
-
-/**
- * DOC: DRBD State macros
- *
- * These macros are used to express state changes in easily readable form.
- *
- * The NS macros expand to a mask and a value, that can be bit ored onto the
- * current state as soon as the spinlock (req_lock) was taken.
- *
- * The _NS macros are used for state functions that get called with the
- * spinlock. These macros expand directly to the new state value.
- *
- * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
- * to express state changes that affect more than one aspect of the state.
- *
- * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
- * Means that the network connection was established and that the peer
- * is in secondary role.
- */
-#define role_MASK R_MASK
-#define peer_MASK R_MASK
-#define disk_MASK D_MASK
-#define pdsk_MASK D_MASK
-#define conn_MASK C_MASK
-#define susp_MASK 1
-#define user_isp_MASK 1
-#define aftr_isp_MASK 1
-#define susp_nod_MASK 1
-#define susp_fen_MASK 1
-
-#define NS(T, S) \
- ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T = (S); val; })
-#define NS2(T1, S1, T2, S2) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val; })
-#define NS3(T1, S1, T2, S2, T3, S3) \
- ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
- mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
- ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val.T3 = (S3); val; })
-
-#define _NS(D, T, S) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
-#define _NS2(D, T1, S1, T2, S2) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns; })
-#define _NS3(D, T1, S1, T2, S2, T3, S3) \
- D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
- __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
+/* state info broadcast */
+struct sib_info {
+ enum drbd_state_info_bcast_reason sib_reason;
+ union {
+ struct {
+ char *helper_name;
+ unsigned helper_exit_code;
+ };
+ struct {
+ union drbd_state os;
+ union drbd_state ns;
+ };
+ };
+};
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
/*
* inline helper functions
@@ -1795,9 +1628,10 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+
+static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
page_chain_for_each(page) {
if (page_count(page) > 1)
return 1;
@@ -1805,18 +1639,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
-static inline void drbd_state_lock(struct drbd_conf *mdev)
-{
- wait_event(mdev->misc_wait,
- !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
-}
-
-static inline void drbd_state_unlock(struct drbd_conf *mdev)
-{
- clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
- wake_up(&mdev->misc_wait);
-}
-
static inline enum drbd_state_rv
_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
@@ -1830,48 +1652,71 @@ _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
-/**
- * drbd_request_state() - Reqest a state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- *
- * This is the most graceful way of requesting a state change. It is verbose
- * quite verbose in case the state change is not possible, and all those
- * state changes are globally serialized.
- */
-static inline int drbd_request_state(struct drbd_conf *mdev,
- union drbd_state mask,
- union drbd_state val)
+static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
{
- return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+ union drbd_state rv;
+
+ rv.i = mdev->state.i;
+ rv.susp = mdev->tconn->susp;
+ rv.susp_nod = mdev->tconn->susp_nod;
+ rv.susp_fen = mdev->tconn->susp_fen;
+
+ return rv;
}
enum drbd_force_detach_flags {
- DRBD_IO_ERROR,
+ DRBD_READ_ERROR,
+ DRBD_WRITE_ERROR,
DRBD_META_IO_ERROR,
DRBD_FORCE_DETACH,
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
- enum drbd_force_detach_flags forcedetach,
+ enum drbd_force_detach_flags df,
const char *where)
{
- switch (mdev->ldev->dc.on_io_error) {
- case EP_PASS_ON:
- if (forcedetach == DRBD_IO_ERROR) {
+ enum drbd_io_error_p ep;
+
+ rcu_read_lock();
+ ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+ switch (ep) {
+ case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
+ if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through to detach case if forcedetach set */
+ /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
+ /* Remember whether we saw a READ or WRITE error.
+ *
+ * Recovery of the affected area for WRITE failure is covered
+ * by the activity log.
+ * READ errors may fall outside that area though. Certain READ
+ * errors can be "healed" by writing good data to the affected
+ * blocks, which triggers block re-allocation in lower layers.
+ *
+ * If we cannot write the bitmap after a READ error,
+ * we may need to trigger a full sync (see w_go_diskless()).
+ *
+ * Force-detach is not really an IO error, but rather a
+ * desperate measure to try to deal with a completely
+ * unresponsive lower level IO stack.
+ * Still it should be treated as a WRITE error.
+ *
+ * Meta IO error is always WRITE error:
+ * we read meta data only once during attach,
+ * which will fail in case of errors.
+ */
set_bit(WAS_IO_ERROR, &mdev->flags);
- if (forcedetach == DRBD_FORCE_DETACH)
+ if (df == DRBD_READ_ERROR)
+ set_bit(WAS_READ_ERROR, &mdev->flags);
+ if (df == DRBD_FORCE_DETACH)
set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
@@ -1896,9 +1741,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
{
if (error) {
unsigned long flags;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__drbd_chk_io_error_(mdev, forcedetach, where);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
}
}
@@ -1910,9 +1755,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
* BTW, for internal meta data, this happens to be the maximum capacity
* we could agree upon with our peer node.
*/
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1922,13 +1767,30 @@ static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
}
}
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+{
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ return _drbd_md_first_sector(meta_dev_idx, bdev);
+}
+
/**
* drbd_md_last_sector() - Return the last sector number of the meta data area
* @bdev: Meta data block device.
*/
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
return bdev->md.md_offset + MD_AL_OFFSET - 1;
@@ -1956,12 +1818,18 @@ static inline sector_t drbd_get_capacity(struct block_device *bdev)
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
sector_t s;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
case DRBD_MD_INDEX_INTERNAL:
case DRBD_MD_INDEX_FLEX_INT:
s = drbd_get_capacity(bdev->backing_bdev)
? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
- drbd_md_first_sector(bdev))
+ _drbd_md_first_sector(meta_dev_idx, bdev))
: 0;
break;
case DRBD_MD_INDEX_FLEX_EXT:
@@ -1987,9 +1855,15 @@ static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+ rcu_read_unlock();
+
+ switch (meta_dev_idx) {
default: /* external, some index */
- return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
+ return MD_RESERVED_SECT * meta_dev_idx;
case DRBD_MD_INDEX_INTERNAL:
/* with drbd08, internal meta data is always "flexible" */
case DRBD_MD_INDEX_FLEX_INT:
@@ -2015,9 +1889,8 @@ drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
static inline void
@@ -2026,41 +1899,35 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
unsigned long flags;
spin_lock_irqsave(&q->q_lock, flags);
list_add_tail(&w->list, &q->q);
- up(&q->s); /* within the spinlock,
- see comment near end of drbd_worker() */
spin_unlock_irqrestore(&q->q_lock, flags);
+ wake_up(&q->q_wait);
}
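/* Sketch of the consumer side that the wake_up() above pairs with.  The real
 * worker loop lives in drbd_worker.c and is more involved; dequeue_one_work()
 * is a made-up helper for illustration only. */
static struct drbd_work *dequeue_one_work(struct drbd_work_queue *q)
{
	struct drbd_work *w = NULL;

	wait_event(q->q_wait, !list_empty(&q->q));

	spin_lock_irq(&q->q_lock);
	if (!list_empty(&q->q)) {
		w = list_first_entry(&q->q, struct drbd_work, list);
		list_del_init(&w->list);
	}
	spin_unlock_irq(&q->q_lock);

	return w;
}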
-static inline void wake_asender(struct drbd_conf *mdev)
-{
- if (test_bit(SIGNAL_ASENDER, &mdev->flags))
- force_sig(DRBD_SIG, mdev->asender.task);
-}
-
-static inline void request_ping(struct drbd_conf *mdev)
+static inline void wake_asender(struct drbd_tconn *tconn)
{
- set_bit(SEND_PING, &mdev->flags);
- wake_asender(mdev);
+ if (test_bit(SIGNAL_ASENDER, &tconn->flags))
+ force_sig(DRBD_SIG, tconn->asender.task);
}
-static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
- enum drbd_packets cmd)
+static inline void request_ping(struct drbd_tconn *tconn)
{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
+ set_bit(SEND_PING, &tconn->flags);
+ wake_asender(tconn);
}
-static inline int drbd_send_ping(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
-}
+extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
+extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
+extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
+ enum drbd_packet, unsigned int, void *,
+ unsigned int);
-static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
-{
- struct p_header80 h;
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
-}
+extern int drbd_send_ping(struct drbd_tconn *tconn);
+extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
@@ -2082,21 +1949,21 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
* or implicit barrier packets as necessary.
* increased:
* w_send_barrier
- * _req_mod(req, queue_for_net_write or queue_for_net_read);
+ * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
* it is much easier and equally valid to count what we queue for the
* worker, even before it actually was queued or send.
* (drbd_make_request_common; recovery path on read io-error)
* decreased:
* got_BarrierAck (respective tl_clear, tl_clear_barrier)
- * _req_mod(req, data_received)
+ * _req_mod(req, DATA_RECEIVED)
* [from receive_DataReply]
- * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
* [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
* for some reason it is NOT decreased in got_NegAck,
* but in the resulting cleanup code from report_params.
* we should try to remember the reason for that...
- * _req_mod(req, send_failed or send_canceled)
- * _req_mod(req, connection_lost_while_pending)
+ * _req_mod(req, SEND_FAILED or SEND_CANCELED)
+ * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
* [from tl_clear_barrier]
*/
static inline void inc_ap_pending(struct drbd_conf *mdev)
@@ -2104,17 +1971,19 @@ static inline void inc_ap_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->ap_pending_cnt);
}
-#define ERR_IF_CNT_IS_NEGATIVE(which) \
- if (atomic_read(&mdev->which) < 0) \
+#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
+ if (atomic_read(&mdev->which) < 0) \
dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
- __func__ , __LINE__ , \
- atomic_read(&mdev->which))
+ func, line, \
+ atomic_read(&mdev->which))
-#define dec_ap_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \
- wake_up(&mdev->misc_wait); \
- ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
+#define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ if (atomic_dec_and_test(&mdev->ap_pending_cnt))
+ wake_up(&mdev->misc_wait);
+ ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
+}
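/* Hypothetical caller, not in the patch: because dec_ap_pending() is a macro
 * that forwards __FUNCTION__/__LINE__ into _dec_ap_pending(), an underflow of
 * ap_pending_cnt is now reported with the call site below, not with this
 * header. */
static inline void example_got_peer_ack(struct drbd_conf *mdev)
{
	/* a negative counter would be logged as
	 * "in example_got_peer_ack:<line>: ap_pending_cnt = -1 < 0 !" */
	dec_ap_pending(mdev);
}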
/* counts how many resync-related answers we still expect from the peer
* increase decrease
@@ -2127,10 +1996,12 @@ static inline void inc_rs_pending(struct drbd_conf *mdev)
atomic_inc(&mdev->rs_pending_cnt);
}
-#define dec_rs_pending(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->rs_pending_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
+#define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
+{
+ atomic_dec(&mdev->rs_pending_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
+}
/* counts how many answers we still need to send to the peer.
* increased on
@@ -2146,38 +2017,18 @@ static inline void inc_unacked(struct drbd_conf *mdev)
atomic_inc(&mdev->unacked_cnt);
}
-#define dec_unacked(mdev) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_dec(&mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-#define sub_unacked(mdev, n) do { \
- typecheck(struct drbd_conf *, mdev); \
- atomic_sub(n, &mdev->unacked_cnt); \
- ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-
-
-static inline void put_net_conf(struct drbd_conf *mdev)
+#define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
+static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
{
- if (atomic_dec_and_test(&mdev->net_cnt))
- wake_up(&mdev->net_cnt_wait);
+ atomic_dec(&mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
-/**
- * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
- * @mdev: DRBD device.
- *
- * You have to call put_net_conf() when finished working with mdev->net_conf.
- */
-static inline int get_net_conf(struct drbd_conf *mdev)
+#define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
+static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
{
- int have_net_conf;
-
- atomic_inc(&mdev->net_cnt);
- have_net_conf = mdev->state.conn >= C_UNCONNECTED;
- if (!have_net_conf)
- put_net_conf(mdev);
- return have_net_conf;
+ atomic_sub(n, &mdev->unacked_cnt);
+ ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
/**
@@ -2281,17 +2132,20 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
- int mxb = 1000000; /* arbitrary limit on open requests */
- if (get_net_conf(mdev)) {
- mxb = mdev->net_conf->max_buffers;
- put_net_conf(mdev);
- }
+ struct net_conf *nc;
+ int mxb;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
+ rcu_read_unlock();
+
return mxb;
}
static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
- union drbd_state s = mdev->state;
+ union drbd_dev_state s = mdev->state;
/* DO NOT add a default clause, we want the compiler to warn us
* for any newly introduced state we may have forgotten to add here */
@@ -2325,7 +2179,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* Allow IO in BM exchange states with new protocols */
case C_WF_BITMAP_S:
- if (mdev->agreed_pro_version < 96)
+ if (mdev->tconn->agreed_pro_version < 96)
return 0;
break;
@@ -2347,7 +2201,7 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
/* disk state is stable as well. */
break;
- /* no new io accepted during tansitional states */
+ /* no new io accepted during transitional states */
case D_ATTACHING:
case D_NEGOTIATING:
case D_UNKNOWN:
@@ -2359,16 +2213,18 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
return 1;
}
-static inline int is_susp(union drbd_state s)
+static inline int drbd_suspended(struct drbd_conf *mdev)
{
- return s.susp || s.susp_nod || s.susp_fen;
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ return tconn->susp || tconn->susp_fen || tconn->susp_nod;
}
static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
int mxb = drbd_get_max_buffers(mdev);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return false;
if (test_bit(SUSPEND_IO, &mdev->flags))
return false;
@@ -2390,30 +2246,30 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
return true;
}
-static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
{
bool rv = false;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
rv = may_inc_ap_bio(mdev);
if (rv)
- atomic_add(count, &mdev->ap_bio_cnt);
- spin_unlock_irq(&mdev->req_lock);
+ atomic_inc(&mdev->ap_bio_cnt);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
-static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+static inline void inc_ap_bio(struct drbd_conf *mdev)
{
/* we wait here
* as long as the device is suspended
* until the bitmap is no longer on the fly during connection
- * handshake as long as we would exeed the max_buffer limit.
+ * handshake as long as we would exceed the max_buffer limit.
*
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
- wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
+ wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
}
static inline void dec_ap_bio(struct drbd_conf *mdev)
@@ -2425,7 +2281,7 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
/* this currently does wake_up for every dec_ap_bio!
@@ -2435,6 +2291,12 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
+static inline bool verify_can_do_stop_sector(struct drbd_conf *mdev)
+{
+ return mdev->tconn->agreed_pro_version >= 97 &&
+ mdev->tconn->agreed_pro_version != 100;
+}
+
static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
int changed = mdev->ed_uuid != val;
@@ -2442,40 +2304,6 @@ static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
return changed;
}
-static inline int seq_cmp(u32 a, u32 b)
-{
- /* we assume wrap around at 32bit.
- * for wrap around at 24bit (old atomic_t),
- * we'd have to
- * a <<= 8; b <<= 8;
- */
- return (s32)(a) - (s32)(b);
-}
-#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
-#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
-#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
-#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
-/* CAUTION: please no side effects in arguments! */
-#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
-
-static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
-{
- unsigned int m;
- spin_lock(&mdev->peer_seq_lock);
- m = seq_max(mdev->peer_seq, new_seq);
- mdev->peer_seq = m;
- spin_unlock(&mdev->peer_seq_lock);
- if (m == new_seq)
- wake_up(&mdev->seq_wait);
-}
-
-static inline void drbd_update_congested(struct drbd_conf *mdev)
-{
- struct sock *sk = mdev->data.socket->sk;
- if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &mdev->flags);
-}
-
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
/* sorry, we currently have no working implementation
@@ -2490,10 +2318,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
+ if (mdev->ldev == NULL) {
+ dev_warn(DEV, "mdev->ldev == NULL in drbd_md_flush\n");
+ return;
+ }
+
if (test_bit(MD_NO_FUA, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
set_bit(MD_NO_FUA, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
new file mode 100644
index 000000000000..89c497c630b4
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.c
@@ -0,0 +1,207 @@
+#include <asm/bug.h>
+#include <linux/rbtree_augmented.h>
+#include "drbd_interval.h"
+
+/**
+ * interval_end - return end of @node
+ */
+static inline
+sector_t interval_end(struct rb_node *node)
+{
+ struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
+ return this->end;
+}
+
+/**
+ * compute_subtree_last - compute end of @node
+ *
+ * The end of an interval is the highest (start + (size >> 9)) value of this
+ * node and of its children. Called for @node and its parents whenever the end
+ * may have changed.
+ */
+static inline sector_t
+compute_subtree_last(struct drbd_interval *node)
+{
+ sector_t max = node->sector + (node->size >> 9);
+
+ if (node->rb.rb_left) {
+ sector_t left = interval_end(node->rb.rb_left);
+ if (left > max)
+ max = left;
+ }
+ if (node->rb.rb_right) {
+ sector_t right = interval_end(node->rb.rb_right);
+ if (right > max)
+ max = right;
+ }
+ return max;
+}
+
+static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
+{
+ while (rb != stop) {
+ struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
+ sector_t subtree_last = compute_subtree_last(node);
+ if (node->end == subtree_last)
+ break;
+ node->end = subtree_last;
+ rb = rb_parent(&node->rb);
+ }
+}
+
+static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+}
+
+static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
+{
+ struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
+ struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
+
+ new->end = old->end;
+ old->end = compute_subtree_last(old);
+}
+
+static const struct rb_augment_callbacks augment_callbacks = {
+ augment_propagate,
+ augment_copy,
+ augment_rotate,
+};
+
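/* Illustration only, not part of this file: a recursive checker for the
 * augmented invariant maintained by the callbacks above, i.e. every node's
 * ->end must equal the largest interval end within its subtree. */
static sector_t drbd_interval_check(struct rb_node *node)
{
	struct drbd_interval *this;
	sector_t max;

	if (!node)
		return 0;

	this = rb_entry(node, struct drbd_interval, rb);
	max = this->sector + (this->size >> 9);
	if (node->rb_left)
		max = max_t(sector_t, max, drbd_interval_check(node->rb_left));
	if (node->rb_right)
		max = max_t(sector_t, max, drbd_interval_check(node->rb_right));

	WARN_ON(this->end != max);
	return max;
}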
+/**
+ * drbd_insert_interval - insert a new interval into a tree
+ */
+bool
+drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ struct rb_node **new = &root->rb_node, *parent = NULL;
+
+ BUG_ON(!IS_ALIGNED(this->size, 512));
+
+ while (*new) {
+ struct drbd_interval *here =
+ rb_entry(*new, struct drbd_interval, rb);
+
+ parent = *new;
+ if (this->sector < here->sector)
+ new = &(*new)->rb_left;
+ else if (this->sector > here->sector)
+ new = &(*new)->rb_right;
+ else if (this < here)
+ new = &(*new)->rb_left;
+ else if (this > here)
+ new = &(*new)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&this->rb, parent, new);
+ rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ return true;
+}
+
+/**
+ * drbd_contains_interval - check if a tree contains a given interval
+ * @sector: start sector of @interval
+ * @interval: may not be a valid pointer
+ *
+ * Returns whether the tree contains the node @interval with start sector @sector.
+ * Does not dereference @interval until @interval is known to be a valid object
+ * in @root. Returns %false if @interval is in the tree but with a different
+ * sector number.
+ */
+bool
+drbd_contains_interval(struct rb_root *root, sector_t sector,
+ struct drbd_interval *interval)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (sector < here->sector)
+ node = node->rb_left;
+ else if (sector > here->sector)
+ node = node->rb_right;
+ else if (interval < here)
+ node = node->rb_left;
+ else if (interval > here)
+ node = node->rb_right;
+ else
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drbd_remove_interval - remove an interval from a tree
+ */
+void
+drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
+{
+ rb_erase_augmented(&this->rb, root, &augment_callbacks);
+}
+
+/**
+ * drbd_find_overlap - search for an interval overlapping with [sector, sector + size)
+ * @sector: start sector
+ * @size: size, aligned to 512 bytes
+ *
+ * Returns an interval overlapping with [sector, sector + size), or NULL if
+ * there is none. When there is more than one overlapping interval in the
+ * tree, the interval with the lowest start sector is returned, and all other
+ * overlapping intervals will be on the right side of the tree, reachable with
+ * rb_next().
+ */
+struct drbd_interval *
+drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)
+{
+ struct rb_node *node = root->rb_node;
+ struct drbd_interval *overlap = NULL;
+ sector_t end = sector + (size >> 9);
+
+ BUG_ON(!IS_ALIGNED(size, 512));
+
+ while (node) {
+ struct drbd_interval *here =
+ rb_entry(node, struct drbd_interval, rb);
+
+ if (node->rb_left &&
+ sector < interval_end(node->rb_left)) {
+ /* Overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (here->sector < end &&
+ sector < here->sector + (here->size >> 9)) {
+ overlap = here;
+ break;
+ } else if (sector >= here->sector) {
+ /* Overlap if any must be on right side */
+ node = node->rb_right;
+ } else
+ break;
+ }
+ return overlap;
+}
+
+struct drbd_interval *
+drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size)
+{
+ sector_t end = sector + (size >> 9);
+ struct rb_node *node;
+
+ for (;;) {
+ node = rb_next(&i->rb);
+ if (!node)
+ return NULL;
+ i = rb_entry(node, struct drbd_interval, rb);
+ if (i->sector >= end)
+ return NULL;
+ if (sector < i->sector + (i->size >> 9))
+ return i;
+ }
+}
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
new file mode 100644
index 000000000000..f38fcb00c10d
--- /dev/null
+++ b/drivers/block/drbd/drbd_interval.h
@@ -0,0 +1,40 @@
+#ifndef __DRBD_INTERVAL_H
+#define __DRBD_INTERVAL_H
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+
+struct drbd_interval {
+ struct rb_node rb;
+ sector_t sector; /* start sector of the interval */
+ unsigned int size; /* size in bytes */
+ sector_t end; /* highest interval end in subtree */
+ int local:1 /* local or remote request? */;
+ int waiting:1;
+};
+
+static inline void drbd_clear_interval(struct drbd_interval *i)
+{
+ RB_CLEAR_NODE(&i->rb);
+}
+
+static inline bool drbd_interval_empty(struct drbd_interval *i)
+{
+ return RB_EMPTY_NODE(&i->rb);
+}
+
+extern bool drbd_insert_interval(struct rb_root *, struct drbd_interval *);
+extern bool drbd_contains_interval(struct rb_root *, sector_t,
+ struct drbd_interval *);
+extern void drbd_remove_interval(struct rb_root *, struct drbd_interval *);
+extern struct drbd_interval *drbd_find_overlap(struct rb_root *, sector_t,
+ unsigned int);
+extern struct drbd_interval *drbd_next_overlap(struct drbd_interval *, sector_t,
+ unsigned int);
+
+#define drbd_for_each_overlap(i, root, sector, size) \
+ for (i = drbd_find_overlap(root, sector, size); \
+ i; \
+ i = drbd_next_overlap(i, sector, size))
+
+#endif /* __DRBD_INTERVAL_H */
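/* Usage sketch (hypothetical caller, not part of this header): track a request
 * in an interval tree and look for conflicting, overlapping requests.  The
 * caller is assumed to hold the lock protecting @root, e.g. the req_lock. */
static inline bool example_track_and_check(struct rb_root *root,
					   struct drbd_interval *i,
					   sector_t sector, unsigned int size)
{
	struct drbd_interval *overlap;

	i->sector = sector;
	i->size = size;		/* must be a multiple of 512 */
	if (!drbd_insert_interval(root, i))
		return false;	/* this interval is already in the tree */

	/* visit every interval overlapping [sector, sector + (size >> 9)) */
	drbd_for_each_overlap(overlap, root, sector, size) {
		if (overlap != i)
			return true;	/* found a conflicting request */
	}
	return false;
}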
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f55683ad4ffa..8c13eeb83c53 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -56,14 +56,6 @@
#include "drbd_vli.h"
-struct after_state_chg_work {
- struct drbd_work w;
- union drbd_state os;
- union drbd_state ns;
- enum chg_state_flags flags;
- struct completion *done;
-};
-
static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
@@ -72,21 +64,17 @@ int drbd_asender(struct drbd_thread *);
int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags);
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
-static void _tl_clear(struct drbd_conf *mdev);
+static int w_bitmap_io(struct drbd_work *w, int unused);
+static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
+MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
__stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
@@ -98,7 +86,6 @@ MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
-module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
@@ -120,7 +107,6 @@ module_param(fault_devs, int, 0644);
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
-unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/
/* Module parameter for setting the user mode helper program
@@ -132,10 +118,11 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct drbd_conf **minor_table;
+struct idr minors;
+struct list_head drbd_tconns; /* list of struct drbd_tconn */
struct kmem_cache *drbd_request_cache;
-struct kmem_cache *drbd_ee_cache; /* epoch entries */
+struct kmem_cache *drbd_ee_cache; /* peer requests */
struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
@@ -164,10 +151,15 @@ static const struct block_device_operations drbd_ops = {
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
+ struct bio *bio;
+
if (!drbd_md_io_bio_set)
return bio_alloc(gfp_mask, 1);
- return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ return bio;
}
#ifdef __CHECKER__
@@ -190,158 +182,87 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
#endif
/**
- * DOC: The transfer log
- *
- * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
- * of the list. There is always at least one &struct drbd_tl_epoch object.
- *
- * Each &struct drbd_tl_epoch has a circular double linked list of requests
- * attached.
- */
-static int tl_init(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* during device minor initialization, we may well use GFP_KERNEL */
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
- if (!b)
- return 0;
- INIT_LIST_HEAD(&b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->next = NULL;
- b->br_number = 4711;
- b->n_writes = 0;
- b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
-
- mdev->oldest_tle = b;
- mdev->newest_tle = b;
- INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
- INIT_LIST_HEAD(&mdev->barrier_acked_requests);
-
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-
- return 1;
-}
-
-static void tl_cleanup(struct drbd_conf *mdev)
-{
- D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
- kfree(mdev->oldest_tle);
- mdev->oldest_tle = NULL;
- kfree(mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
-}
-
-/**
- * _tl_add_barrier() - Adds a barrier to the transfer log
- * @mdev: DRBD device.
- * @new: Barrier to be added before the current head of the TL.
- *
- * The caller must hold the req_lock.
- */
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
-{
- struct drbd_tl_epoch *newest_before;
-
- INIT_LIST_HEAD(&new->requests);
- INIT_LIST_HEAD(&new->w.list);
- new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
- new->next = NULL;
- new->n_writes = 0;
-
- newest_before = mdev->newest_tle;
- new->br_number = newest_before->br_number+1;
- if (mdev->newest_tle != new) {
- mdev->newest_tle->next = new;
- mdev->newest_tle = new;
- }
-}
-
-/**
- * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
- * @mdev: DRBD device.
+ * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
+ * @tconn: DRBD connection.
* @barrier_nr: Expected identifier of the DRBD write barrier packet.
* @set_size: Expected number of requests before that barrier.
*
* In case the passed barrier_nr or set_size does not match the oldest
- * &struct drbd_tl_epoch objects this function will cause a termination
- * of the connection.
+ * epoch of not yet barrier-acked requests, this function will cause a
+ * termination of the connection.
*/
-void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
- unsigned int set_size)
+void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+ unsigned int set_size)
{
- struct drbd_tl_epoch *b, *nob; /* next old barrier */
- struct list_head *le, *tle;
struct drbd_request *r;
-
- spin_lock_irq(&mdev->req_lock);
-
- b = mdev->oldest_tle;
+ struct drbd_request *req = NULL;
+ int expect_epoch = 0;
+ int expect_size = 0;
+
+ spin_lock_irq(&tconn->req_lock);
+
+ /* find oldest not yet barrier-acked write request,
+ * count writes in its epoch. */
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ const unsigned s = r->rq_state;
+ if (!req) {
+ if (!(s & RQ_WRITE))
+ continue;
+ if (!(s & RQ_NET_MASK))
+ continue;
+ if (s & RQ_NET_DONE)
+ continue;
+ req = r;
+ expect_epoch = req->epoch;
+ expect_size++;
+ } else {
+ if (r->epoch != expect_epoch)
+ break;
+ if (!(s & RQ_WRITE))
+ continue;
+ /* if (s & RQ_DONE): not expected */
+ /* if (!(s & RQ_NET_MASK)): not expected */
+ expect_size++;
+ }
+ }
/* first some paranoia code */
- if (b == NULL) {
- dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
- barrier_nr);
+ if (req == NULL) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+ barrier_nr);
goto bail;
}
- if (b->br_number != barrier_nr) {
- dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
- barrier_nr, b->br_number);
+ if (expect_epoch != barrier_nr) {
+ conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+ barrier_nr, expect_epoch);
goto bail;
}
- if (b->n_writes != set_size) {
- dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
- barrier_nr, set_size, b->n_writes);
+
+ if (expect_size != set_size) {
+ conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+ barrier_nr, set_size, expect_size);
goto bail;
}
- /* Clean up list of requests processed during current epoch */
- list_for_each_safe(le, tle, &b->requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(r, barrier_acked);
- }
- /* There could be requests on the list waiting for completion
- of the write to the local disk. To avoid corruptions of
- slab's data structures we have to remove the lists head.
-
- Also there could have been a barrier ack out of sequence, overtaking
- the write acks - which would be a bug and violating write ordering.
- To not deadlock in case we lose connection while such requests are
- still pending, we need some way to find them for the
- _req_mode(connection_lost_while_pending).
-
- These have been list_move'd to the out_of_sequence_requests list in
- _req_mod(, barrier_acked) above.
- */
- list_splice_init(&b->requests, &mdev->barrier_acked_requests);
-
- nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, b);
- if (nob)
- mdev->oldest_tle = nob;
- /* if nob == NULL b was the only barrier, and becomes the new
- barrier. Therefore mdev->oldest_tle points already to b */
- } else {
- D_ASSERT(nob != NULL);
- mdev->oldest_tle = nob;
- kfree(b);
+ /* Clean up list of requests processed during current epoch. */
+ /* this extra list walk restart is paranoia,
+ * to catch requests being barrier-acked "unexpectedly".
+ * It usually should find the same req again, or some READ preceding it. */
+ list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+ if (req->epoch == expect_epoch)
+ break;
+ list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+ if (req->epoch != expect_epoch)
+ break;
+ _req_mod(req, BARRIER_ACKED);
}
-
- spin_unlock_irq(&mdev->req_lock);
- dec_ap_pending(mdev);
+ spin_unlock_irq(&tconn->req_lock);
return;
bail:
- spin_unlock_irq(&mdev->req_lock);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ spin_unlock_irq(&tconn->req_lock);
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
@@ -350,85 +271,24 @@ bail:
* @mdev: DRBD device.
* @what: The action/event to perform with all request objects
*
- * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
- * restart_frozen_disk_io.
+ * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
+ * RESTART_FROZEN_DISK_IO.
*/
-static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- struct drbd_tl_epoch *b, *tmp, **pn;
- struct list_head *le, *tle, carry_reads;
- struct drbd_request *req;
- int rv, n_writes, n_reads;
-
- b = mdev->oldest_tle;
- pn = &mdev->oldest_tle;
- while (b) {
- n_writes = 0;
- n_reads = 0;
- INIT_LIST_HEAD(&carry_reads);
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- rv = _req_mod(req, what);
-
- n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
- n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
- }
- tmp = b->next;
-
- if (n_writes) {
- if (what == resend) {
- b->n_writes = n_writes;
- if (b->w.cb == NULL) {
- b->w.cb = w_send_barrier;
- inc_ap_pending(mdev);
- set_bit(CREATE_BARRIER, &mdev->flags);
- }
-
- drbd_queue_work(&mdev->data.work, &b->w);
- }
- pn = &b->next;
- } else {
- if (n_reads)
- list_add(&carry_reads, &b->requests);
- /* there could still be requests on that ring list,
- * in case local io is still pending */
- list_del(&b->requests);
-
- /* dec_ap_pending corresponding to queue_barrier.
- * the newest barrier may not have been queued yet,
- * in which case w.cb is still NULL. */
- if (b->w.cb != NULL)
- dec_ap_pending(mdev);
-
- if (b == mdev->newest_tle) {
- /* recycle, but reinit! */
- D_ASSERT(tmp == NULL);
- INIT_LIST_HEAD(&b->requests);
- list_splice(&carry_reads, &b->requests);
- INIT_LIST_HEAD(&b->w.list);
- b->w.cb = NULL;
- b->br_number = net_random();
- b->n_writes = 0;
-
- *pn = b;
- break;
- }
- *pn = tmp;
- kfree(b);
- }
- b = tmp;
- list_splice(&carry_reads, &b->requests);
- }
-
- /* Actions operating on the disk state, also want to work on
- requests that got barrier acked. */
+/* must hold resource->req_lock */
+void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
_req_mod(req, what);
- }
}
+void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+{
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, what);
+ spin_unlock_irq(&tconn->req_lock);
+}
/**
* tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
@@ -438,43 +298,9 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 * by the requests on the transfer gets marked as out of sync. Called from the
* receiver thread and the worker thread.
*/
-void tl_clear(struct drbd_conf *mdev)
+void tl_clear(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void _tl_clear(struct drbd_conf *mdev)
-{
- struct list_head *le, *tle;
- struct drbd_request *r;
-
- _tl_restart(mdev, connection_lost_while_pending);
-
- /* we expect this list to be empty. */
- D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
-
- /* but just in case, clean it up anyways! */
- list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
- r = list_entry(le, struct drbd_request, tl_requests);
- /* It would be nice to complete outside of spinlock.
- * But this is easier for now. */
- _req_mod(r, connection_lost_while_pending);
- }
-
- /* ensure bit indicating barrier is required is clear */
- clear_bit(CREATE_BARRIER, &mdev->flags);
-
- memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
-
-}
-
-void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
-{
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- spin_unlock_irq(&mdev->req_lock);
+ tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}
/**
@@ -483,1377 +309,131 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
*/
void tl_abort_disk_io(struct drbd_conf *mdev)
{
- struct drbd_tl_epoch *b;
- struct list_head *le, *tle;
- struct drbd_request *req;
-
- spin_lock_irq(&mdev->req_lock);
- b = mdev->oldest_tle;
- while (b) {
- list_for_each_safe(le, tle, &b->requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- if (!(req->rq_state & RQ_LOCAL_PENDING))
- continue;
- _req_mod(req, abort_disk_io);
- }
- b = b->next;
- }
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct drbd_request *req, *r;
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
+ spin_lock_irq(&tconn->req_lock);
+ list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
if (!(req->rq_state & RQ_LOCAL_PENDING))
continue;
- _req_mod(req, abort_disk_io);
- }
-
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/**
- * cl_wide_st_chg() - true if the state change is a cluster wide one
- * @mdev: DRBD device.
- * @os: old (current) state.
- * @ns: new (wanted) state.
- */
-static int cl_wide_st_chg(struct drbd_conf *mdev,
- union drbd_state os, union drbd_state ns)
-{
- return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
- ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
- (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
- (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
- (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
-}
-
-enum drbd_state_rv
-drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
- union drbd_state mask, union drbd_state val)
-{
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, NULL);
- ns = mdev->state;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_force_state() - Impose a change which happens outside our control on our state
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- */
-void drbd_force_state(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
-{
- drbd_change_state(mdev, CS_HARD, mask, val);
-}
-
-static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
-static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
- union drbd_state,
- union drbd_state);
-enum sanitize_state_warnings {
- NO_WARNING,
- ABORTED_ONLINE_VERIFY,
- ABORTED_RESYNC,
- CONNECTION_LOST_NEGOTIATING,
- IMPLICITLY_UPGRADED_DISK,
- IMPLICITLY_UPGRADED_PDSK,
-};
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn);
-int drbd_send_state_req(struct drbd_conf *,
- union drbd_state, union drbd_state);
-
-static enum drbd_state_rv
-_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val)
-{
- union drbd_state os, ns;
- unsigned long flags;
- enum drbd_state_rv rv;
-
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
- return SS_CW_SUCCESS;
-
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
- return SS_CW_FAILED_BY_PEER;
-
- rv = 0;
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (!cl_wide_st_chg(mdev, os, ns))
- rv = SS_CW_NO_NEED;
- if (!rv) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS) {
- rv = is_valid_state_transition(mdev, ns, os);
- if (rv == SS_SUCCESS)
- rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
- }
- }
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- return rv;
-}
-
-/**
- * drbd_req_state() - Perform an eventually cluster wide state change
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Should not be called directly, use drbd_request_state() or
- * _drbd_request_state().
- */
-static enum drbd_state_rv
-drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- struct completion done;
- unsigned long flags;
- union drbd_state os, ns;
- enum drbd_state_rv rv;
-
- init_completion(&done);
-
- if (f & CS_SERIALIZE)
- mutex_lock(&mdev->state_mutex);
-
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- ns = sanitize_state(mdev, os, ns, NULL);
-
- if (cl_wide_st_chg(mdev, os, ns)) {
- rv = is_valid_state(mdev, ns);
- if (rv == SS_SUCCESS)
- rv = is_valid_state_transition(mdev, ns, os);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (rv < SS_SUCCESS) {
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- drbd_state_lock(mdev);
- if (!drbd_send_state_req(mdev, mask, val)) {
- drbd_state_unlock(mdev);
- rv = SS_CW_FAILED_BY_PEER;
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
-
- wait_event(mdev->state_wait,
- (rv = _req_st_cond(mdev, mask, val)));
-
- if (rv < SS_SUCCESS) {
- drbd_state_unlock(mdev);
- if (f & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- goto abort;
- }
- spin_lock_irqsave(&mdev->req_lock, flags);
- os = mdev->state;
- ns.i = (os.i & ~mask.i) | val.i;
- rv = _drbd_set_state(mdev, ns, f, &done);
- drbd_state_unlock(mdev);
- } else {
- rv = _drbd_set_state(mdev, ns, f, &done);
- }
-
- spin_unlock_irqrestore(&mdev->req_lock, flags);
-
- if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
- D_ASSERT(current != mdev->worker.task);
- wait_for_completion(&done);
- }
-
-abort:
- if (f & CS_SERIALIZE)
- mutex_unlock(&mdev->state_mutex);
-
- return rv;
-}
-
-/**
- * _drbd_request_state() - Request a state change (with flags)
- * @mdev: DRBD device.
- * @mask: mask of state bits to change.
- * @val: value of new state bits.
- * @f: flags
- *
- * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
- * flag, or when logging of failed state change requests is not desired.
- */
-enum drbd_state_rv
-_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
- union drbd_state val, enum chg_state_flags f)
-{
- enum drbd_state_rv rv;
-
- wait_event(mdev->state_wait,
- (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
-
- return rv;
-}
-
-static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
-{
- dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
- name,
- drbd_conn_str(ns.conn),
- drbd_role_str(ns.role),
- drbd_role_str(ns.peer),
- drbd_disk_str(ns.disk),
- drbd_disk_str(ns.pdsk),
- is_susp(ns) ? 's' : 'r',
- ns.aftr_isp ? 'a' : '-',
- ns.peer_isp ? 'p' : '-',
- ns.user_isp ? 'u' : '-'
- );
-}
-
-void print_st_err(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum drbd_state_rv err)
-{
- if (err == SS_IN_TRANSIENT_STATE)
- return;
- dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
- print_st(mdev, " state", os);
- print_st(mdev, "wanted", ns);
-}
-
-
-/**
- * is_valid_state() - Returns an SS_ error code if ns is not valid
- * @mdev: DRBD device.
- * @ns: State to consider.
- */
-static enum drbd_state_rv
-is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
-{
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- enum drbd_fencing_p fp;
- enum drbd_state_rv rv = SS_SUCCESS;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (get_net_conf(mdev)) {
- if (!mdev->net_conf->two_primaries &&
- ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
- rv = SS_TWO_PRIMARIES;
- put_net_conf(mdev);
- }
-
- if (rv <= 0)
- /* already found a reason to abort */;
- else if (ns.role == R_SECONDARY && mdev->open_cnt)
- rv = SS_DEVICE_IN_USE;
-
- else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (fp >= FP_RESOURCE &&
- ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
- rv = SS_PRIMARY_NOP;
-
- else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
- rv = SS_NO_LOCAL_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
- rv = SS_NO_REMOTE_DISK;
-
- else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
- rv = SS_NO_UP_TO_DATE_DISK;
-
- else if ((ns.conn == C_CONNECTED ||
- ns.conn == C_WF_BITMAP_S ||
- ns.conn == C_SYNC_SOURCE ||
- ns.conn == C_PAUSED_SYNC_S) &&
- ns.disk == D_OUTDATED)
- rv = SS_CONNECTED_OUTDATES;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- (mdev->sync_conf.verify_alg[0] == 0))
- rv = SS_NO_VERIFY_ALG;
-
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- mdev->agreed_pro_version < 88)
- rv = SS_NOT_SUPPORTED;
-
- else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
- rv = SS_CONNECTED_OUTDATES;
-
- return rv;
-}
-
-/**
- * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
- * @mdev: DRBD device.
- * @ns: new state.
- * @os: old state.
- */
-static enum drbd_state_rv
-is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
- union drbd_state os)
-{
- enum drbd_state_rv rv = SS_SUCCESS;
-
- if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
- os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
- rv = SS_ALREADY_STANDALONE;
-
- if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
- rv = SS_IS_DISKLESS;
-
- if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
- rv = SS_NO_NET_CONFIG;
-
- if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
- rv = SS_LOWER_THAN_OUTDATED;
-
- if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
- rv = SS_IN_TRANSIENT_STATE;
-
- if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
- rv = SS_IN_TRANSIENT_STATE;
-
- /* While establishing a connection only allow cstate to change.
- Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &mdev->flags) &&
- !(os.conn == C_WF_REPORT_PARAMS ||
- (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
- rv = SS_IN_TRANSIENT_STATE;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
- ns.conn != os.conn && os.conn > C_CONNECTED)
- rv = SS_RESYNC_RUNNING;
-
- if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
- os.conn < C_CONNECTED)
- rv = SS_NEED_CONNECTION;
-
- if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
- && os.conn < C_WF_REPORT_PARAMS)
- rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
-
- return rv;
-}
-
-static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
-{
- static const char *msg_table[] = {
- [NO_WARNING] = "",
- [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
- [ABORTED_RESYNC] = "Resync aborted.",
- [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
- [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
- [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
- };
-
- if (warn != NO_WARNING)
- dev_warn(DEV, "%s\n", msg_table[warn]);
-}
-
-/**
- * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @warn_sync_abort:
- *
- * When we loose connection, we have to set the state of the peers disk (pdsk)
- * to D_UNKNOWN. This rule and many more along those lines are in this function.
- */
-static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum sanitize_state_warnings *warn)
-{
- enum drbd_fencing_p fp;
- enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
-
- if (warn)
- *warn = NO_WARNING;
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Disallow Network errors to configure a device's network part */
- if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
- os.conn <= C_DISCONNECTING)
- ns.conn = os.conn;
-
- /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
- * If you try to go into some Sync* state, that shall fail (elsewhere). */
- if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
- ns.conn = os.conn;
-
- /* we cannot fail (again) if we already detached */
- if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
- ns.disk = D_DISKLESS;
-
- /* After C_DISCONNECTING only C_STANDALONE may follow */
- if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
- ns.conn = os.conn;
-
- if (ns.conn < C_CONNECTED) {
- ns.peer_isp = 0;
- ns.peer = R_UNKNOWN;
- if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
- ns.pdsk = D_UNKNOWN;
- }
-
- /* Clear the aftr_isp when becoming unconfigured */
- if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
- ns.aftr_isp = 0;
-
- /* Abort resync if a disk fails/detaches */
- if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
- (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn)
- *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
- ns.conn = C_CONNECTED;
- }
-
- /* Connection breaks down before we finished "Negotiating" */
- if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- get_ldev_if_state(mdev, D_NEGOTIATING)) {
- if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
- ns.disk = mdev->new_state_tmp.disk;
- ns.pdsk = mdev->new_state_tmp.pdsk;
- } else {
- if (warn)
- *warn = CONNECTION_LOST_NEGOTIATING;
- ns.disk = D_DISKLESS;
- ns.pdsk = D_UNKNOWN;
- }
- put_ldev(mdev);
- }
-
- /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
- if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
- if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
- ns.disk = D_UP_TO_DATE;
- if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
- ns.pdsk = D_UP_TO_DATE;
- }
-
- /* Implications of the connection stat on the disk states */
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_UNKNOWN;
- switch ((enum drbd_conns)ns.conn) {
- case C_WF_BITMAP_T:
- case C_PAUSED_SYNC_T:
- case C_STARTING_SYNC_T:
- case C_WF_SYNC_UUID:
- case C_BEHIND:
- disk_min = D_INCONSISTENT;
- disk_max = D_OUTDATED;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_VERIFY_S:
- case C_VERIFY_T:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_CONNECTED:
- disk_min = D_DISKLESS;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_DISKLESS;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_WF_BITMAP_S:
- case C_PAUSED_SYNC_S:
- case C_STARTING_SYNC_S:
- case C_AHEAD:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
- break;
- case C_SYNC_TARGET:
- disk_min = D_INCONSISTENT;
- disk_max = D_INCONSISTENT;
- pdsk_min = D_UP_TO_DATE;
- pdsk_max = D_UP_TO_DATE;
- break;
- case C_SYNC_SOURCE:
- disk_min = D_UP_TO_DATE;
- disk_max = D_UP_TO_DATE;
- pdsk_min = D_INCONSISTENT;
- pdsk_max = D_INCONSISTENT;
- break;
- case C_STANDALONE:
- case C_DISCONNECTING:
- case C_UNCONNECTED:
- case C_TIMEOUT:
- case C_BROKEN_PIPE:
- case C_NETWORK_FAILURE:
- case C_PROTOCOL_ERROR:
- case C_TEAR_DOWN:
- case C_WF_CONNECTION:
- case C_WF_REPORT_PARAMS:
- case C_MASK:
- break;
- }
- if (ns.disk > disk_max)
- ns.disk = disk_max;
-
- if (ns.disk < disk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_DISK;
- ns.disk = disk_min;
- }
- if (ns.pdsk > pdsk_max)
- ns.pdsk = pdsk_max;
-
- if (ns.pdsk < pdsk_min) {
- if (warn)
- *warn = IMPLICITLY_UPGRADED_PDSK;
- ns.pdsk = pdsk_min;
- }
-
- if (fp == FP_STONITH &&
- (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
- !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
- ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
-
- if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
- !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
- ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
-
- if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
- if (ns.conn == C_SYNC_SOURCE)
- ns.conn = C_PAUSED_SYNC_S;
- if (ns.conn == C_SYNC_TARGET)
- ns.conn = C_PAUSED_SYNC_T;
- } else {
- if (ns.conn == C_PAUSED_SYNC_S)
- ns.conn = C_SYNC_SOURCE;
- if (ns.conn == C_PAUSED_SYNC_T)
- ns.conn = C_SYNC_TARGET;
- }
-
- return ns;
-}
-
-/* helper for __drbd_set_state */
-static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
-{
- if (mdev->agreed_pro_version < 90)
- mdev->ov_start_sector = 0;
- mdev->rs_total = drbd_bm_bits(mdev);
- mdev->ov_position = 0;
- if (cs == C_VERIFY_T) {
- /* starting online verify from an arbitrary position
- * does not fit well into the existing protocol.
- * on C_VERIFY_T, we initialize ov_left and friends
- * implicitly in receive_DataRequest once the
- * first P_OV_REQUEST is received */
- mdev->ov_start_sector = ~(sector_t)0;
- } else {
- unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
- if (bit >= mdev->rs_total) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(mdev->rs_total - 1);
- mdev->rs_total = 1;
- } else
- mdev->rs_total -= bit;
- mdev->ov_position = mdev->ov_start_sector;
- }
- mdev->ov_left = mdev->rs_total;
-}
-
-static void drbd_resume_al(struct drbd_conf *mdev)
-{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
- dev_info(DEV, "Resumed AL updates\n");
-}
-
-/**
- * __drbd_set_state() - Set a new DRBD state
- * @mdev: DRBD device.
- * @ns: new state.
- * @flags: Flags
- * @done: Optional completion, that will get completed after the after_state_ch() finished
- *
- * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
- */
-enum drbd_state_rv
-__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
- enum chg_state_flags flags, struct completion *done)
-{
- union drbd_state os;
- enum drbd_state_rv rv = SS_SUCCESS;
- enum sanitize_state_warnings ssw;
- struct after_state_chg_work *ascw;
-
- os = mdev->state;
-
- ns = sanitize_state(mdev, os, ns, &ssw);
-
- if (ns.i == os.i)
- return SS_NOTHING_TO_DO;
-
- if (!(flags & CS_HARD)) {
- /* pre-state-change checks ; only look at ns */
- /* See drbd_state_sw_errors in drbd_strings.c */
-
- rv = is_valid_state(mdev, ns);
- if (rv < SS_SUCCESS) {
- /* If the old state was illegal as well, then let
- this happen...*/
-
- if (is_valid_state(mdev, os) == rv)
- rv = is_valid_state_transition(mdev, ns, os);
- } else
- rv = is_valid_state_transition(mdev, ns, os);
- }
-
- if (rv < SS_SUCCESS) {
- if (flags & CS_VERBOSE)
- print_st_err(mdev, os, ns, rv);
- return rv;
- }
-
- print_sanitize_warnings(mdev, ssw);
-
- {
- char *pbp, pb[300];
- pbp = pb;
- *pbp = 0;
- if (ns.role != os.role)
- pbp += sprintf(pbp, "role( %s -> %s ) ",
- drbd_role_str(os.role),
- drbd_role_str(ns.role));
- if (ns.peer != os.peer)
- pbp += sprintf(pbp, "peer( %s -> %s ) ",
- drbd_role_str(os.peer),
- drbd_role_str(ns.peer));
- if (ns.conn != os.conn)
- pbp += sprintf(pbp, "conn( %s -> %s ) ",
- drbd_conn_str(os.conn),
- drbd_conn_str(ns.conn));
- if (ns.disk != os.disk)
- pbp += sprintf(pbp, "disk( %s -> %s ) ",
- drbd_disk_str(os.disk),
- drbd_disk_str(ns.disk));
- if (ns.pdsk != os.pdsk)
- pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
- drbd_disk_str(os.pdsk),
- drbd_disk_str(ns.pdsk));
- if (is_susp(ns) != is_susp(os))
- pbp += sprintf(pbp, "susp( %d -> %d ) ",
- is_susp(os),
- is_susp(ns));
- if (ns.aftr_isp != os.aftr_isp)
- pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
- os.aftr_isp,
- ns.aftr_isp);
- if (ns.peer_isp != os.peer_isp)
- pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
- os.peer_isp,
- ns.peer_isp);
- if (ns.user_isp != os.user_isp)
- pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
- os.user_isp,
- ns.user_isp);
- dev_info(DEV, "%s\n", pb);
- }
-
- /* solve the race between becoming unconfigured,
- * worker doing the cleanup, and
- * admin reconfiguring us:
- * on (re)configure, first set CONFIG_PENDING,
- * then wait for a potentially exiting worker,
- * start the worker, and schedule one no_op.
- * then proceed with configuration.
- */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY &&
- !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
- set_bit(DEVICE_DYING, &mdev->flags);
-
- /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
- * on the ldev here, to be sure the transition -> D_DISKLESS resp.
- * drbd_ldev_destroy() won't happen before our corresponding
- * after_state_ch works run, where we put_ldev again. */
- if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
- atomic_inc(&mdev->local_cnt);
-
- mdev->state = ns;
-
- if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
- drbd_print_uuids(mdev, "attached to UUIDs");
-
- wake_up(&mdev->misc_wait);
- wake_up(&mdev->state_wait);
-
- /* aborted verify run. log the last position */
- if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
- ns.conn < C_CONNECTED) {
- mdev->ov_start_sector =
- BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
- }
-
- if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
- dev_info(DEV, "Syncer continues.\n");
- mdev->rs_paused += (long)jiffies
- -(long)mdev->rs_mark_time[mdev->rs_last_mark];
- if (ns.conn == C_SYNC_TARGET)
- mod_timer(&mdev->resync_timer, jiffies);
- }
-
- if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
- (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
- dev_info(DEV, "Resync suspended\n");
- mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
- }
-
- if (os.conn == C_CONNECTED &&
- (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
- unsigned long now = jiffies;
- int i;
-
- set_ov_position(mdev, ns.conn);
- mdev->rs_start = now;
- mdev->rs_last_events = 0;
- mdev->rs_last_sect_ev = 0;
- mdev->ov_last_oos_size = 0;
- mdev->ov_last_oos_start = 0;
-
- for (i = 0; i < DRBD_SYNC_MARKS; i++) {
- mdev->rs_mark_left[i] = mdev->ov_left;
- mdev->rs_mark_time[i] = now;
- }
-
- drbd_rs_controller_reset(mdev);
-
- if (ns.conn == C_VERIFY_S) {
- dev_info(DEV, "Starting Online Verify from sector %llu\n",
- (unsigned long long)mdev->ov_position);
- mod_timer(&mdev->resync_timer, jiffies);
- }
- }
-
- if (get_ldev(mdev)) {
- u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
- MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
- MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
-
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
- mdf |= MDF_CRASHED_PRIMARY;
- if (mdev->state.role == R_PRIMARY ||
- (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
- mdf |= MDF_PRIMARY_IND;
- if (mdev->state.conn > C_WF_REPORT_PARAMS)
- mdf |= MDF_CONNECTED_IND;
- if (mdev->state.disk > D_INCONSISTENT)
- mdf |= MDF_CONSISTENT;
- if (mdev->state.disk > D_OUTDATED)
- mdf |= MDF_WAS_UP_TO_DATE;
- if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
- mdf |= MDF_PEER_OUT_DATED;
- if (mdf != mdev->ldev->md.flags) {
- mdev->ldev->md.flags = mdf;
- drbd_md_mark_dirty(mdev);
- }
- if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
- drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
- put_ldev(mdev);
- }
-
- /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
- if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
- os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
-
- /* Receiver should clean up itself */
- if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
- drbd_thread_stop_nowait(&mdev->receiver);
-
-	/* Now that the receiver has finished cleaning up after itself, it should die */
- if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
- drbd_thread_stop_nowait(&mdev->receiver);
-
- /* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_WF_CONNECTION &&
- ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
- drbd_thread_restart_nowait(&mdev->receiver);
-
- /* Resume AL writing if we get a connection */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- drbd_resume_al(mdev);
-
- /* remember last connect and attach times so request_timer_fn() won't
- * kill newly established sessions while we are still trying to thaw
- * previously frozen IO */
- if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
- mdev->last_reconnect_jif = jiffies;
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- mdev->last_reattach_jif = jiffies;
-
- ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
- if (ascw) {
- ascw->os = os;
- ascw->ns = ns;
- ascw->flags = flags;
- ascw->w.cb = w_after_state_ch;
- ascw->done = done;
- drbd_queue_work(&mdev->data.work, &ascw->w);
- } else {
- dev_warn(DEV, "Could not kmalloc an ascw\n");
- }
-
- return rv;
-}
-
-static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
-{
- struct after_state_chg_work *ascw =
- container_of(w, struct after_state_chg_work, w);
- after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
- if (ascw->flags & CS_WAIT_COMPLETE) {
- D_ASSERT(ascw->done != NULL);
- complete(ascw->done);
- }
- kfree(ascw);
-
- return 1;
-}
-
-static void abw_start_sync(struct drbd_conf *mdev, int rv)
-{
- if (rv) {
- dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
- _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
- return;
- }
-
- switch (mdev->state.conn) {
- case C_STARTING_SYNC_T:
- _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
- break;
- case C_STARTING_SYNC_S:
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- break;
- }
-}
-
-int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
- int (*io_fn)(struct drbd_conf *),
- char *why, enum bm_flag flags)
-{
- int rv;
-
- D_ASSERT(current == mdev->worker.task);
-
- /* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
-
- drbd_bm_lock(mdev, why, flags);
- rv = io_fn(mdev);
- drbd_bm_unlock(mdev);
-
- drbd_resume_io(mdev);
-
- return rv;
-}
-
-/**
- * after_state_ch() - Perform after state change actions that may sleep
- * @mdev: DRBD device.
- * @os: old state.
- * @ns: new state.
- * @flags: Flags
- */
-static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, enum chg_state_flags flags)
-{
- enum drbd_fencing_p fp;
- enum drbd_req_event what = nothing;
- union drbd_state nsm = (union drbd_state){ .i = -1 };
-
- if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
- if (mdev->p_uuid)
- mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
- }
-
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- /* Inform userspace about the change... */
- drbd_bcast_state(mdev, ns);
-
- if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
- (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
- drbd_khelper(mdev, "pri-on-incon-degr");
-
- /* Here we have the actions that are performed after a
- state change. This function might sleep */
-
- if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
- mod_timer(&mdev->request_timer, jiffies + HZ);
-
- nsm.i = -1;
- if (ns.susp_nod) {
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- what = resend;
-
- if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- ns.disk > D_NEGOTIATING)
- what = restart_frozen_disk_io;
-
- if (what != nothing)
- nsm.susp_nod = 0;
- }
-
- if (ns.susp_fen) {
- /* case1: The outdate peer handler is successful: */
- if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
- drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- }
- spin_lock_irq(&mdev->req_lock);
- _tl_clear(mdev);
- _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
- /* case2: The connection was established again: */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- clear_bit(NEW_CUR_UUID, &mdev->flags);
- what = resend;
- nsm.susp_fen = 0;
- }
- }
-
- if (what != nothing) {
- spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, what);
- nsm.i &= mdev->state.i;
- _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
- }
-
- /* Became sync source. With protocol >= 96, we still need to send out
- * the sync uuid now. Need to do that before any drbd_send_state, or
- * the other side may go "paused sync" before receiving the sync uuids,
- * which is unexpected. */
- if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
- (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
- mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
- drbd_gen_and_send_sync_uuid(mdev);
- put_ldev(mdev);
- }
-
- /* Do not change the order of the if above and the two below... */
- if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
- /* we probably will start a resync soon.
- * make sure those things are properly reset. */
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
- drbd_rs_cancel_all(mdev);
-
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
- /* No point in queuing send_bitmap if we don't have a connection
- * anymore, so check also the _current_ state, not only the new state
- * at the time this work was queued. */
- if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
- mdev->state.conn == C_WF_BITMAP_S)
- drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
- "send_bitmap (WFBitMapS)",
- BM_LOCKED_TEST_ALLOWED);
-
- /* Lost contact to peer's copy of the data */
- if ((os.pdsk >= D_INCONSISTENT &&
- os.pdsk != D_UNKNOWN &&
- os.pdsk != D_OUTDATED)
- && (ns.pdsk < D_INCONSISTENT ||
- ns.pdsk == D_UNKNOWN ||
- ns.pdsk == D_OUTDATED)) {
- if (get_ldev(mdev)) {
- if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- if (is_susp(mdev->state)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
- } else {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- }
- put_ldev(mdev);
- }
- }
-
- if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
- /* D_DISKLESS Peer becomes secondary */
- if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
- /* We may still be Primary ourselves.
- * No harm done if the bitmap still changes,
- * redirtied pages will follow later. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote diskless peer", BM_LOCKED_SET_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Write out all changed bits on demote.
-	 * Though, no need to do that just yet
- * if there is a resync going on still */
- if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
- mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- /* No changes to the bitmap expected this time, so assert that,
- * even though no harm was done if it did change. */
- drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
- "demote", BM_LOCKED_TEST_ALLOWED);
- put_ldev(mdev);
- }
-
- /* Last part of the attaching process ... */
- if (ns.conn >= C_CONNECTED &&
- os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
- drbd_send_sizes(mdev, 0, 0); /* to start sync... */
- drbd_send_uuids(mdev);
- drbd_send_state(mdev, ns);
- }
-
- /* We want to pause/continue resync, tell peer. */
- if (ns.conn >= C_CONNECTED &&
- ((os.aftr_isp != ns.aftr_isp) ||
- (os.user_isp != ns.user_isp)))
- drbd_send_state(mdev, ns);
-
- /* In case one of the isp bits got set, suspend other devices. */
- if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
- (ns.aftr_isp || ns.peer_isp || ns.user_isp))
- suspend_other_sg(mdev);
-
-	/* Make sure the peer gets informed about possible state
- changes (ISP bits) while we were in WFReportParams. */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev, ns);
-
-	/* We are in the process of starting a full sync... */
- if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
- (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
- /* no other bitmap changes expected during this phase */
- drbd_queue_bitmap_io(mdev,
- &drbd_bmio_set_n_write, &abw_start_sync,
- "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
-
-	/* We are invalidating ourselves... */
- if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
- os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
- /* other bitmap operation expected during this phase */
- drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
- "set_n_write from invalidate", BM_LOCKED_MASK);
-
- /* first half of local IO error, failure to attach,
- * or administrative detach */
- if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
- int was_io_error = 0;
- /* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS.
-		 * But it is still not safe to dereference ldev here, since
-		 * we might come from a failed Attach before ldev was set. */
- if (mdev->ldev) {
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
-
- /* Immediately allow completion of all application IO,
- * that waits for completion from the local disk,
- * if this was a force-detach due to disk_timeout
- * or administrator request (drbdsetup detach --force).
- * Do NOT abort otherwise.
- * Aborting local requests may cause serious problems,
- * if requests are completed to upper layers already,
- * and then later the already submitted local bio completes.
- * This can cause DMA into former bio pages that meanwhile
- * have been re-used for other things.
- * So aborting local requests may cause crashes,
- * or even worse, silent data corruption.
- */
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
- tl_abort_disk_io(mdev);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
- }
- put_ldev(mdev);
- }
-
- /* second half of local IO error, failure to attach,
- * or administrative detach,
- * after local_cnt references have reached zero again */
- if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
- /* We must still be diskless,
- * re-attach has to be serialized with this! */
- if (mdev->state.disk != D_DISKLESS)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s while going diskless\n",
- drbd_disk_str(mdev->state.disk));
-
- if (ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* corresponding get_ldev in __drbd_set_state
- * this may finally trigger drbd_ldev_destroy. */
- put_ldev(mdev);
- }
-
-	/* Notify peer that I had a local IO error and did not detach. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Disks got bigger while they were detached */
- if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
- if (ns.conn == C_CONNECTED)
- resync_after_online_grow(mdev);
- }
-
- /* A resync finished or aborted, wake paused devices... */
- if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
- (os.peer_isp && !ns.peer_isp) ||
- (os.user_isp && !ns.user_isp))
- resume_next_sg(mdev);
-
- /* sync target done with resync. Explicitly notify peer, even though
- * it should (at least for non-empty resyncs) already know itself. */
- if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev, ns);
-
- /* Wake up role changes, that were delayed because of connection establishing */
- if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &mdev->flags);
- wake_up(&mdev->state_wait);
- }
-
- /* This triggers bitmap writeout of potentially still unwritten pages
- * if the resync finished cleanly, or aborted because of peer disk
- * failure, or because of connection loss.
- * For resync aborted because of local disk failure, we cannot do
- * any bitmap writeout anymore.
- * No harm done if some bits change during this phase.
- */
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
- "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
- put_ldev(mdev);
- }
-
-	/* free tl_hash if we got thawed and are C_STANDALONE */
- if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
- drbd_free_tl_hash(mdev);
-
- /* Upon network connection, we need to start the receiver */
- if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
- drbd_thread_start(&mdev->receiver);
-
- /* Terminate worker thread if we are unconfigured - it will be
- restarted as needed... */
- if (ns.disk == D_DISKLESS &&
- ns.conn == C_STANDALONE &&
- ns.role == R_SECONDARY) {
- if (os.aftr_isp != ns.aftr_isp)
- resume_next_sg(mdev);
- /* set in __drbd_set_state, unless CONFIG_PENDING was set */
- if (test_bit(DEVICE_DYING, &mdev->flags))
- drbd_thread_stop_nowait(&mdev->worker);
+ if (req->w.mdev != mdev)
+ continue;
+ _req_mod(req, ABORT_DISK_IO);
}
-
- drbd_md_sync(mdev);
+ spin_unlock_irq(&tconn->req_lock);
}
-
static int drbd_thread_setup(void *arg)
{
struct drbd_thread *thi = (struct drbd_thread *) arg;
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
unsigned long flags;
int retval;
+ snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
+ thi->name[0], thi->tconn->name);
+
restart:
retval = thi->function(thi);
spin_lock_irqsave(&thi->t_lock, flags);
- /* if the receiver has been "Exiting", the last thing it did
+ /* if the receiver has been "EXITING", the last thing it did
* was set the conn state to "StandAlone",
* if now a re-connect request comes in, conn state goes C_UNCONNECTED,
* and receiver thread will be "started".
- * drbd_thread_start needs to set "Restarting" in that case.
+ * drbd_thread_start needs to set "RESTARTING" in that case.
* t_state check and assignment needs to be within the same spinlock,
- * so either thread_start sees Exiting, and can remap to Restarting,
- * or thread_start see None, and can proceed as normal.
+ * so either thread_start sees EXITING, and can remap to RESTARTING,
+ * or thread_start see NONE, and can proceed as normal.
*/
- if (thi->t_state == Restarting) {
- dev_info(DEV, "Restarting %s\n", current->comm);
- thi->t_state = Running;
+ if (thi->t_state == RESTARTING) {
+ conn_info(tconn, "Restarting %s thread\n", thi->name);
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
goto restart;
}
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
smp_mb();
- complete(&thi->stop);
+ complete_all(&thi->stop);
spin_unlock_irqrestore(&thi->t_lock, flags);
- dev_info(DEV, "Terminating %s\n", current->comm);
+ conn_info(tconn, "Terminating %s\n", current->comm);
/* Release mod reference taken when thread was started */
+
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return retval;
}
-static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
- int (*func) (struct drbd_thread *))
+static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
+ int (*func) (struct drbd_thread *), char *name)
{
spin_lock_init(&thi->t_lock);
thi->task = NULL;
- thi->t_state = None;
+ thi->t_state = NONE;
thi->function = func;
- thi->mdev = mdev;
+ thi->tconn = tconn;
+ strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}
int drbd_thread_start(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
+ struct drbd_tconn *tconn = thi->tconn;
struct task_struct *nt;
unsigned long flags;
- const char *me =
- thi == &mdev->receiver ? "receiver" :
- thi == &mdev->asender ? "asender" :
- thi == &mdev->worker ? "worker" : "NONSENSE";
-
/* is used from state engine doing drbd_thread_stop_nowait,
* while holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
switch (thi->t_state) {
- case None:
- dev_info(DEV, "Starting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case NONE:
+ conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* Get ref on module for thread - this is released when thread exits */
if (!try_module_get(THIS_MODULE)) {
- dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
+ conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
spin_unlock_irqrestore(&thi->t_lock, flags);
return false;
}
+ kref_get(&thi->tconn->kref);
+
init_completion(&thi->stop);
- D_ASSERT(thi->task == NULL);
thi->reset_cpu_mask = 1;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
nt = kthread_create(drbd_thread_setup, (void *) thi,
- "drbd%d_%s", mdev_to_minor(mdev), me);
+ "drbd_%c_%s", thi->name[0], thi->tconn->name);
if (IS_ERR(nt)) {
- dev_err(DEV, "Couldn't start thread\n");
+ conn_err(tconn, "Couldn't start thread\n");
+ kref_put(&tconn->kref, &conn_destroy);
module_put(THIS_MODULE);
return false;
}
spin_lock_irqsave(&thi->t_lock, flags);
thi->task = nt;
- thi->t_state = Running;
+ thi->t_state = RUNNING;
spin_unlock_irqrestore(&thi->t_lock, flags);
wake_up_process(nt);
break;
- case Exiting:
- thi->t_state = Restarting;
- dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
- me, current->comm, current->pid);
+ case EXITING:
+ thi->t_state = RESTARTING;
+ conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+ thi->name, current->comm, current->pid);
/* fall through */
- case Running:
- case Restarting:
+ case RUNNING:
+ case RESTARTING:
default:
spin_unlock_irqrestore(&thi->t_lock, flags);
break;
@@ -1867,12 +447,12 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
unsigned long flags;
- enum drbd_thread_state ns = restart ? Restarting : Exiting;
+ enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
/* may be called from state engine, holding the req lock irqsave */
spin_lock_irqsave(&thi->t_lock, flags);
- if (thi->t_state == None) {
+ if (thi->t_state == NONE) {
spin_unlock_irqrestore(&thi->t_lock, flags);
if (restart)
drbd_thread_start(thi);
@@ -1890,7 +470,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
init_completion(&thi->stop);
if (thi->task != current)
force_sig(DRBD_SIGKILL, thi->task);
-
}
spin_unlock_irqrestore(&thi->t_lock, flags);
@@ -1899,6 +478,35 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
+static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi =
+ task == tconn->receiver.task ? &tconn->receiver :
+ task == tconn->asender.task ? &tconn->asender :
+ task == tconn->worker.task ? &tconn->worker : NULL;
+
+ return thi;
+}
+
+char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+{
+ struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
+ return thi ? thi->name : task->comm;
+}
+
+int conn_lowest_minor(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr = 0, m;
+
+ rcu_read_lock();
+ mdev = idr_get_next(&tconn->volumes, &vnr);
+ m = mdev ? mdev_to_minor(mdev) : -1;
+ rcu_read_unlock();
+
+ return m;
+}
+
#ifdef CONFIG_SMP
/**
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
@@ -1907,238 +515,345 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
* Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
*/
-void drbd_calc_cpu_mask(struct drbd_conf *mdev)
+void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
int ord, cpu;
/* user override. */
- if (cpumask_weight(mdev->cpu_mask))
+ if (cpumask_weight(tconn->cpu_mask))
return;
- ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
+ ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
- cpumask_set_cpu(cpu, mdev->cpu_mask);
+ cpumask_set_cpu(cpu, tconn->cpu_mask);
return;
}
}
/* should not be reached */
- cpumask_setall(mdev->cpu_mask);
+ cpumask_setall(tconn->cpu_mask);
}
/**
* drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
* @mdev: DRBD device.
+ * @thi: drbd_thread object
*
* call in the "main loop" of _all_ threads, no need for any mutex, current won't die
* prematurely.
*/
-void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
+void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
struct task_struct *p = current;
- struct drbd_thread *thi =
- p == mdev->asender.task ? &mdev->asender :
- p == mdev->receiver.task ? &mdev->receiver :
- p == mdev->worker.task ? &mdev->worker :
- NULL;
- ERR_IF(thi == NULL)
- return;
+
if (!thi->reset_cpu_mask)
return;
thi->reset_cpu_mask = 0;
- set_cpus_allowed_ptr(p, mdev->cpu_mask);
+ set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif
-/* the appropriate socket mutex must be held already */
-int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
- enum drbd_packets cmd, struct p_header80 *h,
- size_t size, unsigned msg_flags)
+/**
+ * drbd_header_size - size of a packet header
+ *
+ * The header size is a multiple of 8, so any payload following the header is
+ * word aligned on 64-bit architectures. (The bitmap send and receive code
+ * relies on this.)
+ */
+unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
- int sent, ok;
+ if (tconn->agreed_pro_version >= 100) {
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
+ return sizeof(struct p_header100);
+ } else {
+ BUILD_BUG_ON(sizeof(struct p_header80) !=
+ sizeof(struct p_header95));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
+ return sizeof(struct p_header80);
+ }
+}
- ERR_IF(!h) return false;
- ERR_IF(!size) return false;
+static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be16(size);
+ return sizeof(struct p_header80);
+}
- h->magic = BE_DRBD_MAGIC;
+static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
+{
+ h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
h->command = cpu_to_be16(cmd);
- h->length = cpu_to_be16(size-sizeof(struct p_header80));
+ h->length = cpu_to_be32(size);
+ return sizeof(struct p_header95);
+}
- sent = drbd_send(mdev, sock, h, size, msg_flags);
+static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
+ int size, int vnr)
+{
+ h->magic = cpu_to_be32(DRBD_MAGIC_100);
+ h->volume = cpu_to_be16(vnr);
+ h->command = cpu_to_be16(cmd);
+ h->length = cpu_to_be32(size);
+ h->pad = 0;
+ return sizeof(struct p_header100);
+}
- ok = (sent == size);
- if (!ok && !signal_pending(current))
- dev_warn(DEV, "short sent %s size=%d sent=%d\n",
- cmdname(cmd), (int)size, sent);
- return ok;
+static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+ void *buffer, enum drbd_packet cmd, int size)
+{
+ if (tconn->agreed_pro_version >= 100)
+ return prepare_header100(buffer, cmd, size, vnr);
+ else if (tconn->agreed_pro_version >= 95 &&
+ size > DRBD_MAX_SIZE_H80_PACKET)
+ return prepare_header95(buffer, cmd, size);
+ else
+ return prepare_header80(buffer, cmd, size);
}
-/* don't pass the socket. we may only look at it
- * when we hold the appropriate socket mutex.
- */
-int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
- enum drbd_packets cmd, struct p_header80 *h, size_t size)
+static void *__conn_prepare_command(struct drbd_tconn *tconn,
+ struct drbd_socket *sock)
{
- int ok = 0;
- struct socket *sock;
+ if (!sock->socket)
+ return NULL;
+ return sock->sbuf + drbd_header_size(tconn);
+}
- if (use_data_socket) {
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
- } else {
- mutex_lock(&mdev->meta.mutex);
- sock = mdev->meta.socket;
- }
+void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+{
+ void *p;
- /* drbd_disconnect() could have called drbd_free_sock()
- * while we were waiting in down()... */
- if (likely(sock != NULL))
- ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
+ mutex_lock(&sock->mutex);
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ mutex_unlock(&sock->mutex);
- if (use_data_socket)
- mutex_unlock(&mdev->data.mutex);
- else
- mutex_unlock(&mdev->meta.mutex);
- return ok;
+ return p;
}
-int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
- size_t size)
+void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
- struct p_header80 h;
- int ok;
+ return conn_prepare_command(mdev->tconn, sock);
+}
- h.magic = BE_DRBD_MAGIC;
- h.command = cpu_to_be16(cmd);
- h.length = cpu_to_be16(size);
+static int __send_command(struct drbd_tconn *tconn, int vnr,
+ struct drbd_socket *sock, enum drbd_packet cmd,
+ unsigned int header_size, void *data,
+ unsigned int size)
+{
+ int msg_flags;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
+ /*
+ * Called with @data == NULL and the size of the data blocks in @size
+ * for commands that send data blocks. For those commands, omit the
+ * MSG_MORE flag: this will increase the likelihood that data blocks
+ * which are page aligned on the sender will end up page aligned on the
+ * receiver.
+ */
+ msg_flags = data ? MSG_MORE : 0;
+
+ header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+ header_size + size);
+ err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+ msg_flags);
+ if (data && !err)
+ err = drbd_send_all(tconn, sock->socket, data, size, 0);
+ return err;
+}
- ok = (sizeof(h) ==
- drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
- ok = ok && (size ==
- drbd_send(mdev, mdev->data.socket, data, size, 0));
+static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+}
- drbd_put_data_sock(mdev);
+int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
+{
+ int err;
- return ok;
+ err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
}
-int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
+int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
+ enum drbd_packet cmd, unsigned int header_size,
+ void *data, unsigned int size)
{
+ int err;
+
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
+ data, size);
+ mutex_unlock(&sock->mutex);
+ return err;
+}
+
+int drbd_send_ping(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+}
+
+int drbd_send_ping_ack(struct drbd_tconn *tconn)
+{
+ struct drbd_socket *sock;
+
+ sock = &tconn->meta;
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+}
+
+int drbd_send_sync_param(struct drbd_conf *mdev)
+{
+ struct drbd_socket *sock;
struct p_rs_param_95 *p;
- struct socket *sock;
- int size, rv;
- const int apv = mdev->agreed_pro_version;
+ int size;
+ const int apv = mdev->tconn->agreed_pro_version;
+ enum drbd_packet cmd;
+ struct net_conf *nc;
+ struct disk_conf *dc;
+
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
size = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
- + strlen(mdev->sync_conf.verify_alg) + 1
+ + strlen(nc->verify_alg) + 1
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- /* used from admin command context and receiver/worker context.
- * to avoid kmalloc, grab the socket right here,
- * then use the pre-allocated sbuf there */
- mutex_lock(&mdev->data.mutex);
- sock = mdev->data.socket;
-
- if (likely(sock != NULL)) {
- enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
-
- p = &mdev->data.sbuf.rs_param_95;
+ cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
- /* initialize verify_alg and csums_alg */
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ /* initialize verify_alg and csums_alg */
+ memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- p->rate = cpu_to_be32(sc->rate);
- p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
- p->c_delay_target = cpu_to_be32(sc->c_delay_target);
- p->c_fill_target = cpu_to_be32(sc->c_fill_target);
- p->c_max_rate = cpu_to_be32(sc->c_max_rate);
-
- if (apv >= 88)
- strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
- if (apv >= 89)
- strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
-
- rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
- } else
- rv = 0; /* not ok */
+ if (get_ldev(mdev)) {
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ p->resync_rate = cpu_to_be32(dc->resync_rate);
+ p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
+ p->c_delay_target = cpu_to_be32(dc->c_delay_target);
+ p->c_fill_target = cpu_to_be32(dc->c_fill_target);
+ p->c_max_rate = cpu_to_be32(dc->c_max_rate);
+ put_ldev(mdev);
+ } else {
+ p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
+ p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
+ p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
+ p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
+ p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
+ }
- mutex_unlock(&mdev->data.mutex);
+ if (apv >= 88)
+ strcpy(p->verify_alg, nc->verify_alg);
+ if (apv >= 89)
+ strcpy(p->csums_alg, nc->csums_alg);
+ rcu_read_unlock();
- return rv;
+ return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}
-int drbd_send_protocol(struct drbd_conf *mdev)
+int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
+ struct drbd_socket *sock;
struct p_protocol *p;
- int size, cf, rv;
+ struct net_conf *nc;
+ int size, cf;
- size = sizeof(struct p_protocol);
+ sock = &tconn->data;
+ p = __conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
- if (mdev->agreed_pro_version >= 87)
- size += strlen(mdev->net_conf->integrity_alg) + 1;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- /* we must not recurse into our own queue,
- * as that is blocked during handshake */
- p = kmalloc(size, GFP_NOIO);
- if (p == NULL)
- return 0;
+ if (nc->tentative && tconn->agreed_pro_version < 92) {
+ rcu_read_unlock();
+ mutex_unlock(&sock->mutex);
+ conn_err(tconn, "--dry-run is not supported by peer");
+ return -EOPNOTSUPP;
+ }
- p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
- p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
- p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
- p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
- p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
+ size = sizeof(*p);
+ if (tconn->agreed_pro_version >= 87)
+ size += strlen(nc->integrity_alg) + 1;
+ p->protocol = cpu_to_be32(nc->wire_protocol);
+ p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
+ p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
+ p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
+ p->two_primaries = cpu_to_be32(nc->two_primaries);
cf = 0;
- if (mdev->net_conf->want_lose)
- cf |= CF_WANT_LOSE;
- if (mdev->net_conf->dry_run) {
- if (mdev->agreed_pro_version >= 92)
- cf |= CF_DRY_RUN;
- else {
- dev_err(DEV, "--dry-run is not supported by peer");
- kfree(p);
- return -1;
- }
- }
+ if (nc->discard_my_data)
+ cf |= CF_DISCARD_MY_DATA;
+ if (nc->tentative)
+ cf |= CF_DRY_RUN;
p->conn_flags = cpu_to_be32(cf);
- if (mdev->agreed_pro_version >= 87)
- strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
+ if (tconn->agreed_pro_version >= 87)
+ strcpy(p->integrity_alg, nc->integrity_alg);
+ rcu_read_unlock();
- rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
- (struct p_header80 *)p, size);
- kfree(p);
- return rv;
+ return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+}
+
+int drbd_send_protocol(struct drbd_tconn *tconn)
+{
+ int err;
+
+ mutex_lock(&tconn->data.mutex);
+ err = __drbd_send_protocol(tconn, P_PROTOCOL);
+ mutex_unlock(&tconn->data.mutex);
+
+ return err;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
- struct p_uuids p;
+ struct drbd_socket *sock;
+ struct p_uuids *p;
int i;
if (!get_ldev_if_state(mdev, D_NEGOTIATING))
- return 1;
+ return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p) {
+ put_ldev(mdev);
+ return -EIO;
+ }
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
- p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+ p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
- p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
- uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
+ p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
+ rcu_read_lock();
+ uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
+ rcu_read_unlock();
uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
- p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
+ p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
put_ldev(mdev);
-
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
- (struct p_header80 *)&p, sizeof(p));
+ return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
int drbd_send_uuids(struct drbd_conf *mdev)
@@ -2169,9 +884,10 @@ void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
}
}
-int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
+void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
- struct p_rs_uuid p;
+ struct drbd_socket *sock;
+ struct p_rs_uuid *p;
u64 uuid;
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
@@ -2184,24 +900,29 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
- p.uuid = cpu_to_be64(uuid);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->uuid = cpu_to_be64(uuid);
+ drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
+ }
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
- struct p_sizes p;
+ struct drbd_socket *sock;
+ struct p_sizes *p;
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
- int ok;
if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
D_ASSERT(mdev->ldev->backing_bdev);
d_size = drbd_get_max_capacity(mdev->ldev);
- u_size = mdev->ldev->dc.disk_size;
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
q_order_type = drbd_queue_order_type(mdev);
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
@@ -2213,20 +934,23 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
- /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
- if (mdev->agreed_pro_version <= 94)
- max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
- p.d_size = cpu_to_be64(d_size);
- p.u_size = cpu_to_be64(u_size);
- p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
- p.max_bio_size = cpu_to_be32(max_bio_size);
- p.queue_order_type = cpu_to_be16(q_order_type);
- p.dds_flags = cpu_to_be16(flags);
+ if (mdev->tconn->agreed_pro_version <= 94)
+ max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ else if (mdev->tconn->agreed_pro_version < 100)
+ max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ p->d_size = cpu_to_be64(d_size);
+ p->u_size = cpu_to_be64(u_size);
+ p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+ p->max_bio_size = cpu_to_be32(max_bio_size);
+ p->queue_order_type = cpu_to_be16(q_order_type);
+ p->dds_flags = cpu_to_be16(flags);
+ return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
@@ -2235,34 +959,21 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
*/
int drbd_send_current_state(struct drbd_conf *mdev)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
-
- /* Grab state lock so we wont send state if we're in the middle
- * of a cluster wide state change on another thread */
- drbd_state_lock(mdev);
-
- mutex_lock(&mdev->data.mutex);
-
- p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
- sock = mdev->data.socket;
+ struct drbd_socket *sock;
+ struct p_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
-
- mutex_unlock(&mdev->data.mutex);
-
- drbd_state_unlock(mdev);
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
* drbd_send_state() - After a state change, sends the new state to the peer
- * @mdev: DRBD device.
- * @state: the state to send, not necessarily the current state.
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
*
* Each state change queues an "after_state_ch" work, which will eventually
* send the resulting new state to the peer. If more state changes happen
@@ -2271,50 +982,95 @@ int drbd_send_current_state(struct drbd_conf *mdev)
*/
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
- struct socket *sock;
- struct p_state p;
- int ok = 0;
+ struct drbd_socket *sock;
+ struct p_state *p;
- mutex_lock(&mdev->data.mutex);
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->state = cpu_to_be32(state.i); /* Within the send mutex */
+ return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
+}
- p.state = cpu_to_be32(state.i);
- sock = mdev->data.socket;
+int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
+{
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- if (likely(sock != NULL)) {
- ok = _drbd_send_cmd(mdev, sock, P_STATE,
- (struct p_header80 *)&p, sizeof(p), 0);
- }
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
+}
- mutex_unlock(&mdev->data.mutex);
+int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_packet cmd;
+ struct drbd_socket *sock;
+ struct p_req_state *p;
- return ok;
+ cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->mask = cpu_to_be32(mask.i);
+ p->val = cpu_to_be32(val.i);
+ return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_state_req(struct drbd_conf *mdev,
- union drbd_state mask, union drbd_state val)
+void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
- struct p_req_state p;
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
- p.mask = cpu_to_be32(mask.i);
- p.val = cpu_to_be32(val.i);
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
+ }
+}
+
+void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
+{
+ struct drbd_socket *sock;
+ struct p_req_state_reply *p;
+ enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
- (struct p_header80 *)&p, sizeof(p));
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (p) {
+ p->retcode = cpu_to_be32(retcode);
+ conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
+ }
}
-int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
+static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
- struct p_req_state_reply p;
+ BUG_ON(code & ~0xf);
+ p->encoding = (p->encoding & ~0xf) | code;
+}
- p.retcode = cpu_to_be32(retcode);
+static void dcbp_set_start(struct p_compressed_bm *p, int set)
+{
+ p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
+}
- return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
- (struct p_header80 *)&p, sizeof(p));
+static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
+{
+ BUG_ON(n & ~0x7);
+ p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
- struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct p_compressed_bm *p,
+ unsigned int size,
+ struct bm_xfer_ctx *c)
{
struct bitstream bs;
unsigned long plain_bits;
@@ -2322,19 +1078,21 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
unsigned long rl;
unsigned len;
unsigned toggle;
- int bits;
+ int bits, use_rle;
/* may we use this feature? */
- if ((mdev->sync_conf.use_rle == 0) ||
- (mdev->agreed_pro_version < 90))
- return 0;
+ rcu_read_lock();
+ use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
+ rcu_read_unlock();
+ if (!use_rle || mdev->tconn->agreed_pro_version < 90)
+ return 0;
if (c->bit_offset >= c->bm_bits)
return 0; /* nothing to do. */
/* use at most thus many bytes */
- bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
- memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
+ bitstream_init(&bs, p->code, size, 0);
+ memset(p->code, 0, size);
/* plain bits covered in this code string */
plain_bits = 0;
@@ -2356,12 +1114,12 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
if (rl == 0) {
/* the first checked bit was set,
* store start value, */
- DCBP_set_start(p, 1);
+ dcbp_set_start(p, 1);
/* but skip encoding of zero run length */
toggle = !toggle;
continue;
}
- DCBP_set_start(p, 0);
+ dcbp_set_start(p, 0);
}
/* paranoia: catch zero runlength.
@@ -2401,7 +1159,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
bm_xfer_ctx_bit_to_word_offset(c);
/* store pad_bits */
- DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
+ dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
return len;
}
@@ -2413,48 +1171,52 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
* code upon failure.
*/
static int
-send_bitmap_rle_or_plain(struct drbd_conf *mdev,
- struct p_header80 *h, struct bm_xfer_ctx *c)
+send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
- struct p_compressed_bm *p = (void*)h;
- unsigned long num_words;
- int len;
- int ok;
-
- len = fill_bitmap_rle_bits(mdev, p, c);
+ struct drbd_socket *sock = &mdev->tconn->data;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ struct p_compressed_bm *p = sock->sbuf + header_size;
+ int len, err;
+ len = fill_bitmap_rle_bits(mdev, p,
+ DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
if (len < 0)
return -EIO;
if (len) {
- DCBP_set_code(p, RLE_VLI_Bits);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
- sizeof(*p) + len, 0);
-
+ dcbp_set_code(p, RLE_VLI_Bits);
+ err = __send_command(mdev->tconn, mdev->vnr, sock,
+ P_COMPRESSED_BITMAP, sizeof(*p) + len,
+ NULL, 0);
c->packets[0]++;
- c->bytes[0] += sizeof(*p) + len;
+ c->bytes[0] += header_size + sizeof(*p) + len;
if (c->bit_offset >= c->bm_bits)
len = 0; /* DONE */
} else {
/* was not compressible.
* send a buffer full of plain text bits instead. */
- num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- len = num_words * sizeof(long);
+ unsigned int data_size;
+ unsigned long num_words;
+ unsigned long *p = sock->sbuf + header_size;
+
+ data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ len = num_words * sizeof(*p);
if (len)
- drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
- h, sizeof(struct p_header80) + len, 0);
+ drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
c->packets[1]++;
- c->bytes[1] += sizeof(struct p_header80) + len;
+ c->bytes[1] += header_size + len;
if (c->bit_offset > c->bm_bits)
c->bit_offset = c->bm_bits;
}
- if (ok) {
+ if (!err) {
if (len == 0) {
INFO_bm_xfer_stats(mdev, "send", c);
return 0;
@@ -2465,21 +1227,13 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
}
/* See the comment at receive_bitmap() */
-int _drbd_send_bitmap(struct drbd_conf *mdev)
+static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
struct bm_xfer_ctx c;
- struct p_header80 *p;
int err;
- ERR_IF(!mdev->bitmap) return false;
-
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- p = (struct p_header80 *) __get_free_page(GFP_NOIO);
- if (!p) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
+ if (!expect(mdev->bitmap))
return false;
- }
if (get_ldev(mdev)) {
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
@@ -2504,37 +1258,39 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
};
do {
- err = send_bitmap_rle_or_plain(mdev, p, &c);
+ err = send_bitmap_rle_or_plain(mdev, &c);
} while (err > 0);
- free_page((unsigned long) p);
return err == 0;
}
int drbd_send_bitmap(struct drbd_conf *mdev)
{
- int err;
+ struct drbd_socket *sock = &mdev->tconn->data;
+ int err = -1;
- if (!drbd_get_data_sock(mdev))
- return -1;
- err = !_drbd_send_bitmap(mdev);
- drbd_put_data_sock(mdev);
+ mutex_lock(&sock->mutex);
+ if (sock->socket)
+ err = !_drbd_send_bitmap(mdev);
+ mutex_unlock(&sock->mutex);
return err;
}
-int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
+void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
- int ok;
- struct p_barrier_ack p;
+ struct drbd_socket *sock;
+ struct p_barrier_ack *p;
- p.barrier = barrier_nr;
- p.set_size = cpu_to_be32(set_size);
+ if (tconn->cstate < C_WF_REPORT_PARAMS)
+ return;
- if (mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &tconn->meta;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return;
+ p->barrier = barrier_nr;
+ p->set_size = cpu_to_be32(set_size);
+ conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
@@ -2545,62 +1301,62 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
 * @blksize:	size in bytes, needs to be in big endian byte order
* @block_id: Id, big endian byte order
*/
-static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- u64 sector,
- u32 blksize,
- u64 block_id)
+static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ u64 sector, u32 blksize, u64 block_id)
{
- int ok;
- struct p_block_ack p;
+ struct drbd_socket *sock;
+ struct p_block_ack *p;
- p.sector = sector;
- p.block_id = block_id;
- p.blksize = blksize;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ if (mdev->state.conn < C_CONNECTED)
+ return -EIO;
- if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
- return false;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->meta;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = sector;
+ p->block_id = block_id;
+ p->blksize = blksize;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
* data_size is payload size according to dp->head,
* and may need to be corrected for digest size. */
-int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_data *dp, int data_size)
+void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_data *dp, int data_size)
{
- data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
- return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
- dp->block_id);
+ if (mdev->tconn->peer_integrity_tfm)
+ data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+ dp->block_id);
}
-int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct p_block_req *rp)
+void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct p_block_req *rp)
{
- return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+ _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
* drbd_send_ack() - Sends an ack packet
- * @mdev: DRBD device.
- * @cmd: Packet command code.
- * @e: Epoch entry.
+ * @mdev: DRBD device
+ * @cmd: packet command code
+ * @peer_req: peer request
*/
-int drbd_send_ack(struct drbd_conf *mdev,
- enum drbd_packets cmd, struct drbd_epoch_entry *e)
+int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
return _drbd_send_ack(mdev, cmd,
- cpu_to_be64(e->sector),
- cpu_to_be32(e->size),
- e->block_id);
+ cpu_to_be64(peer_req->i.sector),
+ cpu_to_be32(peer_req->i.size),
+ peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
-int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id)
{
return _drbd_send_ack(mdev, cmd,
@@ -2612,85 +1368,87 @@ int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = block_id;
- p.blksize = cpu_to_be32(size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = block_id;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
-int drbd_send_drequest_csum(struct drbd_conf *mdev,
- sector_t sector, int size,
- void *digest, int digest_size,
- enum drbd_packets cmd)
+int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
+ void *digest, int digest_size, enum drbd_packet cmd)
{
- int ok;
- struct p_block_req p;
-
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbeef;
- p.blksize = cpu_to_be32(size);
-
- p.head.magic = BE_DRBD_MAGIC;
- p.head.command = cpu_to_be16(cmd);
- p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- mutex_lock(&mdev->data.mutex);
+ /* FIXME: Put the digest into the preallocated socket buffer. */
- ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
- ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
-
- mutex_unlock(&mdev->data.mutex);
-
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, cmd, sizeof(*p),
+ digest, digest_size);
}
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
- int ok;
- struct p_block_req p;
+ struct drbd_socket *sock;
+ struct p_block_req *p;
- p.sector = cpu_to_be64(sector);
- p.block_id = BE_DRBD_MAGIC + 0xbabe;
- p.blksize = cpu_to_be32(size);
-
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
- (struct p_header80 *)&p, sizeof(p));
- return ok;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(sector);
+ p->block_id = ID_SYNCER /* unused */;
+ p->blksize = cpu_to_be32(size);
+ return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
* returns false if we should retry,
* true if we think connection is dead
*/
-static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
+static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
int drop_it;
/* long elapsed = (long)(jiffies - mdev->last_received); */
- drop_it = mdev->meta.socket == sock
- || !mdev->asender.task
- || get_t_state(&mdev->asender) != Running
- || mdev->state.conn < C_CONNECTED;
+ drop_it = tconn->meta.socket == sock
+ || !tconn->asender.task
+ || get_t_state(&tconn->asender) != RUNNING
+ || tconn->cstate < C_WF_REPORT_PARAMS;
if (drop_it)
return true;
- drop_it = !--mdev->ko_count;
+ drop_it = !--tconn->ko_count;
if (!drop_it) {
- dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
- current->comm, current->pid, mdev->ko_count);
- request_ping(mdev);
+ conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+ current->comm, current->pid, tconn->ko_count);
+ request_ping(tconn);
}
return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
+static void drbd_update_congested(struct drbd_tconn *tconn)
+{
+ struct sock *sk = tconn->data.socket->sk;
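+ /* heuristic: treat the data socket as congested once more than 80%
+  * (4/5) of its send buffer is queued but not yet sent; the bit is
+  * reported via drbd_congested() and cleared again at the end of
+  * drbd_send() and _drbd_send_page() */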
+ if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
+ set_bit(NET_CONGESTED, &tconn->flags);
+}
+
/* The idea of sendpage seems to be to put some kind of reference
* to the page into the skb, and to hand it over to the NIC. In
* this process get_page() gets called.
@@ -2713,21 +1471,28 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
* with page_count == 0 or PageSlab.
*/
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
- int offset, size_t size, unsigned msg_flags)
+ int offset, size_t size, unsigned msg_flags)
{
- int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
+ struct socket *socket;
+ void *addr;
+ int err;
+
+ socket = mdev->tconn->data.socket;
+ addr = kmap(page) + offset;
+ err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
kunmap(page);
- if (sent == size)
- mdev->send_cnt += size>>9;
- return sent == size;
+ if (!err)
+ mdev->send_cnt += size >> 9;
+ return err;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
int offset, size_t size, unsigned msg_flags)
{
+ struct socket *socket = mdev->tconn->data.socket;
mm_segment_t oldfs = get_fs();
- int sent, ok;
int len = size;
+ int err = -EIO;
/* e.g. XFS meta- & log-data is in slab pages, which have a
* page_count of 0 and/or have PageSlab() set.
@@ -2739,34 +1504,35 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
- drbd_update_congested(mdev);
+ drbd_update_congested(mdev->tconn);
set_fs(KERNEL_DS);
do {
- sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
- offset, len,
- msg_flags);
- if (sent == -EAGAIN) {
- if (we_should_drop_the_connection(mdev,
- mdev->data.socket))
- break;
- else
- continue;
- }
+ int sent;
+
+ sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) {
+ if (sent == -EAGAIN) {
+ if (we_should_drop_the_connection(mdev->tconn, socket))
+ break;
+ continue;
+ }
dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
__func__, (int)size, len, sent);
+ if (sent < 0)
+ err = sent;
break;
}
len -= sent;
offset += sent;
} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->flags);
+ clear_bit(NET_CONGESTED, &mdev->tconn->flags);
- ok = (len == 0);
- if (likely(ok))
- mdev->send_cnt += size>>9;
- return ok;
+ if (len == 0) {
+ err = 0;
+ mdev->send_cnt += size >> 9;
+ }
+ return err;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2775,12 +1541,15 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_no_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_no_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
@@ -2789,32 +1558,40 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
int i;
/* hint all but last page with MSG_MORE */
bio_for_each_segment(bvec, bio, i) {
- if (!_drbd_send_page(mdev, bvec->bv_page,
- bvec->bv_offset, bvec->bv_len,
- i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
- return 0;
+ int err;
+
+ err = _drbd_send_page(mdev, bvec->bv_page,
+ bvec->bv_offset, bvec->bv_len,
+ i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+ if (err)
+ return err;
}
- return 1;
+ return 0;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
{
- struct page *page = e->pages;
- unsigned len = e->size;
+ struct page *page = peer_req->pages;
+ unsigned len = peer_req->i.size;
+ int err;
+
/* hint all but last page with MSG_MORE */
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- if (!_drbd_send_page(mdev, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0))
- return 0;
+
+ err = _drbd_send_page(mdev, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ if (err)
+ return err;
len -= l;
}
- return 1;
+ return 0;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
- if (mdev->agreed_pro_version >= 95)
+ if (mdev->tconn->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -2828,50 +1605,36 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
*/
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
- int ok = 1;
- struct p_data p;
+ struct drbd_socket *sock;
+ struct p_data *p;
unsigned int dp_flags = 0;
- void *dgb;
int dgs;
+ int err;
- if (!drbd_get_data_sock(mdev))
- return 0;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(P_DATA);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(P_DATA);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
- }
-
- p.sector = cpu_to_be64(req->sector);
- p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->block_id = (unsigned long)req;
+ p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
-
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
-
- p.dp_flags = cpu_to_be32(dp_flags);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
- ok = (sizeof(p) ==
- drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok) {
+ if (mdev->tconn->agreed_pro_version >= 100) {
+ if (req->rq_state & RQ_EXP_RECEIVE_ACK)
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ if (req->rq_state & RQ_EXP_WRITE_ACK)
+ dp_flags |= DP_SEND_WRITE_ACK;
+ }
+ p->dp_flags = cpu_to_be32(dp_flags);
+ if (dgs)
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+ if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
* as soon as we handed it over to tcp, at which point the data
@@ -2883,92 +1646,76 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
* out ok after sending on this side, but does not fit on the
* receiving side, we sure have detected corruption elsewhere.
*/
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
- ok = _drbd_send_bio(mdev, req->master_bio);
+ if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
+ err = _drbd_send_bio(mdev, req->master_bio);
else
- ok = _drbd_send_zc_bio(mdev, req->master_bio);
+ err = _drbd_send_zc_bio(mdev, req->master_bio);
/* double check digest, sometimes buffers have been modified in flight. */
if (dgs > 0 && dgs <= 64) {
/* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */
unsigned char digest[64];
- drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
- if (memcmp(mdev->int_dig_out, digest, dgs)) {
+ drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
+ if (memcmp(p + 1, digest, dgs)) {
dev_warn(DEV,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
- (unsigned long long)req->sector, req->size);
+ (unsigned long long)req->i.sector, req->i.size);
}
} /* else if (dgs > 64) {
... Be noisy about digest too large ...
} */
}
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- drbd_put_data_sock(mdev);
-
- return ok;
+ return err;
}
/* answer packet, used to send data back for read requests:
* Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
-int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
- struct drbd_epoch_entry *e)
+int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
+ struct drbd_peer_request *peer_req)
{
- int ok;
- struct p_data p;
- void *dgb;
+ struct drbd_socket *sock;
+ struct p_data *p;
+ int err;
int dgs;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
- crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
-
- if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
- p.head.h80.magic = BE_DRBD_MAGIC;
- p.head.h80.command = cpu_to_be16(cmd);
- p.head.h80.length =
- cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- } else {
- p.head.h95.magic = BE_DRBD_MAGIC_BIG;
- p.head.h95.command = cpu_to_be16(cmd);
- p.head.h95.length =
- cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
- }
-
- p.sector = cpu_to_be64(e->sector);
- p.block_id = e->block_id;
- /* p.seq_num = 0; No sequence numbers here.. */
-
- /* Only called by our kernel thread.
- * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
- * in response to admin command or module unload.
- */
- if (!drbd_get_data_sock(mdev))
- return 0;
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
- ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
- if (ok && dgs) {
- dgb = mdev->int_dig_out;
- drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
- ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
- }
- if (ok)
- ok = _drbd_send_zc_ee(mdev, e);
+ dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
- drbd_put_data_sock(mdev);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(peer_req->i.sector);
+ p->block_id = peer_req->block_id;
+ p->seq_num = 0; /* unused */
+ p->dp_flags = 0;
+ if (dgs)
+ drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
+ err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+ if (!err)
+ err = _drbd_send_zc_ee(mdev, peer_req);
+ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
- return ok;
+ return err;
}
-int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
+int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
- struct p_block_desc p;
-
- p.sector = cpu_to_be64(req->sector);
- p.blksize = cpu_to_be32(req->size);
+ struct drbd_socket *sock;
+ struct p_block_desc *p;
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
+ sock = &mdev->tconn->data;
+ p = drbd_prepare_command(mdev, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(req->i.sector);
+ p->blksize = cpu_to_be32(req->i.size);
+ return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
@@ -2987,7 +1734,7 @@ int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
/*
* you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/
-int drbd_send(struct drbd_conf *mdev, struct socket *sock,
+int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov;
@@ -2995,7 +1742,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
int rv, sent = 0;
if (!sock)
- return -1000;
+ return -EBADR;
/* THINK if (signal_pending) return ... ? */
@@ -3008,9 +1755,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
- if (sock == mdev->data.socket) {
- mdev->ko_count = mdev->net_conf->ko_count;
- drbd_update_congested(mdev);
+ if (sock == tconn->data.socket) {
+ rcu_read_lock();
+ tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
+ rcu_read_unlock();
+ drbd_update_congested(tconn);
}
do {
/* STRANGE
@@ -3024,12 +1773,11 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
*/
rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) {
- if (we_should_drop_the_connection(mdev, sock))
+ if (we_should_drop_the_connection(tconn, sock))
break;
else
continue;
}
- D_ASSERT(rv != 0);
if (rv == -EINTR) {
flush_signals(current);
rv = 0;
@@ -3041,22 +1789,40 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
iov.iov_len -= rv;
} while (sent < size);
- if (sock == mdev->data.socket)
- clear_bit(NET_CONGESTED, &mdev->flags);
+ if (sock == tconn->data.socket)
+ clear_bit(NET_CONGESTED, &tconn->flags);
if (rv <= 0) {
if (rv != -EAGAIN) {
- dev_err(DEV, "%s_sendmsg returned %d\n",
- sock == mdev->meta.socket ? "msock" : "sock",
- rv);
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_err(tconn, "%s_sendmsg returned %d\n",
+ sock == tconn->meta.socket ? "msock" : "sock",
+ rv);
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else
- drbd_force_state(mdev, NS(conn, C_TIMEOUT));
+ conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
}
return sent;
}
+/**
+ * drbd_send_all - Send an entire buffer
+ * @tconn:     DRBD connection
+ * @sock:      socket to send on (data or meta socket)
+ * @buffer:    data to send
+ * @size:      number of bytes to send
+ * @msg_flags: MSG_* flags passed on to the underlying sendmsg
+ *
+ * Returns 0 upon success and a negative error value otherwise.
+ */
+int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
+ size_t size, unsigned msg_flags)
+{
+ int err;
+
+ err = drbd_send(tconn, sock, buffer, size, msg_flags);
+ if (err < 0)
+ return err;
+ if (err != size)
+ return -EIO;
+ return 0;
+}
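+/* Note: drbd_send() may legitimately return a short byte count; callers that
+ * need all-or-nothing semantics, such as _drbd_no_send_page() above, use
+ * drbd_send_all() and treat any non-zero return as failure of the transfer. */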
+
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
struct drbd_conf *mdev = bdev->bd_disk->private_data;
@@ -3064,7 +1830,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
int rv = 0;
mutex_lock(&drbd_main_mutex);
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
/* to have a stable mdev->state.role
* and no race with updating open_cnt */
@@ -3077,7 +1843,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (!rv)
mdev->open_cnt++;
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
mutex_unlock(&drbd_main_mutex);
return rv;
@@ -3094,35 +1860,14 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
static void drbd_set_defaults(struct drbd_conf *mdev)
{
- /* This way we get a compile error when sync_conf grows,
- and we forgot to initialize it here */
- mdev->sync_conf = (struct syncer_conf) {
- /* .rate = */ DRBD_RATE_DEF,
- /* .after = */ DRBD_AFTER_DEF,
- /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
- /* .verify_alg = */ {}, 0,
- /* .cpu_mask = */ {}, 0,
- /* .csums_alg = */ {}, 0,
- /* .use_rle = */ 0,
- /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
- /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
- /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
- /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
- /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
- /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
- };
-
- /* Have to use that way, because the layout differs between
- big endian and little endian */
- mdev->state = (union drbd_state) {
+ /* Beware! The actual layout differs
+ * between big endian and little endian */
+ mdev->state = (union drbd_dev_state) {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
.conn = C_STANDALONE,
.disk = D_DISKLESS,
.pdsk = D_UNKNOWN,
- .susp = 0,
- .susp_nod = 0,
- .susp_fen = 0
} };
}
@@ -3138,28 +1883,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
atomic_set(&mdev->unacked_cnt, 0);
atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->net_cnt, 0);
- atomic_set(&mdev->packet_seq, 0);
- atomic_set(&mdev->pp_in_use, 0);
atomic_set(&mdev->pp_in_use_by_net, 0);
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->data.mutex);
- mutex_init(&mdev->meta.mutex);
- sema_init(&mdev->data.work.s, 0);
- sema_init(&mdev->meta.work.s, 0);
- mutex_init(&mdev->state_mutex);
-
- spin_lock_init(&mdev->data.work.q_lock);
- spin_lock_init(&mdev->meta.work.q_lock);
+ mutex_init(&mdev->own_state_mutex);
+ mdev->state_mutex = &mdev->own_state_mutex;
spin_lock_init(&mdev->al_lock);
- spin_lock_init(&mdev->req_lock);
spin_lock_init(&mdev->peer_seq_lock);
- spin_lock_init(&mdev->epoch_lock);
INIT_LIST_HEAD(&mdev->active_ee);
INIT_LIST_HEAD(&mdev->sync_ee);
@@ -3167,8 +1901,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->read_ee);
INIT_LIST_HEAD(&mdev->net_ee);
INIT_LIST_HEAD(&mdev->resync_reads);
- INIT_LIST_HEAD(&mdev->data.work.q);
- INIT_LIST_HEAD(&mdev->meta.work.q);
INIT_LIST_HEAD(&mdev->resync_work.list);
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->go_diskless.list);
@@ -3182,6 +1914,14 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
mdev->start_resync_work.cb = w_start_resync;
+
+ mdev->resync_work.mdev = mdev;
+ mdev->unplug_work.mdev = mdev;
+ mdev->go_diskless.mdev = mdev;
+ mdev->md_sync_work.mdev = mdev;
+ mdev->bm_io_work.w.mdev = mdev;
+ mdev->start_resync_work.mdev = mdev;
+
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
init_timer(&mdev->start_resync_timer);
@@ -3197,17 +1937,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
- init_waitqueue_head(&mdev->net_cnt_wait);
init_waitqueue_head(&mdev->ee_wait);
init_waitqueue_head(&mdev->al_wait);
init_waitqueue_head(&mdev->seq_wait);
- drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
- drbd_thread_init(mdev, &mdev->worker, drbd_worker);
- drbd_thread_init(mdev, &mdev->asender, drbd_asender);
-
- mdev->agreed_pro_version = PRO_VERSION_MAX;
- mdev->write_ordering = WO_bdev_flush;
mdev->resync_wenr = LC_FREE;
mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
@@ -3216,13 +1949,10 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
int i;
- if (mdev->receiver.t_state != None)
+ if (mdev->tconn->receiver.t_state != NONE)
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
- mdev->receiver.t_state);
+ mdev->tconn->receiver.t_state);
- /* no need to lock it, I'm the only thread alive */
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
mdev->al_writ_cnt =
mdev->bm_writ_cnt =
mdev->read_cnt =
@@ -3239,7 +1969,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
mdev->rs_mark_left[i] = 0;
mdev->rs_mark_time[i] = 0;
}
- D_ASSERT(mdev->net_conf == NULL);
+ D_ASSERT(mdev->tconn->net_conf == NULL);
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
@@ -3248,21 +1978,18 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_bm_cleanup(mdev);
}
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
+
clear_bit(AL_SUSPENDED, &mdev->flags);
- /*
- * currently we drbd_init_ee only on module load, so
- * we may do drbd_release_ee only on module unload!
- */
D_ASSERT(list_empty(&mdev->active_ee));
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->net_ee));
D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->data.work.q));
- D_ASSERT(list_empty(&mdev->meta.work.q));
+ D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
@@ -3336,7 +2063,7 @@ static int drbd_create_mempools(void)
goto Enomem;
drbd_ee_cache = kmem_cache_create(
- "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
+ "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
@@ -3351,11 +2078,9 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
-#ifdef COMPAT_HAVE_BIOSET_CREATE
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
-#endif
drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_page_pool == NULL)
@@ -3404,73 +2129,53 @@ static struct notifier_block drbd_notifier = {
.notifier_call = drbd_notify_sys,
};
-static void drbd_release_ee_lists(struct drbd_conf *mdev)
+static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
int rr;
- rr = drbd_release_ee(mdev, &mdev->active_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
if (rr)
dev_err(DEV, "%d EEs in active list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->sync_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
if (rr)
dev_err(DEV, "%d EEs in sync list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->read_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
if (rr)
dev_err(DEV, "%d EEs in read list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->done_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
if (rr)
dev_err(DEV, "%d EEs in done list found!\n", rr);
- rr = drbd_release_ee(mdev, &mdev->net_ee);
+ rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (rr)
dev_err(DEV, "%d EEs in net list found!\n", rr);
}
-/* caution. no locking.
- * currently only used from module cleanup code. */
-static void drbd_delete_device(unsigned int minor)
+/* caution. no locking. */
+void drbd_minor_destroy(struct kref *kref)
{
- struct drbd_conf *mdev = minor_to_mdev(minor);
-
- if (!mdev)
- return;
+ struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
+ struct drbd_tconn *tconn = mdev->tconn;
del_timer_sync(&mdev->request_timer);
/* paranoia asserts */
- if (mdev->open_cnt != 0)
- dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
- __FILE__ , __LINE__);
-
- ERR_IF (!list_empty(&mdev->data.work.q)) {
- struct list_head *lp;
- list_for_each(lp, &mdev->data.work.q) {
- dev_err(DEV, "lp = %p\n", lp);
- }
- };
+ D_ASSERT(mdev->open_cnt == 0);
/* end paranoia asserts */
- del_gendisk(mdev->vdisk);
-
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
if (mdev->this_bdev)
bdput(mdev->this_bdev);
- drbd_free_resources(mdev);
+ drbd_free_bc(mdev->ldev);
+ mdev->ldev = NULL;
- drbd_release_ee_lists(mdev);
-
- /* should be freed on disconnect? */
- kfree(mdev->ee_hash);
- /*
- mdev->ee_hash_s = 0;
- mdev->ee_hash = NULL;
- */
+ drbd_release_all_peer_reqs(mdev);
lc_destroy(mdev->act_log);
lc_destroy(mdev->resync);
@@ -3478,19 +2183,101 @@ static void drbd_delete_device(unsigned int minor)
kfree(mdev->p_uuid);
/* mdev->p_uuid = NULL; */
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
+ if (mdev->bitmap) /* should no longer be there. */
+ drbd_bm_cleanup(mdev);
+ __free_page(mdev->md_io_page);
+ put_disk(mdev->vdisk);
+ blk_cleanup_queue(mdev->rq_queue);
+ kfree(mdev->rs_plan_s);
+ kfree(mdev);
- /* cleanup the rest that has been
- * allocated from drbd_new_device
- * and actually free the mdev itself */
- drbd_free_mdev(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
}
+/* One global retry thread, if we need to push back some bio and have it
+ * reinserted through our make request function.
+ */
+static struct retry_worker {
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ spinlock_t lock;
+ struct list_head writes;
+} retry;
+
+static void do_retry(struct work_struct *ws)
+{
+ struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
+ LIST_HEAD(writes);
+ struct drbd_request *req, *tmp;
+
+ spin_lock_irq(&retry->lock);
+ list_splice_init(&retry->writes, &writes);
+ spin_unlock_irq(&retry->lock);
+
+ list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->master_bio;
+ unsigned long start_time = req->start_time;
+ bool expected;
+
+ expected =
+ expect(atomic_read(&req->completion_ref) == 0) &&
+ expect(req->rq_state & RQ_POSTPONED) &&
+ expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
+ (req->rq_state & RQ_LOCAL_ABORTED) != 0);
+
+ if (!expected)
+ dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ req, atomic_read(&req->completion_ref),
+ req->rq_state);
+
+ /* We still need to put one kref associated with the
+ * "completion_ref" going zero in the code path that queued it
+ * here. The request object may still be referenced by a
+ * frozen local req->private_bio, in case we force-detached.
+ */
+ kref_put(&req->kref, drbd_req_destroy);
+
+ /* A single suspended or otherwise blocking device may stall
+ * all others as well. Fortunately, this code path is to
+ * recover from a situation that "should not happen":
+ * concurrent writes in multi-primary setup.
+ * In a "normal" lifecycle, this workqueue is supposed to be
+ * destroyed without ever doing anything.
+ * If it turns out to be an issue anyway, we can do per
+ * resource (replication group) or per device (minor) retry
+ * workqueues instead.
+ */
+
+ /* We are not just doing generic_make_request(),
+ * as we want to keep the start_time information. */
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
+ }
+}
+
+void drbd_restart_request(struct drbd_request *req)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&retry.lock, flags);
+ list_move_tail(&req->tl_requests, &retry.writes);
+ spin_unlock_irqrestore(&retry.lock, flags);
+
+ /* Drop the extra reference that would otherwise
+ * have been dropped by complete_master_bio.
+ * do_retry() needs to grab a new one. */
+ dec_ap_bio(req->w.mdev);
+
+ queue_work(retry.wq, &retry.worker);
+}
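+/* drbd_restart_request() feeds the retry machinery above: a write whose
+ * request was marked RQ_POSTPONED (e.g. after a conflicting concurrent write
+ * in a multi-primary setup) is queued here and its master bio is re-submitted
+ * from the retry workqueue via __drbd_make_request(). */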
+
+
static void drbd_cleanup(void)
{
unsigned int i;
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn, *tmp;
unregister_reboot_notifier(&drbd_notifier);
@@ -3505,19 +2292,31 @@ static void drbd_cleanup(void)
if (drbd_proc)
remove_proc_entry("drbd", NULL);
- drbd_nl_cleanup();
+ if (retry.wq)
+ destroy_workqueue(retry.wq);
+
+ drbd_genl_unregister();
- if (minor_table) {
- i = minor_count;
- while (i--)
- drbd_delete_device(i);
- drbd_destroy_mempools();
+ idr_for_each_entry(&minors, mdev, i) {
+ idr_remove(&minors, mdev_to_minor(mdev));
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ del_gendisk(mdev->vdisk);
+ /* synchronize_rcu(); No other threads running at this point */
+ kref_put(&mdev->kref, &drbd_minor_destroy);
}
- kfree(minor_table);
+ /* not _rcu since there is no other updater anymore. Genl already unregistered */
+ list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ list_del(&tconn->all_tconn); /* not _rcu: no proc, no other threads */
+ /* synchronize_rcu(); */
+ kref_put(&tconn->kref, &conn_destroy);
+ }
+ drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
+ idr_destroy(&minors);
+
printk(KERN_INFO "drbd: module cleanup done.\n");
}
@@ -3542,7 +2341,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
@@ -3566,7 +2365,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
@@ -3576,20 +2375,243 @@ out:
return r;
}
-struct drbd_conf *drbd_new_device(unsigned int minor)
+static void drbd_init_workqueue(struct drbd_work_queue* wq)
+{
+ spin_lock_init(&wq->q_lock);
+ INIT_LIST_HEAD(&wq->q);
+ init_waitqueue_head(&wq->q_wait);
+}
+
+struct drbd_tconn *conn_get_by_name(const char *name)
+{
+ struct drbd_tconn *tconn;
+
+ if (!name || !name[0])
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (!strcmp(tconn->name, name)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+ void *peer_addr, int peer_addr_len)
+{
+ struct drbd_tconn *tconn;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
+ if (tconn->my_addr_len == my_addr_len &&
+ tconn->peer_addr_len == peer_addr_len &&
+ !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
+ !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
+ kref_get(&tconn->kref);
+ goto found;
+ }
+ }
+ tconn = NULL;
+found:
+ rcu_read_unlock();
+ return tconn;
+}
+
+static int drbd_alloc_socket(struct drbd_socket *socket)
+{
+ socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->rbuf)
+ return -ENOMEM;
+ socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!socket->sbuf)
+ return -ENOMEM;
+ return 0;
+}
+
+static void drbd_free_socket(struct drbd_socket *socket)
+{
+ free_page((unsigned long) socket->sbuf);
+ free_page((unsigned long) socket->rbuf);
+}
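+/* Both buffers are single pages.  Since the tconn is allocated with
+ * kzalloc(), calling drbd_free_socket() on a partially set up socket from
+ * the conn_create() error path is safe: free_page() of a NULL buffer is a
+ * no-op. */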
+
+void conn_free_crypto(struct drbd_tconn *tconn)
+{
+ drbd_free_sock(tconn);
+
+ crypto_free_hash(tconn->csums_tfm);
+ crypto_free_hash(tconn->verify_tfm);
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ crypto_free_hash(tconn->integrity_tfm);
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+
+ tconn->csums_tfm = NULL;
+ tconn->verify_tfm = NULL;
+ tconn->cram_hmac_tfm = NULL;
+ tconn->integrity_tfm = NULL;
+ tconn->peer_integrity_tfm = NULL;
+ tconn->int_dig_in = NULL;
+ tconn->int_dig_vv = NULL;
+}
+
+int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
+{
+ cpumask_var_t new_cpu_mask;
+ int err;
+
+ if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+ /*
+ retcode = ERR_NOMEM;
+ drbd_msg_put_info("unable to allocate cpumask");
+ */
+
+ /* silently ignore cpu mask on UP kernel */
+ if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
+ /* FIXME: Get rid of constant 32 here */
+ err = bitmap_parse(res_opts->cpu_mask, 32,
+ cpumask_bits(new_cpu_mask), nr_cpu_ids);
+ if (err) {
+ conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
+ /* retcode = ERR_CPU_MASK_PARSE; */
+ goto fail;
+ }
+ }
+ tconn->res_opts = *res_opts;
+ if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
+ cpumask_copy(tconn->cpu_mask, new_cpu_mask);
+ drbd_calc_cpu_mask(tconn);
+ tconn->receiver.reset_cpu_mask = 1;
+ tconn->asender.reset_cpu_mask = 1;
+ tconn->worker.reset_cpu_mask = 1;
+ }
+ err = 0;
+
+fail:
+ free_cpumask_var(new_cpu_mask);
+ return err;
+
+}
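+/* Note: a changed cpu_mask does not migrate the threads right away; it only
+ * sets reset_cpu_mask on the receiver, asender and worker, which pick up the
+ * new affinity the next time they pass through their thread loop. */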
+
+/* caller must be under genl_lock() */
+struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
+{
+ struct drbd_tconn *tconn;
+
+ tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
+ if (!tconn)
+ return NULL;
+
+ tconn->name = kstrdup(name, GFP_KERNEL);
+ if (!tconn->name)
+ goto fail;
+
+ if (drbd_alloc_socket(&tconn->data))
+ goto fail;
+ if (drbd_alloc_socket(&tconn->meta))
+ goto fail;
+
+ if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
+ goto fail;
+
+ if (set_resource_options(tconn, res_opts))
+ goto fail;
+
+ tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
+ if (!tconn->current_epoch)
+ goto fail;
+
+ INIT_LIST_HEAD(&tconn->transfer_log);
+
+ INIT_LIST_HEAD(&tconn->current_epoch->list);
+ tconn->epochs = 1;
+ spin_lock_init(&tconn->epoch_lock);
+ tconn->write_ordering = WO_bdev_flush;
+
+ tconn->send.seen_any_write_yet = false;
+ tconn->send.current_epoch_nr = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ tconn->cstate = C_STANDALONE;
+ mutex_init(&tconn->cstate_mutex);
+ spin_lock_init(&tconn->req_lock);
+ mutex_init(&tconn->conf_update);
+ init_waitqueue_head(&tconn->ping_wait);
+ idr_init(&tconn->volumes);
+
+ drbd_init_workqueue(&tconn->sender_work);
+ mutex_init(&tconn->data.mutex);
+ mutex_init(&tconn->meta.mutex);
+
+ drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
+ drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
+ drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
+
+ kref_init(&tconn->kref);
+ list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
+
+ return tconn;
+
+fail:
+ kfree(tconn->current_epoch);
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn);
+
+ return NULL;
+}
+
+void conn_destroy(struct kref *kref)
+{
+ struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
+
+ if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
+ conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+ kfree(tconn->current_epoch);
+
+ idr_destroy(&tconn->volumes);
+
+ free_cpumask_var(tconn->cpu_mask);
+ drbd_free_socket(&tconn->meta);
+ drbd_free_socket(&tconn->data);
+ kfree(tconn->name);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ kfree(tconn);
+}
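+/* Reference counting: conn_create() returns with one reference (kref_init);
+ * conn_new_minor() takes an additional one per volume, which is dropped
+ * again in drbd_minor_destroy(), and conn_get_by_name()/conn_get_by_addrs()
+ * return with an extra reference that the caller must drop with
+ * kref_put(&tconn->kref, &conn_destroy). */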
+
+enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
struct drbd_conf *mdev;
struct gendisk *disk;
struct request_queue *q;
+ int vnr_got = vnr;
+ int minor_got = minor;
+ enum drbd_ret_code err = ERR_NOMEM;
+
+ mdev = minor_to_mdev(minor);
+ if (mdev)
+ return ERR_MINOR_EXISTS;
/* GFP_KERNEL, we are outside of all write-out paths */
mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
if (!mdev)
- return NULL;
- if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
- goto out_no_cpumask;
+ return ERR_NOMEM;
+
+ kref_get(&tconn->kref);
+ mdev->tconn = tconn;
mdev->minor = minor;
+ mdev->vnr = vnr;
drbd_init_set_defaults(mdev);
@@ -3627,7 +2649,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock;
+ q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
@@ -3635,30 +2657,44 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
if (drbd_bm_init(mdev))
goto out_no_bitmap;
- /* no need to lock access, we are still initializing this minor device. */
- if (!tl_init(mdev))
- goto out_no_tl;
-
- mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
- if (!mdev->app_reads_hash)
- goto out_no_app_reads;
-
- mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
- if (!mdev->current_epoch)
- goto out_no_epoch;
-
- INIT_LIST_HEAD(&mdev->current_epoch->list);
- mdev->epochs = 1;
-
- return mdev;
-
-/* out_whatever_else:
- kfree(mdev->current_epoch); */
-out_no_epoch:
- kfree(mdev->app_reads_hash);
-out_no_app_reads:
- tl_cleanup(mdev);
-out_no_tl:
+ mdev->read_requests = RB_ROOT;
+ mdev->write_requests = RB_ROOT;
+
+ if (!idr_pre_get(&minors, GFP_KERNEL))
+ goto out_no_minor_idr;
+ if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+ goto out_no_minor_idr;
+ if (minor_got != minor) {
+ err = ERR_MINOR_EXISTS;
+ drbd_msg_put_info("requested minor exists already");
+ goto out_idr_remove_minor;
+ }
+
+ if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
+ goto out_idr_remove_minor;
+ if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+ goto out_idr_remove_minor;
+ if (vnr_got != vnr) {
+ err = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("requested volume exists already");
+ goto out_idr_remove_vol;
+ }
+ add_disk(disk);
+ kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
+
+ /* inherit the connection state */
+ mdev->state.conn = tconn->cstate;
+ if (mdev->state.conn == C_WF_REPORT_PARAMS)
+ drbd_connected(mdev);
+
+ return NO_ERROR;
+
+out_idr_remove_vol:
+ idr_remove(&tconn->volumes, vnr_got);
+out_idr_remove_minor:
+ idr_remove(&minors, minor_got);
+ synchronize_rcu();
+out_no_minor_idr:
drbd_bm_cleanup(mdev);
out_no_bitmap:
__free_page(mdev->md_io_page);
@@ -3667,55 +2703,25 @@ out_no_io_page:
out_no_disk:
blk_cleanup_queue(q);
out_no_q:
- free_cpumask_var(mdev->cpu_mask);
-out_no_cpumask:
- kfree(mdev);
- return NULL;
-}
-
-/* counterpart of drbd_new_device.
- * last part of drbd_delete_device. */
-void drbd_free_mdev(struct drbd_conf *mdev)
-{
- kfree(mdev->current_epoch);
- kfree(mdev->app_reads_hash);
- tl_cleanup(mdev);
- if (mdev->bitmap) /* should no longer be there. */
- drbd_bm_cleanup(mdev);
- __free_page(mdev->md_io_page);
- put_disk(mdev->vdisk);
- blk_cleanup_queue(mdev->rq_queue);
- free_cpumask_var(mdev->cpu_mask);
- drbd_free_tl_hash(mdev);
kfree(mdev);
+ kref_put(&tconn->kref, &conn_destroy);
+ return err;
}
-
int __init drbd_init(void)
{
int err;
- if (sizeof(struct p_handshake) != 80) {
- printk(KERN_ERR
- "drbd: never change the size or layout "
- "of the HandShake packet.\n");
- return -EINVAL;
- }
-
if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
printk(KERN_ERR
- "drbd: invalid minor_count (%d)\n", minor_count);
+ "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = 8;
+ minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
- err = drbd_nl_init();
- if (err)
- return err;
-
err = register_blkdev(DRBD_MAJOR, "drbd");
if (err) {
printk(KERN_ERR
@@ -3724,6 +2730,13 @@ int __init drbd_init(void)
return err;
}
+ err = drbd_genl_register();
+ if (err) {
+ printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+ goto fail;
+ }
+
+
register_reboot_notifier(&drbd_notifier);
/*
@@ -3734,22 +2747,29 @@ int __init drbd_init(void)
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
- GFP_KERNEL);
- if (!minor_table)
- goto Enomem;
+ idr_init(&minors);
err = drbd_create_mempools();
if (err)
- goto Enomem;
+ goto fail;
drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
- goto Enomem;
+ goto fail;
}
rwlock_init(&global_state_lock);
+ INIT_LIST_HEAD(&drbd_tconns);
+
+ retry.wq = create_singlethread_workqueue("drbd-reissue");
+ if (!retry.wq) {
+ printk(KERN_ERR "drbd: unable to create retry workqueue\n");
+ goto fail;
+ }
+ INIT_WORK(&retry.worker, do_retry);
+ spin_lock_init(&retry.lock);
+ INIT_LIST_HEAD(&retry.writes);
printk(KERN_INFO "drbd: initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
@@ -3757,11 +2777,10 @@ int __init drbd_init(void)
printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
printk(KERN_INFO "drbd: registered as block device major %d\n",
DRBD_MAJOR);
- printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
return 0; /* Success! */
-Enomem:
+fail:
drbd_cleanup();
if (err == -ENOMEM)
/* currently always the case */
@@ -3782,47 +2801,42 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev);
}
-void drbd_free_sock(struct drbd_conf *mdev)
+void drbd_free_sock(struct drbd_tconn *tconn)
{
- if (mdev->data.socket) {
- mutex_lock(&mdev->data.mutex);
- kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
- sock_release(mdev->data.socket);
- mdev->data.socket = NULL;
- mutex_unlock(&mdev->data.mutex);
+ if (tconn->data.socket) {
+ mutex_lock(&tconn->data.mutex);
+ kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
+ sock_release(tconn->data.socket);
+ tconn->data.socket = NULL;
+ mutex_unlock(&tconn->data.mutex);
}
- if (mdev->meta.socket) {
- mutex_lock(&mdev->meta.mutex);
- kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
- sock_release(mdev->meta.socket);
- mdev->meta.socket = NULL;
- mutex_unlock(&mdev->meta.mutex);
+ if (tconn->meta.socket) {
+ mutex_lock(&tconn->meta.mutex);
+ kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
+ sock_release(tconn->meta.socket);
+ tconn->meta.socket = NULL;
+ mutex_unlock(&tconn->meta.mutex);
}
}
+/* meta data management */
-void drbd_free_resources(struct drbd_conf *mdev)
+void conn_md_sync(struct drbd_tconn *tconn)
{
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = NULL;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = NULL;
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = NULL;
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = NULL;
-
- drbd_free_sock(mdev);
+ struct drbd_conf *mdev;
+ int vnr;
- __no_warn(local,
- drbd_free_bc(mdev->ldev);
- mdev->ldev = NULL;);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_md_sync(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
}
-/* meta data management */
-
struct meta_data_on_disk {
u64 la_size; /* last agreed size. */
u64 uuid[UI_SIZE]; /* UUIDs. */
@@ -3833,7 +2847,7 @@ struct meta_data_on_disk {
u32 md_size_sect;
u32 al_offset; /* offset to this block */
u32 al_nr_extents; /* important for restoring the AL */
- /* `-- act_log->nr_elements <-- sync_conf.al_extents */
+ /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
u32 bm_offset; /* offset to the bitmap, from here */
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 la_peer_max_bio_size; /* last peer max_bio_size */
@@ -3871,7 +2885,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
- buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
+ buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
@@ -3885,7 +2899,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
sector = mdev->ldev->md.md_offset;
- if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
@@ -3906,11 +2920,12 @@ out:
* @bdev: Device from which the meta data should be read in.
*
* Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
- * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
+ * something goes wrong.
*/
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
struct meta_data_on_disk *buffer;
+ u32 magic, flags;
int i, rv = NO_ERROR;
if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -3920,7 +2935,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!buffer)
goto out;
- if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
dev_err(DEV, "Error while reading metadata.\n");
@@ -3928,8 +2943,20 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
goto err;
}
- if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
- dev_err(DEV, "Error while reading metadata, magic not found.\n");
+ magic = be32_to_cpu(buffer->magic);
+ flags = be32_to_cpu(buffer->flags);
+ if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
+ (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
+ /* btw: that's Activity Log clean, not "all" clean. */
+ dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
+ rv = ERR_MD_UNCLEAN;
+ goto err;
+ }
+ if (magic != DRBD_MD_MAGIC_08) {
+ if (magic == DRBD_MD_MAGIC_07)
+ dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
+ else
+ dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
rv = ERR_MD_INVALID;
goto err;
}
@@ -3963,20 +2990,16 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
for (i = UI_CURRENT; i < UI_SIZE; i++)
bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev->md.flags = be32_to_cpu(buffer->flags);
- mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED) {
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
+ spin_unlock_irq(&mdev->tconn->req_lock);
err:
drbd_md_put_buffer(mdev);
@@ -4011,7 +3034,7 @@ void drbd_md_mark_dirty(struct drbd_conf *mdev)
}
#endif
-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
@@ -4019,7 +3042,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
if (mdev->state.role == R_PRIMARY)
@@ -4034,14 +3057,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
drbd_md_mark_dirty(mdev);
}
+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
}
- _drbd_uuid_set(mdev, idx, val);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}
/**
@@ -4054,15 +3087,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
u64 val;
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid;
+
+ get_random_bytes(&val, sizeof(u64));
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(mdev, UI_CURRENT, val);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
- get_random_bytes(&val, sizeof(u64));
- _drbd_uuid_set(mdev, UI_CURRENT, val);
drbd_print_uuids(mdev, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(mdev);
@@ -4070,9 +3108,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
+ unsigned long flags;
if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -4084,6 +3124,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+
drbd_md_mark_dirty(mdev);
}
@@ -4135,9 +3177,10 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
return rv;
}
-static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_bitmap_io(struct drbd_work *w, int unused)
{
struct bm_io_work *work = container_of(w, struct bm_io_work, w);
+ struct drbd_conf *mdev = w->mdev;
int rv = -EIO;
D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
@@ -4149,8 +3192,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
put_ldev(mdev);
}
- clear_bit(BITMAP_IO, &mdev->flags);
- smp_mb__after_clear_bit();
+ clear_bit_unlock(BITMAP_IO, &mdev->flags);
wake_up(&mdev->misc_wait);
if (work->done)
@@ -4160,7 +3202,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
work->why = NULL;
work->flags = 0;
- return 1;
+ return 0;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
@@ -4173,29 +3215,51 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
drbd_free_bc(mdev->ldev);
mdev->ldev = NULL;);
- if (mdev->md_io_tmpp) {
- __free_page(mdev->md_io_tmpp);
- mdev->md_io_tmpp = NULL;
- }
clear_bit(GO_DISKLESS, &mdev->flags);
}
-static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_go_diskless(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
D_ASSERT(mdev->state.disk == D_FAILED);
/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
* again, it will be safe to free them. */
+
+ /* Try to write changed bitmap pages, read errors may have just
+ * set some bits outside the area covered by the activity log.
+ *
+ * If we have an IO error during the bitmap writeout,
+ * we will want a full sync next time, just in case.
+ * (Do we want a specific meta data flag for this?)
+ *
+ * If that does not make it to stable storage either,
+ * we cannot do anything about that anymore.
+ *
+ * We still need to check if both bitmap and ldev are present, we may
+ * end up here after a failed attach, before ldev was even assigned.
+ */
+ if (mdev->bitmap && mdev->ldev) {
+ if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ "detach", BM_LOCKED_MASK)) {
+ if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
+ drbd_md_set_flag(mdev, MDF_FULL_SYNC);
+ drbd_md_sync(mdev);
+ }
+ }
+ }
+
drbd_force_state(mdev, NS(disk, D_DISKLESS));
- return 1;
+ return 0;
}
void drbd_go_diskless(struct drbd_conf *mdev)
{
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
/**
@@ -4215,7 +3279,7 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
void (*done)(struct drbd_conf *, int),
char *why, enum bm_flag flags)
{
- D_ASSERT(current == mdev->worker.task);
+ D_ASSERT(current == mdev->tconn->worker.task);
D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
@@ -4229,13 +3293,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
mdev->bm_io_work.why = why;
mdev->bm_io_work.flags = flags;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
/**
@@ -4252,7 +3316,7 @@ int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
{
int rv;
- D_ASSERT(current != mdev->worker.task);
+ D_ASSERT(current != mdev->tconn->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
drbd_suspend_io(mdev);
@@ -4291,18 +3355,127 @@ static void md_sync_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
+ /* must not double-queue! */
+ if (list_empty(&mdev->md_sync_work.list))
+ drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}
-static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int w_md_sync(struct drbd_work *w, int unused)
{
+ struct drbd_conf *mdev = w->mdev;
+
dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
drbd_md_sync(mdev);
- return 1;
+ return 0;
+}
+
+const char *cmdname(enum drbd_packet cmd)
+{
+ /* THINK may need to become several global tables
+ * when we want to support more than
+ * one PRO_VERSION */
+ static const char *cmdnames[] = {
+ [P_DATA] = "Data",
+ [P_DATA_REPLY] = "DataReply",
+ [P_RS_DATA_REPLY] = "RSDataReply",
+ [P_BARRIER] = "Barrier",
+ [P_BITMAP] = "ReportBitMap",
+ [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
+ [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
+ [P_UNPLUG_REMOTE] = "UnplugRemote",
+ [P_DATA_REQUEST] = "DataRequest",
+ [P_RS_DATA_REQUEST] = "RSDataRequest",
+ [P_SYNC_PARAM] = "SyncParam",
+ [P_SYNC_PARAM89] = "SyncParam89",
+ [P_PROTOCOL] = "ReportProtocol",
+ [P_UUIDS] = "ReportUUIDs",
+ [P_SIZES] = "ReportSizes",
+ [P_STATE] = "ReportState",
+ [P_SYNC_UUID] = "ReportSyncUUID",
+ [P_AUTH_CHALLENGE] = "AuthChallenge",
+ [P_AUTH_RESPONSE] = "AuthResponse",
+ [P_PING] = "Ping",
+ [P_PING_ACK] = "PingAck",
+ [P_RECV_ACK] = "RecvAck",
+ [P_WRITE_ACK] = "WriteAck",
+ [P_RS_WRITE_ACK] = "RSWriteAck",
+ [P_SUPERSEDED] = "Superseded",
+ [P_NEG_ACK] = "NegAck",
+ [P_NEG_DREPLY] = "NegDReply",
+ [P_NEG_RS_DREPLY] = "NegRSDReply",
+ [P_BARRIER_ACK] = "BarrierAck",
+ [P_STATE_CHG_REQ] = "StateChgRequest",
+ [P_STATE_CHG_REPLY] = "StateChgReply",
+ [P_OV_REQUEST] = "OVRequest",
+ [P_OV_REPLY] = "OVReply",
+ [P_OV_RESULT] = "OVResult",
+ [P_CSUM_RS_REQUEST] = "CsumRSRequest",
+ [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
+ [P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
+ [P_OUT_OF_SYNC] = "OutOfSync",
+ [P_RETRY_WRITE] = "RetryWrite",
+ [P_RS_CANCEL] = "RSCancel",
+ [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
+ [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
+ [P_RETRY_WRITE] = "retry_write",
+ [P_PROTOCOL_UPDATE] = "protocol_update",
+
+ /* enum drbd_packet, but not commands - obsoleted flags:
+ * P_MAY_IGNORE
+ * P_MAX_OPT_CMD
+ */
+ };
+
+ /* too big for the array: 0xfffX */
+ if (cmd == P_INITIAL_META)
+ return "InitialMeta";
+ if (cmd == P_INITIAL_DATA)
+ return "InitialData";
+ if (cmd == P_CONNECTION_FEATURES)
+ return "ConnectionFeatures";
+ if (cmd >= ARRAY_SIZE(cmdnames))
+ return "Unknown";
+ return cmdnames[cmd];
+}
+
+/**
+ * drbd_wait_misc - wait for a request to make progress
+ * @mdev: device associated with the request
+ * @i: the struct drbd_interval embedded in struct drbd_request or
+ * struct drbd_peer_request
+ *
+ * Must be called with the req_lock held; the lock is released while
+ * sleeping and re-acquired before returning.
+ *
+ * Returns 0 once the request made progress, -ETIMEDOUT if it did not do so
+ * in time (or the connection was lost), and -ERESTARTSYS if interrupted by
+ * a signal.
+ */
+int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+ struct net_conf *nc;
+ DEFINE_WAIT(wait);
+ long timeout;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -ETIMEDOUT;
+ }
+ timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
+ rcu_read_unlock();
+
+ /* Indicate to wake up mdev->misc_wait on progress. */
+ i->waiting = true;
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ timeout = schedule_timeout(timeout);
+ finish_wait(&mdev->misc_wait, &wait);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (!timeout || mdev->state.conn < C_CONNECTED)
+ return -ETIMEDOUT;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ return 0;
}
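
The timeout above combines two net_conf tunables: timeout is given in tenths of a second, ko_count multiplies it, and a ko_count of zero means wait indefinitely. For example (illustrative values, not necessarily the configured defaults): timeout = 60 and ko_count = 7 give 60 * HZ / 10 * 7 = 42 * HZ, i.e. at most 42 seconds of sleep, while ko_count = 0 yields MAX_SCHEDULE_TIMEOUT.
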
#ifdef CONFIG_DRBD_FAULT_INJECTION
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index edb490aad8b4..2af26fc95280 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -29,159 +29,317 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
-#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
-#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
-#include <linux/compiler.h>
#include <linux/kthread.h>
-static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
-static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
-static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
-
-/* see get_sb_bdev and bd_claim */
+#include <net/genetlink.h>
+
+/* .doit */
+// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
+// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
+/* .dumpit */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+#include <linux/genl_magic_func.h>
+
+/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
-/* Generate the tag_list to struct functions */
-#define NL_PACKET(name, number, fields) \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
-static int name ## _from_tags(struct drbd_conf *mdev, \
- unsigned short *tags, struct name *arg) \
-{ \
- int tag; \
- int dlen; \
- \
- while ((tag = get_unaligned(tags++)) != TT_END) { \
- dlen = get_unaligned(tags++); \
- switch (tag_number(tag)) { \
- fields \
- default: \
- if (tag & T_MANDATORY) { \
- dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
- return 0; \
- } \
- } \
- tags = (unsigned short *)((char *)tags + dlen); \
- } \
- return 1; \
-}
-#define NL_INTEGER(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
- arg->member = get_unaligned((int *)(tags)); \
- break;
-#define NL_INT64(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
- arg->member = get_unaligned((u64 *)(tags)); \
+/* Configuration is strictly serialized, because generic netlink message
+ * processing is strictly serialized by the genl_lock().
+ * That means we can use a single static global drbd_config_context struct.
+ */
+static struct drbd_config_context {
+ /* assigned from drbd_genlmsghdr */
+ unsigned int minor;
+ /* assigned from request attributes, if present */
+ unsigned int volume;
+#define VOLUME_UNSPECIFIED (-1U)
+ /* pointer into the request skb,
+ * limited lifetime! */
+ char *resource_name;
+ struct nlattr *my_addr;
+ struct nlattr *peer_addr;
+
+ /* reply buffer */
+ struct sk_buff *reply_skb;
+ /* pointer into reply buffer */
+ struct drbd_genlmsghdr *reply_dh;
+ /* resolved from attributes, if possible */
+ struct drbd_conf *mdev;
+ struct drbd_tconn *tconn;
+} adm_ctx;
+
+static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
+ if (genlmsg_reply(skb, info))
+ printk(KERN_ERR "drbd: error sending genl reply\n");
+}
+
+/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
+ * reason it could fail is lack of space in the skb, and 4k are available. */
+int drbd_msg_put_info(const char *info)
+{
+ struct sk_buff *skb = adm_ctx.reply_skb;
+ struct nlattr *nla;
+ int err = -EMSGSIZE;
+
+ if (!info || !info[0])
+ return 0;
+
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ if (!nla)
+ return err;
+
+ err = nla_put_string(skb, T_info_text, info);
+ if (err) {
+ nla_nest_cancel(skb, nla);
+ return err;
+ } else
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
+/* This would be a good candidate for a "pre_doit" hook,
+ * and per-family private info->pointers.
+ * But we need to stay compatible with older kernels.
+ * If it returns successfully, adm_ctx members are valid.
+ */
+#define DRBD_ADM_NEED_MINOR 1
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
+static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
+ unsigned flags)
+{
+ struct drbd_genlmsghdr *d_in = info->userhdr;
+ const u8 cmd = info->genlhdr->cmd;
+ int err;
+
+ memset(&adm_ctx, 0, sizeof(adm_ctx));
+
+ /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
+ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!adm_ctx.reply_skb) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+ info, &drbd_genl_family, 0, cmd);
+ /* putting a few bytes into a fresh skb of >= 4k will always succeed,
+ * but check anyway */
+ if (!adm_ctx.reply_dh) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ adm_ctx.reply_dh->minor = d_in->minor;
+ adm_ctx.reply_dh->ret_code = NO_ERROR;
+
+ adm_ctx.volume = VOLUME_UNSPECIFIED;
+ if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
+ struct nlattr *nla;
+ /* parse and validate only */
+ err = drbd_cfg_context_from_attrs(NULL, info);
+ if (err)
+ goto fail;
+
+ /* It was present, and valid,
+ * copy it over to the reply skb. */
+ err = nla_put_nohdr(adm_ctx.reply_skb,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
+ info->attrs[DRBD_NLA_CFG_CONTEXT]);
+ if (err)
+ goto fail;
+
+ /* and assign stuff to the global adm_ctx */
+ nla = nested_attr_tb[__nla_type(T_ctx_volume)];
+ if (nla)
+ adm_ctx.volume = nla_get_u32(nla);
+ nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+ if (nla)
+ adm_ctx.resource_name = nla_data(nla);
+ adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+ adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+ if ((adm_ctx.my_addr &&
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ (adm_ctx.peer_addr &&
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ adm_ctx.minor = d_in->minor;
+ adm_ctx.mdev = minor_to_mdev(d_in->minor);
+ adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+
+ if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+ drbd_msg_put_info("unknown minor");
+ return ERR_MINOR_INVALID;
+ }
+ if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("unknown resource");
+ return ERR_INVALID_REQUEST;
+ }
+
+ if (flags & DRBD_ADM_NEED_CONNECTION) {
+ if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("no resource name expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev) {
+ drbd_msg_put_info("no minor number expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.my_addr && adm_ctx.peer_addr)
+ adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ nla_len(adm_ctx.my_addr),
+ nla_data(adm_ctx.peer_addr),
+ nla_len(adm_ctx.peer_addr));
+ if (!adm_ctx.tconn) {
+ drbd_msg_put_info("unknown connection");
+ return ERR_INVALID_REQUEST;
+ }
+ }
+
+ /* some more paranoia, if the request was over-determined */
+ if (adm_ctx.mdev && adm_ctx.tconn &&
+ adm_ctx.mdev->tconn != adm_ctx.tconn) {
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+ adm_ctx.minor, adm_ctx.resource_name,
+ adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists in different resource");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev &&
+ adm_ctx.volume != VOLUME_UNSPECIFIED &&
+ adm_ctx.volume != adm_ctx.mdev->vnr) {
+ pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+ adm_ctx.minor, adm_ctx.volume,
+ adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists as different volume");
+ return ERR_INVALID_REQUEST;
+ }
+
+ return NO_ERROR;
+
+fail:
+ nlmsg_free(adm_ctx.reply_skb);
+ adm_ctx.reply_skb = NULL;
+ return err;
+}
+
+static int drbd_adm_finish(struct genl_info *info, int retcode)
+{
+ if (adm_ctx.tconn) {
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+ adm_ctx.tconn = NULL;
+ }
+
+ if (!adm_ctx.reply_skb)
+ return -ENOMEM;
+
+ adm_ctx.reply_dh->ret_code = retcode;
+ drbd_adm_send_reply(adm_ctx.reply_skb, info);
+ return 0;
+}
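
Taken together, drbd_adm_prepare(), drbd_msg_put_info() and drbd_adm_finish() give every generic-netlink .doit handler the same skeleton. A minimal sketch of that calling pattern (the handler name is hypothetical, everything else comes from the helpers above):

int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)		/* no reply skb could be allocated; */
		return retcode;		/* retcode is a negative errno here */
	if (retcode != NO_ERROR)	/* prepare already queued an info text */
		goto out;

	/* ... act on adm_ctx.mdev / adm_ctx.tconn, set retcode, and
	 * optionally drbd_msg_put_info("reason for the admin to read") ... */
out:
	drbd_adm_finish(info, retcode);	/* fill in ret_code, send reply, drop refs */
	return 0;
}
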
+
+static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+{
+ char *afs;
+
+ /* FIXME: A future version will not allow this case. */
+ if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ return;
+
+ switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ case AF_INET6:
+ afs = "ipv6";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+ &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
break;
-#define NL_BIT(pn, pr, member) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
- arg->member = *(char *)(tags) ? 1 : 0; \
+ case AF_INET:
+ afs = "ipv4";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
break;
-#define NL_STRING(pn, pr, member, len) \
- case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
- if (dlen > len) { \
- dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
- #member, dlen, (unsigned int)len); \
- return 0; \
- } \
- arg->member ## _len = dlen; \
- memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
- break;
-#include <linux/drbd_nl.h>
-
-/* Generate the struct to tag_list functions */
-#define NL_PACKET(name, number, fields) \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
-static unsigned short* \
-name ## _to_tags(struct drbd_conf *mdev, \
- struct name *arg, unsigned short *tags) \
-{ \
- fields \
- return tags; \
-}
-
-#define NL_INTEGER(pn, pr, member) \
- put_unaligned(pn | pr | TT_INTEGER, tags++); \
- put_unaligned(sizeof(int), tags++); \
- put_unaligned(arg->member, (int *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(int));
-#define NL_INT64(pn, pr, member) \
- put_unaligned(pn | pr | TT_INT64, tags++); \
- put_unaligned(sizeof(u64), tags++); \
- put_unaligned(arg->member, (u64 *)tags); \
- tags = (unsigned short *)((char *)tags+sizeof(u64));
-#define NL_BIT(pn, pr, member) \
- put_unaligned(pn | pr | TT_BIT, tags++); \
- put_unaligned(sizeof(char), tags++); \
- *(char *)tags = arg->member; \
- tags = (unsigned short *)((char *)tags+sizeof(char));
-#define NL_STRING(pn, pr, member, len) \
- put_unaligned(pn | pr | TT_STRING, tags++); \
- put_unaligned(arg->member ## _len, tags++); \
- memcpy(tags, arg->member, arg->member ## _len); \
- tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include <linux/drbd_nl.h>
-
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
-void drbd_nl_send_reply(struct cn_msg *, int);
+ default:
+ afs = "ssocks";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ }
+ snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
- NULL, /* Will be set to address family */
- NULL, /* Will be set to address */
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
NULL };
-
- char mb[12], af[20], ad[60], *afs;
+ char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct sib_info sib;
int ret;
- if (current == mdev->worker.task)
- set_bit(CALLBACK_PENDING, &mdev->flags);
+ if (current == tconn->worker.task)
+ set_bit(CALLBACK_PENDING, &tconn->flags);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
-
- if (get_net_conf(mdev)) {
- switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
- case AF_INET6:
- afs = "ipv6";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
- break;
- case AF_INET:
- afs = "ipv4";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- break;
- default:
- afs = "ssocks";
- snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
- }
- snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
- envp[3]=af;
- envp[4]=ad;
- put_net_conf(mdev);
- }
+ setup_khelper_env(tconn, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
drbd_md_sync(mdev);
dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
-
- drbd_bcast_ev_helper(mdev, cmd);
+ sib.sib_reason = SIB_HELPER_PRE;
+ sib.helper_name = cmd;
+ drbd_bcast_event(mdev, &sib);
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -191,9 +349,46 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
+ sib.sib_reason = SIB_HELPER_POST;
+ sib.helper_exit_code = ret;
+ drbd_bcast_event(mdev, &sib);
+
+ if (current == tconn->worker.task)
+ clear_bit(CALLBACK_PENDING, &tconn->flags);
+
+ if (ret < 0) /* Ignore any ERRNOs we got. */
+ ret = 0;
+
+ return ret;
+}
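
The (char[20]) { } and (char[60]) { } entries above are compound literals: each call to drbd_khelper() (and conn_khelper() below) gets its own writable, zero-initialized buffers inside envp[], which setup_khelper_env() then fills via snprintf(). A self-contained user-space illustration of the same trick (names are made up; the empty-brace initializer relies on the GNU C extension the kernel uses):

#include <stdio.h>

static void fill_af(char **envp)
{
	snprintf(envp[1], 20, "EXAMPLE_AF=%s", "ipv4");
}

int main(void)
{
	char *envp[] = { "HOME=/", (char[20]) { }, NULL };

	fill_af(envp);
	puts(envp[1]);	/* prints EXAMPLE_AF=ipv4 */
	return 0;
}
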
+
+int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+{
+ char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ (char[20]) { }, /* address family */
+ (char[60]) { }, /* address */
+ NULL };
+ char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+ int ret;
+
+ setup_khelper_env(tconn, envp);
+ conn_md_sync(tconn);
- if (current == mdev->worker.task)
- clear_bit(CALLBACK_PENDING, &mdev->flags);
+ conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+ /* TODO: conn_bcast_event() ?? */
+
+ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ if (ret)
+ conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ else
+ conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+ usermode_helper, cmd, tconn->name,
+ (ret >> 8) & 0xff, ret);
+ /* TODO: conn_bcast_event() ?? */
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -201,116 +396,129 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
return ret;
}
-enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
+ enum drbd_fencing_p fp = FP_NOT_AVAIL;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (get_ldev_if_state(mdev, D_CONSISTENT)) {
+ fp = max_t(enum drbd_fencing_p, fp,
+ rcu_dereference(mdev->ldev->disk_conf)->fencing);
+ put_ldev(mdev);
+ }
+ }
+ rcu_read_unlock();
+
+ return fp;
+}
+
+bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+{
+ union drbd_state mask = { };
+ union drbd_state val = { };
+ enum drbd_fencing_p fp;
char *ex_to_string;
int r;
- enum drbd_disk_state nps;
- enum drbd_fencing_p fp;
- D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+ if (tconn->cstate >= C_WF_REPORT_PARAMS) {
+ conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+ return false;
+ }
- if (get_ldev_if_state(mdev, D_CONSISTENT)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- } else {
- dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
- nps = mdev->state.pdsk;
+ fp = highest_fencing_policy(tconn);
+ switch (fp) {
+ case FP_NOT_AVAIL:
+ conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
goto out;
+ case FP_DONT_CARE:
+ return true;
+ default: ;
}
- r = drbd_khelper(mdev, "fence-peer");
+ r = conn_khelper(tconn, "fence-peer");
switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
- nps = D_INCONSISTENT;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_INCONSISTENT;
break;
case 4: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
case 5: /* peer was down */
- if (mdev->state.disk == D_UP_TO_DATE) {
+ if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
} else {
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
- nps = mdev->state.pdsk;
}
break;
case 6: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds that the other peer is active. */
ex_to_string = "peer is active";
- dev_warn(DEV, "Peer is primary, outdating myself.\n");
- nps = D_UNKNOWN;
- _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
+ conn_warn(tconn, "Peer is primary, outdating myself.\n");
+ mask.disk = D_MASK;
+ val.disk = D_OUTDATED;
break;
case 7:
if (fp != FP_STONITH)
- dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
+ conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
- nps = D_OUTDATED;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
break;
default:
/* The script is broken ... */
- nps = D_UNKNOWN;
- dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
- return nps;
+ conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+ return false; /* Eventually leave IO frozen */
}
- dev_info(DEV, "fence-peer helper returned %d (%s)\n",
- (r>>8) & 0xff, ex_to_string);
+ conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+ (r>>8) & 0xff, ex_to_string);
-out:
- if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
- /* The handler was not successful... unfreeze here, the
- state engine can not unfreeze... */
- _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
- }
+ out:
- return nps;
+ /* Not using
+ conn_request_state(tconn, mask, val, CS_VERBOSE);
+ here, because we might have been able to re-establish the connection in the
+ meantime. */
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
+ _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
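
The mask/val pairs built in the switch above use DRBD's sparse state-change encoding: mask marks which fields of union drbd_state the request touches, val carries their new values, and anything not covered by mask is left alone. The straightforward form of such a request would look like the sketch below; conn_try_outdate_peer() deliberately avoids it and instead takes req_lock and re-checks cstate, because the connection may have come back in the meantime.

/* Sketch only: ask for "peer disk -> Outdated" on every volume of a connection. */
static void example_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };

	mask.pdsk = D_MASK;	/* only the peer-disk field is affected */
	val.pdsk = D_OUTDATED;	/* ... and it becomes Outdated */
	conn_request_state(tconn, mask, val, CS_VERBOSE);
}
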
static int _try_outdate_peer_async(void *data)
{
- struct drbd_conf *mdev = (struct drbd_conf *)data;
- enum drbd_disk_state nps;
- union drbd_state ns;
+ struct drbd_tconn *tconn = (struct drbd_tconn *)data;
- nps = drbd_try_outdate_peer(mdev);
-
- /* Not using
- drbd_request_state(mdev, NS(pdsk, nps));
- here, because we might were able to re-establish the connection
- in the meantime. This can only partially be solved in the state's
- engine is_valid_state() and is_valid_state_transition()
- functions.
-
- nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
- pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
- therefore we have to have the pre state change check here.
- */
- spin_lock_irq(&mdev->req_lock);
- ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
- ns.pdsk = nps;
- _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
+ conn_try_outdate_peer(tconn);
+ kref_put(&tconn->kref, &conn_destroy);
return 0;
}
-void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
+void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
struct task_struct *opa;
- opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
- if (IS_ERR(opa))
- dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
+ kref_get(&tconn->kref);
+ opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ if (IS_ERR(opa)) {
+ conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
+ kref_put(&tconn->kref, &conn_destroy);
+ }
}
enum drbd_state_rv
@@ -318,15 +526,15 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
+ struct net_conf *nc;
int try = 0;
int forced = 0;
union drbd_state mask, val;
- enum drbd_disk_state nps;
if (new_role == R_PRIMARY)
- request_ping(mdev); /* Detect a dead peer ASAP */
+ request_ping(mdev->tconn); /* Detect a dead peer ASAP */
- mutex_lock(&mdev->state_mutex);
+ mutex_lock(mdev->state_mutex);
mask.i = 0; mask.role = R_MASK;
val.i = 0; val.role = new_role;
@@ -354,38 +562,34 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (rv == SS_NO_UP_TO_DATE_DISK &&
mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
- nps = drbd_try_outdate_peer(mdev);
- if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
+ if (conn_try_outdate_peer(mdev->tconn)) {
val.disk = D_UP_TO_DATE;
mask.disk = D_MASK;
}
-
- val.pdsk = nps;
- mask.pdsk = D_MASK;
-
continue;
}
if (rv == SS_NOTHING_TO_DO)
- goto fail;
+ goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
- nps = drbd_try_outdate_peer(mdev);
-
- if (force && nps > D_OUTDATED) {
+ if (!conn_try_outdate_peer(mdev->tconn) && force) {
dev_warn(DEV, "Forced into split brain situation!\n");
- nps = D_OUTDATED;
- }
-
- mask.pdsk = D_MASK;
- val.pdsk = nps;
+ mask.pdsk = D_MASK;
+ val.pdsk = D_OUTDATED;
+ }
continue;
}
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
+ int timeo;
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
if (try < max_tries)
try = max_tries - 1;
continue;
@@ -394,13 +598,13 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
rv = _drbd_request_state(mdev, mask, val,
CS_VERBOSE + CS_WAIT_COMPLETE);
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
}
break;
}
if (rv < SS_SUCCESS)
- goto fail;
+ goto out;
if (forced)
dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
@@ -408,6 +612,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* Wait until nothing is on the fly :) */
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ /* FIXME also wait for all pending P_BARRIER_ACK? */
+
if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, true);
if (get_ldev(mdev)) {
@@ -415,10 +621,12 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
put_ldev(mdev);
}
} else {
- if (get_net_conf(mdev)) {
- mdev->net_conf->want_lose = 0;
- put_net_conf(mdev);
- }
+ mutex_lock(&mdev->tconn->conf_update);
+ nc = mdev->tconn->net_conf;
+ if (nc)
+ nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+ mutex_unlock(&mdev->tconn->conf_update);
+
set_disk_ro(mdev->vdisk, false);
if (get_ldev(mdev)) {
if (((mdev->state.conn < C_CONNECTED ||
@@ -444,67 +652,47 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
drbd_md_sync(mdev);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- fail:
- mutex_unlock(&mdev->state_mutex);
+out:
+ mutex_unlock(mdev->state_mutex);
return rv;
}
-static struct drbd_conf *ensure_mdev(int minor, int create)
+static const char *from_attrs_err_to_txt(int err)
{
- struct drbd_conf *mdev;
-
- if (minor >= minor_count)
- return NULL;
-
- mdev = minor_to_mdev(minor);
-
- if (!mdev && create) {
- struct gendisk *disk = NULL;
- mdev = drbd_new_device(minor);
-
- spin_lock_irq(&drbd_pp_lock);
- if (minor_table[minor] == NULL) {
- minor_table[minor] = mdev;
- disk = mdev->vdisk;
- mdev = NULL;
- } /* else: we lost the race */
- spin_unlock_irq(&drbd_pp_lock);
-
- if (disk) /* we won the race above */
- /* in case we ever add a drbd_delete_device(),
- * don't forget the del_gendisk! */
- add_disk(disk);
- else /* we lost the race above */
- drbd_free_mdev(mdev);
-
- mdev = minor_to_mdev(minor);
- }
-
- return mdev;
+ return err == -ENOMSG ? "required attribute missing" :
+ err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+ err == -EEXIST ? "can not change invariant setting" :
+ "invalid attribute value";
}
-static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
- struct primary primary_args;
-
- memset(&primary_args, 0, sizeof(struct primary));
- if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
- }
-
- reply->ret_code =
- drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
+ struct set_role_parms parms;
+ int err;
+ enum drbd_ret_code retcode;
- return 0;
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
-{
- reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
+ err = set_role_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
+ }
+ if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
+ retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+ else
+ retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -514,7 +702,12 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- switch (bdev->dc.meta_dev_idx) {
+ int meta_dev_idx;
+
+ rcu_read_lock();
+ meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+
+ switch (meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -533,7 +726,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
case DRBD_MD_INDEX_FLEX_INT:
bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
- bdev->md.al_offset = -MD_AL_MAX_SIZE;
+ bdev->md.al_offset = -MD_AL_SECTORS;
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -549,6 +742,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
break;
}
+ rcu_read_unlock();
}
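
disk_conf is now an RCU-protected pointer: readers bracket the dereference with rcu_read_lock()/rcu_read_unlock() and copy out the values they need, as done for meta_dev_idx above. A minimal sketch of that reader pattern (the field chosen is just an example; the caller is assumed to hold a get_ldev() reference so mdev->ldev is stable):

static int example_read_al_extents(struct drbd_conf *mdev)
{
	int al_extents;

	rcu_read_lock();
	al_extents = rcu_dereference(mdev->ldev->disk_conf)->al_extents;
	rcu_read_unlock();

	return al_extents;	/* use the copied value outside the read side */
}
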
/* input size is expected to be in KB */
@@ -581,10 +775,16 @@ char *ppsize(char *buf, unsigned long long size)
* R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
* peer may not initiate a resize.
*/
+/* Note these are not to be confused with
+ * drbd_adm_suspend_io/drbd_adm_resume_io,
+ * which are (sub) state changes triggered by admin (drbdsetup),
+ * and can be long lived.
+ * This changes an mdev->flag, is triggered by drbd internals,
+ * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
set_bit(SUSPEND_IO, &mdev->flags);
- if (is_susp(mdev->state))
+ if (drbd_suspended(mdev))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}
@@ -605,7 +805,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
- sector_t la_size;
+ sector_t la_size, u_size;
sector_t size;
char ppb[10];
@@ -633,7 +833,10 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
@@ -696,12 +899,12 @@ out:
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
sector_t m_size; /* my size */
- sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
sector_t size = 0;
m_size = drbd_get_max_capacity(bdev);
@@ -750,24 +953,21 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
unsigned int in_use;
int i;
- ERR_IF(mdev->sync_conf.al_extents < 7)
- mdev->sync_conf.al_extents = 127;
-
if (mdev->act_log &&
- mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+ mdev->act_log->nr_elements == dc->al_extents)
return 0;
in_use = 0;
t = mdev->act_log;
- n = lc_create("act_log", drbd_al_ext_cache,
- mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+ n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
+ dc->al_extents, sizeof(struct lc_element), 0);
if (n == NULL) {
dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -808,7 +1008,9 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- max_segments = mdev->ldev->dc.max_bio_bvecs;
+ rcu_read_lock();
+ max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+ rcu_read_unlock();
put_ldev(mdev);
}
@@ -852,12 +1054,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because, from 8.3.8 onwards, the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94) {
- peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ if (mdev->tconn->agreed_pro_version < 94)
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- } else if (mdev->agreed_pro_version == 94)
+ else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else /* drbd 8.3.8 onwards */
+ else if (mdev->tconn->agreed_pro_version < 100)
+ peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
+ else
peer = DRBD_MAX_BIO_SIZE;
}
@@ -872,36 +1076,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
drbd_setup_queue_param(mdev, new);
}
-/* serialize deconfig (worker exiting, doing cleanup)
- * and reconfig (drbdsetup disk, drbdsetup net)
- *
- * Wait for a potentially exiting worker, then restart it,
- * or start a new one. Flush any pending work, there may still be an
- * after_state_change queued.
- */
-static void drbd_reconfig_start(struct drbd_conf *mdev)
+/* Starts the worker thread */
+static void conn_reconfig_start(struct drbd_tconn *tconn)
{
- wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
- wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
- drbd_thread_start(&mdev->worker);
- drbd_flush_workqueue(mdev);
+ drbd_thread_start(&tconn->worker);
+ conn_flush_workqueue(tconn);
}
-/* if still unconfigured, stops worker again.
- * if configured now, clears CONFIG_PENDING.
- * wakes potential waiters */
-static void drbd_reconfig_done(struct drbd_conf *mdev)
+/* if still unconfigured, stops worker again. */
+static void conn_reconfig_done(struct drbd_tconn *tconn)
{
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.disk == D_DISKLESS &&
- mdev->state.conn == C_STANDALONE &&
- mdev->state.role == R_SECONDARY) {
- set_bit(DEVICE_DYING, &mdev->flags);
- drbd_thread_stop_nowait(&mdev->worker);
- } else
- clear_bit(CONFIG_PENDING, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
- wake_up(&mdev->state_wait);
+ bool stop_threads;
+ spin_lock_irq(&tconn->req_lock);
+ stop_threads = conn_all_vols_unconf(tconn) &&
+ tconn->cstate == C_STANDALONE;
+ spin_unlock_irq(&tconn->req_lock);
+ if (stop_threads) {
+ /* asender is implicitly stopped by receiver
+ * in conn_disconnect() */
+ drbd_thread_stop(&tconn->receiver);
+ drbd_thread_stop(&tconn->worker);
+ }
}
/* Make sure IO is suspended before calling this function. */
@@ -909,42 +1104,187 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
{
int s = 0;
- if (lc_try_lock(mdev->act_log)) {
- drbd_al_shrink(mdev);
- lc_unlock(mdev->act_log);
- } else {
+ if (!lc_try_lock(mdev->act_log)) {
dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
return;
}
- spin_lock_irq(&mdev->req_lock);
+ drbd_al_shrink(mdev);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ lc_unlock(mdev->act_log);
if (s)
dev_info(DEV, "Suspended AL updates\n");
}
-/* does always return 0;
- * interesting return code is in reply->ret_code */
-static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+
+static bool should_set_defaults(struct genl_info *info)
+{
+ unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
+ return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
+}
+
+static void enforce_disk_conf_limits(struct disk_conf *dc)
+{
+ if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
+ dc->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
+ dc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+ if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+ dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+}
+
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
+ struct drbd_conf *mdev;
+ struct disk_conf *new_disk_conf, *old_disk_conf;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
+ int err, fifo_size;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* we also need a disk
+ * to change the options on */
+ if (!get_ldev(mdev)) {
+ retcode = ERR_NO_DISK;
+ goto out;
+ }
+
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ if (should_set_defaults(info))
+ set_disk_conf_defaults(new_disk_conf);
+
+ err = disk_conf_from_attrs_for_change(new_disk_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ }
+
+ if (!expect(new_disk_conf->resync_rate >= 1))
+ new_disk_conf->resync_rate = 1;
+
+ enforce_disk_conf_limits(new_disk_conf);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
+ dev_err(DEV, "kmalloc of fifo_buffer failed");
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+ }
+
+ drbd_suspend_io(mdev);
+ wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+ drbd_al_shrink(mdev);
+ err = drbd_check_al_size(mdev, new_disk_conf);
+ lc_unlock(mdev->act_log);
+ wake_up(&mdev->al_wait);
+ drbd_resume_io(mdev);
+
+ if (err) {
+ retcode = ERR_NOMEM;
+ goto fail_unlock;
+ }
+
+ write_lock_irq(&global_state_lock);
+ retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+ if (retcode == NO_ERROR) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ drbd_resync_after_changed(mdev);
+ }
+ write_unlock_irq(&global_state_lock);
+
+ if (retcode != NO_ERROR)
+ goto fail_unlock;
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+
+ if (new_disk_conf->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ if (new_disk_conf->md_flushes)
+ clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
+
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
+ drbd_md_sync(mdev);
+
+ if (mdev->state.conn >= C_CONNECTED)
+ drbd_send_sync_param(mdev);
+
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ kfree(old_plan);
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+ goto success;
+
+fail_unlock:
+ mutex_unlock(&mdev->tconn->conf_update);
+ fail:
+ kfree(new_disk_conf);
+ kfree(new_plan);
+success:
+ put_ldev(mdev);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
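
drbd_adm_disk_opts() above also shows the writer side of the same RCU scheme: build a complete new disk_conf, publish it with rcu_assign_pointer() while holding conf_update, and only kfree() the old copy after synchronize_rcu() has let all readers drain. Reduced to just that step (error handling and the fifo/activity-log handling omitted; the helper name is an assumption):

static void example_swap_disk_conf(struct drbd_conf *mdev,
				   struct disk_conf *new_disk_conf)
{
	struct disk_conf *old_disk_conf;

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
	mutex_unlock(&mdev->tconn->conf_update);

	synchronize_rcu();		/* wait for readers of the old copy */
	kfree(old_disk_conf);
}
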
+
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ int err;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+ struct disk_conf *new_disk_conf = NULL;
struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
+ struct fifo_buffer *new_plan = NULL;
union drbd_state ns, os;
enum drbd_state_rv rv;
- int cp_discovered = 0;
- int logical_block_size;
+ struct net_conf *nc;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto finish;
+
+ mdev = adm_ctx.mdev;
+ conn_reconfig_start(mdev->tconn);
/* if you want to reconfigure, please tear down first */
if (mdev->state.disk > D_DISKLESS) {
@@ -959,47 +1299,65 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* make sure there is no leftover from previous force-detach attempts */
clear_bit(FORCE_DETACH, &mdev->flags);
+ clear_bit(WAS_IO_ERROR, &mdev->flags);
+ clear_bit(WAS_READ_ERROR, &mdev->flags);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- /* allocation not in the IO path, cqueue thread context */
+ /* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
retcode = ERR_NOMEM;
goto fail;
}
+ spin_lock_init(&nbc->md.uuid_lock);
- nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
- nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
- nbc->dc.fencing = DRBD_FENCING_DEF;
- nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+ nbc->disk_conf = new_disk_conf;
- if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
+ set_disk_conf_defaults(new_disk_conf);
+ err = disk_conf_from_attrs(new_disk_conf, info);
+ if (err) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+ enforce_disk_conf_limits(new_disk_conf);
+
+ new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
+ if (!new_plan) {
+ retcode = ERR_NOMEM;
+ goto fail;
+ }
+
+ if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
- if (get_net_conf(mdev)) {
- int prot = mdev->net_conf->wire_protocol;
- put_net_conf(mdev);
- if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+ rcu_read_unlock();
retcode = ERR_STONITH_AND_PROT_A;
goto fail;
}
}
+ rcu_read_unlock();
- bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
@@ -1014,12 +1372,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+ bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- (nbc->dc.meta_dev_idx < 0) ?
+ (new_disk_conf->meta_dev_idx < 0) ?
(void *)mdev : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
- dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+ dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
@@ -1027,14 +1385,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->md_bdev = bdev;
if ((nbc->backing_bdev == nbc->md_bdev) !=
- (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
- nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+ (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}
resync_lru = lc_create("resync", drbd_bm_ext_cache,
- 61, sizeof(struct bm_extent),
+ 1, 61, sizeof(struct bm_extent),
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
@@ -1044,21 +1402,21 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
drbd_md_set_sector_offsets(mdev, nbc);
- if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+ if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
- (unsigned long long) nbc->dc.disk_size);
+ (unsigned long long) new_disk_conf->disk_size);
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
- if (nbc->dc.meta_dev_idx < 0) {
+ if (new_disk_conf->meta_dev_idx < 0) {
max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
/* at least one MB, otherwise it does not make sense */
min_md_device_sectors = (2<<10);
} else {
max_possible_sectors = DRBD_MAX_SECTORS;
- min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+ min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1083,14 +1441,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "==> truncating very big lower level device "
"to currently maximum possible %llu sectors <==\n",
(unsigned long long) max_possible_sectors);
- if (nbc->dc.meta_dev_idx >= 0)
+ if (new_disk_conf->meta_dev_idx >= 0)
dev_warn(DEV, "==>> using internal or flexible "
"meta data may help <<==\n");
}
drbd_suspend_io(mdev);
/* also wait for the last barrier ack. */
- wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
+ /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+ * We need a way to either ignore barrier acks for barriers sent before a device
+ * was attached, or a way to wait for all pending barrier acks to come in.
+ * As barriers are counted per resource,
+ * we'd need to suspend io on all devices of a resource.
+ */
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
/* and for any other previously queued work */
drbd_flush_workqueue(mdev);
@@ -1105,25 +1469,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_md_set_sector_offsets(mdev, nbc);
- /* allocate a second IO page if logical_block_size != 512 */
- logical_block_size = bdev_logical_block_size(nbc->md_bdev);
- if (logical_block_size == 0)
- logical_block_size = MD_SECTOR_SIZE;
-
- if (logical_block_size != MD_SECTOR_SIZE) {
- if (!mdev->md_io_tmpp) {
- struct page *page = alloc_page(GFP_NOIO);
- if (!page)
- goto force_diskless_dec;
-
- dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
- logical_block_size, MD_SECTOR_SIZE);
- dev_warn(DEV, "Workaround engaged (has performance impact).\n");
-
- mdev->md_io_tmpp = page;
- }
- }
-
if (!mdev->bitmap) {
if (drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
@@ -1145,30 +1490,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
/* Since we are diskless, fix the activity log first... */
- if (drbd_check_al_size(mdev)) {
+ if (drbd_check_al_size(mdev, new_disk_conf)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
}
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
+ drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
- if (!drbd_al_read_log(mdev, nbc)) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
-
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
- if (nbc->dc.no_md_flush)
- set_bit(MD_NO_FUA, &mdev->flags);
- else
+ if (new_disk_conf->md_flushes)
clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1177,11 +1517,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
D_ASSERT(mdev->ldev == NULL);
mdev->ldev = nbc;
mdev->resync = resync_lru;
+ mdev->rs_plan_s = new_plan;
nbc = NULL;
resync_lru = NULL;
+ new_disk_conf = NULL;
+ new_plan = NULL;
- mdev->write_ordering = WO_bdev_flush;
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1189,10 +1531,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
clear_bit(CRASHED_PRIMARY, &mdev->flags);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
+ !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
set_bit(CRASHED_PRIMARY, &mdev->flags);
- cp_discovered = 1;
- }
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
@@ -1228,7 +1568,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
} else if (dd == grew)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+ if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+ drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
@@ -1238,16 +1580,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
} else {
if (drbd_bitmap_io(mdev, &drbd_bm_read,
- "read from attaching", BM_LOCKED_MASK) < 0) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
- }
-
- if (cp_discovered) {
- drbd_al_apply_to_bm(mdev);
- if (drbd_bitmap_io(mdev, &drbd_bm_write,
- "crashed primary apply AL", BM_LOCKED_MASK)) {
+ "read from attaching", BM_LOCKED_MASK)) {
retcode = ERR_IO_MD_DISK;
goto force_diskless_dec;
}
@@ -1256,9 +1589,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
drbd_suspend_al(mdev); /* IO is still suspended here... */
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- ns.i = os.i;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
+ ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state,
otherwise investigate MDF_WasUpToDate...
If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
@@ -1276,8 +1609,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
ns.pdsk = D_OUTDATED;
- if ( ns.disk == D_CONSISTENT &&
- (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
+ rcu_read_lock();
+ if (ns.disk == D_CONSISTENT &&
+ (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1285,6 +1619,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
this point, because drbd_request_state() modifies these
flags. */
+ if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ rcu_read_unlock();
+
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (mdev->state.conn == C_CONNECTED) {
@@ -1300,12 +1641,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS)
goto force_diskless_dec;
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
if (mdev->state.role == R_PRIMARY)
mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
@@ -1316,16 +1658,17 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(mdev);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(mdev->tconn);
+ drbd_adm_finish(info, retcode);
return 0;
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
fail:
+ conn_reconfig_done(mdev->tconn);
if (nbc) {
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
@@ -1335,34 +1678,24 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
+ kfree(new_disk_conf);
lc_destroy(resync_lru);
+ kfree(new_plan);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ finish:
+ drbd_adm_finish(info, retcode);
return 0;
}
-/* Detaching the disk is a process in multiple stages. First we need to lock
- * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
- * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
- * internal references as well.
- * Only then we have finally detached. */
-static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static int adm_detach(struct drbd_conf *mdev, int force)
{
- enum drbd_ret_code retcode;
+ enum drbd_state_rv retcode;
int ret;
- struct detach dt = {};
- if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- goto out;
- }
-
- if (dt.detach_force) {
+ if (force) {
set_bit(FORCE_DETACH, &mdev->flags);
drbd_force_state(mdev, NS(disk, D_FAILED));
- reply->ret_code = SS_SUCCESS;
+ retcode = SS_SUCCESS;
goto out;
}
@@ -1374,326 +1707,529 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
-
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
- reply->ret_code = retcode;
out:
- return 0;
+ return retcode;
}
-static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+/* Detaching the disk is a process in multiple stages. First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
- int i, ns;
enum drbd_ret_code retcode;
- struct net_conf *new_conf = NULL;
- struct crypto_hash *tfm = NULL;
- struct crypto_hash *integrity_w_tfm = NULL;
- struct crypto_hash *integrity_r_tfm = NULL;
- struct hlist_head *new_tl_hash = NULL;
- struct hlist_head *new_ee_hash = NULL;
- struct drbd_conf *odev;
- char hmac_name[CRYPTO_MAX_ALG_NAME];
- void *int_dig_out = NULL;
- void *int_dig_in = NULL;
- void *int_dig_vv = NULL;
- struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+ struct detach_parms parms = { };
+ int err;
- drbd_reconfig_start(mdev);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (mdev->state.conn > C_STANDALONE) {
- retcode = ERR_NET_CONFIGURED;
- goto fail;
+ if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+ err = detach_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
- /* allocation not in the IO path, cqueue thread context */
+ retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+static bool conn_resync_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_SYNC_SOURCE ||
+ mdev->state.conn == C_SYNC_TARGET ||
+ mdev->state.conn == C_PAUSED_SYNC_S ||
+ mdev->state.conn == C_PAUSED_SYNC_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = false;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_VERIFY_S ||
+ mdev->state.conn == C_VERIFY_T) {
+ rv = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+static enum drbd_ret_code
+_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+{
+ struct drbd_conf *mdev;
+ int i;
+
+ if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+ if (new_conf->wire_protocol != old_conf->wire_protocol)
+ return ERR_NEED_APV_100;
+
+ if (new_conf->two_primaries != old_conf->two_primaries)
+ return ERR_NEED_APV_100;
+
+ if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+ return ERR_NEED_APV_100;
+ }
+
+ if (!new_conf->two_primaries &&
+ conn_highest_role(tconn) == R_PRIMARY &&
+ conn_highest_peer(tconn) == R_PRIMARY)
+ return ERR_NEED_ALLOW_TWO_PRI;
+
+ if (new_conf->two_primaries &&
+ (new_conf->wire_protocol != DRBD_PROT_C))
+ return ERR_NOT_PROTO_C;
+
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (get_ldev(mdev)) {
+ enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+ return ERR_STONITH_AND_PROT_A;
+ }
+ if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+ return ERR_DISCARD_IMPOSSIBLE;
+ }
+
+ if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+ return ERR_CONG_NOT_PROTO_A;
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+{
+ static enum drbd_ret_code rv;
+ struct drbd_conf *mdev;
+ int i;
+
+ rcu_read_lock();
+ rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+ rcu_read_unlock();
+
+ /* tconn->volumes protected by genl_lock() here */
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ if (!mdev->bitmap) {
+ if(drbd_bm_init(mdev))
+ return ERR_NOMEM;
+ }
+ }
+
+ return rv;
+}
+
+struct crypto {
+ struct crypto_hash *verify_tfm;
+ struct crypto_hash *csums_tfm;
+ struct crypto_hash *cram_hmac_tfm;
+ struct crypto_hash *integrity_tfm;
+};
+
+static int
+alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+{
+ if (!tfm_name[0])
+ return NO_ERROR;
+
+ *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(*tfm)) {
+ *tfm = NULL;
+ return err_alg;
+ }
+
+ return NO_ERROR;
+}
+
+static enum drbd_ret_code
+alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+{
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ enum drbd_ret_code rv;
+
+ rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+ ERR_CSUMS_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+ ERR_VERIFY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+ ERR_INTEGRITY_ALG);
+ if (rv != NO_ERROR)
+ return rv;
+ if (new_conf->cram_hmac_alg[0] != 0) {
+ snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+ new_conf->cram_hmac_alg);
+
+ rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
+ ERR_AUTH_ALG);
+ }
+
+ return rv;
+}
+
+static void free_crypto(struct crypto *crypto)
+{
+ crypto_free_hash(crypto->cram_hmac_tfm);
+ crypto_free_hash(crypto->integrity_tfm);
+ crypto_free_hash(crypto->csums_tfm);
+ crypto_free_hash(crypto->verify_tfm);
+}
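Editor's note (illustrative, not part of this patch): the struct crypto bundle above is used as a small transaction. All transforms are allocated up front with alloc_crypto(); the ones that are kept are moved into the tconn and their local pointers cleared, so a free_crypto() on the error or leftover path only frees what was not handed over. A minimal sketch of that convention:

	struct crypto crypto = { };

	if (alloc_crypto(&crypto, new_conf) != NO_ERROR)
		goto fail;

	/* transfer ownership of a tfm and clear the local pointer,
	 * so the cleanup below cannot free what the tconn now owns */
	crypto_free_hash(tconn->verify_tfm);
	tconn->verify_tfm = crypto.verify_tfm;
	crypto.verify_tfm = NULL;
	/* ... likewise for csums/integrity/cram_hmac where appropriate ... */
fail:
	free_crypto(&crypto);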
+
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct net_conf *old_conf, *new_conf = NULL;
+ int err;
+ int ovr; /* online verify running */
+ int rsr; /* re-sync running */
+ struct crypto crypto = { };
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ tconn = adm_ctx.tconn;
+
new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
+ goto out;
+ }
+
+ conn_reconfig_start(tconn);
+
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+
+ if (!old_conf) {
+ drbd_msg_put_info("net conf missing, try connect");
+ retcode = ERR_INVALID_REQUEST;
goto fail;
}
- new_conf->timeout = DRBD_TIMEOUT_DEF;
- new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
- new_conf->ping_int = DRBD_PING_INT_DEF;
- new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
- new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
- new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
- new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
- new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
- new_conf->ko_count = DRBD_KO_COUNT_DEF;
- new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
- new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
- new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
- new_conf->want_lose = 0;
- new_conf->two_primaries = 0;
- new_conf->wire_protocol = DRBD_PROT_C;
- new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
- new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
- new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
- new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;
-
- if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
+ *new_conf = *old_conf;
+ if (should_set_defaults(info))
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs_for_change(new_conf, info);
+ if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- if (new_conf->two_primaries
- && (new_conf->wire_protocol != DRBD_PROT_C)) {
- retcode = ERR_NOT_PROTO_C;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- if (get_ldev(mdev)) {
- enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
- retcode = ERR_STONITH_AND_PROT_A;
- goto fail;
- }
+ /* re-sync running */
+ rsr = conn_resync_running(tconn);
+ if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+ retcode = ERR_CSUMS_RESYNC_RUNNING;
+ goto fail;
}
- if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
- retcode = ERR_CONG_NOT_PROTO_A;
+ /* online verify running */
+ ovr = conn_ov_running(tconn);
+ if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+ retcode = ERR_VERIFY_RUNNING;
goto fail;
}
- if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
- retcode = ERR_DISCARD;
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
goto fail;
- }
- retcode = NO_ERROR;
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- new_my_addr = (struct sockaddr *)&new_conf->my_addr;
- new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev || odev == mdev)
- continue;
- if (get_net_conf(odev)) {
- taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
- if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
- !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
- retcode = ERR_LOCAL_ADDR;
-
- taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
- if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
- !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
- retcode = ERR_PEER_ADDR;
-
- put_net_conf(odev);
- if (retcode != NO_ERROR)
- goto fail;
- }
+ if (!rsr) {
+ crypto_free_hash(tconn->csums_tfm);
+ tconn->csums_tfm = crypto.csums_tfm;
+ crypto.csums_tfm = NULL;
+ }
+ if (!ovr) {
+ crypto_free_hash(tconn->verify_tfm);
+ tconn->verify_tfm = crypto.verify_tfm;
+ crypto.verify_tfm = NULL;
}
- if (new_conf->cram_hmac_alg[0] != 0) {
- snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- new_conf->cram_hmac_alg);
- tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- tfm = NULL;
- retcode = ERR_AUTH_ALG;
- goto fail;
- }
+ crypto_free_hash(tconn->integrity_tfm);
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
+ /* Do this without trying to take tconn->data.mutex again. */
+ __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- retcode = ERR_AUTH_ALG_ND;
- goto fail;
- }
- }
+ crypto_free_hash(tconn->cram_hmac_tfm);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
- if (new_conf->integrity_alg[0]) {
- integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_w_tfm)) {
- integrity_w_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ synchronize_rcu();
+ kfree(old_conf);
- if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
- retcode=ERR_INTEGRITY_ALG_ND;
- goto fail;
- }
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
- integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(integrity_r_tfm)) {
- integrity_r_tfm = NULL;
- retcode=ERR_INTEGRITY_ALG;
- goto fail;
- }
+ goto done;
+
+ fail:
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+ free_crypto(&crypto);
+ kfree(new_conf);
+ done:
+ conn_reconfig_done(tconn);
+ out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
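Editor's sketch (not part of this patch): the net_conf update above is the standard RCU publish-then-free pattern. Writers serialize on tconn->conf_update, readers only touch the configuration under rcu_read_lock(), and the old object is freed once a grace period guarantees no reader can still see it:

	/* writer side, serialized by tconn->conf_update */
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	*new_conf = *old_conf;				/* start from the current values */
	/* ... apply the requested changes to new_conf ... */
	rcu_assign_pointer(tconn->net_conf, new_conf);	/* publish */
	mutex_unlock(&tconn->conf_update);
	synchronize_rcu();				/* wait for pre-existing readers */
	kfree(old_conf);

	/* reader side */
	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	/* ... use nc; must not sleep, must not cache the pointer ... */
	rcu_read_unlock();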
+
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct drbd_conf *mdev;
+ struct net_conf *old_conf, *new_conf = NULL;
+ struct crypto crypto = { };
+ struct drbd_tconn *tconn;
+ enum drbd_ret_code retcode;
+ int i;
+ int err;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+ if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+ drbd_msg_put_info("connection endpoint(s) missing");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
}
- ns = new_conf->max_epoch_size/8;
- if (mdev->tl_hash_s != ns) {
- new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_tl_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ /* No need for _rcu here. All reconfiguration is
+ * strictly serialized on genl_lock(). We are protected against
+ * concurrent reconfiguration/addition/deletion */
+ list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+ if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
}
- }
- ns = new_conf->max_buffers/8;
- if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
- new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
- if (!new_ee_hash) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
}
}
- ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+ tconn = adm_ctx.tconn;
+ conn_reconfig_start(tconn);
- if (integrity_w_tfm) {
- i = crypto_hash_digestsize(integrity_w_tfm);
- int_dig_out = kmalloc(i, GFP_KERNEL);
- if (!int_dig_out) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_in = kmalloc(i, GFP_KERNEL);
- if (!int_dig_in) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- int_dig_vv = kmalloc(i, GFP_KERNEL);
- if (!int_dig_vv) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ if (tconn->cstate > C_STANDALONE) {
+ retcode = ERR_NET_CONFIGURED;
+ goto fail;
}
- if (!mdev->bitmap) {
- if(drbd_bm_init(mdev)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
+ /* allocation not in the IO path, drbdsetup / netlink process context */
+ new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
+ if (!new_conf) {
+ retcode = ERR_NOMEM;
+ goto fail;
}
- drbd_flush_workqueue(mdev);
- spin_lock_irq(&mdev->req_lock);
- if (mdev->net_conf != NULL) {
- retcode = ERR_NET_CONFIGURED;
- spin_unlock_irq(&mdev->req_lock);
+ set_net_conf_defaults(new_conf);
+
+ err = net_conf_from_attrs(new_conf, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
}
- mdev->net_conf = new_conf;
- mdev->send_cnt = 0;
- mdev->recv_cnt = 0;
+ retcode = check_net_options(tconn, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
- if (new_tl_hash) {
- kfree(mdev->tl_hash);
- mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
- mdev->tl_hash = new_tl_hash;
- }
+ retcode = alloc_crypto(&crypto, new_conf);
+ if (retcode != NO_ERROR)
+ goto fail;
+
+ ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+
+ conn_flush_workqueue(tconn);
- if (new_ee_hash) {
- kfree(mdev->ee_hash);
- mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
- mdev->ee_hash = new_ee_hash;
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ if (old_conf) {
+ retcode = ERR_NET_CONFIGURED;
+ mutex_unlock(&tconn->conf_update);
+ goto fail;
}
+ rcu_assign_pointer(tconn->net_conf, new_conf);
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = tfm;
+ conn_free_crypto(tconn);
+ tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+ tconn->integrity_tfm = crypto.integrity_tfm;
+ tconn->csums_tfm = crypto.csums_tfm;
+ tconn->verify_tfm = crypto.verify_tfm;
- crypto_free_hash(mdev->integrity_w_tfm);
- mdev->integrity_w_tfm = integrity_w_tfm;
+ tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+ tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
- crypto_free_hash(mdev->integrity_r_tfm);
- mdev->integrity_r_tfm = integrity_r_tfm;
+ mutex_unlock(&tconn->conf_update);
- kfree(mdev->int_dig_out);
- kfree(mdev->int_dig_in);
- kfree(mdev->int_dig_vv);
- mdev->int_dig_out=int_dig_out;
- mdev->int_dig_in=int_dig_in;
- mdev->int_dig_vv=int_dig_vv;
- retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, i) {
+ mdev->send_cnt = 0;
+ mdev->recv_cnt = 0;
+ }
+ rcu_read_unlock();
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ conn_reconfig_done(tconn);
+ drbd_adm_finish(info, retcode);
return 0;
fail:
- kfree(int_dig_out);
- kfree(int_dig_in);
- kfree(int_dig_vv);
- crypto_free_hash(tfm);
- crypto_free_hash(integrity_w_tfm);
- crypto_free_hash(integrity_r_tfm);
- kfree(new_tl_hash);
- kfree(new_ee_hash);
+ free_crypto(&crypto);
kfree(new_conf);
- reply->ret_code = retcode;
- drbd_reconfig_done(mdev);
+ conn_reconfig_done(tconn);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
- int retcode;
- struct disconnect dc;
-
- memset(&dc, 0, sizeof(struct disconnect));
- if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- if (dc.force) {
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.conn >= C_WF_CONNECTION)
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
- spin_unlock_irq(&mdev->req_lock);
- goto done;
- }
+ enum drbd_state_rv rv;
- retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ force ? CS_HARD : 0);
- if (retcode == SS_NOTHING_TO_DO)
- goto done;
- else if (retcode == SS_ALREADY_STANDALONE)
- goto done;
- else if (retcode == SS_PRIMARY_NOP) {
- /* Our statche checking code wants to see the peer outdated. */
- retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- pdsk, D_OUTDATED));
- } else if (retcode == SS_CW_FAILED_BY_PEER) {
+ switch (rv) {
+ case SS_NOTHING_TO_DO:
+ break;
+ case SS_ALREADY_STANDALONE:
+ return SS_SUCCESS;
+ case SS_PRIMARY_NOP:
+ /* Our state checking code wants to see the peer outdated. */
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ pdsk, D_OUTDATED), CS_VERBOSE);
+ break;
+ case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */
- retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
- disk, D_OUTDATED),
- CS_ORDERED);
- if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- retcode = SS_SUCCESS;
+ rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+ disk, D_OUTDATED), 0);
+ if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
+ rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+ CS_HARD);
}
+ break;
+ default:;
+ /* no special handling necessary */
+ }
+
+ if (rv >= SS_SUCCESS) {
+ enum drbd_state_rv rv2;
+ /* No one else can reconfigure the network while I am here.
+ * The state handling only uses drbd_thread_stop_nowait(),
+ * we want to really wait here until the receiver is no more.
+ */
+ drbd_thread_stop(&adm_ctx.tconn->receiver);
+
+ /* Race breaker. This additional state change request may be
+ * necessary, if this was a forced disconnect during a receiver
+ * restart. We may have "killed" the receiver thread just
+ * after drbdd_init() returned. Typically, we should be
+ * C_STANDALONE already, now, and this becomes a no-op.
+ */
+ rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+ CS_VERBOSE | CS_HARD);
+ if (rv2 < SS_SUCCESS)
+ conn_err(tconn,
+ "unexpected rv2=%d in conn_try_disconnect()\n",
+ rv2);
}
+ return rv;
+}
- if (retcode < SS_SUCCESS)
- goto fail;
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct disconnect_parms parms;
+ struct drbd_tconn *tconn;
+ enum drbd_state_rv rv;
+ enum drbd_ret_code retcode;
+ int err;
- if (wait_event_interruptible(mdev->state_wait,
- mdev->state.conn != C_DISCONNECTING)) {
- /* Do not test for mdev->state.conn == C_STANDALONE, since
- someone else might connect us in the mean time! */
- retcode = ERR_INTR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ tconn = adm_ctx.tconn;
+ memset(&parms, 0, sizeof(parms));
+ if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
+ err = disconnect_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
- done:
- retcode = NO_ERROR;
+ rv = conn_try_disconnect(tconn, parms.force_disconnect);
+ if (rv < SS_SUCCESS)
+ retcode = rv; /* FIXME: Type mismatch. */
+ else
+ retcode = NO_ERROR;
fail:
- drbd_md_sync(mdev);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -1705,7 +2241,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1713,20 +2249,34 @@ void resync_after_online_grow(struct drbd_conf *mdev)
_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
-static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
- struct resize rs;
- int retcode = NO_ERROR;
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+ struct resize_parms rs;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
enum determine_dev_size dd;
enum dds_flags ddsf;
+ sector_t u_size;
+ int err;
- memset(&rs, 0, sizeof(struct resize));
- if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
- retcode = ERR_MANDATORY_TAG;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
goto fail;
+
+ memset(&rs, 0, sizeof(struct resize_parms));
+ if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+ err = resize_parms_from_attrs(&rs, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
+ }
}
+ mdev = adm_ctx.mdev;
if (mdev->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
goto fail;
@@ -1743,15 +2293,36 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
goto fail_ldev;
}
+ rcu_read_lock();
+ u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+ if (u_size != (sector_t)rs.resize_size) {
+ new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ retcode = ERR_NOMEM;
+ goto fail_ldev;
+ }
+ }
+
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
+ if (new_disk_conf) {
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = (sector_t)rs.resize_size;
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+ }
+
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
dd = drbd_determine_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
@@ -1770,7 +2341,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
fail:
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
fail_ldev:
@@ -1778,204 +2349,55 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
-static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
+ struct drbd_tconn *tconn;
+ struct res_opts res_opts;
int err;
- int ovr; /* online verify running */
- int rsr; /* re-sync running */
- struct crypto_hash *verify_tfm = NULL;
- struct crypto_hash *csums_tfm = NULL;
- struct syncer_conf sc;
- cpumask_var_t new_cpu_mask;
- int *rs_plan_s = NULL;
- int fifo_size;
-
- if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
- retcode = ERR_NOMEM;
- goto fail;
- }
- if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
- memset(&sc, 0, sizeof(struct syncer_conf));
- sc.rate = DRBD_RATE_DEF;
- sc.after = DRBD_AFTER_DEF;
- sc.al_extents = DRBD_AL_EXTENTS_DEF;
- sc.on_no_data = DRBD_ON_NO_DATA_DEF;
- sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
- sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
- sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
- sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
- sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
- } else
- memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
-
- if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
- retcode = ERR_MANDATORY_TAG;
- goto fail;
- }
-
- /* re-sync running */
- rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
- mdev->state.conn == C_SYNC_TARGET ||
- mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T );
-
- if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
- retcode = ERR_CSUMS_RESYNC_RUNNING;
- goto fail;
- }
-
- if (!rsr && sc.csums_alg[0]) {
- csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(csums_tfm)) {
- csums_tfm = NULL;
- retcode = ERR_CSUMS_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
- retcode = ERR_CSUMS_ALG_ND;
- goto fail;
- }
- }
-
- /* online verify running */
- ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
- if (ovr) {
- if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
- retcode = ERR_VERIFY_RUNNING;
- goto fail;
- }
- }
-
- if (!ovr && sc.verify_alg[0]) {
- verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(verify_tfm)) {
- verify_tfm = NULL;
- retcode = ERR_VERIFY_ALG;
- goto fail;
- }
-
- if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
- retcode = ERR_VERIFY_ALG_ND;
- goto fail;
- }
- }
-
- /* silently ignore cpu mask on UP kernel */
- if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
- err = bitmap_parse(sc.cpu_mask, 32,
- cpumask_bits(new_cpu_mask), nr_cpu_ids);
- if (err) {
- dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
- retcode = ERR_CPU_MASK_PARSE;
- goto fail;
- }
- }
-
- ERR_IF (sc.rate < 1) sc.rate = 1;
- ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
-#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
- if (sc.al_extents > AL_MAX) {
- dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
- sc.al_extents = AL_MAX;
- }
-#undef AL_MAX
-
- /* to avoid spurious errors when configuring minors before configuring
- * the minors they depend on: if necessary, first create the minor we
- * depend on */
- if (sc.after >= 0)
- ensure_mdev(sc.after, 1);
-
- /* most sanity checks done, try to assign the new sync-after
- * dependency. need to hold the global lock in there,
- * to avoid a race in the dependency loop check. */
- retcode = drbd_alter_sa(mdev, sc.after);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
if (retcode != NO_ERROR)
goto fail;
+ tconn = adm_ctx.tconn;
- fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
- dev_err(DEV, "kmalloc of fifo_buffer failed");
- retcode = ERR_NOMEM;
- goto fail;
- }
- }
+ res_opts = tconn->res_opts;
+ if (should_set_defaults(info))
+ set_res_opts_defaults(&res_opts);
- /* ok, assign the rest of it as well.
- * lock against receive_SyncParam() */
- spin_lock(&mdev->peer_seq_lock);
- mdev->sync_conf = sc;
-
- if (!rsr) {
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- csums_tfm = NULL;
- }
-
- if (!ovr) {
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- verify_tfm = NULL;
- }
-
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
- rs_plan_s = NULL;
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto fail;
}
- spin_unlock(&mdev->peer_seq_lock);
-
- if (get_ldev(mdev)) {
- wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- drbd_al_shrink(mdev);
- err = drbd_check_al_size(mdev);
- lc_unlock(mdev->act_log);
- wake_up(&mdev->al_wait);
-
- put_ldev(mdev);
- drbd_md_sync(mdev);
-
- if (err) {
+ err = set_resource_options(tconn, &res_opts);
+ if (err) {
+ retcode = ERR_INVALID_REQUEST;
+ if (err == -ENOMEM)
retcode = ERR_NOMEM;
- goto fail;
- }
}
- if (mdev->state.conn >= C_CONNECTED)
- drbd_send_sync_param(mdev, &sc);
-
- if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
- cpumask_copy(mdev->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(mdev);
- mdev->receiver.reset_cpu_mask = 1;
- mdev->asender.reset_cpu_mask = 1;
- mdev->worker.reset_cpu_mask = 1;
- }
-
- kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
- kfree(rs_plan_s);
- free_cpumask_var(new_cpu_mask);
- crypto_free_hash(csums_tfm);
- crypto_free_hash(verify_tfm);
- reply->ret_code = retcode;
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ struct drbd_conf *mdev;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -1990,10 +2412,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
while (retcode == SS_NEED_CONNECTION) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.conn < C_CONNECTED)
retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (retcode != SS_NEED_CONNECTION)
break;
@@ -2002,7 +2424,25 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
+
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+ union drbd_state mask, union drbd_state val)
+{
+ enum drbd_ret_code retcode;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
@@ -2015,10 +2455,18 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
return rv;
}
-static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
- int retcode;
+ int retcode; /* drbd_ret_code, drbd_state_rv */
+ struct drbd_conf *mdev;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync.
@@ -2028,16 +2476,15 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-
if (retcode < SS_SUCCESS) {
if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
- /* The peer will get a resync upon connect anyways. Just make that
- into a full resync. */
+ /* The peer will get a resync upon connect anyways.
+ * Just make that into a full resync. */
retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
if (retcode >= SS_SUCCESS) {
if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
- "set_n_write from invalidate_peer",
- BM_LOCKED_SET_ALLOWED))
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
retcode = ERR_IO_MD_DISK;
}
} else
@@ -2045,30 +2492,41 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
}
drbd_resume_io(mdev);
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
- retcode = ERR_PAUSE_IS_SET;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- reply->ret_code = retcode;
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+ retcode = ERR_PAUSE_IS_SET;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
- union drbd_state s;
+ union drbd_dev_state s;
+ enum drbd_ret_code retcode;
- if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
- s = mdev->state;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+ s = adm_ctx.mdev->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2077,172 +2535,482 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
}
- reply->ret_code = retcode;
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
-
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
-static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
+ struct drbd_conf *mdev;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
drbd_suspend_io(mdev);
- reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
- if (reply->ret_code == SS_SUCCESS) {
+ retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+ if (retcode == SS_SUCCESS) {
if (mdev->state.conn < C_CONNECTED)
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev, fail_frozen_disk_io);
+ tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
- reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
- return 0;
+ return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
- unsigned short *tl;
+ struct nlattr *nla;
+ nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ if (!nla)
+ goto nla_put_failure;
+ if (vnr != VOLUME_UNSPECIFIED &&
+ nla_put_u32(skb, T_ctx_volume, vnr))
+ goto nla_put_failure;
+ if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+ goto nla_put_failure;
+ if (tconn->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ goto nla_put_failure;
+ if (tconn->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ goto nla_put_failure;
+ nla_nest_end(skb, nla);
+ return 0;
- tl = reply->tag_list;
+nla_put_failure:
+ if (nla)
+ nla_nest_cancel(skb, nla);
+ return -EMSGSIZE;
+}
- if (get_ldev(mdev)) {
- tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
- put_ldev(mdev);
- }
+int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+ const struct sib_info *sib)
+{
+ struct state_info *si = NULL; /* for sizeof(si->member); */
+ struct net_conf *nc;
+ struct nlattr *nla;
+ int got_ldev;
+ int err = 0;
+ int exclude_sensitive;
+
+ /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
+ * to. So we better exclude_sensitive information.
+ *
+ * If sib == NULL, this is drbd_adm_get_status, executed synchronously
+ * in the context of the requesting user process. Exclude sensitive
+ * information, unless current has superuser.
+ *
+ * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
+ * relies on the current implementation of netlink_dump(), which
+ * executes the dump callback successively from netlink_recvmsg(),
+ * always in the context of the receiving process */
+ exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
+
+ got_ldev = get_ldev(mdev);
+
+ /* We need to add connection name and volume number information still.
+ * Minor number is in drbd_genlmsghdr. */
+ if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+ goto nla_put_failure;
+
+ if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+ if (got_ldev)
+ if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
+ goto nla_put_failure;
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc)
+ err = net_conf_to_skb(skb, nc, exclude_sensitive);
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+
+ nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ if (!nla)
+ goto nla_put_failure;
+ if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+ nla_put_u32(skb, T_current_state, mdev->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ goto nla_put_failure;
+
+ if (got_ldev) {
+ int err;
- if (get_net_conf(mdev)) {
- tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
- put_net_conf(mdev);
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+ if (err)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ goto nla_put_failure;
+ if (C_SYNC_SOURCE <= mdev->state.conn &&
+ C_PAUSED_SYNC_T >= mdev->state.conn) {
+ if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ goto nla_put_failure;
+ }
}
- tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (sib) {
+ switch(sib->sib_reason) {
+ case SIB_SYNC_PROGRESS:
+ case SIB_GET_STATUS_REPLY:
+ break;
+ case SIB_STATE_CHANGE:
+ if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+ nla_put_u32(skb, T_new_state, sib->ns.i))
+ goto nla_put_failure;
+ break;
+ case SIB_HELPER_POST:
+ if (nla_put_u32(skb, T_helper_exit_code,
+ sib->helper_exit_code))
+ goto nla_put_failure;
+ /* fall through */
+ case SIB_HELPER_PRE:
+ if (nla_put_string(skb, T_helper, sib->helper_name))
+ goto nla_put_failure;
+ break;
+ }
+ }
+ nla_nest_end(skb, nla);
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (0)
+nla_put_failure:
+ err = -EMSGSIZE;
+ if (got_ldev)
+ put_ldev(mdev);
+ return err;
}
-static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short *tl = reply->tag_list;
- union drbd_state s = mdev->state;
- unsigned long rs_left;
- unsigned int res;
+ enum drbd_ret_code retcode;
+ int err;
- tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* no local ref, no bitmap, no syncer progress. */
- if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
- if (get_ldev(mdev)) {
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_ldev(mdev);
- }
+ err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- return (int)((char *)tl - (char *)reply->tag_list);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
-
- tl = reply->tag_list;
+ struct drbd_conf *mdev;
+ struct drbd_genlmsghdr *dh;
+ struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
+ struct drbd_tconn *tconn = NULL;
+ struct drbd_tconn *tmp;
+ unsigned volume = cb->args[1];
+
+ /* Open coded, deferred, iteration:
+ * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+ * idr_for_each_entry(&tconn->volumes, mdev, i) {
+ * ...
+ * }
+ * }
+ * where tconn is cb->args[0];
+ * and i is cb->args[1];
+ *
+ * cb->args[2] indicates if we shall loop over all resources,
+ * or just dump all volumes of a single resource.
+ *
+ * This may miss entries inserted after this dump started,
+ * or entries deleted before they are reached.
+ *
+ * We need to make sure the mdev won't disappear while
+ * we are looking at it, and revalidate our iterators
+ * on each iteration.
+ */
- if (get_ldev(mdev)) {
- tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
- tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
- put_ldev(mdev);
+ /* synchronize with conn_create()/conn_destroy() */
+ rcu_read_lock();
+ /* revalidate iterator position */
+ list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+ if (pos == NULL) {
+ /* first iteration */
+ pos = tmp;
+ tconn = pos;
+ break;
+ }
+ if (tmp == pos) {
+ tconn = pos;
+ break;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ if (tconn) {
+next_tconn:
+ mdev = idr_get_next(&tconn->volumes, &volume);
+ if (!mdev) {
+ /* No more volumes to dump on this tconn.
+ * Advance tconn iterator. */
+ pos = list_entry_rcu(tconn->all_tconn.next,
+ struct drbd_tconn, all_tconn);
+ /* Did we dump any volume on this tconn yet? */
+ if (volume != 0) {
+ /* If we reached the end of the list,
+ * or only a single resource dump was requested,
+ * we are done. */
+ if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+ goto out;
+ volume = 0;
+ tconn = pos;
+ goto next_tconn;
+ }
+ }
+
+ dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &drbd_genl_family,
+ NLM_F_MULTI, DRBD_ADM_GET_STATUS);
+ if (!dh)
+ goto out;
+
+ if (!mdev) {
+ /* This is a tconn without a single volume.
+ * Surprisingly enough, it may have a network
+ * configuration. */
+ struct net_conf *nc;
+ dh->minor = -1U;
+ dh->ret_code = NO_ERROR;
+ if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+ goto cancel;
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ goto done;
+ }
+
+ D_ASSERT(mdev->vnr == volume);
+ D_ASSERT(mdev->tconn == tconn);
+
+ dh->minor = mdev_to_minor(mdev);
+ dh->ret_code = NO_ERROR;
- return (int)((char *)tl - (char *)reply->tag_list);
+ if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
+ genlmsg_cancel(skb, dh);
+ goto out;
+ }
+done:
+ genlmsg_end(skb, dh);
+ }
+
+out:
+ rcu_read_unlock();
+ /* where to start the next iteration */
+ cb->args[0] = (long)pos;
+ cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+
+ /* No more tconns/volumes/minors found results in an empty skb.
+ * Which will terminate the dump. */
+ return skb->len;
}
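Editor's sketch (not part of this patch): get_one_status() is a netlink dump callback, so the netlink core calls it repeatedly until it returns an empty skb; everything that must survive between invocations is parked in cb->args[] (here: the tconn, the next volume number, and the single-resource flag). A minimal, generic illustration of that continuation idiom — example_dump(), emit_one_item() and nr_items are hypothetical:

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long next = cb->args[0];	/* where the previous call stopped */

	while (next < nr_items) {
		if (emit_one_item(skb, next))	/* stops when the skb is full */
			break;
		next++;
	}
	cb->args[0] = next;			/* resume point for the next call */
	return skb->len;			/* an empty skb terminates the dump */
}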
-/**
- * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
- * @mdev: DRBD device.
- * @nlp: Netlink/connector packet from drbdsetup
- * @reply: Reply packet for drbdsetup
+/*
+ * Request status of all resources, or of all volumes within a single resource.
+ *
+ * This is a dump, as the answer may not fit in a single reply skb otherwise.
+ * Which means we cannot use the family->attrbuf or other such members, because
+ * dump is NOT protected by the genl_lock(). During dump, we only have access
+ * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
+ *
+ * Once things are setup properly, we call into get_one_status().
*/
-static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
- unsigned short *tl;
- char rv;
+ const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
+ struct nlattr *nla;
+ const char *resource_name;
+ struct drbd_tconn *tconn;
+ int maxtype;
+
+ /* Is this a followup call? */
+ if (cb->args[0]) {
+ /* ... of a single resource dump,
+ * and the resource iterator has been advanced already? */
+ if (cb->args[2] && cb->args[2] != cb->args[0])
+ return 0; /* DONE. */
+ goto dump;
+ }
+
+ /* First call (from netlink_dump_start). We need to figure out
+ * which resource(s) the user wants us to dump. */
+ nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
+ nlmsg_attrlen(cb->nlh, hdrlen),
+ DRBD_NLA_CFG_CONTEXT);
+
+ /* No explicit context given. Dump all. */
+ if (!nla)
+ goto dump;
+ maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+ nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+ if (IS_ERR(nla))
+ return PTR_ERR(nla);
+ /* context given, but no name present? */
+ if (!nla)
+ return -EINVAL;
+ resource_name = nla_data(nla);
+ tconn = conn_get_by_name(resource_name);
+
+ if (!tconn)
+ return -ENODEV;
+
+ kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+
+ /* prime iterators, and set "filter" mode mark:
+ * only dump this tconn. */
+ cb->args[0] = (long)tconn;
+ /* cb->args[1] = 0; passed in this way. */
+ cb->args[2] = (long)tconn;
+
+dump:
+ return get_one_status(skb, cb);
+}
- tl = reply->tag_list;
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct timeout_parms tp;
+ int err;
- rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ tp.timeout_type =
+ adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+ test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+ UT_DEFAULT;
- return (int)((char *)tl - (char *)reply->tag_list);
+ err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
+ if (err) {
+ nlmsg_free(adm_ctx.reply_skb);
+ return err;
+ }
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
- /* default to resume from last known position, if possible */
- struct start_ov args =
- { .start_sector = mdev->ov_start_sector };
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
+ struct start_ov_parms parms;
- if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ mdev = adm_ctx.mdev;
+
+ /* resume from last known position, if possible */
+ parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_stop_sector = ULLONG_MAX;
+ if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
+ int err = start_ov_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
}
+ /* w_make_ov_request expects position to be aligned */
+ mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ mdev->ov_stop_sector = parms.ov_stop_sector;
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-
- /* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
- reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
drbd_resume_io(mdev);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
- struct drbd_nl_cfg_reply *reply)
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
- int retcode = NO_ERROR;
+ struct drbd_conf *mdev;
+ enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
+ struct new_c_uuid_parms args;
- struct new_c_uuid args;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out_nolock;
- memset(&args, 0, sizeof(struct new_c_uuid));
- if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
- reply->ret_code = ERR_MANDATORY_TAG;
- return 0;
+ mdev = adm_ctx.mdev;
+ memset(&args, 0, sizeof(args));
+ if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
+ err = new_c_uuid_parms_from_attrs(&args, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out_nolock;
+ }
}
- mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+ mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
@@ -2250,7 +3018,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
}
/* this is "skip initial sync", assume to be clean */
- if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
+ if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
dev_info(DEV, "Preparing to skip initial sync\n");
skip_initial_sync = 1;
@@ -2273,10 +3041,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
drbd_send_uuids_skip_initial_sync(mdev);
_drbd_uuid_set(mdev, UI_BITMAP, 0);
drbd_print_uuids(mdev, "cleared bitmap UUID");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
}
@@ -2284,416 +3052,284 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
out_dec:
put_ldev(mdev);
out:
- mutex_unlock(&mdev->state_mutex);
-
- reply->ret_code = retcode;
+ mutex_unlock(mdev->state_mutex);
+out_nolock:
+ drbd_adm_finish(info, retcode);
return 0;
}
-struct cn_handler_struct {
- int (*function)(struct drbd_conf *,
- struct drbd_nl_cfg_req *,
- struct drbd_nl_cfg_reply *);
- int reply_body_size;
-};
-
-static struct cn_handler_struct cnd_table[] = {
- [ P_primary ] = { &drbd_nl_primary, 0 },
- [ P_secondary ] = { &drbd_nl_secondary, 0 },
- [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
- [ P_detach ] = { &drbd_nl_detach, 0 },
- [ P_net_conf ] = { &drbd_nl_net_conf, 0 },
- [ P_disconnect ] = { &drbd_nl_disconnect, 0 },
- [ P_resize ] = { &drbd_nl_resize, 0 },
- [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
- [ P_invalidate ] = { &drbd_nl_invalidate, 0 },
- [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
- [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
- [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
- [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
- [ P_resume_io ] = { &drbd_nl_resume_io, 0 },
- [ P_outdate ] = { &drbd_nl_outdate, 0 },
- [ P_get_config ] = { &drbd_nl_get_config,
- sizeof(struct syncer_conf_tag_len_struct) +
- sizeof(struct disk_conf_tag_len_struct) +
- sizeof(struct net_conf_tag_len_struct) },
- [ P_get_state ] = { &drbd_nl_get_state,
- sizeof(struct get_state_tag_len_struct) +
- sizeof(struct sync_progress_tag_len_struct) },
- [ P_get_uuids ] = { &drbd_nl_get_uuids,
- sizeof(struct get_uuids_tag_len_struct) },
- [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
- sizeof(struct get_timeout_flag_tag_len_struct)},
- [ P_start_ov ] = { &drbd_nl_start_ov, 0 },
- [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
-};
-
-static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
+static enum drbd_ret_code
+drbd_check_resource_name(const char *name)
{
- struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
- struct cn_handler_struct *cm;
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- struct drbd_conf *mdev;
- int retcode, rr;
- int reply_size = sizeof(struct cn_msg)
- + sizeof(struct drbd_nl_cfg_reply)
- + sizeof(short int);
-
- if (!try_module_get(THIS_MODULE)) {
- printk(KERN_ERR "drbd: try_module_get() failed!\n");
- return;
+ if (!name || !name[0]) {
+ drbd_msg_put_info("resource name missing");
+ return ERR_MANDATORY_TAG;
}
-
- if (!capable(CAP_SYS_ADMIN)) {
- retcode = ERR_PERM;
- goto fail;
- }
-
- mdev = ensure_mdev(nlp->drbd_minor,
- (nlp->flags & DRBD_NL_CREATE_DEVICE));
- if (!mdev) {
- retcode = ERR_MINOR_INVALID;
- goto fail;
+ /* if we want to use these in sysfs/configfs/debugfs some day,
+ * we must not allow slashes */
+ if (strchr(name, '/')) {
+ drbd_msg_put_info("invalid resource name");
+ return ERR_INVALID_REQUEST;
}
+ return NO_ERROR;
+}
- if (nlp->packet_type >= P_nl_after_last_packet ||
- nlp->packet_type == P_return_code_only) {
- retcode = ERR_PACKET_NR;
- goto fail;
- }
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
+{
+ enum drbd_ret_code retcode;
+ struct res_opts res_opts;
+ int err;
- cm = cnd_table + nlp->packet_type;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* This may happen if packet number is 0: */
- if (cm->function == NULL) {
- retcode = ERR_PACKET_NR;
- goto fail;
+ set_res_opts_defaults(&res_opts);
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
}
- reply_size += cm->reply_body_size;
+ retcode = drbd_check_resource_name(adm_ctx.resource_name);
+ if (retcode != NO_ERROR)
+ goto out;
- /* allocation not in the IO path, cqueue thread context */
- cn_reply = kzalloc(reply_size, GFP_KERNEL);
- if (!cn_reply) {
- retcode = ERR_NOMEM;
- goto fail;
+ if (adm_ctx.tconn) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
+ retcode = ERR_INVALID_REQUEST;
+ drbd_msg_put_info("resource exists");
+ }
+ /* else: still NO_ERROR */
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
-
- reply->packet_type =
- cm->reply_body_size ? nlp->packet_type : P_return_code_only;
- reply->minor = nlp->drbd_minor;
- reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
- /* reply->tag_list; might be modified by cm->function. */
-
- rr = cm->function(mdev, nlp, reply);
-
- cn_reply->id = req->id;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
- cn_reply->flags = 0;
-
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
- kfree(cn_reply);
- module_put(THIS_MODULE);
- return;
- fail:
- drbd_nl_send_reply(req, retcode);
- module_put(THIS_MODULE);
+ if (!conn_create(adm_ctx.resource_name, &res_opts))
+ retcode = ERR_NOMEM;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-
-static unsigned short *
-__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
- unsigned short len, int nul_terminated)
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
- unsigned short l = tag_descriptions[tag_number(tag)].max_len;
- len = (len < l) ? len : l;
- put_unaligned(tag, tl++);
- put_unaligned(len, tl++);
- memcpy(tl, data, len);
- tl = (unsigned short*)((char*)tl + len);
- if (nul_terminated)
- *((char*)tl - 1) = 0;
- return tl;
-}
+ struct drbd_genlmsghdr *dh = info->userhdr;
+ enum drbd_ret_code retcode;
-static unsigned short *
-tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
-{
- return __tl_add_blob(tl, tag, data, len, 0);
-}
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
-static unsigned short *
-tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
-{
- return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
-}
+ if (dh->minor > MINORMASK) {
+ drbd_msg_put_info("requested minor out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
+ if (adm_ctx.volume > DRBD_VOLUME_MAX) {
+ drbd_msg_put_info("requested volume id out of range");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
-static unsigned short *
-tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
-{
- put_unaligned(tag, tl++);
- switch(tag_type(tag)) {
- case TT_INTEGER:
- put_unaligned(sizeof(int), tl++);
- put_unaligned(*(int *)val, (int *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(int));
- break;
- case TT_INT64:
- put_unaligned(sizeof(u64), tl++);
- put_unaligned(*(u64 *)val, (u64 *)tl);
- tl = (unsigned short*)((char*)tl+sizeof(u64));
- break;
- default:
- /* someone did something stupid. */
- ;
+ /* drbd_adm_prepare made sure already
+ * that mdev->tconn and mdev->vnr match the request. */
+ if (adm_ctx.mdev) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+ retcode = ERR_MINOR_EXISTS;
+ /* else: still NO_ERROR */
+ goto out;
}
- return tl;
+
+ retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
+static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct get_state_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
-
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_get_state;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
-
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ if (mdev->state.disk == D_DISKLESS &&
+ /* no need to be mdev->state.conn == C_STANDALONE &&
+ * we may want to delete a minor from a live replication group.
+ */
+ mdev->state.role == R_SECONDARY) {
+ _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ CS_VERBOSE + CS_WAIT_COMPLETE);
+ idr_remove(&mdev->tconn->volumes, mdev->vnr);
+ idr_remove(&minors, mdev_to_minor(mdev));
+ del_gendisk(mdev->vdisk);
+ synchronize_rcu();
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return NO_ERROR;
+ } else
+ return ERR_MINOR_CONFIGURED;
}
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct call_helper_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
-
- /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
-
- tl = tl_add_str(tl, T_helper, helper_name);
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ enum drbd_ret_code retcode;
- reply->packet_type = P_call_helper;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = adm_delete_minor(adm_ctx.mdev);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e)
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
- struct cn_msg *cn_reply;
- struct drbd_nl_cfg_reply *reply;
- unsigned short *tl;
- struct page *page;
- unsigned len;
+ int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+ struct drbd_conf *mdev;
+ unsigned i;
- if (!e)
- return;
- if (!reason || !reason[0])
- return;
+ retcode = drbd_adm_prepare(skb, info, 0);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- /* apparently we have to memcpy twice, first to prepare the data for the
- * struct cn_msg, then within cn_netlink_send from the cn_msg to the
- * netlink skb. */
- /* receiver thread context, which is not in the writeout path (of this node),
- * but may be in the writeout path of the _other_ node.
- * GFP_NOIO to avoid potential "distributed deadlock". */
- cn_reply = kzalloc(
- sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct dump_ee_tag_len_struct)+
- sizeof(short int),
- GFP_NOIO);
-
- if (!cn_reply) {
- dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
- (unsigned long long)e->sector, e->size);
- return;
+ if (!adm_ctx.tconn) {
+ retcode = ERR_RES_NOT_KNOWN;
+ goto out;
}
- reply = (struct drbd_nl_cfg_reply*)cn_reply->data;
- tl = reply->tag_list;
-
- tl = tl_add_str(tl, T_dump_ee_reason, reason);
- tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
- tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
- tl = tl_add_int(tl, T_ee_sector, &e->sector);
- tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
-
- /* dump the first 32k */
- len = min_t(unsigned, e->size, 32 << 10);
- put_unaligned(T_ee_data, tl++);
- put_unaligned(len, tl++);
-
- page = e->pages;
- page_chain_for_each(page) {
- void *d = kmap_atomic(page);
- unsigned l = min_t(unsigned, len, PAGE_SIZE);
- memcpy(tl, d, l);
- kunmap_atomic(d);
- tl = (unsigned short*)((char*)tl + l);
- len -= l;
- if (len == 0)
- break;
+ /* demote */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to demote");
+ goto out;
+ }
}
- put_unaligned(TT_END, tl++); /* Close the tag list */
-
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
- cn_reply->ack = 0; // not used here.
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char*)tl - (char*)reply->tag_list);
- cn_reply->flags = 0;
-
- reply->packet_type = P_dump_ee;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- kfree(cn_reply);
-}
-
-void drbd_bcast_sync_progress(struct drbd_conf *mdev)
-{
- char buffer[sizeof(struct cn_msg)+
- sizeof(struct drbd_nl_cfg_reply)+
- sizeof(struct sync_progress_tag_len_struct)+
- sizeof(short int)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- unsigned short *tl = reply->tag_list;
- unsigned long rs_left;
- unsigned int res;
+ retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+ if (retcode < SS_SUCCESS) {
+ drbd_msg_put_info("failed to disconnect");
+ goto out;
+ }
- /* no local ref, no bitmap, no syncer progress, no broadcast. */
- if (!get_ldev(mdev))
- return;
- drbd_get_syncer_progress(mdev, &rs_left, &res);
- put_ldev(mdev);
+ /* detach */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_detach(mdev, 0);
+ if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
+ drbd_msg_put_info("failed to detach");
+ goto out;
+ }
+ }
- tl = tl_add_int(tl, T_sync_progress, &res);
- put_unaligned(TT_END, tl++); /* Close the tag list */
+ /* If we reach this, all volumes (of this tconn) are Secondary,
+ * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
+ * actually stopped, state handling only does drbd_thread_stop_nowait(). */
+ drbd_thread_stop(&adm_ctx.tconn->worker);
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
+ /* Now, nothing can fail anymore */
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
- cn_reply->flags = 0;
+ /* delete volumes */
+ idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+ retcode = adm_delete_minor(mdev);
+ if (retcode != NO_ERROR) {
+ /* "can not happen" */
+ drbd_msg_put_info("failed to delete volume");
+ goto out;
+ }
+ }
- reply->packet_type = P_sync_progress;
- reply->minor = mdev_to_minor(mdev);
- reply->ret_code = NO_ERROR;
+ /* delete connection */
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
- cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
+ retcode = NO_ERROR;
+ } else {
+ /* "can not happen" */
+ retcode = ERR_RES_IN_USE;
+ drbd_msg_put_info("failed to delete connection");
+ }
+ goto out;
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
}
-int __init drbd_nl_init(void)
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
- static struct cb_id cn_id_drbd;
- int err, try=10;
+ enum drbd_ret_code retcode;
- cn_id_drbd.val = CN_VAL_DRBD;
- do {
- cn_id_drbd.idx = cn_idx;
- err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
- if (!err)
- break;
- cn_idx = (cn_idx + CN_IDX_STEP);
- } while (try--);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
- if (err) {
- printk(KERN_ERR "drbd: cn_drbd failed to register\n");
- return err;
+ if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+ list_del_rcu(&adm_ctx.tconn->all_tconn);
+ synchronize_rcu();
+ kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
+ retcode = NO_ERROR;
+ } else {
+ retcode = ERR_RES_IN_USE;
}
+ if (retcode == NO_ERROR)
+ drbd_thread_stop(&adm_ctx.tconn->worker);
+out:
+ drbd_adm_finish(info, retcode);
return 0;
}
-void drbd_nl_cleanup(void)
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
- static struct cb_id cn_id_drbd;
-
- cn_id_drbd.idx = cn_idx;
- cn_id_drbd.val = CN_VAL_DRBD;
-
- cn_del_callback(&cn_id_drbd);
-}
+ static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+ struct sk_buff *msg;
+ struct drbd_genlmsghdr *d_out;
+ unsigned seq;
+ int err = -ENOMEM;
+
+ if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+ if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+ mdev->rs_last_bcast = jiffies;
+ else
+ return;
+ }
-void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
-{
- char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
- struct cn_msg *cn_reply = (struct cn_msg *) buffer;
- struct drbd_nl_cfg_reply *reply =
- (struct drbd_nl_cfg_reply *)cn_reply->data;
- int rr;
+ seq = atomic_inc_return(&drbd_genl_seq);
+ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
+ if (!msg)
+ goto failed;
- memset(buffer, 0, sizeof(buffer));
- cn_reply->id = req->id;
+ err = -EMSGSIZE;
+ d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
+ if (!d_out) /* cannot happen, but anyways. */
+ goto nla_put_failure;
+ d_out->minor = mdev_to_minor(mdev);
+ d_out->ret_code = NO_ERROR;
- cn_reply->seq = req->seq;
- cn_reply->ack = req->ack + 1;
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
- cn_reply->flags = 0;
+ if (nla_put_status_info(msg, mdev, sib))
+ goto nla_put_failure;
+ genlmsg_end(msg, d_out);
+ err = drbd_genl_multicast_events(msg, 0);
+ /* msg has been consumed or freed in netlink_broadcast() */
+ if (err && err != -ESRCH)
+ goto failed;
- reply->packet_type = P_return_code_only;
- reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
- reply->ret_code = ret_code;
+ return;
- rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
- if (rr && rr != -ESRCH)
- printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
+nla_put_failure:
+ nlmsg_free(msg);
+failed:
+ dev_err(DEV, "Error %d while broadcasting event. "
+ "Event seq:%u sib_reason:%u\n",
+ err, seq, sib->sib_reason);
}
-
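
The genetlink admin handlers above (drbd_adm_new_resource, drbd_adm_add_minor, drbd_adm_delete_minor, drbd_adm_down, drbd_adm_del_resource) all follow one prepare/finish shape. Below is a condensed sketch of that skeleton for orientation only; drbd_adm_prepare(), drbd_adm_finish(), adm_ctx and the return codes are taken from the patch, while the operation-specific body is left as a placeholder.

int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;

        /* Parse the common header, resolve resource/minor into adm_ctx,
         * and allocate the reply skb. */
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;         /* not even a reply could be set up */
        if (retcode != NO_ERROR)
                goto out;

        /* ... operation-specific work on adm_ctx.mdev / adm_ctx.tconn,
         * reporting problems via retcode and drbd_msg_put_info() ... */

out:
        /* Send the reply (carrying retcode) and release adm_ctx. */
        drbd_adm_finish(info, retcode);
        return 0;
}
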
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
new file mode 100644
index 000000000000..fa672b6df8d6
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.c
@@ -0,0 +1,55 @@
+#include "drbd_wrappers.h"
+#include <linux/kernel.h>
+#include <net/netlink.h>
+#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
+
+static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
+{
+ struct nlattr *head = nla_data(nla);
+ int len = nla_len(nla);
+ int rem;
+
+ /*
+ * validate_nla (called from nla_parse_nested) ignores attributes
+ * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
+ * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
+ * flag set also, check and remove that flag before calling
+ * nla_parse_nested.
+ */
+
+ nla_for_each_attr(nla, head, len, rem) {
+ if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
+ nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
+ if (nla_type(nla) > maxtype)
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy)
+{
+ int err;
+
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (!err)
+ err = nla_parse_nested(tb, maxtype, nla, policy);
+
+ return err;
+}
+
+struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
+{
+ int err;
+ /*
+ * If any nested attribute has the DRBD_GENLA_F_MANDATORY flag set and
+ * we don't know about that attribute, reject all the nested
+ * attributes.
+ */
+ err = drbd_nla_check_mandatory(maxtype, nla);
+ if (err)
+ return ERR_PTR(err);
+ return nla_find_nested(nla, attrtype);
+}
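
A hypothetical caller of drbd_nla_parse_nested(), to show what the mandatory-flag handling buys: attribute types may arrive with DRBD_GENLA_F_MANDATORY set, unknown mandatory attributes are rejected with -EOPNOTSUPP, and everything else is handed to the ordinary nla_parse_nested(). The attribute enum, policy and consumer below are made up for illustration and are not part of the patch.

enum { EX_ATTR_UNSPEC, EX_ATTR_TIMEOUT, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
        [EX_ATTR_TIMEOUT] = { .type = NLA_U32 },
};

static int ex_parse(struct nlattr *container, u32 *timeout)
{
        struct nlattr *tb[EX_ATTR_MAX + 1];
        int err;

        /* Strips DRBD_GENLA_F_MANDATORY from each nested attribute
         * (failing on unknown mandatory ones), then validates against
         * ex_policy like a plain nla_parse_nested(). */
        err = drbd_nla_parse_nested(tb, EX_ATTR_MAX, container, ex_policy);
        if (err)
                return err;

        if (tb[EX_ATTR_TIMEOUT])
                *timeout = nla_get_u32(tb[EX_ATTR_TIMEOUT]);
        return 0;
}
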
diff --git a/drivers/block/drbd/drbd_nla.h b/drivers/block/drbd/drbd_nla.h
new file mode 100644
index 000000000000..679c2d5b4535
--- /dev/null
+++ b/drivers/block/drbd/drbd_nla.h
@@ -0,0 +1,8 @@
+#ifndef __DRBD_NLA_H
+#define __DRBD_NLA_H
+
+extern int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
+ const struct nla_policy *policy);
+extern struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype);
+
+#endif /* __DRBD_NLA_H */
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 5496104f90b9..56672a61eb94 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -167,18 +167,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bit_pos;
+ unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
+ mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
- else
+ if (verify_can_do_stop_sector(mdev))
+ stop_sector = mdev->ov_stop_sector;
+ } else
bit_pos = mdev->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
- "\t%3d%% sector pos: %llu/%llu\n",
+ "\t%3d%% sector pos: %llu/%llu",
(int)(bit_pos / (bm_bits/100+1)),
(unsigned long long)bit_pos * BM_SECT_PER_BIT,
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
+ if (stop_sector != 0 && stop_sector != ULLONG_MAX)
+ seq_printf(seq, " stop sector: %llu", stop_sector);
+ seq_printf(seq, "\n");
}
}
@@ -194,9 +200,11 @@ static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
static int drbd_seq_show(struct seq_file *seq, void *v)
{
- int i, hole = 0;
+ int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
+ struct net_conf *nc;
+ char wp;
static char write_ordering_chars[] = {
[WO_none] = 'n',
@@ -227,16 +235,11 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
oos .. known out-of-sync kB
*/
- for (i = 0; i < minor_count; i++) {
- mdev = minor_to_mdev(i);
- if (!mdev) {
- hole = 1;
- continue;
- }
- if (hole) {
- hole = 0;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, mdev, i) {
+ if (prev_i != i - 1)
seq_printf(seq, "\n");
- }
+ prev_i = i;
sn = drbd_conn_str(mdev->state.conn);
@@ -248,6 +251,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
/* reset mdev->congestion_reason */
bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
@@ -257,9 +262,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
drbd_role_str(mdev->state.peer),
drbd_disk_str(mdev->state.disk),
drbd_disk_str(mdev->state.pdsk),
- (mdev->net_conf == NULL ? ' ' :
- (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')),
- is_susp(mdev->state) ? 's' : 'r',
+ wp,
+ drbd_suspended(mdev) ? 's' : 'r',
mdev->state.aftr_isp ? 'a' : '-',
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
@@ -276,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
atomic_read(&mdev->rs_pending_cnt),
atomic_read(&mdev->unacked_cnt),
atomic_read(&mdev->ap_bio_cnt),
- mdev->epochs,
- write_ordering_chars[mdev->write_ordering]
+ mdev->tconn->epochs,
+ write_ordering_chars[mdev->tconn->write_ordering]
);
seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long)
@@ -302,6 +306,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
}
}
}
+ rcu_read_unlock();
return 0;
}
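
The /proc/drbd loop above switches from indexing 0..minor_count to walking the minors idr under rcu_read_lock(), with per-connection settings read through rcu_dereference(). A stripped-down sketch of that access pattern follows; the seq_printf body is a placeholder, not the real output format.

static void ex_show_minors(struct seq_file *seq)
{
        struct drbd_conf *mdev;
        int i;

        rcu_read_lock();
        idr_for_each_entry(&minors, mdev, i) {
                /* net_conf is RCU-managed: no lock is taken, but the pointer
                 * must not be used after rcu_read_unlock() and the reader
                 * must not sleep. */
                struct net_conf *nc = rcu_dereference(mdev->tconn->net_conf);

                seq_printf(seq, "%2d: proto %c\n", i,
                           nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ');
        }
        rcu_read_unlock();
}
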
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c74ca2df7431..a9eccfc6079b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -48,17 +48,25 @@
#include "drbd_vli.h"
+struct packet_info {
+ enum drbd_packet cmd;
+ unsigned int size;
+ unsigned int vnr;
+ void *data;
+};
+
enum finish_epoch {
FE_STILL_LIVE,
FE_DESTROYED,
FE_RECYCLED,
};
-static int drbd_do_handshake(struct drbd_conf *mdev);
-static int drbd_do_auth(struct drbd_conf *mdev);
+static int drbd_do_features(struct drbd_tconn *tconn);
+static int drbd_do_auth(struct drbd_tconn *tconn);
+static int drbd_disconnected(struct drbd_conf *mdev);
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
-static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
+static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
@@ -142,11 +150,12 @@ static void page_chain_add(struct page **head,
*head = chain_first;
}
-static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
+static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
+ unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
- int i = 0;
+ unsigned int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
@@ -175,7 +184,7 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return page;
/* Not enough pages immediately available this time.
- * No need to jump around here, drbd_pp_alloc will retry this
+ * No need to jump around here, drbd_alloc_pages will retry this
* function "soon". */
if (page) {
tmp = page_chain_tail(page, NULL);
@@ -187,9 +196,10 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL;
}
-static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
+static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
+ struct list_head *to_be_freed)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
@@ -198,8 +208,8 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_ee_has_active_page(e))
+ peer_req = list_entry(le, struct drbd_peer_request, w.list);
+ if (drbd_peer_req_has_active_page(peer_req))
break;
list_move(le, to_be_freed);
}
@@ -208,18 +218,18 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
}
/**
- * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
+ * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
* @number: number of pages requested
* @retry: whether to retry, if not enough pages are available right now
@@ -230,23 +240,31 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
*
* Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
+struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
+ bool retry)
{
struct page *page = NULL;
+ struct net_conf *nc;
DEFINE_WAIT(wait);
+ int mxb;
/* Yes, we may run up to @number over max_buffers. If we
* follow it strictly, the admin will get it wrong anyways. */
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ mxb = nc ? nc->max_buffers : 1000000;
+ rcu_read_unlock();
+
+ if (atomic_read(&mdev->pp_in_use) < mxb)
+ page = __drbd_alloc_pages(mdev, number);
while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_pages_or_try_alloc(mdev, number);
+ if (atomic_read(&mdev->pp_in_use) < mxb) {
+ page = __drbd_alloc_pages(mdev, number);
if (page)
break;
}
@@ -255,7 +273,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
break;
if (signal_pending(current)) {
- dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
+ dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
break;
}
@@ -268,11 +286,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
return page;
}
-/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
+ * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
@@ -280,7 +298,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -302,127 +320,130 @@ You need to hold the req_lock:
_drbd_wait_ee_list_empty()
You must not have the req_lock:
- drbd_free_ee()
- drbd_alloc_ee()
- drbd_init_ee()
- drbd_release_ee()
+ drbd_free_peer_req()
+ drbd_alloc_peer_req()
+ drbd_free_peer_reqs()
drbd_ee_fix_bhs()
- drbd_process_done_ee()
+ drbd_finish_peer_reqs()
drbd_clear_done_ee()
drbd_wait_ee_list_empty()
*/
-struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local)
+struct drbd_peer_request *
+drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
+ unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
return NULL;
- e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
- if (!e) {
+ peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+ if (!peer_req) {
if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
+ dev_err(DEV, "%s: allocation failed\n", __func__);
return NULL;
}
if (data_size) {
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
if (!page)
goto fail;
}
- INIT_HLIST_NODE(&e->collision);
- e->epoch = NULL;
- e->mdev = mdev;
- e->pages = page;
- atomic_set(&e->pending_bios, 0);
- e->size = data_size;
- e->flags = 0;
- e->sector = sector;
- e->block_id = id;
+ drbd_clear_interval(&peer_req->i);
+ peer_req->i.size = data_size;
+ peer_req->i.sector = sector;
+ peer_req->i.local = false;
+ peer_req->i.waiting = false;
+
+ peer_req->epoch = NULL;
+ peer_req->w.mdev = mdev;
+ peer_req->pages = page;
+ atomic_set(&peer_req->pending_bios, 0);
+ peer_req->flags = 0;
+ /*
+ * The block_id is opaque to the receiver. It is not endianness
+ * converted, and sent back to the sender unchanged.
+ */
+ peer_req->block_id = id;
- return e;
+ return peer_req;
fail:
- mempool_free(e, drbd_ee_mempool);
+ mempool_free(peer_req, drbd_ee_mempool);
return NULL;
}
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+ int is_net)
{
- if (e->flags & EE_HAS_DIGEST)
- kfree(e->digest);
- drbd_pp_free(mdev, e->pages, is_net);
- D_ASSERT(atomic_read(&e->pending_bios) == 0);
- D_ASSERT(hlist_unhashed(&e->collision));
- mempool_free(e, drbd_ee_mempool);
+ if (peer_req->flags & EE_HAS_DIGEST)
+ kfree(peer_req->digest);
+ drbd_free_pages(mdev, peer_req->pages, is_net);
+ D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
+ mempool_free(peer_req, drbd_ee_mempool);
}
-int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
+int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
LIST_HEAD(work_list);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *peer_req, *t;
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &work_list, w.list) {
- drbd_free_some_ee(mdev, e, is_net);
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ __drbd_free_peer_req(mdev, peer_req, is_net);
count++;
}
return count;
}
-
/*
- * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
- * and receive_Barrier.
- *
- * Move entries from net_ee to done_ee, if ready.
- * Grab done_ee, call all callbacks, free the entries.
- * The callbacks typically send out ACKs.
+ * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
*/
-static int drbd_process_done_ee(struct drbd_conf *mdev)
+static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
- int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
+ struct drbd_peer_request *peer_req, *t;
+ int err = 0;
- spin_lock_irq(&mdev->req_lock);
- reclaim_net_ee(mdev, &reclaimed);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ reclaim_finished_net_peer_reqs(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- list_for_each_entry_safe(e, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, e);
+ list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
+ drbd_free_net_peer_req(mdev, peer_req);
/* possible callbacks here:
- * e_end_block, and e_end_resync_block, e_send_discard_ack.
+ * e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
*/
- list_for_each_entry_safe(e, t, &work_list, w.list) {
+ list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
+ int err2;
+
/* list_del not necessary, next/prev members not touched */
- ok = e->w.cb(mdev, &e->w, !ok) && ok;
- drbd_free_ee(mdev, e);
+ err2 = peer_req->w.cb(&peer_req->w, !!err);
+ if (!err)
+ err = err2;
+ drbd_free_peer_req(mdev, peer_req);
}
wake_up(&mdev->ee_wait);
- return ok;
+ return err;
}
-void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
DEFINE_WAIT(wait);
@@ -430,55 +451,22 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
-void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
+static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+ struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
-}
-
-/* see also kernel_accept; which is only present since 2.6.18.
- * also we want to log which part of it failed, exactly */
-static int drbd_accept(struct drbd_conf *mdev, const char **what,
- struct socket *sock, struct socket **newsock)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- *what = "listen";
- err = sock->ops->listen(sock, 5);
- if (err < 0)
- goto out;
-
- *what = "sock_create_lite";
- err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
- newsock);
- if (err < 0)
- goto out;
-
- *what = "accept";
- err = sock->ops->accept(sock, *newsock, 0);
- if (err < 0) {
- sock_release(*newsock);
- *newsock = NULL;
- goto out;
- }
- (*newsock)->ops = sock->ops;
- __module_get((*newsock)->ops->owner);
-
-out:
- return err;
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
-static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
- void *buf, size_t size, int flags)
+static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
mm_segment_t oldfs;
struct kvec iov = {
@@ -500,59 +488,62 @@ static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
return rv;
}
-static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
+static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
- mm_segment_t oldfs;
- struct kvec iov = {
- .iov_base = buf,
- .iov_len = size,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&iov,
- .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
- };
int rv;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
+ rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
- for (;;) {
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
- if (rv == size)
- break;
+ if (rv < 0) {
+ if (rv == -ECONNRESET)
+ conn_info(tconn, "sock was reset by peer\n");
+ else if (rv != -ERESTARTSYS)
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
+ } else if (rv == 0) {
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
- /* Note:
- * ECONNRESET other side closed the connection
- * ERESTARTSYS (on sock) we got a signal
- */
+ t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
- if (rv < 0) {
- if (rv == -ECONNRESET)
- dev_info(DEV, "sock was reset by peer\n");
- else if (rv != -ERESTARTSYS)
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
- break;
- } else if (rv == 0) {
- dev_info(DEV, "sock was shut down by peer\n");
- break;
- } else {
- /* signal came in, or peer/link went down,
- * after we read a partial message
- */
- /* D_ASSERT(signal_pending(current)); */
- break;
+ if (t)
+ goto out;
}
- };
-
- set_fs(oldfs);
+ conn_info(tconn, "sock was shut down by peer\n");
+ }
if (rv != size)
- drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+ conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
+out:
return rv;
}
+static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv(tconn, buf, size);
+ if (err != size) {
+ if (err >= 0)
+ err = -EIO;
+ } else
+ err = 0;
+ return err;
+}
+
+static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
+{
+ int err;
+
+ err = drbd_recv_all(tconn, buf, size);
+ if (err && !signal_pending(current))
+ conn_warn(tconn, "short read (expected size %d)\n", (int)size);
+ return err;
+}
+
/* quoting tcp(7):
* On individual connections, the socket buffer size must be set prior to the
* listen(2) or connect(2) calls in order to have it take effect.
@@ -572,29 +563,50 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
}
}
-static struct socket *drbd_try_connect(struct drbd_conf *mdev)
+static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
const char *what;
struct socket *sock;
struct sockaddr_in6 src_in6;
- int err;
+ struct sockaddr_in6 peer_in6;
+ struct net_conf *nc;
+ int err, peer_addr_len, my_addr_len;
+ int sndbuf_size, rcvbuf_size, connect_int;
int disconnect_on_error = 1;
- if (!get_net_conf(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
return NULL;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
+ memcpy(&src_in6, &tconn->my_addr, my_addr_len);
+
+ if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
+ src_in6.sin6_port = 0;
+ else
+ ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
+
+ peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
+ memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &sock);
+ err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
goto out;
}
sock->sk->sk_rcvtimeo =
- sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
- drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ sock->sk->sk_sndtimeo = connect_int * HZ;
+ drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
/* explicitly bind to the configured IP as source IP
* for the outgoing connections.
@@ -603,17 +615,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* Make sure to use 0 as port number, so linux selects
* a free one dynamically.
*/
- memcpy(&src_in6, mdev->net_conf->my_addr,
- min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
- if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
- src_in6.sin6_port = 0;
- else
- ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
-
what = "bind before connect";
- err = sock->ops->bind(sock,
- (struct sockaddr *) &src_in6,
- mdev->net_conf->my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -621,9 +624,7 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock,
- (struct sockaddr *)mdev->net_conf->peer_addr,
- mdev->net_conf->peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -641,91 +642,174 @@ out:
disconnect_on_error = 0;
break;
default:
- dev_err(DEV, "%s failed, err = %d\n", what, err);
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
}
if (disconnect_on_error)
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- put_net_conf(mdev);
+
return sock;
}
-static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
+struct accept_wait_data {
+ struct drbd_tconn *tconn;
+ struct socket *s_listen;
+ struct completion door_bell;
+ void (*original_sk_state_change)(struct sock *sk);
+
+};
+
+static void drbd_incoming_connection(struct sock *sk)
{
- int timeo, err;
- struct socket *s_estab = NULL, *s_listen;
+ struct accept_wait_data *ad = sk->sk_user_data;
+ void (*state_change)(struct sock *sk);
+
+ state_change = ad->original_sk_state_change;
+ if (sk->sk_state == TCP_ESTABLISHED)
+ complete(&ad->door_bell);
+ state_change(sk);
+}
+
+static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
+{
+ int err, sndbuf_size, rcvbuf_size, my_addr_len;
+ struct sockaddr_in6 my_addr;
+ struct socket *s_listen;
+ struct net_conf *nc;
const char *what;
- if (!get_net_conf(mdev))
- return NULL;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return -EIO;
+ }
+ sndbuf_size = nc->sndbuf_size;
+ rcvbuf_size = nc->rcvbuf_size;
+ rcu_read_unlock();
+
+ my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
+ memcpy(&my_addr, &tconn->my_addr, my_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
- SOCK_STREAM, IPPROTO_TCP, &s_listen);
+ err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+ SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
goto out;
}
- timeo = mdev->net_conf->try_connect_int * HZ;
- timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
-
- s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- s_listen->sk->sk_rcvtimeo = timeo;
- s_listen->sk->sk_sndtimeo = timeo;
- drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
- mdev->net_conf->rcvbuf_size);
+ s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen,
- (struct sockaddr *) mdev->net_conf->my_addr,
- mdev->net_conf->my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
if (err < 0)
goto out;
- err = drbd_accept(mdev, &what, s_listen, &s_estab);
+ ad->s_listen = s_listen;
+ write_lock_bh(&s_listen->sk->sk_callback_lock);
+ ad->original_sk_state_change = s_listen->sk->sk_state_change;
+ s_listen->sk->sk_state_change = drbd_incoming_connection;
+ s_listen->sk->sk_user_data = ad;
+ write_unlock_bh(&s_listen->sk->sk_callback_lock);
+
+ what = "listen";
+ err = s_listen->ops->listen(s_listen, 5);
+ if (err < 0)
+ goto out;
+ return 0;
out:
if (s_listen)
sock_release(s_listen);
if (err < 0) {
if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
- dev_err(DEV, "%s failed, err = %d\n", what, err);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_err(tconn, "%s failed, err = %d\n", what, err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
}
- put_net_conf(mdev);
- return s_estab;
+ return -EIO;
}
-static int drbd_send_fp(struct drbd_conf *mdev,
- struct socket *sock, enum drbd_packets cmd)
+static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.sbuf.header.h80;
-
- return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_state_change = ad->original_sk_state_change;
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
}
-static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
+static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
- int rr;
+ int timeo, connect_int, err = 0;
+ struct socket *s_estab = NULL;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (!nc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ connect_int = nc->connect_int;
+ rcu_read_unlock();
+
+ timeo = connect_int * HZ;
+ timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
+
+ err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
+ if (err <= 0)
+ return NULL;
+
+ err = kernel_accept(ad->s_listen, &s_estab, 0);
+ if (err < 0) {
+ if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
+ conn_err(tconn, "accept failed, err = %d\n", err);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ }
+ }
+
+ if (s_estab)
+ unregister_state_change(s_estab->sk, ad);
+
+ return s_estab;
+}
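
The listen-side changes above replace drbd_accept() with a callback-driven scheme: prepare_listen_socket() redirects the listening socket's sk_state_change to drbd_incoming_connection(), which completes ad->door_bell once a peer connection reaches TCP_ESTABLISHED, and drbd_wait_for_connect() then sleeps on that completion (with random jitter on the timeout) before calling kernel_accept(). A compressed restatement of that flow, for illustration only:

static struct socket *ex_wait_and_accept(struct accept_wait_data *ad, long timeout)
{
        struct socket *s = NULL;

        /* Woken by drbd_incoming_connection() via the overridden sk_state_change. */
        if (wait_for_completion_interruptible_timeout(&ad->door_bell, timeout) <= 0)
                return NULL;

        /* An established connection is queued, so accept() will not block for long. */
        if (kernel_accept(ad->s_listen, &s, 0) < 0)
                return NULL;

        /* The accepted socket inherited the overridden callback; put it back. */
        unregister_state_change(s->sk, ad);
        return s;
}
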
- rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
+static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
- if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
- return be16_to_cpu(h->command);
+static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
+ enum drbd_packet cmd)
+{
+ if (!conn_prepare_command(tconn, sock))
+ return -EIO;
+ return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
+}
- return 0xffff;
+static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
+{
+ unsigned int header_size = drbd_header_size(tconn);
+ struct packet_info pi;
+ int err;
+
+ err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
+ if (err != header_size) {
+ if (err >= 0)
+ err = -EIO;
+ return err;
+ }
+ err = decode_header(tconn, tconn->data.rbuf, &pi);
+ if (err)
+ return err;
+ return pi.cmd;
}
/**
* drbd_socket_okay() - Free the socket if its connection is not okay
- * @mdev: DRBD device.
* @sock: pointer to the pointer to the socket.
*/
-static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
+static int drbd_socket_okay(struct socket **sock)
{
int rr;
char tb[4];
@@ -733,7 +817,7 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
if (!*sock)
return false;
- rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
+ rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
if (rr > 0 || rr == -EAGAIN) {
return true;
@@ -743,6 +827,31 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
return false;
}
}
+/* Gets called if a connection is established, or if a new minor gets created
+ in a connection */
+int drbd_connected(struct drbd_conf *mdev)
+{
+ int err;
+
+ atomic_set(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+ &mdev->tconn->cstate_mutex :
+ &mdev->own_state_mutex;
+
+ err = drbd_send_sync_param(mdev);
+ if (!err)
+ err = drbd_send_sizes(mdev, 0, 0);
+ if (!err)
+ err = drbd_send_uuids(mdev);
+ if (!err)
+ err = drbd_send_current_state(mdev);
+ clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ clear_bit(RESIZE_PENDING, &mdev->flags);
+ mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ return err;
+}
/*
* return values:
@@ -752,232 +861,315 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
* no point in trying again, please go standalone.
* -2 We do not have a network config...
*/
-static int drbd_connect(struct drbd_conf *mdev)
+static int conn_connect(struct drbd_tconn *tconn)
{
- struct socket *s, *sock, *msock;
- int try, h, ok;
+ struct drbd_socket sock, msock;
+ struct drbd_conf *mdev;
+ struct net_conf *nc;
+ int vnr, timeout, h, ok;
+ bool discard_my_data;
enum drbd_state_rv rv;
+ struct accept_wait_data ad = {
+ .tconn = tconn,
+ .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
+ };
- D_ASSERT(!mdev->data.socket);
-
- if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
+ clear_bit(DISCONNECT_SENT, &tconn->flags);
+ if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
return -2;
- clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+ mutex_init(&sock.mutex);
+ sock.sbuf = tconn->data.sbuf;
+ sock.rbuf = tconn->data.rbuf;
+ sock.socket = NULL;
+ mutex_init(&msock.mutex);
+ msock.sbuf = tconn->meta.sbuf;
+ msock.rbuf = tconn->meta.rbuf;
+ msock.socket = NULL;
+
+ /* Assume that the peer only understands protocol 80 until we know better. */
+ tconn->agreed_pro_version = 80;
- sock = NULL;
- msock = NULL;
+ if (prepare_listen_socket(tconn, &ad))
+ return 0;
do {
- for (try = 0;;) {
- /* 3 tries, this should take less than a second! */
- s = drbd_try_connect(mdev);
- if (s || ++try >= 3)
- break;
- /* give the other side time to call bind() & listen() */
- schedule_timeout_interruptible(HZ / 10);
- }
+ struct socket *s;
+ s = drbd_try_connect(tconn);
if (s) {
- if (!sock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
- sock = s;
- s = NULL;
- } else if (!msock) {
- drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
- msock = s;
- s = NULL;
+ if (!sock.socket) {
+ sock.socket = s;
+ send_first_packet(tconn, &sock, P_INITIAL_DATA);
+ } else if (!msock.socket) {
+ clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ msock.socket = s;
+ send_first_packet(tconn, &msock, P_INITIAL_META);
} else {
- dev_err(DEV, "Logic error in drbd_connect()\n");
+ conn_err(tconn, "Logic error in conn_connect()\n");
goto out_release_sockets;
}
}
- if (sock && msock) {
- schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
+ if (sock.socket && msock.socket) {
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ timeout = nc->ping_timeo * HZ / 10;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeout);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
if (ok)
break;
}
retry:
- s = drbd_wait_for_connect(mdev);
+ s = drbd_wait_for_connect(tconn, &ad);
if (s) {
- try = drbd_recv_fp(mdev, s);
- drbd_socket_okay(mdev, &sock);
- drbd_socket_okay(mdev, &msock);
- switch (try) {
- case P_HAND_SHAKE_S:
- if (sock) {
- dev_warn(DEV, "initial packet S crossed\n");
- sock_release(sock);
+ int fp = receive_first_packet(tconn, s);
+ drbd_socket_okay(&sock.socket);
+ drbd_socket_okay(&msock.socket);
+ switch (fp) {
+ case P_INITIAL_DATA:
+ if (sock.socket) {
+ conn_warn(tconn, "initial packet S crossed\n");
+ sock_release(sock.socket);
+ sock.socket = s;
+ goto randomize;
}
- sock = s;
+ sock.socket = s;
break;
- case P_HAND_SHAKE_M:
- if (msock) {
- dev_warn(DEV, "initial packet M crossed\n");
- sock_release(msock);
+ case P_INITIAL_META:
+ set_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ if (msock.socket) {
+ conn_warn(tconn, "initial packet M crossed\n");
+ sock_release(msock.socket);
+ msock.socket = s;
+ goto randomize;
}
- msock = s;
- set_bit(DISCARD_CONCURRENT, &mdev->flags);
+ msock.socket = s;
break;
default:
- dev_warn(DEV, "Error receiving initial packet\n");
+ conn_warn(tconn, "Error receiving initial packet\n");
sock_release(s);
+randomize:
if (random32() & 1)
goto retry;
}
}
- if (mdev->state.conn <= C_DISCONNECTING)
+ if (tconn->cstate <= C_DISCONNECTING)
goto out_release_sockets;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&mdev->receiver) == Exiting)
+ if (get_t_state(&tconn->receiver) == EXITING)
goto out_release_sockets;
}
- if (sock && msock) {
- ok = drbd_socket_okay(mdev, &sock);
- ok = drbd_socket_okay(mdev, &msock) && ok;
- if (ok)
- break;
- }
- } while (1);
+ ok = drbd_socket_okay(&sock.socket);
+ ok = drbd_socket_okay(&msock.socket) && ok;
+ } while (!ok);
+
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
- msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+ msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
- sock->sk->sk_allocation = GFP_NOIO;
- msock->sk->sk_allocation = GFP_NOIO;
+ sock.socket->sk->sk_allocation = GFP_NOIO;
+ msock.socket->sk->sk_allocation = GFP_NOIO;
- sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
- msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
+ sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+ msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
/* NOT YET ...
- * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- * first set it to the P_HAND_SHAKE timeout,
+ * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+ * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ * first set it to the P_CONNECTION_FEATURES timeout,
* which we set to 4x the configured ping_timeout. */
- sock->sk->sk_sndtimeo =
- sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+ sock.socket->sk->sk_sndtimeo =
+ sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
+
+ msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
+ timeout = nc->timeout * HZ / 10;
+ discard_my_data = nc->discard_my_data;
+ rcu_read_unlock();
+
+ msock.socket->sk->sk_sndtimeo = timeout;
/* we don't want delays.
* we use TCP_CORK where appropriate, though */
- drbd_tcp_nodelay(sock);
- drbd_tcp_nodelay(msock);
-
- mdev->data.socket = sock;
- mdev->meta.socket = msock;
- mdev->last_received = jiffies;
+ drbd_tcp_nodelay(sock.socket);
+ drbd_tcp_nodelay(msock.socket);
- D_ASSERT(mdev->asender.task == NULL);
+ tconn->data.socket = sock.socket;
+ tconn->meta.socket = msock.socket;
+ tconn->last_received = jiffies;
- h = drbd_do_handshake(mdev);
+ h = drbd_do_features(tconn);
if (h <= 0)
return h;
- if (mdev->cram_hmac_tfm) {
+ if (tconn->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- switch (drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(tconn)) {
case -1:
- dev_err(DEV, "Authentication of peer failed\n");
+ conn_err(tconn, "Authentication of peer failed\n");
return -1;
case 0:
- dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ conn_err(tconn, "Authentication of peer failed, trying again.\n");
return 0;
}
}
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ tconn->data.socket->sk->sk_sndtimeo = timeout;
+ tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- atomic_set(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
-
- if (drbd_send_protocol(mdev) == -1)
+ if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
return -1;
- set_bit(STATE_SENT, &mdev->flags);
- drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0, 0);
- drbd_send_uuids(mdev);
- drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
- if (mdev->state.conn != C_WF_REPORT_PARAMS)
- clear_bit(STATE_SENT, &mdev->flags);
- spin_unlock_irq(&mdev->req_lock);
+ set_bit(STATE_SENT, &tconn->flags);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ /* Prevent a race between resync-handshake and
+ * being promoted to Primary.
+ *
+ * Grab and release the state mutex, so we know that any current
+ * drbd_set_role() is finished, and any incoming drbd_set_role
+ * will see the STATE_SENT flag, and wait for it to be cleared.
+ */
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
+
+ rcu_read_unlock();
+
+ if (discard_my_data)
+ set_bit(DISCARD_MY_DATA, &mdev->flags);
+ else
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
+
+ drbd_connected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
- if (rv < SS_SUCCESS)
+ rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
+ if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &tconn->flags);
return 0;
+ }
- drbd_thread_start(&mdev->asender);
- mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
+ drbd_thread_start(&tconn->asender);
- return 1;
+ mutex_lock(&tconn->conf_update);
+ /* The discard_my_data flag is a single-shot modifier to the next
+ * connection attempt, the handshake of which is now well underway.
+ * No need for rcu style copying of the whole struct
+ * just to clear a single value. */
+ tconn->net_conf->discard_my_data = 0;
+ mutex_unlock(&tconn->conf_update);
+
+ return h;
out_release_sockets:
- if (sock)
- sock_release(sock);
- if (msock)
- sock_release(msock);
+ if (ad.s_listen)
+ sock_release(ad.s_listen);
+ if (sock.socket)
+ sock_release(sock.socket);
+ if (msock.socket)
+ sock_release(msock.socket);
return -1;
}
-static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
+static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
- union p_header *h = &mdev->data.rbuf.header;
- int r;
-
- r = drbd_recv(mdev, h, sizeof(*h));
- if (unlikely(r != sizeof(*h))) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
- return false;
- }
-
- if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
- *cmd = be16_to_cpu(h->h80.command);
- *packet_size = be16_to_cpu(h->h80.length);
- } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
- *cmd = be16_to_cpu(h->h95.command);
- *packet_size = be32_to_cpu(h->h95.length);
+ unsigned int header_size = drbd_header_size(tconn);
+
+ if (header_size == sizeof(struct p_header100) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
+ struct p_header100 *h = header;
+ if (h->pad != 0) {
+ conn_err(tconn, "Header padding is not zero\n");
+ return -EINVAL;
+ }
+ pi->vnr = be16_to_cpu(h->volume);
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ } else if (header_size == sizeof(struct p_header95) &&
+ *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
+ struct p_header95 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be32_to_cpu(h->length);
+ pi->vnr = 0;
+ } else if (header_size == sizeof(struct p_header80) &&
+ *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
+ struct p_header80 *h = header;
+ pi->cmd = be16_to_cpu(h->command);
+ pi->size = be16_to_cpu(h->length);
+ pi->vnr = 0;
} else {
- dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->h80.magic),
- be16_to_cpu(h->h80.command),
- be16_to_cpu(h->h80.length));
- return false;
+ conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
+ be32_to_cpu(*(__be32 *)header),
+ tconn->agreed_pro_version);
+ return -EINVAL;
}
- mdev->last_received = jiffies;
+ pi->data = header + header_size;
+ return 0;
+}
- return true;
+static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int err;
+
+ err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
+ if (err)
+ return err;
+
+ err = decode_header(tconn, buffer, pi);
+ tconn->last_received = jiffies;
+
+ return err;
}
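
struct packet_info, decode_header() and drbd_recv_header() together replace the old per-device header parsing. A hypothetical consumer, only to show what ends up in struct packet_info:

static int ex_dump_next_packet(struct drbd_tconn *tconn)
{
        struct packet_info pi;
        int err;

        err = drbd_recv_header(tconn, &pi);     /* reads and decodes one header */
        if (err)
                return err;

        /* pi.cmd, pi.vnr and pi.size are filled from whichever on-the-wire
         * header (80, 95 or 100) decode_header() recognized; pi.data points
         * just past the header inside tconn->data.rbuf. */
        conn_info(tconn, "cmd %d, volume %u, %u byte payload\n",
                  pi.cmd, pi.vnr, pi.size);
        return 0;
}
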
-static void drbd_flush(struct drbd_conf *mdev)
+static void drbd_flush(struct drbd_tconn *tconn)
{
int rv;
+ struct drbd_conf *mdev;
+ int vnr;
- if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
- NULL);
- if (rv) {
- dev_info(DEV, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(mdev, WO_drain_io);
+ if (tconn->write_ordering >= WO_bdev_flush) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev(mdev))
+ continue;
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+ GFP_NOIO, NULL);
+ if (rv) {
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
+ /* would rather check on EOPNOTSUPP, but that is not reliable.
+ * don't try again for ANY return value != 0
+ * if (rv == -EOPNOTSUPP) */
+ drbd_bump_write_ordering(tconn, WO_drain_io);
+ }
+ put_ldev(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+
+ rcu_read_lock();
+ if (rv)
+ break;
}
- put_ldev(mdev);
+ rcu_read_unlock();
}
}
@@ -987,7 +1179,7 @@ static void drbd_flush(struct drbd_conf *mdev)
* @epoch: Epoch object.
* @ev: Epoch event.
*/
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
struct drbd_epoch *epoch,
enum epoch_event ev)
{
@@ -995,7 +1187,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct drbd_epoch *next_epoch;
enum finish_epoch rv = FE_STILL_LIVE;
- spin_lock(&mdev->epoch_lock);
+ spin_lock(&tconn->epoch_lock);
do {
next_epoch = NULL;
@@ -1017,18 +1209,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
atomic_read(&epoch->active) == 0 &&
(test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
- spin_unlock(&mdev->epoch_lock);
- drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
- spin_lock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
+ drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
+ spin_lock(&tconn->epoch_lock);
}
+#if 0
+ /* FIXME: dec unacked on connection, once we have
+ * something to count pending connection packets in. */
if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
- dec_unacked(mdev);
+ dec_unacked(epoch->tconn);
+#endif
- if (mdev->current_epoch != epoch) {
+ if (tconn->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
- mdev->epochs--;
+ tconn->epochs--;
kfree(epoch);
if (rv == FE_STILL_LIVE)
@@ -1039,7 +1235,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
/* atomic_set(&epoch->active, 0); is already zero */
if (rv == FE_STILL_LIVE)
rv = FE_RECYCLED;
- wake_up(&mdev->ee_wait);
}
}
@@ -1049,40 +1244,52 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
epoch = next_epoch;
} while (1);
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
- * @mdev: DRBD device.
+ * @tconn: DRBD connection.
* @wo: Write ordering method to try.
*/
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
+ struct disk_conf *dc;
+ struct drbd_conf *mdev;
enum write_ordering_e pwo;
+ int vnr;
static char *write_ordering_str[] = {
[WO_none] = "none",
[WO_drain_io] = "drain",
[WO_bdev_flush] = "flush",
};
- pwo = mdev->write_ordering;
+ pwo = tconn->write_ordering;
wo = min(pwo, wo);
- if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
- wo = WO_drain_io;
- if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
- wo = WO_none;
- mdev->write_ordering = wo;
- if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
- dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
+ continue;
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+
+ if (wo == WO_bdev_flush && !dc->disk_flushes)
+ wo = WO_drain_io;
+ if (wo == WO_drain_io && !dc->disk_drain)
+ wo = WO_none;
+ put_ldev(mdev);
+ }
+ rcu_read_unlock();
+ tconn->write_ordering = wo;
+ if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
+ conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}
/**
- * drbd_submit_ee()
+ * drbd_submit_peer_request()
* @mdev: DRBD device.
- * @e: epoch entry
+ * @peer_req: peer request
* @rw: flag field, see bio->bi_rw
*
* May spread the pages to multiple bios,
@@ -1096,14 +1303,15 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type)
+int drbd_submit_peer_request(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req,
+ const unsigned rw, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
- struct page *page = e->pages;
- sector_t sector = e->sector;
- unsigned ds = e->size;
+ struct page *page = peer_req->pages;
+ sector_t sector = peer_req->i.sector;
+ unsigned ds = peer_req->i.size;
unsigned n_bios = 0;
unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
@@ -1122,12 +1330,12 @@ next_bio:
dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
goto fail;
}
- /* > e->sector, unless this is the first bio */
+ /* > peer_req->i.sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
- bio->bi_private = e;
- bio->bi_end_io = drbd_endio_sec;
+ bio->bi_private = peer_req;
+ bio->bi_end_io = drbd_peer_request_endio;
bio->bi_next = bios;
bios = bio;
@@ -1156,7 +1364,7 @@ next_bio:
D_ASSERT(page == NULL);
D_ASSERT(ds == 0);
- atomic_set(&e->pending_bios, n_bios);
+ atomic_set(&peer_req->pending_bios, n_bios);
do {
bio = bios;
bios = bios->bi_next;
@@ -1175,26 +1383,57 @@ fail:
return err;
}
-static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_interval *i = &peer_req->i;
+
+ drbd_remove_interval(&mdev->write_requests, i);
+ drbd_clear_interval(i);
+
+ /* Wake up any processes waiting for this peer request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
+void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+}
+
+static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
int rv;
- struct p_barrier *p = &mdev->data.rbuf.barrier;
+ struct p_barrier *p = pi->data;
struct drbd_epoch *epoch;
- inc_unacked(mdev);
-
- mdev->current_epoch->barrier_nr = p->barrier;
- rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+ /* FIXME these are unacked on connection,
+ * not a specific (peer)device.
+ */
+ tconn->current_epoch->barrier_nr = p->barrier;
+ tconn->current_epoch->tconn = tconn;
+ rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
* the activity log, which means it would not be resynced in case the
* R_PRIMARY crashes now.
* Therefore we must send the barrier_ack after the barrier request was
* completed. */
- switch (mdev->write_ordering) {
+ switch (tconn->write_ordering) {
case WO_none:
if (rv == FE_RECYCLED)
- return true;
+ return 0;
/* receiver context, in the writeout path of the other node.
* avoid potential distributed deadlock */
@@ -1202,81 +1441,75 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
if (epoch)
break;
else
- dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+ conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
/* Fall through */
case WO_bdev_flush:
case WO_drain_io:
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- drbd_flush(mdev);
+ conn_wait_active_ee_empty(tconn);
+ drbd_flush(tconn);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (epoch)
break;
}
- epoch = mdev->current_epoch;
- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
- D_ASSERT(atomic_read(&epoch->active) == 0);
- D_ASSERT(epoch->flags == 0);
-
- return true;
+ return 0;
default:
- dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
- return false;
+ conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
+ return -EIO;
}
epoch->flags = 0;
atomic_set(&epoch->epoch_size, 0);
atomic_set(&epoch->active, 0);
- spin_lock(&mdev->epoch_lock);
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
+ spin_lock(&tconn->epoch_lock);
+ if (atomic_read(&tconn->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &tconn->current_epoch->list);
+ tconn->current_epoch = epoch;
+ tconn->epochs++;
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
}
- spin_unlock(&mdev->epoch_lock);
+ spin_unlock(&tconn->epoch_lock);
- return true;
+ return 0;
}
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-static struct drbd_epoch_entry *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
+static struct drbd_peer_request *
+read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+ int data_size) __must_hold(local)
{
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
struct page *page;
- int dgs, ds, rr;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
+ int dgs, ds, err;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
unsigned long *data;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
-
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data digest: read %d expected %d\n",
- rr, dgs);
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ /*
+ * FIXME: Receive the incoming digest into the receive buffer
+ * here, together with its struct p_data?
+ */
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
return NULL;
- }
+ data_size -= dgs;
}
- data_size -= dgs;
-
- ERR_IF(data_size & 0x1ff) return NULL;
- ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
+ if (!expect(IS_ALIGNED(data_size, 512)))
+ return NULL;
+ if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+ return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
@@ -1291,47 +1524,42 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
+ if (!peer_req)
return NULL;
if (!data_size)
- return e;
+ return peer_req;
ds = data_size;
- page = e->pages;
+ page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
data = kmap(page);
- rr = drbd_recv(mdev, data, len);
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
dev_err(DEV, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
kunmap(page);
- if (rr != len) {
- drbd_free_ee(mdev, e);
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, len);
+ if (err) {
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
- ds -= rr;
+ ds -= len;
}
if (dgs) {
- drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
+ drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_bcast_ee(mdev, "digest failed",
- dgs, dig_in, dig_vv, e);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
}
mdev->recv_cnt += data_size>>9;
- return e;
+ return peer_req;
}
/* drbd_drain_block() just takes a data block
@@ -1340,30 +1568,26 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
struct page *page;
- int rr, rv = 1;
+ int err = 0;
void *data;
if (!data_size)
- return true;
+ return 0;
- page = drbd_pp_alloc(mdev, 1, 1);
+ page = drbd_alloc_pages(mdev, 1, 1);
data = kmap(page);
while (data_size) {
- rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
- if (rr != min_t(int, data_size, PAGE_SIZE)) {
- rv = 0;
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data: read %d expected %d\n",
- rr, min_t(int, data_size, PAGE_SIZE));
+ unsigned int len = min_t(int, data_size, PAGE_SIZE);
+
+ err = drbd_recv_all_warn(mdev->tconn, data, len);
+ if (err)
break;
- }
- data_size -= rr;
+ data_size -= len;
}
kunmap(page);
- drbd_pp_free(mdev, page, 0);
- return rv;
+ drbd_free_pages(mdev, page, 0);
+ return err;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
@@ -1371,26 +1595,19 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
{
struct bio_vec *bvec;
struct bio *bio;
- int dgs, rr, i, expect;
- void *dig_in = mdev->int_dig_in;
- void *dig_vv = mdev->int_dig_vv;
-
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
- crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
+ int dgs, err, i, expect;
+ void *dig_in = mdev->tconn->int_dig_in;
+ void *dig_vv = mdev->tconn->int_dig_vv;
- if (dgs) {
- rr = drbd_recv(mdev, dig_in, dgs);
- if (rr != dgs) {
- if (!signal_pending(current))
- dev_warn(DEV,
- "short read receiving data reply digest: read %d expected %d\n",
- rr, dgs);
- return 0;
- }
+ dgs = 0;
+ if (mdev->tconn->peer_integrity_tfm) {
+ dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
+ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
+ if (err)
+ return err;
+ data_size -= dgs;
}
- data_size -= dgs;
-
/* optimistically update recv_cnt. if receiving fails below,
* we disconnect anyways, and counters will be reset. */
mdev->recv_cnt += data_size>>9;
@@ -1399,63 +1616,61 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
D_ASSERT(sector == bio->bi_sector);
bio_for_each_segment(bvec, bio, i) {
+ void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
expect = min_t(int, data_size, bvec->bv_len);
- rr = drbd_recv(mdev,
- kmap(bvec->bv_page)+bvec->bv_offset,
- expect);
+ err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
kunmap(bvec->bv_page);
- if (rr != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving data reply: "
- "read %d expected %d\n",
- rr, expect);
- return 0;
- }
- data_size -= rr;
+ if (err)
+ return err;
+ data_size -= expect;
}
if (dgs) {
- drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
- return 0;
+ return -EINVAL;
}
}
D_ASSERT(data_size == 0);
- return 1;
+ return 0;
}
-/* e_end_resync_block() is called via
- * drbd_process_done_ee() by asender only */
-static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+/*
+ * e_end_resync_block() is called in asender context via
+ * drbd_finish_peer_reqs().
+ */
+static int e_end_resync_block(struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err;
- D_ASSERT(hlist_unhashed(&e->collision));
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- drbd_set_in_sync(mdev, sector, e->size);
- ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
+ err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
} else {
/* Record failure to sync */
- drbd_rs_failed_io(mdev, sector, e->size);
+ drbd_rs_failed_io(mdev, sector, peer_req->i.size);
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
}
dec_unacked(mdev);
- return ok;
+ return err;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
- e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e)
+ peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
+ if (!peer_req)
goto fail;
dec_rs_pending(mdev);
@@ -1464,64 +1679,88 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
- e->w.cb = e_end_resync_block;
+ peer_req->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
fail:
put_ldev(mdev);
- return false;
+ return -EIO;
}
-static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static struct drbd_request *
+find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+ sector_t sector, bool missing_ok, const char *func)
{
struct drbd_request *req;
+
+ /* Request object according to our peer */
+ req = (struct drbd_request *)(unsigned long)id;
+ if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
+ return req;
+ if (!missing_ok) {
+ dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
+ (unsigned long)id, (unsigned long long)sector);
+ }
+ return NULL;
+}
+
+static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct drbd_request *req;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
- req = _ar_id_to_req(mdev, p->block_id, sector);
- spin_unlock_irq(&mdev->req_lock);
- if (unlikely(!req)) {
- dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
- return false;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (unlikely(!req))
+ return -EIO;
/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
* special casing it there for the various failure cases.
* still no race with drbd_fail_pending_reads */
- ok = recv_dless_read(mdev, req, sector, data_size);
-
- if (ok)
- req_mod(req, data_received);
+ err = recv_dless_read(mdev, req, sector, pi->size);
+ if (!err)
+ req_mod(req, DATA_RECEIVED);
/* else: nothing. handled from drbd_disconnect...
* I don't think we may complete this just yet
* in case we are "on-disconnect: freeze" */
- return ok;
+ return err;
}
-static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ int err;
+ struct p_data *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
@@ -1529,42 +1768,63 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
if (get_ldev(mdev)) {
/* data is submitted to disk within recv_resync_read.
* corresponding put_ldev done below on error,
- * or in drbd_endio_write_sec. */
- ok = recv_resync_read(mdev, sector, data_size);
+ * or in drbd_peer_request_endio. */
+ err = recv_resync_read(mdev, sector, pi->size);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not write resync data to local disk.\n");
- ok = drbd_drain_block(mdev, data_size);
+ err = drbd_drain_block(mdev, pi->size);
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
}
- atomic_add(data_size >> 9, &mdev->rs_sect_in);
+ atomic_add(pi->size >> 9, &mdev->rs_sect_in);
- return ok;
+ return err;
}
-/* e_end_block() is called via drbd_process_done_ee().
- * this means this function only runs in the asender thread
- */
-static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void restart_conflicting_writes(struct drbd_conf *mdev,
+ sector_t sector, int size)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- sector_t sector = e->sector;
- int ok = 1, pcmd;
+ struct drbd_interval *i;
+ struct drbd_request *req;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED))
+ continue;
+ /* as it is RQ_POSTPONED, this will cause it to
+ * be queued on the retry workqueue. */
+ __req_mod(req, CONFLICT_RESOLVED, NULL);
+ }
+}
+
+/*
+ * e_end_block() is called in asender context via drbd_finish_peer_reqs().
+ */
+static int e_end_block(struct drbd_work *w, int cancel)
+{
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ int err = 0, pcmd;
+
+ if (peer_req->flags & EE_SEND_WRITE_ACK) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
- e->flags & EE_MAY_SET_IN_SYNC) ?
+ peer_req->flags & EE_MAY_SET_IN_SYNC) ?
P_RS_WRITE_ACK : P_WRITE_ACK;
- ok &= drbd_send_ack(mdev, pcmd, e);
+ err = drbd_send_ack(mdev, pcmd, peer_req);
if (pcmd == P_RS_WRITE_ACK)
- drbd_set_in_sync(mdev, sector, e->size);
+ drbd_set_in_sync(mdev, sector, peer_req->i.size);
} else {
- ok = drbd_send_ack(mdev, P_NEG_ACK, e);
+ err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
/* we expect it to be marked out of sync anyways...
* maybe assert this? */
}
@@ -1572,52 +1832,115 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
- if (mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- } else {
- D_ASSERT(hlist_unhashed(&e->collision));
- }
+ if (peer_req->flags & EE_IN_INTERVAL_TREE) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ D_ASSERT(!drbd_interval_empty(&peer_req->i));
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ if (peer_req->flags & EE_RESTART_REQUESTS)
+ restart_conflicting_writes(mdev, sector, peer_req->i.size);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ } else
+ D_ASSERT(drbd_interval_empty(&peer_req->i));
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+ drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
- return ok;
+ return err;
}
-static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- int ok = 1;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_peer_request *peer_req =
+ container_of(w, struct drbd_peer_request, w);
+ int err;
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
+ err = drbd_send_ack(mdev, ack, peer_req);
+ dec_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- D_ASSERT(!hlist_unhashed(&e->collision));
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
+ return err;
+}
- dec_unacked(mdev);
+static int e_send_superseded(struct drbd_work *w, int unused)
+{
+ return e_send_ack(w, P_SUPERSEDED);
+}
+
+static int e_send_retry_write(struct drbd_work *w, int unused)
+{
+ struct drbd_tconn *tconn = w->mdev->tconn;
+
+ return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
+ P_RETRY_WRITE : P_SUPERSEDED);
+}
+
+static bool seq_greater(u32 a, u32 b)
+{
+ /*
+ * We assume 32-bit wrap-around here.
+ * For 24-bit wrap-around, we would have to shift:
+ * a <<= 8; b <<= 8;
+ */
+ return (s32)a - (s32)b > 0;
+}
+
+static u32 seq_max(u32 a, u32 b)
+{
+ return seq_greater(a, b) ? a : b;
+}
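
The signed subtraction makes this comparison robust against 32-bit sequence number wrap-around; a few illustrative values (examples only, not from the patch):

	seq_greater(0x00000001, 0xffffffff);	/* true:  (s32)1 - (s32)0xffffffff ==  2 */
	seq_greater(0xffffffff, 0x00000001);	/* false: (s32)0xffffffff - (s32)1 == -2 */
	seq_max(0x00000001, 0xffffffff);	/* 0x00000001 -- the "newer" value wins  */
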
+
+static bool need_peer_seq(struct drbd_conf *mdev)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ int tp;
- return ok;
+ /*
+ * We only need to keep track of the last packet_seq number of our peer
+ * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
+ * handle_write_conflicts().
+ */
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+
+ return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
}
-static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
+ unsigned int newest_peer_seq;
- struct drbd_epoch_entry *rs_e;
+ if (need_peer_seq(mdev)) {
+ spin_lock(&mdev->peer_seq_lock);
+ newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ mdev->peer_seq = newest_peer_seq;
+ spin_unlock(&mdev->peer_seq_lock);
+ /* wake up only if we actually changed mdev->peer_seq */
+ if (peer_seq == newest_peer_seq)
+ wake_up(&mdev->seq_wait);
+ }
+}
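
Illustrative behaviour, with assumed values: waiters on seq_wait are only woken when the stored sequence number actually advances:

	/* assume mdev->peer_seq == 7 on entry */
	update_peer_seq(mdev, 9);	/* stores 9, wakes mdev->seq_wait    */
	update_peer_seq(mdev, 5);	/* keeps 7 (older packet), no wakeup */
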
+
+static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
+{
+ return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
+}
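
Note the mixed units: the start positions are 512-byte sectors while the lengths are byte counts, hence the >>9. Two illustrative calls:

	overlaps(0, 4096, 8, 4096);	/* 0: sectors 0..7 vs. 8..15, disjoint     */
	overlaps(0, 4096, 7, 4096);	/* 1: sectors 0..7 vs. 7..14, they overlap */
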
+
+/* maybe change sync_ee into interval trees as well? */
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+{
+ struct drbd_peer_request *rs_req;
bool rv = 0;
- spin_lock_irq(&mdev->req_lock);
- list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
- if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+ if (overlaps(peer_req->i.sector, peer_req->i.size,
+ rs_req->i.sector, rs_req->i.size)) {
rv = 1;
break;
}
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return rv;
}
@@ -1643,35 +1966,41 @@ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_e
*
* returns 0 if we may process the packet,
* -ERESTARTSYS if we were interrupted (by disconnect signal). */
-static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
+static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
DEFINE_WAIT(wait);
- unsigned int p_seq;
long timeout;
- int ret = 0;
+ int ret;
+
+ if (!need_peer_seq(mdev))
+ return 0;
+
spin_lock(&mdev->peer_seq_lock);
for (;;) {
- prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
- if (seq_le(packet_seq, mdev->peer_seq+1))
+ if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
+ mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
+ ret = 0;
break;
+ }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
- p_seq = mdev->peer_seq;
+ prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock(&mdev->peer_seq_lock);
- timeout = schedule_timeout(30*HZ);
+ rcu_read_lock();
+ timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
+ rcu_read_unlock();
+ timeout = schedule_timeout(timeout);
spin_lock(&mdev->peer_seq_lock);
- if (timeout == 0 && p_seq == mdev->peer_seq) {
+ if (!timeout) {
ret = -ETIMEDOUT;
- dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
+ dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
break;
}
}
- finish_wait(&mdev->seq_wait, &wait);
- if (mdev->peer_seq+1 == packet_seq)
- mdev->peer_seq++;
spin_unlock(&mdev->peer_seq_lock);
+ finish_wait(&mdev->seq_wait, &wait);
return ret;
}
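
The wait is now bounded by the configured ping timeout rather than a fixed 30 seconds; judging from the */10, ping_timeo appears to be kept in tenths of a second (assumed here purely for illustration):

	timeout = 5 * HZ / 10;	/* ping_timeo == 5  -> half a second in jiffies */
	timeout = 30 * HZ / 10;	/* ping_timeo == 30 -> three seconds            */
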
@@ -1686,233 +2015,277 @@ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
+static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
+ unsigned int size)
+{
+ struct drbd_interval *i;
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ struct drbd_request *req;
+ struct bio_and_error m;
+
+ if (!i->local)
+ continue;
+ req = container_of(i, struct drbd_request, i);
+ if (!(req->rq_state & RQ_POSTPONED))
+ continue;
+ req->rq_state &= ~RQ_POSTPONED;
+ __req_mod(req, NEG_ACKED, &m);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ goto repeat;
+ }
+}
+
+static int handle_write_conflicts(struct drbd_conf *mdev,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_tconn *tconn = mdev->tconn;
+ bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
+ sector_t sector = peer_req->i.sector;
+ const unsigned int size = peer_req->i.size;
+ struct drbd_interval *i;
+ bool equal;
+ int err;
+
+ /*
+ * Inserting the peer request into the write_requests tree will prevent
+ * new conflicting local requests from being added.
+ */
+ drbd_insert_interval(&mdev->write_requests, &peer_req->i);
+
+ repeat:
+ drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
+ if (i == &peer_req->i)
+ continue;
+
+ if (!i->local) {
+ /*
+ * Our peer has sent a conflicting remote request; this
+ * should not happen in a two-node setup. Wait for the
+ * earlier peer request to complete.
+ */
+ err = drbd_wait_misc(mdev, i);
+ if (err)
+ goto out;
+ goto repeat;
+ }
+
+ equal = i->sector == sector && i->size == size;
+ if (resolve_conflicts) {
+ /*
+ * If the peer request is fully contained within the
+ * overlapping request, it can be considered overwritten
+ * and thus superseded; otherwise, it will be retried
+ * once all overlapping requests have completed.
+ */
+ bool superseded = i->sector <= sector && i->sector +
+ (i->size >> 9) >= sector + (size >> 9);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u, "
+ "assuming %s came first\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size,
+ superseded ? "local" : "remote");
+
+ inc_unacked(mdev);
+ peer_req->w.cb = superseded ? e_send_superseded :
+ e_send_retry_write;
+ list_add_tail(&peer_req->w.list, &mdev->done_ee);
+ wake_asender(mdev->tconn);
+
+ err = -ENOENT;
+ goto out;
+ } else {
+ struct drbd_request *req =
+ container_of(i, struct drbd_request, i);
+
+ if (!equal)
+ dev_alert(DEV, "Concurrent writes detected: "
+ "local=%llus +%u, remote=%llus +%u\n",
+ (unsigned long long)i->sector, i->size,
+ (unsigned long long)sector, size);
+
+ if (req->rq_state & RQ_LOCAL_PENDING ||
+ !(req->rq_state & RQ_POSTPONED)) {
+ /*
+ * Wait for the node with the discard flag to
+ * decide if this request has been superseded
+ * or needs to be retried.
+ * Requests that have been superseded will
+ * disappear from the write_requests tree.
+ *
+ * In addition, wait for the conflicting
+ * request to finish locally before submitting
+ * the conflicting peer request.
+ */
+ err = drbd_wait_misc(mdev, &req->i);
+ if (err) {
+ _conn_request_state(mdev->tconn,
+ NS(conn, C_TIMEOUT),
+ CS_HARD);
+ fail_postponed_requests(mdev, sector, size);
+ goto out;
+ }
+ goto repeat;
+ }
+ /*
+ * Remember to restart the conflicting requests after
+ * the new peer request has completed.
+ */
+ peer_req->flags |= EE_RESTART_REQUESTS;
+ }
+ }
+ err = 0;
+
+ out:
+ if (err)
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ return err;
+}
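
A worked example of the containment test that decides between "superseded" and "retry" (numbers chosen for illustration; sizes are bytes, sectors are 512 bytes):

	/* overlapping local request i: 8192 bytes at sector 0, i.e. sectors 0..15 */
	(0 <= 4)  && (0 + (8192 >> 9) >= 4  + (2048 >> 9));	/* peer 4..7:   contained -> superseded */
	(0 <= 12) && (0 + (8192 >> 9) >= 12 + (4096 >> 9));	/* peer 12..19: spills over -> retried  */
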
+
/* mirrored write */
-static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- struct drbd_epoch_entry *e;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct drbd_peer_request *peer_req;
+ struct p_data *p = pi->data;
+ u32 peer_seq = be32_to_cpu(p->seq_num);
int rw = WRITE;
u32 dp_flags;
+ int err, tp;
- if (!get_ldev(mdev)) {
- spin_lock(&mdev->peer_seq_lock);
- if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
- mdev->peer_seq++;
- spin_unlock(&mdev->peer_seq_lock);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
- atomic_inc(&mdev->current_epoch->epoch_size);
- return drbd_drain_block(mdev, data_size);
+ if (!get_ldev(mdev)) {
+ int err2;
+
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ atomic_inc(&tconn->current_epoch->epoch_size);
+ err2 = drbd_drain_block(mdev, pi->size);
+ if (!err)
+ err = err2;
+ return err;
}
- /* get_ldev(mdev) successful.
- * Corresponding put_ldev done either below (on various errors),
- * or in drbd_endio_write_sec, if we successfully submit the data at
- * the end of this function. */
+ /*
+ * Corresponding put_ldev done either below (on various errors), or in
+ * drbd_peer_request_endio, if we successfully submit the data at the
+ * end of this function.
+ */
sector = be64_to_cpu(p->sector);
- e = read_in_block(mdev, p->block_id, sector, data_size);
- if (!e) {
+ peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -EIO;
}
- e->w.cb = e_end_block;
+ peer_req->w.cb = e_end_block;
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(mdev, dp_flags);
- if (e->pages == NULL) {
- D_ASSERT(e->size == 0);
+ if (peer_req->pages == NULL) {
+ D_ASSERT(peer_req->i.size == 0);
D_ASSERT(dp_flags & DP_FLUSH);
}
if (dp_flags & DP_MAY_SET_IN_SYNC)
- e->flags |= EE_MAY_SET_IN_SYNC;
-
- spin_lock(&mdev->epoch_lock);
- e->epoch = mdev->current_epoch;
- atomic_inc(&e->epoch->epoch_size);
- atomic_inc(&e->epoch->active);
- spin_unlock(&mdev->epoch_lock);
-
- /* I'm the receiver, I do hold a net_cnt reference. */
- if (!mdev->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
- } else {
- /* don't get the req_lock yet,
- * we may sleep in drbd_wait_peer_seq */
- const int size = e->size;
- const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
- DEFINE_WAIT(wait);
- struct drbd_request *i;
- struct hlist_node *n;
- struct hlist_head *slot;
- int first;
-
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- BUG_ON(mdev->ee_hash == NULL);
- BUG_ON(mdev->tl_hash == NULL);
-
- /* conflict detection and handling:
- * 1. wait on the sequence number,
- * in case this data packet overtook ACK packets.
- * 2. check our hash tables for conflicting requests.
- * we only need to walk the tl_hash, since an ee can not
- * have a conflict with an other ee: on the submitting
- * node, the corresponding req had already been conflicting,
- * and a conflicting req is never sent.
- *
- * Note: for two_primaries, we are protocol C,
- * so there cannot be any request that is DONE
- * but still on the transfer log.
- *
- * unconditionally add to the ee_hash.
- *
- * if no conflicting request is found:
- * submit.
- *
- * if any conflicting request is found
- * that has not yet been acked,
- * AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the P_DISCARD_ACK; OUT.
- *
- * if any conflicting request is found:
- * block the receiver, waiting on misc_wait
- * until no more conflicting requests are there,
- * or we get interrupted (disconnect).
- *
- * we do not just write after local io completion of those
- * requests, but only after req is done completely, i.e.
- * we wait for the P_DISCARD_ACK to arrive!
- *
- * then proceed normally, i.e. submit.
- */
- if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
+ peer_req->flags |= EE_MAY_SET_IN_SYNC;
+
+ spin_lock(&tconn->epoch_lock);
+ peer_req->epoch = tconn->current_epoch;
+ atomic_inc(&peer_req->epoch->epoch_size);
+ atomic_inc(&peer_req->epoch->active);
+ spin_unlock(&tconn->epoch_lock);
+
+ rcu_read_lock();
+ tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+ rcu_read_unlock();
+ if (tp) {
+ peer_req->flags |= EE_IN_INTERVAL_TREE;
+ err = wait_for_and_update_peer_seq(mdev, peer_seq);
+ if (err)
goto out_interrupted;
-
- spin_lock_irq(&mdev->req_lock);
-
- hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- first = 1;
- for (;;) {
- int have_unacked = 0;
- int have_conflict = 0;
- prepare_to_wait(&mdev->misc_wait, &wait,
- TASK_INTERRUPTIBLE);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- /* only ALERT on first iteration,
- * we may be woken up early... */
- if (first)
- dev_alert(DEV, "%s[%u] Concurrent local write detected!"
- " new: %llus +%u; pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- if (i->rq_state & RQ_NET_PENDING)
- ++have_unacked;
- ++have_conflict;
- }
- }
-#undef OVERLAPS
- if (!have_conflict)
- break;
-
- /* Discard Ack only for the _first_ iteration */
- if (first && discard && have_unacked) {
- dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
- (unsigned long long)sector);
- inc_unacked(mdev);
- e->w.cb = e_send_discard_ack;
- list_add_tail(&e->w.list, &mdev->done_ee);
-
- spin_unlock_irq(&mdev->req_lock);
-
- /* we could probably send that P_DISCARD_ACK ourselves,
- * but I don't like the receiver using the msock */
-
+ spin_lock_irq(&mdev->tconn->req_lock);
+ err = handle_write_conflicts(mdev, peer_req);
+ if (err) {
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (err == -ENOENT) {
put_ldev(mdev);
- wake_asender(mdev);
- finish_wait(&mdev->misc_wait, &wait);
- return true;
+ return 0;
}
+ goto out_interrupted;
+ }
+ } else
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->active_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (signal_pending(current)) {
- hlist_del_init(&e->collision);
-
- spin_unlock_irq(&mdev->req_lock);
-
- finish_wait(&mdev->misc_wait, &wait);
- goto out_interrupted;
- }
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
- spin_unlock_irq(&mdev->req_lock);
- if (first) {
- first = 0;
- dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
- "sec=%llus\n", (unsigned long long)sector);
- } else if (discard) {
- /* we had none on the first iteration.
- * there must be none now. */
- D_ASSERT(have_unacked == 0);
- }
- schedule();
- spin_lock_irq(&mdev->req_lock);
+ if (mdev->tconn->agreed_pro_version < 100) {
+ rcu_read_lock();
+ switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
+ case DRBD_PROT_C:
+ dp_flags |= DP_SEND_WRITE_ACK;
+ break;
+ case DRBD_PROT_B:
+ dp_flags |= DP_SEND_RECEIVE_ACK;
+ break;
}
- finish_wait(&mdev->misc_wait, &wait);
+ rcu_read_unlock();
}
- list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->state.conn == C_SYNC_TARGET)
- wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
-
- switch (mdev->net_conf->wire_protocol) {
- case DRBD_PROT_C:
+ if (dp_flags & DP_SEND_WRITE_ACK) {
+ peer_req->flags |= EE_SEND_WRITE_ACK;
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
* respective _drbd_clear_done_ee */
- break;
- case DRBD_PROT_B:
+ }
+
+ if (dp_flags & DP_SEND_RECEIVE_ACK) {
/* I really don't like it that the receiver thread
* sends on the msock, but anyways */
- drbd_send_ack(mdev, P_RECV_ACK, e);
- break;
- case DRBD_PROT_A:
- /* nothing to do */
- break;
+ drbd_send_ack(mdev, P_RECV_ACK, peer_req);
}
if (mdev->state.pdsk < D_INCONSISTENT) {
/* In case we have the only disk of the cluster, */
- drbd_set_out_of_sync(mdev, e->sector, e->size);
- e->flags |= EE_CALL_AL_COMPLETE_IO;
- e->flags &= ~EE_MAY_SET_IN_SYNC;
- drbd_al_begin_io(mdev, e->sector);
+ drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
+ peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
+ peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
+ drbd_al_begin_io(mdev, &peer_req->i);
}
- if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
- return true;
+ err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
+ if (!err)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- hlist_del_init(&e->collision);
- spin_unlock_irq(&mdev->req_lock);
- if (e->flags & EE_CALL_AL_COMPLETE_IO)
- drbd_al_complete_io(mdev, e->sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ drbd_remove_epoch_entry_interval(mdev, peer_req);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
+ drbd_al_complete_io(mdev, &peer_req->i);
out_interrupted:
- drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return err;
}
/* We may throttle resync, if the lower device seems to be busy,
@@ -1933,9 +2306,14 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
struct lc_element *tmp;
int curr_events;
int throttle = 0;
+ unsigned int c_min_rate;
+
+ rcu_read_lock();
+ c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+ rcu_read_unlock();
/* feature disabled? */
- if (mdev->sync_conf.c_min_rate == 0)
+ if (c_min_rate == 0)
return 0;
spin_lock_irq(&mdev->al_lock);
@@ -1975,40 +2353,46 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
db = mdev->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
- if (dbdt > mdev->sync_conf.c_min_rate)
+ if (dbdt > c_min_rate)
throttle = 1;
}
return throttle;
}
-static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
+static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ sector_t capacity;
+ struct drbd_peer_request *peer_req;
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
- struct p_block_req *p = &mdev->data.rbuf.block_req;
+ struct p_block_req *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+ capacity = drbd_get_capacity(mdev->this_bdev);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (sector + (size>>9) > capacity) {
dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
(unsigned long long)sector, size);
- return false;
+ return -EINVAL;
}
if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
verb = 1;
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
break;
@@ -2023,35 +2407,34 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
+ BUG();
}
if (verb && __ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
/* drain possibly payload */
- return drbd_drain_block(mdev, digest_size);
+ return drbd_drain_block(mdev, pi->size);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
- if (!e) {
+ peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
+ if (!peer_req) {
put_ldev(mdev);
- return false;
+ return -ENOMEM;
}
- switch (cmd) {
+ switch (pi->cmd) {
case P_DATA_REQUEST:
- e->w.cb = w_e_end_data_req;
+ peer_req->w.cb = w_e_end_data_req;
fault_type = DRBD_FAULT_DT_RD;
/* application IO, don't drbd_rs_begin_io */
goto submit;
case P_RS_DATA_REQUEST:
- e->w.cb = w_e_end_rsdata_req;
+ peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2060,28 +2443,28 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REPLY:
case P_CSUM_RS_REQUEST:
fault_type = DRBD_FAULT_RS_RD;
- di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
+ di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
if (!di)
goto out_free_e;
- di->digest_size = digest_size;
+ di->digest_size = pi->size;
di->digest = (((char *)di)+sizeof(struct digest_info));
- e->digest = di;
- e->flags |= EE_HAS_DIGEST;
+ peer_req->digest = di;
+ peer_req->flags |= EE_HAS_DIGEST;
- if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
+ if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
goto out_free_e;
- if (cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->agreed_pro_version >= 89);
- e->w.cb = w_e_end_csum_rs_req;
+ if (pi->cmd == P_CSUM_RS_REQUEST) {
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
+ peer_req->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
- } else if (cmd == P_OV_REPLY) {
+ } else if (pi->cmd == P_OV_REPLY) {
/* track progress, we may need to throttle */
atomic_add(size >> 9, &mdev->rs_sect_in);
- e->w.cb = w_e_end_ov_reply;
+ peer_req->w.cb = w_e_end_ov_reply;
dec_rs_pending(mdev);
/* drbd_rs_begin_io done when we sent this request,
* but accounting still needs to be done. */
@@ -2091,7 +2474,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->agreed_pro_version >= 90) {
+ mdev->tconn->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
mdev->ov_start_sector = sector;
@@ -2105,15 +2488,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
dev_info(DEV, "Online Verify start sector: %llu\n",
(unsigned long long)sector);
}
- e->w.cb = w_e_end_ov_req;
+ peer_req->w.cb = w_e_end_ov_req;
fault_type = DRBD_FAULT_RS_RD;
break;
default:
- dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
- cmdname(cmd));
- fault_type = DRBD_FAULT_MAX;
- goto out_free_e;
+ BUG();
}
/* Throttle, drbd_rs_begin_io and submit should become asynchronous
@@ -2148,30 +2528,31 @@ submit_for_resync:
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
- return true;
+ if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
+ return 0;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
put_ldev(mdev);
- drbd_free_ee(mdev, e);
- return false;
+ drbd_free_peer_req(mdev, peer_req);
+ return -EIO;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
int self, peer, rv = -100;
unsigned long ch_self, ch_peer;
+ enum drbd_after_sb_p after_sb_0p;
self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
peer = mdev->p_uuid[UI_BITMAP] & 1;
@@ -2179,10 +2560,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
ch_peer = mdev->p_uuid[UI_SIZE];
ch_self = mdev->comm_bm_set;
- switch (mdev->net_conf->after_sb_0p) {
+ rcu_read_lock();
+ after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
+ rcu_read_unlock();
+ switch (after_sb_0p) {
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
case ASB_CALL_HELPER:
+ case ASB_VIOLENTLY:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2211,14 +2596,14 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
} else {
if (ch_peer == 0) { rv = 1; break; }
if (ch_self == 0) { rv = -1; break; }
}
- if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+ if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
@@ -2227,7 +2612,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2243,13 +2628,18 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_1p;
- switch (mdev->net_conf->after_sb_1p) {
+ rcu_read_lock();
+ after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
+ rcu_read_unlock();
+ switch (after_sb_1p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
case ASB_DISCARD_LOCAL:
case ASB_DISCARD_REMOTE:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_DISCONNECT:
@@ -2292,8 +2682,12 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
int hg, rv = -100;
+ enum drbd_after_sb_p after_sb_2p;
- switch (mdev->net_conf->after_sb_2p) {
+ rcu_read_lock();
+ after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
+ rcu_read_unlock();
+ switch (after_sb_2p) {
case ASB_DISCARD_YOUNGER_PRI:
case ASB_DISCARD_OLDER_PRI:
case ASB_DISCARD_LEAST_CHG:
@@ -2301,6 +2695,7 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
case ASB_DISCARD_REMOTE:
case ASB_CONSENSUS:
case ASB_DISCARD_SECONDARY:
+ case ASB_DISCARD_ZERO_CHG:
dev_err(DEV, "Configuration error.\n");
break;
case ASB_VIOLENTLY:
@@ -2386,13 +2781,15 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_set_bm(mdev, 0UL);
+ drbd_uuid_move_history(mdev);
+ mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+ mdev->ldev->md.uuid[UI_BITMAP] = 0;
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2407,7 +2804,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2440,7 +2837,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
return dc ? -1 : 1;
}
}
@@ -2453,14 +2850,14 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
the peer made to its UUIDs when it last started a resync as sync source. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
@@ -2490,18 +2887,18 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
made to our UUIDs when we last started a resync as sync source. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
- _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2545,20 +2942,24 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
enum drbd_disk_state peer_disk) __must_hold(local)
{
- int hg, rule_nr;
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
+ struct net_conf *nc;
+ int hg, rule_nr, rr_conflict, tentative;
mydisk = mdev->state.disk;
if (mydisk == D_NEGOTIATING)
mydisk = mdev->new_state_tmp.disk;
dev_info(DEV, "drbd_sync_handshake:\n");
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
hg = drbd_uuid_compare(mdev, &rule_nr);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -2584,7 +2985,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (abs(hg) == 100)
drbd_khelper(mdev, "initial-split-brain");
- if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+
+ if (hg == 100 || (hg == -100 && nc->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -2613,9 +3017,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
if (hg == -100) {
- if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+ if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
hg = -1;
- if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+ if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
hg = 1;
if (abs(hg) < 100)
@@ -2623,6 +3027,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -2641,7 +3048,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
- switch (mdev->net_conf->rr_conflict) {
+ switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(mdev, "pri-lost");
/* fall through */
@@ -2654,7 +3061,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
@@ -2686,33 +3093,29 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
return rv;
}
-/* returns 1 if invalid */
-static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
+static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
- if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
- (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
- return 0;
+ if (peer == ASB_DISCARD_REMOTE)
+ return ASB_DISCARD_LOCAL;
/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
- if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
- self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
- return 1;
+ if (peer == ASB_DISCARD_LOCAL)
+ return ASB_DISCARD_REMOTE;
/* everything else is valid if they are equal on both sides. */
- if (peer == self)
- return 0;
-
- /* everything es is invalid. */
- return 1;
+ return peer;
}
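
The new convert_after_sb() replaces the old pairwise cmp_after_sb() check: the receiver mirrors the peer's after-split-brain policy (the peer's "discard remote" corresponds to our "discard local" and vice versa) and then simply compares the result against the local net_conf value. A minimal standalone sketch of that mirror-and-compare idea, using hypothetical enum values rather than drbd's headers:

#include <assert.h>

/* hypothetical stand-ins for drbd's enum drbd_after_sb_p values */
enum after_sb_p { ASB_DISCONNECT, ASB_DISCARD_LOCAL, ASB_DISCARD_REMOTE };

static enum after_sb_p mirror(enum after_sb_p peer)
{
	if (peer == ASB_DISCARD_REMOTE)
		return ASB_DISCARD_LOCAL;
	if (peer == ASB_DISCARD_LOCAL)
		return ASB_DISCARD_REMOTE;
	return peer;            /* symmetric policies map to themselves */
}

int main(void)
{
	/* compatible: the peer discards the remote (us), we discard our local data */
	assert(mirror(ASB_DISCARD_REMOTE) == ASB_DISCARD_LOCAL);
	/* incompatible: both sides would discard the same node's data */
	assert(mirror(ASB_DISCARD_LOCAL) != ASB_DISCARD_LOCAL);
	/* symmetric settings only match when equal on both sides */
	assert(mirror(ASB_DISCONNECT) == ASB_DISCONNECT);
	return 0;
}

Anything other than the discard-remote/discard-local mirror image, or an unequal symmetric setting, then shows up as an "incompatible ... settings" disconnect in receive_protocol() below.
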
-static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_protocol *p = &mdev->data.rbuf.protocol;
- int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
- int p_want_lose, p_two_primaries, cf;
- char p_integrity_alg[SHARED_SECRET_MAX] = "";
+ struct p_protocol *p = pi->data;
+ enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
+ int p_proto, p_discard_my_data, p_two_primaries, cf;
+ struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
+ char integrity_alg[SHARED_SECRET_MAX] = "";
+ struct crypto_hash *peer_integrity_tfm = NULL;
+ void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
@@ -2720,63 +3123,138 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
p_two_primaries = be32_to_cpu(p->two_primaries);
cf = be32_to_cpu(p->conn_flags);
- p_want_lose = cf & CF_WANT_LOSE;
-
- clear_bit(CONN_DRY_RUN, &mdev->flags);
+ p_discard_my_data = cf & CF_DISCARD_MY_DATA;
- if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &mdev->flags);
+ if (tconn->agreed_pro_version >= 87) {
+ int err;
- if (p_proto != mdev->net_conf->wire_protocol) {
- dev_err(DEV, "incompatible communication protocols\n");
- goto disconnect;
+ if (pi->size > sizeof(integrity_alg))
+ return -EIO;
+ err = drbd_recv_all(tconn, integrity_alg, pi->size);
+ if (err)
+ return err;
+ integrity_alg[SHARED_SECRET_MAX - 1] = 0;
}
- if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
- dev_err(DEV, "incompatible after-sb-0pri settings\n");
- goto disconnect;
- }
+ if (pi->cmd != P_PROTOCOL_UPDATE) {
+ clear_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
- dev_err(DEV, "incompatible after-sb-1pri settings\n");
- goto disconnect;
- }
+ if (cf & CF_DRY_RUN)
+ set_bit(CONN_DRY_RUN, &tconn->flags);
- if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
- dev_err(DEV, "incompatible after-sb-2pri settings\n");
- goto disconnect;
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
- if (p_want_lose && mdev->net_conf->want_lose) {
- dev_err(DEV, "both sides have the 'want_lose' flag set\n");
- goto disconnect;
- }
+ if (p_proto != nc->wire_protocol) {
+ conn_err(tconn, "incompatible %s settings\n", "protocol");
+ goto disconnect_rcu_unlock;
+ }
- if (p_two_primaries != mdev->net_conf->two_primaries) {
- dev_err(DEV, "incompatible setting of the two-primaries options\n");
- goto disconnect;
+ if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
+ conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_discard_my_data && nc->discard_my_data) {
+ conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (p_two_primaries != nc->two_primaries) {
+ conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+ goto disconnect_rcu_unlock;
+ }
+
+ if (strcmp(integrity_alg, nc->integrity_alg)) {
+ conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+ goto disconnect_rcu_unlock;
+ }
+
+ rcu_read_unlock();
}
- if (mdev->agreed_pro_version >= 87) {
- unsigned char *my_alg = mdev->net_conf->integrity_alg;
+ if (integrity_alg[0]) {
+ int hash_size;
- if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
- return false;
+ /*
+ * We can only change the peer data integrity algorithm
+ * here. Changing our own data integrity algorithm
+ * requires that we send a P_PROTOCOL_UPDATE packet at
+ * the same time; otherwise, the peer has no way to
+ * tell between which packets the algorithm should
+ * change.
+ */
- p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
- if (strcmp(p_integrity_alg, my_alg)) {
- dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
+ peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ if (!peer_integrity_tfm) {
+ conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+ integrity_alg);
goto disconnect;
}
- dev_info(DEV, "data-integrity-alg: %s\n",
- my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
+
+ hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ int_dig_in = kmalloc(hash_size, GFP_KERNEL);
+ int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
+ if (!(int_dig_in && int_dig_vv)) {
+ conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
+ goto disconnect;
+ }
+ }
+
+ new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ conn_err(tconn, "Allocation of new net_conf failed\n");
+ goto disconnect;
}
- return true;
+ mutex_lock(&tconn->data.mutex);
+ mutex_lock(&tconn->conf_update);
+ old_net_conf = tconn->net_conf;
+ *new_net_conf = *old_net_conf;
+
+ new_net_conf->wire_protocol = p_proto;
+ new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
+ new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
+ new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
+ new_net_conf->two_primaries = p_two_primaries;
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
+ mutex_unlock(&tconn->conf_update);
+ mutex_unlock(&tconn->data.mutex);
+
+ crypto_free_hash(tconn->peer_integrity_tfm);
+ kfree(tconn->int_dig_in);
+ kfree(tconn->int_dig_vv);
+ tconn->peer_integrity_tfm = peer_integrity_tfm;
+ tconn->int_dig_in = int_dig_in;
+ tconn->int_dig_vv = int_dig_vv;
+
+ if (strcmp(old_net_conf->integrity_alg, integrity_alg))
+ conn_info(tconn, "peer data-integrity-alg: %s\n",
+ integrity_alg[0] ? integrity_alg : "(none)");
+
+ synchronize_rcu();
+ kfree(old_net_conf);
+ return 0;
+
+disconnect_rcu_unlock:
+ rcu_read_unlock();
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ crypto_free_hash(peer_integrity_tfm);
+ kfree(int_dig_in);
+ kfree(int_dig_vv);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
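
receive_protocol() now updates tconn->net_conf with the usual RCU publish pattern: allocate a private copy, modify it, publish it with rcu_assign_pointer() while holding conf_update, and free the old structure only after synchronize_rcu() guarantees that no reader can still see it. A simplified, single-threaded userspace sketch of that copy-update-publish-reclaim sequence (the kernel primitives are reduced to comments; struct net_conf_like and its fields are invented for illustration):

#include <stdlib.h>

struct net_conf_like {
	int wire_protocol;
	int two_primaries;
};

/* readers would dereference this under rcu_read_lock() */
static struct net_conf_like *current_conf;

/* Mimics the update in receive_protocol(): never modify the published copy in place. */
static int update_conf(int proto, int two_primaries)
{
	struct net_conf_like *old_conf = current_conf;
	struct net_conf_like *new_conf = malloc(sizeof(*new_conf));

	if (!new_conf)
		return -1;
	*new_conf = *old_conf;                 /* start from the current settings */
	new_conf->wire_protocol = proto;
	new_conf->two_primaries = two_primaries;

	current_conf = new_conf;               /* kernel: rcu_assign_pointer(tconn->net_conf, new_conf) */
	/* kernel: synchronize_rcu() waits here until no reader still holds old_conf */
	free(old_conf);                        /* kernel: kfree(old_net_conf) */
	return 0;
}

int main(void)
{
	current_conf = calloc(1, sizeof(*current_conf));
	if (!current_conf)
		return -1;
	return update_conf(3, 0);              /* hypothetical protocol and flag values */
}

The same copy-update-publish-reclaim shape recurs below for disk_conf in receive_SyncParam() and receive_sizes().
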
/* helper function
@@ -2798,24 +3276,64 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
alg, name, PTR_ERR(tfm));
return tfm;
}
- if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
- crypto_free_hash(tfm);
- dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
- return ERR_PTR(-EINVAL);
- }
return tfm;
}
-static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
+static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ void *buffer = tconn->data.rbuf;
+ int size = pi->size;
+
+ while (size) {
+ int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
+ s = drbd_recv(tconn, buffer, s);
+ if (s <= 0) {
+ if (s < 0)
+ return s;
+ break;
+ }
+ size -= s;
+ }
+ if (size)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * config_unknown_volume - device configuration command for unknown volume
+ *
+ * When a device is added to an existing connection, the node on which the
+ * device is added first will send configuration commands to its peer but the
+ * peer will not know about the device yet. It will warn and ignore these
+ * commands. Once the device is added on the second node, the second node will
+ * send the same device configuration commands, but in the other direction.
+ *
+ * (We can also end up here if drbd is misconfigured.)
+ */
+static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
{
- int ok = true;
- struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+ conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+ cmdname(pi->cmd), pi->vnr);
+ return ignore_remaining_packet(tconn, pi);
+}
+
+static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
- const int apv = mdev->agreed_pro_version;
- int *rs_plan_s = NULL;
+ struct net_conf *old_net_conf, *new_net_conf = NULL;
+ struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
+ const int apv = tconn->agreed_pro_version;
+ struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int fifo_size = 0;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
: apv == 88 ? sizeof(struct p_rs_param)
@@ -2823,32 +3341,49 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
: apv <= 94 ? sizeof(struct p_rs_param_89)
: /* apv >= 95 */ sizeof(struct p_rs_param_95);
- if (packet_size > exp_max_sz) {
+ if (pi->size > exp_max_sz) {
dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
- packet_size, exp_max_sz);
- return false;
+ pi->size, exp_max_sz);
+ return -EIO;
}
if (apv <= 88) {
- header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param);
+ data_size = pi->size - header_size;
} else if (apv <= 94) {
- header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_89);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
} else {
- header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
- data_size = packet_size - header_size;
+ header_size = sizeof(struct p_rs_param_95);
+ data_size = pi->size - header_size;
D_ASSERT(data_size == 0);
}
/* initialize verify_alg and csums_alg */
+ p = pi->data;
memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
- if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
- return false;
+ err = drbd_recv_all(mdev->tconn, p, header_size);
+ if (err)
+ return err;
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
+ mutex_lock(&mdev->tconn->conf_update);
+ old_net_conf = mdev->tconn->net_conf;
+ if (get_ldev(mdev)) {
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ put_ldev(mdev);
+ mutex_unlock(&mdev->tconn->conf_update);
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ return -ENOMEM;
+ }
+
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+
+ new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
+ }
if (apv >= 88) {
if (apv == 88) {
@@ -2856,12 +3391,13 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
dev_err(DEV, "verify-alg of wrong size, "
"peer wants %u, accepting only up to %u byte\n",
data_size, SHARED_SECRET_MAX);
- return false;
+ err = -EIO;
+ goto reconnect;
}
- if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
- return false;
-
+ err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
+ if (err)
+ goto reconnect;
/* we expect NUL terminated string */
/* but just in case someone tries to be evil */
D_ASSERT(p->verify_alg[data_size-1] == 0);
@@ -2876,10 +3412,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
p->csums_alg[SHARED_SECRET_MAX-1] = 0;
}
- if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
+ if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.verify_alg, p->verify_alg);
+ old_net_conf->verify_alg, p->verify_alg);
goto disconnect;
}
verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2890,10 +3426,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
+ if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
if (mdev->state.conn == C_WF_REPORT_PARAMS) {
dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
- mdev->sync_conf.csums_alg, p->csums_alg);
+ old_net_conf->csums_alg, p->csums_alg);
goto disconnect;
}
csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -2904,57 +3440,91 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
}
}
- if (apv > 94) {
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
- mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
- mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
- mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
- mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
-
- fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
- if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
- if (!rs_plan_s) {
+ if (apv > 94 && new_disk_conf) {
+ new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+ new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
+ new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
+ new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
+
+ fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+ if (fifo_size != mdev->rs_plan_s->size) {
+ new_plan = fifo_alloc(fifo_size);
+ if (!new_plan) {
				dev_err(DEV, "allocation of fifo_buffer failed");
+ put_ldev(mdev);
goto disconnect;
}
}
}
- spin_lock(&mdev->peer_seq_lock);
- /* lock against drbd_nl_syncer_conf() */
- if (verify_tfm) {
- strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
- mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(mdev->verify_tfm);
- mdev->verify_tfm = verify_tfm;
- dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
- }
- if (csums_tfm) {
- strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
- mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(mdev->csums_tfm);
- mdev->csums_tfm = csums_tfm;
- dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
- }
- if (fifo_size != mdev->rs_plan_s.size) {
- kfree(mdev->rs_plan_s.values);
- mdev->rs_plan_s.values = rs_plan_s;
- mdev->rs_plan_s.size = fifo_size;
- mdev->rs_planed = 0;
+ if (verify_tfm || csums_tfm) {
+ new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+ if (!new_net_conf) {
+ dev_err(DEV, "Allocation of new net_conf failed\n");
+ goto disconnect;
+ }
+
+ *new_net_conf = *old_net_conf;
+
+ if (verify_tfm) {
+ strcpy(new_net_conf->verify_alg, p->verify_alg);
+ new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+ crypto_free_hash(mdev->tconn->verify_tfm);
+ mdev->tconn->verify_tfm = verify_tfm;
+ dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
+ }
+ if (csums_tfm) {
+ strcpy(new_net_conf->csums_alg, p->csums_alg);
+ new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+ crypto_free_hash(mdev->tconn->csums_tfm);
+ mdev->tconn->csums_tfm = csums_tfm;
+ dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
+ }
+ rcu_assign_pointer(tconn->net_conf, new_net_conf);
}
- spin_unlock(&mdev->peer_seq_lock);
}
- return ok;
+ if (new_disk_conf) {
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ put_ldev(mdev);
+ }
+
+ if (new_plan) {
+ old_plan = mdev->rs_plan_s;
+ rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+ }
+
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ if (new_net_conf)
+ kfree(old_net_conf);
+ kfree(old_disk_conf);
+ kfree(old_plan);
+
+ return 0;
+
+reconnect:
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
+ return -EIO;
+
disconnect:
+ kfree(new_plan);
+ if (new_disk_conf) {
+ put_ldev(mdev);
+ kfree(new_disk_conf);
+ }
+ mutex_unlock(&mdev->tconn->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_hash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_hash(verify_tfm);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
/* warn if the arguments differ by more than 12.5% */
@@ -2970,59 +3540,77 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
(unsigned long long)a, (unsigned long long)b);
}
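
The 12.5% in the comment above falls out of a shift: two sizes are considered to differ considerably when their difference exceeds one eighth of either value. The body of warn_if_differ_considerably() lies outside this hunk, so the following is only an illustration of such a check, not the kernel's exact code:

#include <stdbool.h>
#include <stdint.h>

static bool differ_considerably(uint64_t a, uint64_t b)
{
	uint64_t d = a > b ? a - b : b - a;

	/* x >> 3 is x / 8, i.e. 12.5% of x */
	return d > (a >> 3) || d > (b >> 3);
}

int main(void)
{
	/* 1000 vs 1100 sectors (10% apart): quiet; 1000 vs 1200 (20% apart): warn */
	return (!differ_considerably(1000, 1100) &&
		differ_considerably(1000, 1200)) ? 0 : 1;
}
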
-static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_sizes *p = &mdev->data.rbuf.sizes;
+ struct drbd_conf *mdev;
+ struct p_sizes *p = pi->data;
enum determine_dev_size dd = unchanged;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
- if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
- dev_err(DEV, "some backing storage is needed\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
- }
-
/* just store the peer's disk size for now.
* we still need to figure out whether we accept that. */
mdev->p_size = p_size;
if (get_ldev(mdev)) {
+ rcu_read_lock();
+ my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+ rcu_read_unlock();
+
warn_if_differ_considerably(mdev, "lower level device sizes",
p_size, drbd_get_max_capacity(mdev->ldev));
warn_if_differ_considerably(mdev, "user requested size",
- p_usize, mdev->ldev->dc.disk_size);
+ p_usize, my_usize);
/* if this is the first connect, or an otherwise expected
* param exchange, choose the minimum */
if (mdev->state.conn == C_WF_REPORT_PARAMS)
- p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
- p_usize);
-
- my_usize = mdev->ldev->dc.disk_size;
-
- if (mdev->ldev->dc.disk_size != p_usize) {
- mdev->ldev->dc.disk_size = p_usize;
- dev_info(DEV, "Peer sets u_size to %lu sectors\n",
- (unsigned long)mdev->ldev->dc.disk_size);
- }
+ p_usize = min_not_zero(my_usize, p_usize);
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
- drbd_get_capacity(mdev->this_bdev) &&
- mdev->state.disk >= D_OUTDATED &&
- mdev->state.conn < C_CONNECTED) {
+ if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
+ drbd_get_capacity(mdev->this_bdev) &&
+ mdev->state.disk >= D_OUTDATED &&
+ mdev->state.conn < C_CONNECTED) {
dev_err(DEV, "The peer's disk size is too small!\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- mdev->ldev->dc.disk_size = my_usize;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(mdev);
- return false;
+ return -EIO;
+ }
+
+ if (my_usize != p_usize) {
+ struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+
+ new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+ if (!new_disk_conf) {
+ dev_err(DEV, "Allocation of new disk_conf failed\n");
+ put_ldev(mdev);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mdev->tconn->conf_update);
+ old_disk_conf = mdev->ldev->disk_conf;
+ *new_disk_conf = *old_disk_conf;
+ new_disk_conf->disk_size = p_usize;
+
+ rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+ mutex_unlock(&mdev->tconn->conf_update);
+ synchronize_rcu();
+ kfree(old_disk_conf);
+
+ dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+ (unsigned long)my_usize);
}
+
put_ldev(mdev);
}
@@ -3031,7 +3619,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dd = drbd_determine_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
- return false;
+ return -EIO;
drbd_md_sync(mdev);
} else {
/* I am diskless, need to accept the peer's size. */
@@ -3070,16 +3658,25 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- return true;
+ return 0;
}
-static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_uuids *p = &mdev->data.rbuf.uuids;
+ struct drbd_conf *mdev;
+ struct p_uuids *p = pi->data;
u64 *p_uuid;
int i, updated_uuids = 0;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ if (!p_uuid) {
+ dev_err(DEV, "kmalloc of p_uuid failed\n");
+		return -ENOMEM;
+ }
for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
p_uuid[i] = be64_to_cpu(p->uuid[i]);
@@ -3093,14 +3690,14 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
(mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
(unsigned long long)mdev->ed_uuid);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
- mdev->agreed_pro_version >= 90 &&
+ mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
@@ -3127,14 +3724,15 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ mutex_lock(mdev->state_mutex);
+ mutex_unlock(mdev->state_mutex);
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
if (updated_uuids)
drbd_print_uuids(mdev, "receiver updated UUIDs to");
- return true;
+ return 0;
}
/**
@@ -3146,6 +3744,7 @@ static union drbd_state convert_state(union drbd_state ps)
union drbd_state ms;
static enum drbd_conns c_tab[] = {
+ [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
[C_CONNECTED] = C_CONNECTED,
[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
@@ -3167,40 +3766,74 @@ static union drbd_state convert_state(union drbd_state ps)
return ms;
}
-static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state *p = &mdev->data.rbuf.req_state;
+ struct drbd_conf *mdev;
+ struct p_req_state *p = pi->data;
union drbd_state mask, val;
enum drbd_state_rv rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
- test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+ if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
+ mutex_is_locked(mdev->state_mutex)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
- return true;
+ return 0;
}
mask = convert_state(mask);
val = convert_state(val);
rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
-
drbd_send_sr_reply(mdev, rv);
+
drbd_md_sync(mdev);
- return true;
+ return 0;
}
-static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_state *p = &mdev->data.rbuf.state;
+ struct p_req_state *p = pi->data;
+ union drbd_state mask, val;
+ enum drbd_state_rv rv;
+
+ mask.i = be32_to_cpu(p->mask);
+ val.i = be32_to_cpu(p->val);
+
+ if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
+ mutex_is_locked(&tconn->cstate_mutex)) {
+ conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
+ return 0;
+ }
+
+ mask = convert_state(mask);
+ val = convert_state(val);
+
+ rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
+ conn_send_sr_reply(tconn, rv);
+
+ return 0;
+}
+
+static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_state *p = pi->data;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
int rv;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return config_unknown_volume(tconn, pi);
+
peer_state.i = be32_to_cpu(p->state);
real_peer_disk = peer_state.disk;
@@ -3209,16 +3842,16 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
- os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ os = ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* If some other part of the code (asender thread, timeout)
* already decided to close the connection again,
* we must not "re-establish" it here. */
if (os.conn <= C_TEAR_DOWN)
- return false;
+ return -ECONNRESET;
/* If this is the "end of sync" confirmation, usually the peer disk
* transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
@@ -3246,10 +3879,18 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.conn == C_CONNECTED) {
if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
drbd_resync_finished(mdev);
- return true;
+ return 0;
}
}
+ /* explicit verify finished notification, stop sector reached. */
+ if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
+ peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
+ ov_out_of_sync_print(mdev);
+ drbd_resync_finished(mdev);
+ return 0;
+ }
+
/* peer says his disk is inconsistent, while we think it is uptodate,
* and this happens while the peer still thinks we have a sync going on,
* but we think we are already done with the sync.
@@ -3298,17 +3939,17 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
- return false;
+ if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
+ return -EIO;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
}
}
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.i != os.i)
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (os.i != drbd_read_state(mdev).i)
goto retry;
clear_bit(CONSIDER_RESYNC, &mdev->flags);
ns.peer = peer_state.role;
@@ -3317,25 +3958,25 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
- if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
+ if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
test_bit(NEW_CUR_UUID, &mdev->flags)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
	   for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
- tl_clear(mdev);
+ tl_clear(mdev->tconn);
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
- drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
- return false;
+ conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
+ return -EIO;
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
- ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ ns = drbd_read_state(mdev);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return false;
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
}
if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3349,16 +3990,21 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
- mdev->net_conf->want_lose = 0;
+ clear_bit(DISCARD_MY_DATA, &mdev->flags);
drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
- return true;
+ return 0;
}
-static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+ struct drbd_conf *mdev;
+ struct p_rs_uuid *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3381,7 +4027,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
- return true;
+ return 0;
}
/**
@@ -3391,27 +4037,27 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
* code upon failure.
*/
static int
-receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
- unsigned long *buffer, struct bm_xfer_ctx *c)
+receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
+ unsigned long *p, struct bm_xfer_ctx *c)
{
- unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
- unsigned want = num_words * sizeof(long);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
+ drbd_header_size(mdev->tconn);
+ unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
+ c->bm_words - c->word_offset);
+ unsigned int want = num_words * sizeof(*p);
int err;
- if (want != data_size) {
- dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
+ if (want != size) {
+ dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
return -EIO;
}
if (want == 0)
return 0;
- err = drbd_recv(mdev, buffer, want);
- if (err != want) {
- if (err >= 0)
- err = -EIO;
+ err = drbd_recv_all(mdev->tconn, p, want);
+ if (err)
return err;
- }
- drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
+ drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG;
@@ -3421,6 +4067,21 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
return 1;
}
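
receive_bitmap_plain() no longer takes the word count from the peer's length field; it derives how many bitmap words fit into one packet from the local socket buffer size minus the header size and then insists that the peer sent exactly that much. A worked example with hypothetical sizes (4096-byte socket buffer, 16-byte header, 8-byte longs):

#include <stdio.h>

int main(void)
{
	unsigned int buffer_size = 4096;   /* hypothetical DRBD_SOCKET_BUFFER_SIZE */
	unsigned int header_size = 16;     /* hypothetical drbd_header_size() result */
	unsigned int word_size   = 8;      /* sizeof(unsigned long) on 64-bit */
	unsigned int words_left  = 100000; /* c->bm_words - c->word_offset */

	unsigned int data_size = buffer_size - header_size;   /* 4080 bytes */
	unsigned int num_words = data_size / word_size;        /* 510 words  */
	if (num_words > words_left)
		num_words = words_left;
	unsigned int want = num_words * word_size;             /* 4080 bytes expected */

	printf("expect %u bytes (%u words) in this bitmap packet\n", want, num_words);
	return 0;
}
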
+static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
+{
+ return (enum drbd_bitmap_code)(p->encoding & 0x0f);
+}
+
+static int dcbp_get_start(struct p_compressed_bm *p)
+{
+ return (p->encoding & 0x80) != 0;
+}
+
+static int dcbp_get_pad_bits(struct p_compressed_bm *p)
+{
+ return (p->encoding >> 4) & 0x7;
+}
+
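
The encoding byte of p_compressed_bm packs three fields: the bitmap code in the low nibble, the number of pad bits in bits 4-6, and a "first run is set bits" flag in bit 7, which is exactly what the three dcbp_get_*() helpers above pull apart. A small self-contained sketch of packing and unpacking that byte (the code value 2 is only a placeholder):

#include <assert.h>
#include <stdint.h>

static uint8_t dcbp_pack(unsigned code, unsigned pad_bits, int start)
{
	/* code: 4 bits, pad_bits: 3 bits, start: 1 bit */
	return (code & 0x0f) | ((pad_bits & 0x7) << 4) | (start ? 0x80 : 0);
}

int main(void)
{
	uint8_t enc = dcbp_pack(2 /* placeholder code value */, 5, 1);

	assert((enc & 0x0f) == 2);        /* dcbp_get_code()     */
	assert(((enc >> 4) & 0x7) == 5);  /* dcbp_get_pad_bits() */
	assert((enc & 0x80) != 0);        /* dcbp_get_start()    */
	return 0;
}
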
/**
* recv_bm_rle_bits
*
@@ -3430,7 +4091,8 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
struct bitstream bs;
u64 look_ahead;
@@ -3438,12 +4100,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
u64 tmp;
unsigned long s = c->bit_offset;
unsigned long e;
- int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
- int toggle = DCBP_get_start(p);
+ int toggle = dcbp_get_start(p);
int have;
int bits;
- bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
+ bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
bits = bitstream_get_bits(&bs, &look_ahead, 64);
if (bits < 0)
@@ -3495,17 +4156,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
static int
decode_bitmap_c(struct drbd_conf *mdev,
struct p_compressed_bm *p,
- struct bm_xfer_ctx *c)
+ struct bm_xfer_ctx *c,
+ unsigned int len)
{
- if (DCBP_get_code(p) == RLE_VLI_Bits)
- return recv_bm_rle_bits(mdev, p, c);
+ if (dcbp_get_code(p) == RLE_VLI_Bits)
+ return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
/* other variants had been implemented for evaluation,
* but have been dropped as this one turned out to be "best"
* during all our tests. */
dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+ conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
return -EIO;
}
@@ -3513,11 +4175,13 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
const char *direction, struct bm_xfer_ctx *c)
{
/* what would it take to transfer it "plaintext" */
- unsigned plain = sizeof(struct p_header80) *
- ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
- + c->bm_words * sizeof(long);
- unsigned total = c->bytes[0] + c->bytes[1];
- unsigned r;
+ unsigned int header_size = drbd_header_size(mdev->tconn);
+ unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
+ unsigned int plain =
+ header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
+ c->bm_words * sizeof(unsigned long);
+ unsigned int total = c->bytes[0] + c->bytes[1];
+ unsigned int r;
/* total can not be zero. but just in case: */
if (total == 0)
@@ -3551,67 +4215,63 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
in order to be agnostic to the 32 vs 64 bits issue.
   returns 0 on success, a negative error code otherwise. */
-static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
struct bm_xfer_ctx c;
- void *buffer;
int err;
- int ok = false;
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
* if you actually set bits during this phase */
- /* maybe we should use some per thread scratch page,
- * and allocate that during initial device creation? */
- buffer = (unsigned long *) __get_free_page(GFP_NOIO);
- if (!buffer) {
- dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
- goto out;
- }
-
c = (struct bm_xfer_ctx) {
.bm_bits = drbd_bm_bits(mdev),
.bm_words = drbd_bm_words(mdev),
};
for(;;) {
- if (cmd == P_BITMAP) {
- err = receive_bitmap_plain(mdev, data_size, buffer, &c);
- } else if (cmd == P_COMPRESSED_BITMAP) {
+ if (pi->cmd == P_BITMAP)
+ err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
+ else if (pi->cmd == P_COMPRESSED_BITMAP) {
/* MAYBE: sanity check that we speak proto >= 90,
* and the feature is enabled! */
- struct p_compressed_bm *p;
+ struct p_compressed_bm *p = pi->data;
- if (data_size > BM_PACKET_PAYLOAD_BYTES) {
+ if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
dev_err(DEV, "ReportCBitmap packet too large\n");
+ err = -EIO;
goto out;
}
- /* use the page buff */
- p = buffer;
- memcpy(p, h, sizeof(*h));
- if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
- goto out;
- if (data_size <= (sizeof(*p) - sizeof(p->head))) {
- dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
+ if (pi->size <= sizeof(*p)) {
+ dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
+ err = -EIO;
goto out;
}
- err = decode_bitmap_c(mdev, p, &c);
+ err = drbd_recv_all(mdev->tconn, p, pi->size);
+ if (err)
+ goto out;
+ err = decode_bitmap_c(mdev, p, &c, pi->size);
} else {
- dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
+ dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
+ err = -EIO;
goto out;
}
- c.packets[cmd == P_BITMAP]++;
- c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
+ c.packets[pi->cmd == P_BITMAP]++;
+ c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
if (err <= 0) {
if (err < 0)
goto out;
break;
}
- if (!drbd_recv_header(mdev, &cmd, &data_size))
+ err = drbd_recv_header(mdev->tconn, pi);
+ if (err)
goto out;
}
@@ -3620,8 +4280,8 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
if (mdev->state.conn == C_WF_BITMAP_T) {
enum drbd_state_rv rv;
- ok = !drbd_send_bitmap(mdev);
- if (!ok)
+ err = drbd_send_bitmap(mdev);
+ if (err)
goto out;
/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
@@ -3632,47 +4292,40 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
drbd_conn_str(mdev->state.conn));
}
+ err = 0;
- ok = true;
out:
drbd_bm_unlock(mdev);
- if (ok && mdev->state.conn == C_WF_BITMAP_S)
+ if (!err && mdev->state.conn == C_WF_BITMAP_S)
drbd_start_resync(mdev, C_SYNC_SOURCE);
- free_page((unsigned long) buffer);
- return ok;
+ return err;
}
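
The statistics updates in receive_bitmap() use the boolean expression pi->cmd == P_BITMAP directly as an array index, so slot 1 counts plain bitmap packets and slot 0 counts compressed ones without a branch. A tiny standalone illustration of that idiom, with made-up packet types and sizes:

#include <stdio.h>

enum { PKT_COMPRESSED, PKT_PLAIN, PKT_OTHER };

int main(void)
{
	unsigned int packets[2] = { 0, 0 };   /* [0] = compressed, [1] = plain */
	unsigned int bytes[2]   = { 0, 0 };
	int sample_cmds[]            = { PKT_PLAIN, PKT_COMPRESSED, PKT_PLAIN };
	unsigned int sample_sizes[]  = { 4080, 512, 4080 };

	for (int i = 0; i < 3; i++) {
		int plain = (sample_cmds[i] == PKT_PLAIN);  /* evaluates to 0 or 1 */
		packets[plain]++;
		bytes[plain] += sample_sizes[i];
	}
	printf("plain: %u packets/%u bytes, compressed: %u packets/%u bytes\n",
	       packets[1], bytes[1], packets[0], bytes[0]);
	return 0;
}
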
-static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
- /* TODO zero copy sink :) */
- static char sink[128];
- int size, want, r;
+ conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
+ pi->cmd, pi->size);
- dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
- cmd, data_size);
-
- size = data_size;
- while (size > 0) {
- want = min_t(int, size, sizeof(sink));
- r = drbd_recv(mdev, sink, want);
- ERR_IF(r <= 0) break;
- size -= r;
- }
- return size == 0;
+ return ignore_remaining_packet(tconn, pi);
}
-static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
+ drbd_tcp_quickack(tconn->data.socket);
- return true;
+ return 0;
}
-static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+ struct drbd_conf *mdev;
+ struct p_block_desc *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
switch (mdev->state.conn) {
case C_WF_SYNC_UUID:
@@ -3686,15 +4339,13 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un
drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
- return true;
+ return 0;
}
-typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
-
struct data_cmd {
int expect_payload;
size_t pkt_size;
- drbd_cmd_handler_f function;
+ int (*fn)(struct drbd_tconn *, struct packet_info *);
};
static struct data_cmd drbd_cmd_handler[] = {
@@ -3702,13 +4353,13 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
[P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
[P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
- [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
- [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
+ [P_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
+ [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
[P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
- [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
- [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
+ [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
+ [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
[P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
[P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
@@ -3720,124 +4371,75 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
- /* anything missing from this table is in
- * the asender_tbl, see get_asender_cmd */
- [P_MAX_CMD] = { 0, 0, NULL },
+ [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
+ [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
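
drbdd() below indexes this table directly with the received command number, so the array serves both as a validity check (out-of-range commands and NULL .fn entries are rejected) and as a declaration of each packet's fixed header size and whether a variable payload may follow. A condensed sketch of the same dispatch pattern, with invented packet types and handlers:

#include <stddef.h>

struct packet_info { unsigned int cmd, size; void *data; };

/* hypothetical handlers */
static int recv_ping(struct packet_info *pi)  { (void)pi; return 0; }
static int recv_write(struct packet_info *pi) { (void)pi; return 0; }

static const struct {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(struct packet_info *);
} handlers[] = {
	[0] = { 0, 0,  recv_ping  },   /* fixed part only, no payload allowed */
	[1] = { 1, 24, recv_write },   /* 24-byte fixed header, payload follows */
};

static int dispatch(struct packet_info *pi)
{
	if (pi->cmd >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[pi->cmd].fn)
		return -1;                 /* unknown packet */
	if (pi->size > handlers[pi->cmd].pkt_size && !handlers[pi->cmd].expect_payload)
		return -1;                 /* unexpected payload */
	return handlers[pi->cmd].fn(pi);
}

int main(void)
{
	struct packet_info pi = { .cmd = 0, .size = 0, .data = NULL };
	return dispatch(&pi);
}
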
-/* All handler functions that expect a sub-header get that sub-heder in
- mdev->data.rbuf.header.head.payload.
-
- Usually in mdev->data.rbuf.header.head the callback can find the usual
- p_header, but they may not rely on that. Since there is also p_header95 !
- */
-
-static void drbdd(struct drbd_conf *mdev)
+static void drbdd(struct drbd_tconn *tconn)
{
- union p_header *header = &mdev->data.rbuf.header;
- unsigned int packet_size;
- enum drbd_packets cmd;
+ struct packet_info pi;
size_t shs; /* sub header size */
- int rv;
+ int err;
+
+ while (get_t_state(&tconn->receiver) == RUNNING) {
+ struct data_cmd *cmd;
- while (get_t_state(&mdev->receiver) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (!drbd_recv_header(mdev, &cmd, &packet_size))
+ drbd_thread_current_set_cpu(&tconn->receiver);
+ if (drbd_recv_header(tconn, &pi))
goto err_out;
- if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
- dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
+ cmd = &drbd_cmd_handler[pi.cmd];
+ if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
+ conn_err(tconn, "Unexpected data packet %s (0x%04x)",
+ cmdname(pi.cmd), pi.cmd);
goto err_out;
}
- shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
- if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
- dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
+ shs = cmd->pkt_size;
+ if (pi.size > shs && !cmd->expect_payload) {
+ conn_err(tconn, "No payload expected %s l:%d\n",
+ cmdname(pi.cmd), pi.size);
goto err_out;
}
if (shs) {
- rv = drbd_recv(mdev, &header->h80.payload, shs);
- if (unlikely(rv != shs)) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
+ err = drbd_recv_all_warn(tconn, pi.data, shs);
+ if (err)
goto err_out;
- }
+ pi.size -= shs;
}
- rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
-
- if (unlikely(!rv)) {
- dev_err(DEV, "error receiving %s, l: %d!\n",
- cmdname(cmd), packet_size);
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
+ cmdname(pi.cmd), err, pi.size);
goto err_out;
}
}
+ return;
- if (0) {
- err_out:
- drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
- }
- /* If we leave here, we probably want to update at least the
- * "Connected" indicator on stable storage. Do so explicitly here. */
- drbd_md_sync(mdev);
+ err_out:
+ conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
-void drbd_flush_workqueue(struct drbd_conf *mdev)
+void conn_flush_workqueue(struct drbd_tconn *tconn)
{
struct drbd_wq_barrier barr;
barr.w.cb = w_prev_work_done;
+ barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&mdev->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
-void drbd_free_tl_hash(struct drbd_conf *mdev)
-{
- struct hlist_head *h;
-
- spin_lock_irq(&mdev->req_lock);
-
- if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
- spin_unlock_irq(&mdev->req_lock);
- return;
- }
- /* paranoia code */
- for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
- if (h->first)
- dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
- (int)(h - mdev->ee_hash), h->first);
- kfree(mdev->ee_hash);
- mdev->ee_hash = NULL;
- mdev->ee_hash_s = 0;
-
- /* We may not have had the chance to wait for all locally pending
- * application requests. The hlist_add_fake() prevents access after
- * free on master bio completion. */
- for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
- struct drbd_request *req;
- struct hlist_node *pos, *n;
- hlist_for_each_entry_safe(req, pos, n, h, collision) {
- hlist_del_init(&req->collision);
- hlist_add_fake(&req->collision);
- }
- }
-
- kfree(mdev->tl_hash);
- mdev->tl_hash = NULL;
- mdev->tl_hash_s = 0;
- spin_unlock_irq(&mdev->req_lock);
-}
-
-static void drbd_disconnect(struct drbd_conf *mdev)
+static void conn_disconnect(struct drbd_tconn *tconn)
{
- enum drbd_fencing_p fp;
- union drbd_state os, ns;
- int rv = SS_UNKNOWN_ERROR;
- unsigned int i;
+ struct drbd_conf *mdev;
+ enum drbd_conns oc;
+ int vnr;
- if (mdev->state.conn == C_STANDALONE)
+ if (tconn->cstate == C_STANDALONE)
return;
/* We are about to start the cleanup after connection loss.
@@ -3845,18 +4447,54 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Usually we should be in some network failure state already,
* but just in case we are not, we fix it up here.
*/
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&mdev->asender);
- drbd_free_sock(mdev);
+ drbd_thread_stop(&tconn->asender);
+ drbd_free_sock(tconn);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_disconnected(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ if (!list_empty(&tconn->current_epoch->list))
+ conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+ atomic_set(&tconn->current_epoch->epoch_size, 0);
+ tconn->send.seen_any_write_yet = false;
+
+ conn_info(tconn, "Connection closed\n");
+
+ if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
+ conn_try_outdate_peer_async(tconn);
+
+ spin_lock_irq(&tconn->req_lock);
+ oc = tconn->cstate;
+ if (oc >= C_UNCONNECTED)
+ _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
+ spin_unlock_irq(&tconn->req_lock);
+
+ if (oc == C_DISCONNECTING)
+ conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
+}
+
+static int drbd_disconnected(struct drbd_conf *mdev)
+{
+ unsigned int i;
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
@@ -3874,7 +4512,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- /* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -3883,50 +4520,25 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* to be "canceled" */
drbd_flush_workqueue(mdev);
- /* This also does reclaim_net_ee(). If we do this too early, we might
- * miss some resync ee and pages.*/
- drbd_process_done_ee(mdev);
+ drbd_finish_peer_reqs(mdev);
+
+ /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
+	   might have queued further work. The one before drbd_finish_peer_reqs() is
+	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
+ drbd_flush_workqueue(mdev);
+
+ /* need to do it again, drbd_finish_peer_reqs() may have populated it
+ * again via drbd_try_clear_on_disk_bm(). */
+ drbd_rs_cancel_all(mdev);
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
- if (!is_susp(mdev->state))
- tl_clear(mdev);
-
- dev_info(DEV, "Connection closed\n");
+ if (!drbd_suspended(mdev))
+ tl_clear(mdev->tconn);
drbd_md_sync(mdev);
- fp = FP_DONT_CARE;
- if (get_ldev(mdev)) {
- fp = mdev->ldev->dc.fencing;
- put_ldev(mdev);
- }
-
- if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
- drbd_try_outdate_peer_async(mdev);
-
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
- if (os.conn >= C_UNCONNECTED) {
- /* Do not restart in case we are C_DISCONNECTING */
- ns = os;
- ns.conn = C_UNCONNECTED;
- rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (os.conn == C_DISCONNECTING) {
- wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
-
- crypto_free_hash(mdev->cram_hmac_tfm);
- mdev->cram_hmac_tfm = NULL;
-
- kfree(mdev->net_conf);
- mdev->net_conf = NULL;
- drbd_request_state(mdev, NS(conn, C_STANDALONE));
- }
-
/* serialize with bitmap writeout triggered by the state change,
* if any. */
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
@@ -3938,7 +4550,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
* Actually we don't care for exactly when the network stack does its
* put_page(), but release our reference on these pages right here.
*/
- i = drbd_release_ee(mdev, &mdev->net_ee);
+ i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
if (i)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use_by_net);
@@ -3953,9 +4565,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
D_ASSERT(list_empty(&mdev->sync_ee));
D_ASSERT(list_empty(&mdev->done_ee));
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
- atomic_set(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
+ return 0;
}
/*
@@ -3967,29 +4577,19 @@ static void drbd_disconnect(struct drbd_conf *mdev)
*
* for now, they are expected to be zero, but ignored.
*/
-static int drbd_send_handshake(struct drbd_conf *mdev)
+static int drbd_send_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.sbuf.handshake;
- int ok;
-
- if (mutex_lock_interruptible(&mdev->data.mutex)) {
- dev_err(DEV, "interrupted during initial handshake\n");
- return 0; /* interrupted. not ok. */
- }
-
- if (mdev->data.socket == NULL) {
- mutex_unlock(&mdev->data.mutex);
- return 0;
- }
+ struct drbd_socket *sock;
+ struct p_connection_features *p;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
- (struct p_header80 *)p, sizeof(*p), 0 );
- mutex_unlock(&mdev->data.mutex);
- return ok;
+ return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
@@ -3999,42 +4599,38 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
* -1 peer talks different language,
* no point in trying again, please go standalone.
*/
-static int drbd_do_handshake(struct drbd_conf *mdev)
+static int drbd_do_features(struct drbd_tconn *tconn)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.rbuf.handshake;
- const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
- unsigned int length;
- enum drbd_packets cmd;
- int rv;
+ /* ASSERT current == tconn->receiver ... */
+ struct p_connection_features *p;
+ const int expect = sizeof(struct p_connection_features);
+ struct packet_info pi;
+ int err;
- rv = drbd_send_handshake(mdev);
- if (!rv)
+ err = drbd_send_features(tconn);
+ if (err)
return 0;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err)
return 0;
- if (cmd != P_HAND_SHAKE) {
- dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_CONNECTION_FEATURES) {
+ conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
return -1;
}
- if (length != expect) {
- dev_err(DEV, "expected HandShake length: %u, received: %u\n",
- expect, length);
+ if (pi.size != expect) {
+ conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
+ expect, pi.size);
return -1;
}
- rv = drbd_recv(mdev, &p->head.payload, expect);
-
- if (rv != expect) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
+ p = pi.data;
+ err = drbd_recv_all_warn(tconn, p, expect);
+ if (err)
return 0;
- }
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
@@ -4045,15 +4641,15 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
- dev_info(DEV, "Handshake successful: "
- "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+ conn_info(tconn, "Handshake successful: "
+ "Agreed network protocol version %d\n", tconn->agreed_pro_version);
return 1;
incompat:
- dev_err(DEV, "incompatible DRBD dialects: "
+ conn_err(tconn, "incompatible DRBD dialects: "
"I support %d-%d, peer supports %d-%d\n",
PRO_VERSION_MIN, PRO_VERSION_MAX,
p->protocol_min, p->protocol_max);
@@ -4061,7 +4657,7 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
}
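
The version agreement in drbd_do_features() boils down to this: refuse if the advertised ranges do not overlap, otherwise run the highest protocol version both sides support, i.e. min(local maximum, peer maximum). A short numeric sketch with hypothetical version ranges:

#include <stdio.h>

int main(void)
{
	int my_min = 86, my_max = 101;      /* hypothetical PRO_VERSION_MIN/MAX */
	int peer_min = 90, peer_max = 96;   /* as received in p_connection_features */

	if (peer_max < my_min || my_max < peer_min) {
		fprintf(stderr, "incompatible: I support %d-%d, peer supports %d-%d\n",
			my_min, my_max, peer_min, peer_max);
		return 1;
	}
	int agreed = my_max < peer_max ? my_max : peer_max;      /* min(my_max, peer_max) */
	printf("agreed network protocol version %d\n", agreed);   /* prints 96 here */
	return 0;
}
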
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
@@ -4076,121 +4672,139 @@ static int drbd_do_auth(struct drbd_conf *mdev)
-1 - auth failed, don't try again.
*/
-static int drbd_do_auth(struct drbd_conf *mdev)
+static int drbd_do_auth(struct drbd_tconn *tconn)
{
+ struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
- unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
struct hash_desc desc;
- enum drbd_packets cmd;
- unsigned int length;
- int rv;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+
+ /* FIXME: Put the challenge/response into the preallocated socket buffer. */
- desc.tfm = mdev->cram_hmac_tfm;
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ key_len = strlen(nc->shared_secret);
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
+ desc.tfm = tconn->cram_hmac_tfm;
desc.flags = 0;
- rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
- (u8 *)mdev->net_conf->shared_secret, key_len);
+ rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
get_random_bytes(my_challenge, CHALLENGE_LEN);
- rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
+ sock = &tconn->data;
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
+ goto fail;
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
+ my_challenge, CHALLENGE_LEN);
if (!rv)
goto fail;
- rv = drbd_recv_header(mdev, &cmd, &length);
- if (!rv)
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
+ rv = 0;
goto fail;
+ }
- if (cmd != P_AUTH_CHALLENGE) {
- dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ if (pi.cmd != P_AUTH_CHALLENGE) {
+ conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- if (length > CHALLENGE_LEN * 2) {
- dev_err(DEV, "expected AuthChallenge payload too big.\n");
+ if (pi.size > CHALLENGE_LEN * 2) {
+ conn_err(tconn, "expected AuthChallenge payload too big.\n");
rv = -1;
goto fail;
}
- peers_ch = kmalloc(length, GFP_NOIO);
+ peers_ch = kmalloc(pi.size, GFP_NOIO);
if (peers_ch == NULL) {
- dev_err(DEV, "kmalloc of peers_ch failed\n");
+ conn_err(tconn, "kmalloc of peers_ch failed\n");
rv = -1;
goto fail;
}
- rv = drbd_recv(mdev, peers_ch, length);
-
- if (rv != length) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
+ if (err) {
rv = 0;
goto fail;
}
- resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
+ resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
- dev_err(DEV, "kmalloc of response failed\n");
+ conn_err(tconn, "kmalloc of response failed\n");
rv = -1;
goto fail;
}
sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, length);
+ sg_set_buf(&sg, peers_ch, pi.size);
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
- rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
- if (!rv)
+ if (!conn_prepare_command(tconn, sock)) {
+ rv = 0;
goto fail;
-
- rv = drbd_recv_header(mdev, &cmd, &length);
+ }
+ rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
+ response, resp_size);
if (!rv)
goto fail;
- if (cmd != P_AUTH_RESPONSE) {
- dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
- cmdname(cmd), cmd);
+ err = drbd_recv_header(tconn, &pi);
+ if (err) {
rv = 0;
goto fail;
}
- if (length != resp_size) {
- dev_err(DEV, "expected AuthResponse payload of wrong size\n");
+ if (pi.cmd != P_AUTH_RESPONSE) {
+ conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
rv = 0;
goto fail;
}
- rv = drbd_recv(mdev, response , resp_size);
+ if (pi.size != resp_size) {
+ conn_err(tconn, "expected AuthResponse payload of wrong size\n");
+ rv = 0;
+ goto fail;
+ }
- if (rv != resp_size) {
- if (!signal_pending(current))
- dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+ err = drbd_recv_all_warn(tconn, response, resp_size);
+ if (err) {
rv = 0;
goto fail;
}
right_response = kmalloc(resp_size, GFP_NOIO);
if (right_response == NULL) {
- dev_err(DEV, "kmalloc of right_response failed\n");
+ conn_err(tconn, "kmalloc of right_response failed\n");
rv = -1;
goto fail;
}
@@ -4199,7 +4813,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
- dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
+ conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -4207,8 +4821,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = !memcmp(response, right_response, resp_size);
if (rv)
- dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
- resp_size, mdev->net_conf->cram_hmac_alg);
+ conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
+ resp_size);
else
rv = -1;
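The authentication exchange above follows the usual challenge/response shape: each side sends a random challenge, receives back a keyed digest of it, and recomputes that digest locally to compare. The sketch below shows only the round trip and is not part of the patch; toy_mac() is an invented stand-in for the real crypto_hash HMAC and must not be used for actual authentication.

#include <stdio.h>
#include <string.h>

/* Toy keyed digest: mixes the shared secret and the challenge bytes. */
static void toy_mac(const char *key, const char *challenge, size_t len,
		    unsigned char out[4])
{
	unsigned int h = 2166136261u;
	size_t i;

	for (i = 0; key[i]; i++)
		h = (h ^ (unsigned char)key[i]) * 16777619u;
	for (i = 0; i < len; i++)
		h = (h ^ (unsigned char)challenge[i]) * 16777619u;
	memcpy(out, &h, 4);
}

int main(void)
{
	const char *secret = "shared-secret";
	const char challenge[] = "random-bytes-from-peer";
	unsigned char response[4], expected[4];

	toy_mac(secret, challenge, sizeof(challenge), response);	/* peer's reply */
	toy_mac(secret, challenge, sizeof(challenge), expected);	/* local check  */
	printf("%s\n", memcmp(response, expected, 4) == 0 ? "authenticated" : "reject");
	return 0;
}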
@@ -4223,82 +4837,106 @@ static int drbd_do_auth(struct drbd_conf *mdev)
int drbdd_init(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- unsigned int minor = mdev_to_minor(mdev);
+ struct drbd_tconn *tconn = thi->tconn;
int h;
- sprintf(current->comm, "drbd%d_receiver", minor);
-
- dev_info(DEV, "receiver (re)started\n");
+ conn_info(tconn, "receiver (re)started\n");
do {
- h = drbd_connect(mdev);
+ h = conn_connect(tconn);
if (h == 0) {
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
schedule_timeout_interruptible(HZ);
}
if (h == -1) {
- dev_warn(DEV, "Discarding network configuration.\n");
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_warn(tconn, "Discarding network configuration.\n");
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
} while (h == 0);
- if (h > 0) {
- if (get_net_conf(mdev)) {
- drbdd(mdev);
- put_net_conf(mdev);
- }
- }
+ if (h > 0)
+ drbdd(tconn);
- drbd_disconnect(mdev);
+ conn_disconnect(tconn);
- dev_info(DEV, "receiver terminated\n");
+ conn_info(tconn, "receiver terminated\n");
return 0;
}
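The receiver entry point above retries the connect step until it either succeeds or learns that the peer speaks an incompatible dialect. A small self-contained sketch of that retry loop follows, with try_connect() as an invented stand-in for conn_connect(); the pause between attempts is only mentioned in the output.

#include <stdio.h>

/* Stand-in connect attempt: fails once, then succeeds. */
static int try_connect(void)
{
	static int attempt;
	return ++attempt < 2 ? 0 : 1;
}

int main(void)
{
	int h;

	do {
		h = try_connect();
		if (h == 0)
			printf("connect failed, retrying after a short pause\n");
		if (h == -1) {
			printf("peer incompatible, going standalone\n");
			break;
		}
	} while (h == 0);

	if (h > 0)
		printf("connected, entering receive loop\n");
	return 0;
}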
/* ********* acknowledge sender ******** */
-static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_req_state_reply *p = (struct p_req_state_reply *)h;
+ struct p_req_state_reply *p = pi->data;
+ int retcode = be32_to_cpu(p->retcode);
+
+ if (retcode >= SS_SUCCESS) {
+ set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
+ } else {
+ set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
+ conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
+ drbd_set_st_err_str(retcode), retcode);
+ }
+ wake_up(&tconn->ping_wait);
+
+ return 0;
+}
+static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ struct drbd_conf *mdev;
+ struct p_req_state_reply *p = pi->data;
int retcode = be32_to_cpu(p->retcode);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
+ D_ASSERT(tconn->agreed_pro_version < 100);
+ return got_conn_RqSReply(tconn, pi);
+ }
+
if (retcode >= SS_SUCCESS) {
set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
} else {
set_bit(CL_ST_CHG_FAIL, &mdev->flags);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
- drbd_set_st_err_str(retcode), retcode);
+ drbd_set_st_err_str(retcode), retcode);
}
wake_up(&mdev->state_wait);
- return true;
+ return 0;
}
-static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
- return drbd_send_ping_ack(mdev);
+ return drbd_send_ping_ack(tconn);
}
-static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
- wake_up(&mdev->misc_wait);
+ tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
+ if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
+ wake_up(&tconn->ping_wait);
- return true;
+ return 0;
}
-static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4312,162 +4950,139 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
dec_rs_pending(mdev);
atomic_add(blksize >> 9, &mdev->rs_sect_in);
- return true;
-}
-
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = tl_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- if (req->sector != sector) {
- dev_err(DEV, "_ack_id_to_req: found req %p but it has "
- "wrong sector (%llus versus %llus)\n", req,
- (unsigned long long)req->sector,
- (unsigned long long)sector);
- break;
- }
- return req;
- }
- }
- return NULL;
+ return 0;
}
-typedef struct drbd_request *(req_validator_fn)
- (struct drbd_conf *mdev, u64 id, sector_t sector);
-
-static int validate_req_change_req_state(struct drbd_conf *mdev,
- u64 id, sector_t sector, req_validator_fn validator,
- const char *func, enum drbd_req_event what)
+static int
+validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+ struct rb_root *root, const char *func,
+ enum drbd_req_event what, bool missing_ok)
{
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
- req = validator(mdev, id, sector);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
-
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
- (void *)(unsigned long)id, (unsigned long long)sector);
- return false;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ return -EIO;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
enum drbd_req_event what;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
drbd_set_in_sync(mdev, sector, blksize);
dec_rs_pending(mdev);
- return true;
+ return 0;
}
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_RS_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
break;
case P_WRITE_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
break;
case P_RECV_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
break;
- case P_DISCARD_ACK:
- D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ case P_SUPERSEDED:
+ what = CONFLICT_RESOLVED;
+ break;
+ case P_RETRY_WRITE:
+ what = POSTPONE_WRITE;
break;
default:
- D_ASSERT(0);
- return false;
+ BUG();
}
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ack_id_to_req, __func__ , what);
+ &mdev->write_requests, __func__,
+ what, false);
}
-static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
int size = be32_to_cpu(p->blksize);
- struct drbd_request *req;
- struct bio_and_error m;
+ int err;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- if (is_syncer_block_id(p->block_id)) {
+ if (p->block_id == ID_SYNCER) {
dec_rs_pending(mdev);
drbd_rs_failed_io(mdev, sector, size);
- return true;
+ return 0;
}
- spin_lock_irq(&mdev->req_lock);
- req = _ack_id_to_req(mdev, p->block_id, sector);
- if (!req) {
- spin_unlock_irq(&mdev->req_lock);
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
- mdev->net_conf->wire_protocol == DRBD_PROT_B) {
- /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
- The master bio might already be completed, therefore the
- request is no longer in the collision hash.
- => Do not try to validate block_id as request. */
- /* In Protocol B we might already have got a P_RECV_ACK
- but then get a P_NEG_ACK after wards. */
- drbd_set_out_of_sync(mdev, sector, size);
- return true;
- } else {
- dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
- (void *)(unsigned long)p->block_id, (unsigned long long)sector);
- return false;
- }
+ err = validate_req_change_req_state(mdev, p->block_id, sector,
+ &mdev->write_requests, __func__,
+ NEG_ACKED, true);
+ if (err) {
+ /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+ The master bio might already be completed, therefore the
+ request is no longer in the collision hash. */
+ /* In Protocol B we might already have got a P_RECV_ACK
+ but then get a P_NEG_ACK afterwards. */
+ drbd_set_out_of_sync(mdev, sector, size);
}
- __req_mod(req, neg_acked, &m);
- spin_unlock_irq(&mdev->req_lock);
-
- if (m.bio)
- complete_master_bio(mdev, &m);
- return true;
+ return 0;
}
-static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
sector_t sector = be64_to_cpu(p->sector);
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
- dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
+
+ dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
(unsigned long long)sector, be32_to_cpu(p->blksize));
return validate_req_change_req_state(mdev, p->block_id, sector,
- _ar_id_to_req, __func__ , neg_acked);
+ &mdev->read_requests, __func__,
+ NEG_ACKED, false);
}
-static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
+ struct drbd_conf *mdev;
sector_t sector;
int size;
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct p_block_ack *p = pi->data;
+
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4478,57 +5093,66 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
- switch (be16_to_cpu(h->command)) {
+ switch (pi->cmd) {
case P_NEG_RS_DREPLY:
drbd_rs_failed_io(mdev, sector, size);
case P_RS_CANCEL:
break;
default:
- D_ASSERT(0);
- put_ldev(mdev);
- return false;
+ BUG();
}
put_ldev(mdev);
}
- return true;
+ return 0;
}
-static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_barrier_ack *p = (struct p_barrier_ack *)h;
-
- tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
-
- if (mdev->state.conn == C_AHEAD &&
- atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
- mdev->start_resync_timer.expires = jiffies + HZ;
- add_timer(&mdev->start_resync_timer);
+ struct p_barrier_ack *p = pi->data;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.conn == C_AHEAD &&
+ atomic_read(&mdev->ap_in_flight) == 0 &&
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ mdev->start_resync_timer.expires = jiffies + HZ;
+ add_timer(&mdev->start_resync_timer);
+ }
}
+ rcu_read_unlock();
- return true;
+ return 0;
}
-static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
+static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
- struct p_block_ack *p = (struct p_block_ack *)h;
+ struct drbd_conf *mdev;
+ struct p_block_ack *p = pi->data;
struct drbd_work *w;
sector_t sector;
int size;
+ mdev = vnr_to_mdev(tconn, pi->vnr);
+ if (!mdev)
+ return -EIO;
+
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
if (!get_ldev(mdev))
- return true;
+ return 0;
drbd_rs_complete_io(mdev, sector);
dec_rs_pending(mdev);
@@ -4543,114 +5167,137 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
- drbd_queue_work_front(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
}
put_ldev(mdev);
- return true;
+ return 0;
+}
+
+static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
+{
+ return 0;
}
-static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
+static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
- return true;
+ struct drbd_conf *mdev;
+ int vnr, not_empty = 0;
+
+ do {
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
+ flush_signals(current);
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ if (drbd_finish_peer_reqs(mdev)) {
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ return 1;
+ }
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
+ }
+ set_bit(SIGNAL_ASENDER, &tconn->flags);
+
+ spin_lock_irq(&tconn->req_lock);
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ not_empty = !list_empty(&mdev->done_ee);
+ if (not_empty)
+ break;
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ rcu_read_unlock();
+ } while (not_empty);
+
+ return 0;
}
struct asender_cmd {
size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
+ int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};
-static struct asender_cmd *get_asender_cmd(int cmd)
-{
- static struct asender_cmd asender_tbl[] = {
- /* anything missing from this table is in
- * the drbd_cmd_handler (drbd_default_handler) table,
- * see the beginning of drbdd() */
- [P_PING] = { sizeof(struct p_header80), got_Ping },
- [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
+static struct asender_cmd asender_tbl[] = {
+ [P_PING] = { 0, got_Ping },
+ [P_PING_ACK] = { 0, got_PingAck },
[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
- [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
+ [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
- [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
+ [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
- [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
- [P_MAX_CMD] = { 0, NULL },
- };
- if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
- return NULL;
- return &asender_tbl[cmd];
-}
+ [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
+ [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
+ [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
+};
int drbd_asender(struct drbd_thread *thi)
{
- struct drbd_conf *mdev = thi->mdev;
- struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+ struct drbd_tconn *tconn = thi->tconn;
struct asender_cmd *cmd = NULL;
-
- int rv, len;
- void *buf = h;
+ struct packet_info pi;
+ int rv;
+ void *buf = tconn->meta.rbuf;
int received = 0;
- int expect = sizeof(struct p_header80);
- int empty;
- int ping_timeout_active = 0;
-
- sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
+ unsigned int header_size = drbd_header_size(tconn);
+ int expect = header_size;
+ bool ping_timeout_active = false;
+ struct net_conf *nc;
+ int ping_timeo, tcp_cork, ping_int;
current->policy = SCHED_RR; /* Make this a realtime task! */
current->rt_priority = 2; /* more important than all other tasks */
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
- if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
- ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
- mdev->meta.socket->sk->sk_rcvtimeo =
- mdev->net_conf->ping_timeo*HZ/10;
- ping_timeout_active = 1;
- }
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
- /* conditionally cork;
- * it may hurt latency if we cork without much to send */
- if (!mdev->net_conf->no_cork &&
- 3 < atomic_read(&mdev->unacked_cnt))
- drbd_tcp_cork(mdev->meta.socket);
- while (1) {
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
- flush_signals(current);
- if (!drbd_process_done_ee(mdev))
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ ping_timeo = nc->ping_timeo;
+ tcp_cork = nc->tcp_cork;
+ ping_int = nc->ping_int;
+ rcu_read_unlock();
+
+ if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
+ if (drbd_send_ping(tconn)) {
+ conn_err(tconn, "drbd_send_ping has failed\n");
goto reconnect;
- /* to avoid race with newly queued ACKs */
- set_bit(SIGNAL_ASENDER, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
- empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
- /* new ack may have been queued right here,
- * but then there is also a signal pending,
- * and we start over... */
- if (empty)
- break;
+ }
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
+ ping_timeout_active = true;
+ }
+
+ /* TODO: conditionally cork; it may hurt latency if we cork without
+ much to send */
+ if (tcp_cork)
+ drbd_tcp_cork(tconn->meta.socket);
+ if (tconn_finish_peer_reqs(tconn)) {
+ conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
+ goto reconnect;
}
/* but unconditionally uncork unless disabled */
- if (!mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->meta.socket);
+ if (tcp_cork)
+ drbd_tcp_uncork(tconn->meta.socket);
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(mdev, mdev->meta.socket,
- buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
flush_signals(current);
@@ -4668,80 +5315,91 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
- dev_err(DEV, "meta connection shut down by peer.\n");
+ if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
+ long t;
+ rcu_read_lock();
+ t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
+ rcu_read_unlock();
+
+ t = wait_event_timeout(tconn->ping_wait,
+ tconn->cstate < C_WF_REPORT_PARAMS,
+ t);
+ if (t)
+ break;
+ }
+ conn_err(tconn, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(mdev->last_received,
- jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(tconn->last_received,
+ jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
- dev_err(DEV, "PingAck did not arrive in time.\n");
+ conn_err(tconn, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &mdev->flags);
+ set_bit(SEND_PING, &tconn->flags);
continue;
} else if (rv == -EINTR) {
continue;
} else {
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ conn_err(tconn, "sock_recvmsg returned %d\n", rv);
goto reconnect;
}
if (received == expect && cmd == NULL) {
- if (unlikely(h->magic != BE_DRBD_MAGIC)) {
- dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ if (decode_header(tconn, tconn->meta.rbuf, &pi))
goto reconnect;
- }
- cmd = get_asender_cmd(be16_to_cpu(h->command));
- len = be16_to_cpu(h->length);
- if (unlikely(cmd == NULL)) {
- dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
- be32_to_cpu(h->magic),
- be16_to_cpu(h->command),
- be16_to_cpu(h->length));
+ cmd = &asender_tbl[pi.cmd];
+ if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
+ conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+ cmdname(pi.cmd), pi.cmd);
goto disconnect;
}
- expect = cmd->pkt_size;
- ERR_IF(len != expect-sizeof(struct p_header80))
+ expect = header_size + cmd->pkt_size;
+ if (pi.size != expect - header_size) {
+ conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
+ pi.cmd, pi.size);
goto reconnect;
+ }
}
if (received == expect) {
- mdev->last_received = jiffies;
- D_ASSERT(cmd != NULL);
- if (!cmd->process(mdev, h))
+ bool err;
+
+ err = cmd->fn(tconn, &pi);
+ if (err) {
+ conn_err(tconn, "%pf failed\n", cmd->fn);
goto reconnect;
+ }
+
+ tconn->last_received = jiffies;
- /* the idle_timeout (ping-int)
- * has been restored in got_PingAck() */
- if (cmd == get_asender_cmd(P_PING_ACK))
- ping_timeout_active = 0;
+ if (cmd == &asender_tbl[P_PING_ACK]) {
+ /* restore idle timeout */
+ tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
+ ping_timeout_active = false;
+ }
- buf = h;
+ buf = tconn->meta.rbuf;
received = 0;
- expect = sizeof(struct p_header80);
+ expect = header_size;
cmd = NULL;
}
}
if (0) {
reconnect:
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+ conn_md_sync(tconn);
}
if (0) {
disconnect:
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- drbd_md_sync(mdev);
+ conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
}
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ clear_bit(SIGNAL_ASENDER, &tconn->flags);
- D_ASSERT(mdev->state.conn < C_CONNECTED);
- dev_info(DEV, "asender terminated\n");
+ conn_info(tconn, "asender terminated\n");
return 0;
}
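The asender loop above dispatches every meta packet through asender_tbl: the command number indexes a table of expected payload sizes and handler functions, and anything out of range or without a handler forces a disconnect. Here is a compact sketch of that table-driven dispatch, separate from the patch, with invented command numbers and handlers.

#include <stdio.h>
#include <stddef.h>

struct handler {
	size_t pkt_size;		/* expected payload size for this command */
	int (*fn)(int payload);		/* NULL means "unknown command" */
};

static int handle_ping(int payload) { (void)payload; return 0; }
static int handle_ack(int payload)  { return payload >= 0 ? 0 : -1; }

static struct handler table[] = {
	[0] = { 0, handle_ping },
	[3] = { 4, handle_ack },
};

static int dispatch(unsigned int cmd, int payload)
{
	if (cmd >= sizeof(table) / sizeof(table[0]) || !table[cmd].fn)
		return -1;		/* unexpected command: caller disconnects */
	return table[cmd].fn(payload);
}

int main(void)
{
	/* known ping, known ack, and a command with no handler */
	printf("%d %d %d\n", dispatch(0, 0), dispatch(3, 7), dispatch(2, 0));
	return 0;
}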
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 01b2ac641c7b..2b8303ad63c9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -31,6 +31,8 @@
#include "drbd_req.h"
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
+
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
@@ -40,6 +42,8 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ (void) cpu; /* The macro invocations above want the cpu argument; silence
+ the compiler warning about cpu being assigned but never used. */
part_inc_in_flight(&mdev->vdisk->part0, rw);
part_stat_unlock();
}
@@ -57,9 +61,51 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
part_stat_unlock();
}
-static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
+static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
+ struct bio *bio_src)
+{
+ struct drbd_request *req;
+
+ req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+ if (!req)
+ return NULL;
+
+ drbd_req_make_private_bio(req, bio_src);
+ req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
+ req->w.mdev = mdev;
+ req->master_bio = bio_src;
+ req->epoch = 0;
+
+ drbd_clear_interval(&req->i);
+ req->i.sector = bio_src->bi_sector;
+ req->i.size = bio_src->bi_size;
+ req->i.local = true;
+ req->i.waiting = false;
+
+ INIT_LIST_HEAD(&req->tl_requests);
+ INIT_LIST_HEAD(&req->w.list);
+
+ /* one reference to be put by __drbd_make_request */
+ atomic_set(&req->completion_ref, 1);
+ /* one kref as long as completion_ref > 0 */
+ kref_init(&req->kref);
+ return req;
+}
+
+void drbd_req_destroy(struct kref *kref)
{
- const unsigned long s = req->rq_state;
+ struct drbd_request *req = container_of(kref, struct drbd_request, kref);
+ struct drbd_conf *mdev = req->w.mdev;
+ const unsigned s = req->rq_state;
+
+ if ((req->master_bio && !(s & RQ_POSTPONED)) ||
+ atomic_read(&req->completion_ref) ||
+ (s & RQ_LOCAL_PENDING) ||
+ ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
+ dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
+ s, atomic_read(&req->completion_ref));
+ return;
+ }
/* remove it from the transfer log.
* well, only if it had been there in the first
@@ -67,24 +113,33 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* and never sent), it should still be "empty" as
* initialized in drbd_req_new(), so we can list_del() it
* here unconditionally */
- list_del(&req->tl_requests);
+ list_del_init(&req->tl_requests);
/* if it was a write, we may have to set the corresponding
* bit(s) out-of-sync first. If it had a local part, we need to
* release the reference to the activity log. */
- if (rw == WRITE) {
+ if (s & RQ_WRITE) {
/* Set out-of-sync unless both OK flags are set
* (local only or remote failed).
* Other places where we set out-of-sync:
* READ with local io-error */
- if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
- drbd_set_out_of_sync(mdev, req->sector, req->size);
- if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
- drbd_set_in_sync(mdev, req->sector, req->size);
+ /* There is a special case:
+ * we may notice late that IO was suspended,
+ * and postpone, or schedule for retry, a write,
+ * before it even was submitted or sent.
+ * In that case we do not want to touch the bitmap at all.
+ */
+ if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
+ if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+
+ if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
+ drbd_set_in_sync(mdev, req->i.sector, req->i.size);
+ }
/* one might be tempted to move the drbd_al_complete_io
- * to the local io completion callback drbd_endio_pri.
+ * to the local io completion callback drbd_request_endio.
* but, if this was a mirror write, we may only
* drbd_al_complete_io after this is RQ_NET_DONE,
* otherwise the extent could be dropped from the al
@@ -93,109 +148,35 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* but after the extent has been dropped from the al,
* we would forget to resync the corresponding extent.
*/
- if (s & RQ_LOCAL_MASK) {
+ if (s & RQ_IN_ACT_LOG) {
if (get_ldev_if_state(mdev, D_FAILED)) {
- if (s & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, req->sector);
+ drbd_al_complete_io(mdev, &req->i);
put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
- dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
- "but my Disk seems to have failed :(\n",
- (unsigned long long) req->sector);
+ dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
+ "but my Disk seems to have failed :(\n",
+ (unsigned long long) req->i.sector, req->i.size);
}
}
}
- drbd_req_free(req);
+ mempool_free(req, drbd_request_mempool);
}
-static void queue_barrier(struct drbd_conf *mdev)
-{
- struct drbd_tl_epoch *b;
-
- /* We are within the req_lock. Once we queued the barrier for sending,
- * we set the CREATE_BARRIER bit. It is cleared as soon as a new
- * barrier/epoch object is added. This is the only place this bit is
- * set. It indicates that the barrier for this epoch is already queued,
- * and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
- return;
-
- b = mdev->newest_tle;
- b->w.cb = w_send_barrier;
- /* inc_ap_pending done here, so we won't
- * get imbalanced on connection loss.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in tl_clear. */
- inc_ap_pending(mdev);
- drbd_queue_work(&mdev->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+static void wake_all_senders(struct drbd_tconn *tconn) {
+ wake_up(&tconn->sender_work.q_wait);
}
-static void _about_to_complete_local_write(struct drbd_conf *mdev,
- struct drbd_request *req)
+/* must hold resource->req_lock */
+void start_new_tl_epoch(struct drbd_tconn *tconn)
{
- const unsigned long s = req->rq_state;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
-
- /* Before we can signal completion to the upper layers,
- * we may need to close the current epoch.
- * We can skip this, if this request has not even been sent, because we
- * did not have a fully established connection yet/anymore, during
- * bitmap exchange, or while we are C_AHEAD due to congestion policy.
- */
- if (mdev->state.conn >= C_CONNECTED &&
- (s & RQ_NET_SENT) != 0 &&
- req->epoch == mdev->newest_tle->br_number)
- queue_barrier(mdev);
-
- /* we need to do the conflict detection stuff,
- * if we have the ee_hash (two_primaries) and
- * this has been on the network */
- if ((s & RQ_NET_DONE) && mdev->ee_hash != NULL) {
- const sector_t sector = req->sector;
- const int size = req->size;
-
- /* ASSERT:
- * there must be no conflicting requests, since
- * they must have been failed on the spot */
-#define OVERLAPS overlaps(sector, size, i->sector, i->size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
- "other: %p %llus +%u\n",
- req, (unsigned long long)sector, size,
- i, (unsigned long long)i->sector, i->size);
- }
- }
+ /* no point closing an epoch, if it is empty, anyways. */
+ if (tconn->current_tle_writes == 0)
+ return;
- /* maybe "wake" those conflicting epoch entries
- * that wait for this request to finish.
- *
- * currently, there can be only _one_ such ee
- * (well, or some more, which would be pending
- * P_DISCARD_ACK not yet sent by the asender...),
- * since we block the receiver thread upon the
- * first conflict detection, which will wait on
- * misc_wait. maybe we want to assert that?
- *
- * anyways, if we found one,
- * we just have to do a wake_up. */
-#undef OVERLAPS
-#define OVERLAPS overlaps(sector, size, e->sector, e->size)
- slot = ee_hash_slot(mdev, req->sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- wake_up(&mdev->misc_wait);
- break;
- }
- }
- }
-#undef OVERLAPS
+ tconn->current_tle_writes = 0;
+ atomic_inc(&tconn->current_tle_nr);
+ wake_all_senders(tconn);
}
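start_new_tl_epoch() above closes the current transfer-log epoch only if it actually saw writes: the per-epoch write counter is reset and the epoch number bumped so later requests land in a new epoch. A minimal sketch of that counter pair, with names invented for the illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int current_epoch_nr = 1;	/* stands in for current_tle_nr */
static int current_epoch_writes;	/* stands in for current_tle_writes */

static void close_epoch_if_nonempty(void)
{
	if (current_epoch_writes == 0)
		return;			/* empty epoch: nothing to close */
	current_epoch_writes = 0;
	atomic_fetch_add(&current_epoch_nr, 1);
}

int main(void)
{
	current_epoch_writes = 3;	/* three writes went into epoch 1 */
	close_epoch_if_nonempty();	/* closes epoch 1, opens epoch 2 */
	close_epoch_if_nonempty();	/* empty: no new epoch */
	printf("epoch %d, pending writes %d\n",
	       atomic_load(&current_epoch_nr), current_epoch_writes);
	return 0;
}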
void complete_master_bio(struct drbd_conf *mdev,
@@ -205,17 +186,33 @@ void complete_master_bio(struct drbd_conf *mdev,
dec_ap_bio(mdev);
}
+
+static void drbd_remove_request_interval(struct rb_root *root,
+ struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i = &req->i;
+
+ drbd_remove_interval(root, i);
+
+ /* Wake up any processes waiting for this request to complete. */
+ if (i->waiting)
+ wake_up(&mdev->misc_wait);
+}
+
/* Helper for __req_mod().
* Set m->bio to the master bio, if it is fit to be completed,
* or leave it alone (it is initialized to NULL in __req_mod),
* if it has already been completed, or cannot be completed yet.
* If m->bio is set, the error status to be returned is placed in m->error.
*/
-void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
+static
+void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
- const unsigned long s = req->rq_state;
- struct drbd_conf *mdev = req->mdev;
- int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
+ const unsigned s = req->rq_state;
+ struct drbd_conf *mdev = req->w.mdev;
+ int rw;
+ int error, ok;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -226,165 +223,220 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
* the receiver,
* the bio_endio completion callbacks.
*/
- if (s & RQ_NET_QUEUED)
- return;
- if (s & RQ_NET_PENDING)
+ if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
+ (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
+ (s & RQ_COMPLETION_SUSP)) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
return;
- if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
+ }
+
+ if (!req->master_bio) {
+ dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
return;
+ }
- if (req->master_bio) {
- /* this is data_received (remote read)
- * or protocol C P_WRITE_ACK
- * or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
- * or canceled or failed,
- * or killed from the transfer log due to connection loss.
- */
+ rw = bio_rw(req->master_bio);
- /*
- * figure out whether to report success or failure.
- *
- * report success when at least one of the operations succeeded.
- * or, to put the other way,
- * only report failure, when both operations failed.
- *
- * what to do about the failures is handled elsewhere.
- * what we need to do here is just: complete the master_bio.
- *
- * local completion error, if any, has been stored as ERR_PTR
- * in private_bio within drbd_endio_pri.
- */
- int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
- int error = PTR_ERR(req->private_bio);
+ /*
+ * figure out whether to report success or failure.
+ *
+ * report success when at least one of the operations succeeded.
+ * or, to put the other way,
+ * only report failure, when both operations failed.
+ *
+ * what to do about the failures is handled elsewhere.
+ * what we need to do here is just: complete the master_bio.
+ *
+ * local completion error, if any, has been stored as ERR_PTR
+ * in private_bio within drbd_request_endio.
+ */
+ ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
+ error = PTR_ERR(req->private_bio);
- /* remove the request from the conflict detection
- * respective block_id verification hash */
- if (!hlist_unhashed(&req->collision))
- hlist_del(&req->collision);
- else
- D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+ /* remove the request from the conflict detection
+ * respective block_id verification hash */
+ if (!drbd_interval_empty(&req->i)) {
+ struct rb_root *root;
- /* for writes we need to do some extra housekeeping */
if (rw == WRITE)
- _about_to_complete_local_write(mdev, req);
+ root = &mdev->write_requests;
+ else
+ root = &mdev->read_requests;
+ drbd_remove_request_interval(root, req);
+ } else if (!(s & RQ_POSTPONED))
+ D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
- /* Update disk stats */
- _drbd_end_io_acct(mdev, req);
+ /* Before we can signal completion to the upper layers,
+ * we may need to close the current transfer log epoch.
+ * We are within the request lock, so we can simply compare
+ * the request epoch number with the current transfer log
+ * epoch number. If they match, increase the current_tle_nr,
+ * and reset the transfer log epoch write_cnt.
+ */
+ if (rw == WRITE &&
+ req->epoch == atomic_read(&mdev->tconn->current_tle_nr))
+ start_new_tl_epoch(mdev->tconn);
+
+ /* Update disk stats */
+ _drbd_end_io_acct(mdev, req);
+
+ /* If READ failed,
+ * have it be pushed back to the retry work queue,
+ * so it will re-enter __drbd_make_request(),
+ * and be re-assigned to a suitable local or remote path,
+ * or failed if we do not have access to good data anymore.
+ *
+ * Unless it was failed early by __drbd_make_request(),
+ * because no path was available, in which case
+ * it was not even added to the transfer_log.
+ *
+ * READA may fail, and will not be retried.
+ *
+ * WRITE should have used all available paths already.
+ */
+ if (!ok && rw == READ && !list_empty(&req->tl_requests))
+ req->rq_state |= RQ_POSTPONED;
+ if (!(req->rq_state & RQ_POSTPONED)) {
m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio;
req->master_bio = NULL;
}
+}
- if (s & RQ_LOCAL_PENDING)
- return;
+static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+
+ if (!atomic_sub_and_test(put, &req->completion_ref))
+ return 0;
- if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
- /* this is disconnected (local only) operation,
- * or protocol C P_WRITE_ACK,
- * or protocol A or B P_BARRIER_ACK,
- * or killed from the transfer log due to connection loss. */
- _req_is_done(mdev, req, rw);
+ drbd_req_complete(req, m);
+
+ if (req->rq_state & RQ_POSTPONED) {
+ /* don't destroy the req object just yet,
+ * but queue it for retry */
+ drbd_restart_request(req);
+ return 0;
}
- /* else: network part and not DONE yet. that is
- * protocol A or B, barrier ack still pending... */
+
+ return 1;
}
-static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
+/* I'd like this to be the only place that manipulates
+ * req->completion_ref and req->kref. */
+static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
+ int clear, int set)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
+ unsigned s = req->rq_state;
+ int c_put = 0;
+ int k_put = 0;
- if (!is_susp(mdev->state))
- _req_may_be_done(req, m);
-}
+ if (drbd_suspended(mdev) && !((s | clear) & RQ_COMPLETION_SUSP))
+ set |= RQ_COMPLETION_SUSP;
-/*
- * checks whether there was an overlapping request
- * or ee already registered.
- *
- * if so, return 1, in which case this request is completed on the spot,
- * without ever being submitted or send.
- *
- * return 0 if it is ok to submit this request.
- *
- * NOTE:
- * paranoia: assume something above us is broken, and issues different write
- * requests for the same block simultaneously...
- *
- * To ensure these won't be reordered differently on both nodes, resulting in
- * diverging data sets, we discard the later one(s). Not that this is supposed
- * to happen, but this is the rationale why we also have to check for
- * conflicting requests with local origin, and why we have to do so regardless
- * of whether we allowed multiple primaries.
- *
- * BTW, in case we only have one primary, the ee_hash is empty anyways, and the
- * second hlist_for_each_entry becomes a noop. This is even simpler than to
- * grab a reference on the net_conf, and check for the two_primaries flag...
- */
-static int _req_conflicts(struct drbd_request *req)
-{
- struct drbd_conf *mdev = req->mdev;
- const sector_t sector = req->sector;
- const int size = req->size;
- struct drbd_request *i;
- struct drbd_epoch_entry *e;
- struct hlist_node *n;
- struct hlist_head *slot;
+ /* apply */
- D_ASSERT(hlist_unhashed(&req->collision));
+ req->rq_state &= ~clear;
+ req->rq_state |= set;
- if (!get_net_conf(mdev))
- return 0;
+ /* no change? */
+ if (req->rq_state == s)
+ return;
- /* BUG_ON */
- ERR_IF (mdev->tl_hash_s == 0)
- goto out_no_conflict;
- BUG_ON(mdev->tl_hash == NULL);
-
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
- slot = tl_hash_slot(mdev, sector);
- hlist_for_each_entry(i, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent local write detected! "
- "[DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)i->sector, i->size);
- goto out_conflict;
- }
+ /* intent: get references */
+
+ if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
+ inc_ap_pending(mdev);
+ atomic_inc(&req->completion_ref);
}
- if (mdev->ee_hash_s) {
- /* now, check for overlapping requests with remote origin */
- BUG_ON(mdev->ee_hash == NULL);
-#undef OVERLAPS
-#define OVERLAPS overlaps(e->sector, e->size, sector, size)
- slot = ee_hash_slot(mdev, sector);
- hlist_for_each_entry(e, n, slot, collision) {
- if (OVERLAPS) {
- dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
- " [DISCARD L] new: %llus +%u; "
- "pending: %llus +%u\n",
- current->comm, current->pid,
- (unsigned long long)sector, size,
- (unsigned long long)e->sector, e->size);
- goto out_conflict;
- }
- }
+ if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED))
+ atomic_inc(&req->completion_ref);
+
+ if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
+ kref_get(&req->kref); /* wait for the DONE */
+
+ if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT))
+ atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
+
+ if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
+ atomic_inc(&req->completion_ref);
+
+ /* progress: put references */
+
+ if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
+ ++c_put;
+
+ if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
+ D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+ /* local completion may still come in later,
+ * we need to keep the req object around. */
+ kref_get(&req->kref);
+ ++c_put;
+ }
+
+ if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
+ if (req->rq_state & RQ_LOCAL_ABORTED)
+ ++k_put;
+ else
+ ++c_put;
}
-#undef OVERLAPS
-out_no_conflict:
- /* this is like it should be, and what we expected.
- * our users do behave after all... */
- put_net_conf(mdev);
- return 0;
+ if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
+ dec_ap_pending(mdev);
+ ++c_put;
+ }
-out_conflict:
- put_net_conf(mdev);
- return 1;
+ if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED))
+ ++c_put;
+
+ if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
+ if (req->rq_state & RQ_NET_SENT)
+ atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
+ ++k_put;
+ }
+
+ /* potentially complete and destroy */
+
+ if (k_put || c_put) {
+ /* Completion does its own kref_put. If we are going to
+ * kref_sub below, we need req to be still around then. */
+ int at_least = k_put + !!c_put;
+ int refcount = atomic_read(&req->kref.refcount);
+ if (refcount < at_least)
+ dev_err(DEV,
+ "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
+ s, req->rq_state, refcount, at_least);
+ }
+
+ /* If we made progress, retry conflicting peer requests, if any. */
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+
+ if (c_put)
+ k_put += drbd_req_put_completion_ref(req, m, c_put);
+ if (k_put)
+ kref_sub(&req->kref, k_put, drbd_req_destroy);
+}
+
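mod_rq_state() above juggles two counters per request: completion_ref counts the outstanding reasons the master bio may not be completed yet, while a separate reference count keeps the request object itself alive until the last user drops it. The reduced sketch below illustrates that two-counter lifetime scheme outside the patch; every name is invented for the illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_req {
	atomic_int completion_ref;	/* pending reasons not to complete */
	atomic_int refcount;		/* object lifetime */
	int completed;
};

static void toy_req_put_ref(struct toy_req *req)
{
	if (atomic_fetch_sub(&req->refcount, 1) == 1) {
		printf("destroying request\n");
		free(req);
	}
}

static void toy_req_put_completion(struct toy_req *req)
{
	if (atomic_fetch_sub(&req->completion_ref, 1) == 1) {
		req->completed = 1;
		printf("completing master bio\n");
		toy_req_put_ref(req);	/* drop the reference held while pending */
	}
}

int main(void)
{
	struct toy_req *req = malloc(sizeof(*req));

	atomic_init(&req->completion_ref, 2);	/* e.g. local I/O + network ack */
	atomic_init(&req->refcount, 1);
	toy_req_put_completion(req);		/* local I/O done */
	toy_req_put_completion(req);		/* ack received: completes, frees */
	return 0;
}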
+static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ char b[BDEVNAME_SIZE];
+
+ if (!__ratelimit(&drbd_ratelimit_state))
+ return;
+
+ dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
+ (unsigned long long)req->i.sector,
+ req->i.size >> 9,
+ bdevname(mdev->ldev->backing_bdev, b));
}
/* obviously this could be coded as many single functions
@@ -402,9 +454,12 @@ out_conflict:
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
- struct drbd_conf *mdev = req->mdev;
- int rv = 0;
- m->bio = NULL;
+ struct drbd_conf *mdev = req->w.mdev;
+ struct net_conf *nc;
+ int p, rv = 0;
+
+ if (m)
+ m->bio = NULL;
switch (what) {
default:
@@ -413,116 +468,91 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* does not happen...
* initialization done in drbd_req_new
- case created:
+ case CREATED:
break;
*/
- case to_be_send: /* via network */
- /* reached via drbd_make_request_common
+ case TO_BE_SENT: /* via network */
+ /* reached via __drbd_make_request
* and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->wire_protocol;
+ rcu_read_unlock();
+ req->rq_state |=
+ p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
+ p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
+ mod_rq_state(req, m, 0, RQ_NET_PENDING);
break;
- case to_be_submitted: /* locally */
- /* reached via drbd_make_request_common */
+ case TO_BE_SUBMITTED: /* locally */
+ /* reached via __drbd_make_request */
D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
- req->rq_state |= RQ_LOCAL_PENDING;
+ mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
break;
- case completed_ok:
+ case COMPLETED_OK:
if (req->rq_state & RQ_WRITE)
- mdev->writ_cnt += req->size>>9;
+ mdev->writ_cnt += req->i.size >> 9;
else
- mdev->read_cnt += req->size>>9;
+ mdev->read_cnt += req->i.size >> 9;
- req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING,
+ RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
break;
- case abort_disk_io:
- req->rq_state |= RQ_LOCAL_ABORTED;
- if (req->rq_state & RQ_WRITE)
- _req_may_be_done_not_susp(req, m);
- else
- goto goto_queue_for_net_read;
+ case ABORT_DISK_IO:
+ mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
break;
- case write_completed_with_error:
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- _req_may_be_done_not_susp(req, m);
+ case WRITE_COMPLETED_WITH_ERROR:
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_ahead_completed_with_error:
- /* it is legal to fail READA */
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
- _req_may_be_done_not_susp(req, m);
+ case READ_COMPLETED_WITH_ERROR:
+ drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ /* fall through. */
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
+ /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
- case read_completed_with_error:
- drbd_set_out_of_sync(mdev, req->sector, req->size);
-
- req->rq_state |= RQ_LOCAL_COMPLETED;
- req->rq_state &= ~RQ_LOCAL_PENDING;
-
- if (req->rq_state & RQ_LOCAL_ABORTED) {
- _req_may_be_done(req, m);
- break;
- }
-
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
-
- goto_queue_for_net_read:
-
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
-
- /* no point in retrying if there is no good remote data,
- * or we have no connection. */
- if (mdev->state.pdsk != D_UP_TO_DATE) {
- _req_may_be_done_not_susp(req, m);
- break;
- }
-
- /* _req_mod(req,to_be_send); oops, recursion... */
- req->rq_state |= RQ_NET_PENDING;
- inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
-
- case queue_for_net_read:
+ case QUEUE_FOR_NET_READ:
/* READ or READA, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
- /* from drbd_make_request_common
+ /* from __drbd_make_request
* or from bio_endio during read io-error recovery */
- /* so we can verify the handle in the answer packet
- * corresponding hlist_del is in _req_may_be_done() */
- hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
+ /* So we can verify the handle in the answer packet.
+ * Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->read_requests, &req->i);
set_bit(UNPLUG_REMOTE, &mdev->flags);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
- ? w_read_retry_remote
- : w_send_read_req;
- drbd_queue_work(&mdev->data.work, &req->w);
+ D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_read_req;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
/* assert something? */
- /* from drbd_make_request_common only */
+ /* from __drbd_make_request only */
- hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
- /* corresponding hlist_del is in _req_may_be_done() */
+ /* Corresponding drbd_remove_request_interval is in
+ * drbd_req_complete() */
+ D_ASSERT(drbd_interval_empty(&req->i));
+ drbd_insert_interval(&mdev->write_requests, &req->i);
/* NOTE
* In case the req ended up on the transfer log before being
@@ -533,7 +563,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
*
* _req_add_to_epoch(req); this has to be after the
* _maybe_start_new_epoch(req); which happened in
- * drbd_make_request_common, because we now may set the bit
+ * __drbd_make_request, because we now may set the bit
* again ourselves to close the current epoch.
*
* Add req to the (now) current epoch (barrier). */
@@ -543,202 +573,187 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* hurting performance. */
set_bit(UNPLUG_REMOTE, &mdev->flags);
- /* see drbd_make_request_common,
- * just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
-
- req->epoch = mdev->newest_tle->br_number;
-
- /* increment size of current epoch */
- mdev->newest_tle->n_writes++;
-
/* queue work item to send data */
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- req->rq_state |= RQ_NET_QUEUED;
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
- if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
- queue_barrier(mdev);
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ p = nc->max_epoch_size;
+ rcu_read_unlock();
+ if (mdev->tconn->current_tle_writes >= p)
+ start_new_tl_epoch(mdev->tconn);
break;
- case queue_for_send_oos:
- req->rq_state |= RQ_NET_QUEUED;
- req->w.cb = w_send_oos;
- drbd_queue_work(&mdev->data.work, &req->w);
+ case QUEUE_FOR_SEND_OOS:
+ mod_rq_state(req, m, 0, RQ_NET_QUEUED);
+ req->w.cb = w_send_out_of_sync;
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case read_retry_remote_canceled:
- case send_canceled:
- case send_failed:
+ case READ_RETRY_REMOTE_CANCELED:
+ case SEND_CANCELED:
+ case SEND_FAILED:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
- req->rq_state &= ~RQ_NET_QUEUED;
- /* if we did it right, tl_clear should be scheduled only after
- * this, so this should not be necessary! */
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, 0);
break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
/* assert something? */
- if (bio_data_dir(req->master_bio) == WRITE)
- atomic_add(req->size>>9, &mdev->ap_in_flight);
-
if (bio_data_dir(req->master_bio) == WRITE &&
- mdev->net_conf->wire_protocol == DRBD_PROT_A) {
+ !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
/* this is what is dangerous about protocol A:
* pretend it was successfully written on the peer. */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= RQ_NET_OK;
- } /* else: neg-ack was faster... */
+ if (req->rq_state & RQ_NET_PENDING)
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
+ /* else: neg-ack was faster... */
/* it is still not yet RQ_NET_DONE until the
* corresponding epoch barrier got acked as well,
* so we know what to dirty on connection loss */
}
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_SENT;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
/* Was not set PENDING, no longer QUEUED, so is now DONE
* as far as this connection is concerned. */
- req->rq_state &= ~RQ_NET_QUEUED;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
break;
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
/* transfer log cleanup after connection loss */
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING)
- dec_ap_pending(mdev);
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
- req->rq_state |= RQ_NET_DONE;
- if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
-
- /* if it is still queued, we may not complete it here.
- * it will be canceled soon. */
- if (!(req->rq_state & RQ_NET_QUEUED))
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m,
+ RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
+ RQ_NET_DONE);
break;
- case conflict_discarded_by_peer:
- /* for discarded conflicting writes of multiple primaries,
+ case CONFLICT_RESOLVED:
+ /* for superseded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
- * node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
- dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
- " DRBD is not a random data generator!\n",
- (unsigned long long)req->sector, req->size);
- req->rq_state |= RQ_NET_DONE;
- /* fall through */
- case write_acked_by_peer_and_sis:
- case write_acked_by_peer:
- if (what == write_acked_by_peer_and_sis)
- req->rq_state |= RQ_NET_SIS;
+ * node crashes are covered by the activity log.
+ *
+ * If this request had been marked as RQ_POSTPONED before,
+ * it will actually not be completed, but "restarted",
+ * resubmitted from the retry worker context. */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
+ break;
+
+ case WRITE_ACKED_BY_PEER_AND_SIS:
+ req->rq_state |= RQ_NET_SIS;
+ case WRITE_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
/* protocol C; successfully written on peer.
* Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
* for volatile write-back caches on lower level devices. */
- case recv_acked_by_peer:
+ goto ack_common;
+ case RECV_ACKED_BY_PEER:
+ D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
/* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
* protocol != C */
- req->rq_state |= RQ_NET_OK;
+ ack_common:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- req->rq_state &= ~RQ_NET_PENDING;
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
break;
- case neg_acked:
- /* assert something? */
- if (req->rq_state & RQ_NET_PENDING) {
- dec_ap_pending(mdev);
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
+ case POSTPONE_WRITE:
+ D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+ /* If this node has already detected the write conflict, the
+ * worker will be waiting on misc_wait. Wake it up once this
+ * request has completed locally.
+ */
+ D_ASSERT(req->rq_state & RQ_NET_PENDING);
+ req->rq_state |= RQ_POSTPONED;
+ if (req->i.waiting)
+ wake_up(&mdev->misc_wait);
+ /* Do not clear RQ_NET_PENDING. This request will make further
+ * progress via restart_conflicting_writes() or
+ * fail_postponed_requests(). Hopefully. */
+ break;
- req->rq_state |= RQ_NET_DONE;
- _req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ case NEG_ACKED:
+ mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
-
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
if (!(req->rq_state & RQ_LOCAL_COMPLETED))
break;
- req->rq_state &= ~RQ_LOCAL_COMPLETED;
+ mod_rq_state(req, m,
+ RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
+ RQ_LOCAL_PENDING);
rv = MR_READ;
if (bio_data_dir(req->master_bio) == WRITE)
rv = MR_WRITE;
- get_ldev(mdev);
+ get_ldev(mdev); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
- case resend:
+ case RESEND:
/* Simply complete (local only) READs. */
if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
- _req_may_be_done(req, m);
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
break;
}
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
- before the connection loss (B&C only); only P_BARRIER_ACK was missing.
- Trowing them out of the TL here by pretending we got a BARRIER_ACK
- We ensure that the peer was not rebooted */
+ before the connection loss (B&C only); only P_BARRIER_ACK
+ (or the local completion?) was missing when we suspended.
+ Throwing them out of the TL here by pretending we got a BARRIER_ACK.
+ During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
+ /* FIXME could this possibly be a req->w.cb == w_send_out_of_sync?
+ * in that case we must not set RQ_NET_PENDING. */
+
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) {
- drbd_queue_work(&mdev->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
- }
+ } /* else: FIXME can this happen? */
break;
}
- /* else, fall through to barrier_acked */
+ /* else, fall through to BARRIER_ACKED */
- case barrier_acked:
+ case BARRIER_ACKED:
+ /* barrier ack for READ requests does not make sense */
if (!(req->rq_state & RQ_WRITE))
break;
if (req->rq_state & RQ_NET_PENDING) {
- /* barrier came in before all requests have been acked.
+ /* barrier came in before all requests were acked.
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
- list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
}
- if ((req->rq_state & RQ_NET_MASK) != 0) {
- req->rq_state |= RQ_NET_DONE;
- if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
- atomic_sub(req->size>>9, &mdev->ap_in_flight);
- }
- _req_may_be_done(req, m); /* Allowed while state.susp */
+ /* Allowed to complete requests, even while suspended.
+ * As this is called for all requests within a matching epoch,
+ * we need to filter, and only set RQ_NET_DONE for those that
+ * have actually been on the wire. */
+ mod_rq_state(req, m, RQ_COMPLETION_SUSP,
+ (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
break;
- case data_received:
+ case DATA_RECEIVED:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
- dec_ap_pending(mdev);
- req->rq_state &= ~RQ_NET_PENDING;
- req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
- _req_may_be_done_not_susp(req, m);
+ mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
break;
};
@@ -752,75 +767,265 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
-static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
+static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
if (mdev->state.disk == D_UP_TO_DATE)
- return 1;
- if (mdev->state.disk >= D_OUTDATED)
- return 0;
- if (mdev->state.disk < D_INCONSISTENT)
- return 0;
- /* state.disk == D_INCONSISTENT We will have a look at the BitMap */
- nr_sectors = drbd_get_capacity(mdev->this_bdev);
+ return true;
+ if (mdev->state.disk != D_INCONSISTENT)
+ return false;
esector = sector + (size >> 9) - 1;
-
+ nr_sectors = drbd_get_capacity(mdev->this_bdev);
D_ASSERT(sector < nr_sectors);
D_ASSERT(esector < nr_sectors);
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
- return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
+ return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
+}
+
+static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
+ enum drbd_read_balancing rbm)
+{
+ struct backing_dev_info *bdi;
+ int stripe_shift;
+
+ switch (rbm) {
+ case RB_CONGESTED_REMOTE:
+ bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ return bdi_read_congested(bdi);
+ case RB_LEAST_PENDING:
+ return atomic_read(&mdev->local_cnt) >
+ atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
+ case RB_32K_STRIPING: /* stripe_shift = 15 */
+ case RB_64K_STRIPING:
+ case RB_128K_STRIPING:
+ case RB_256K_STRIPING:
+ case RB_512K_STRIPING:
+ case RB_1M_STRIPING: /* stripe_shift = 20 */
+ stripe_shift = (rbm - RB_32K_STRIPING + 15);
+ return (sector >> (stripe_shift - 9)) & 1;
+ case RB_ROUND_ROBIN:
+ return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
+ case RB_PREFER_REMOTE:
+ return true;
+ case RB_PREFER_LOCAL:
+ default:
+ return false;
+ }
+}
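The striping policies above reduce to plain arithmetic on the request sector: each enum step doubles the stripe size, and requests falling into odd-numbered stripes are served by the peer. A minimal standalone sketch of the 64K case (the function name and plain C types are illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* RB_64K_STRIPING: rbm - RB_32K_STRIPING == 1, so stripe_shift == 16 */
static bool read_from_peer_64k_striping(uint64_t sector)
{
	const int stripe_shift = 16;	/* 2^16 bytes == 64 KiB stripes */

	/* shift by (stripe_shift - 9) because sectors are 512 bytes */
	return (sector >> (stripe_shift - 9)) & 1;
}
/* sectors 0..127 are read locally, 128..255 from the peer, and so on */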
+
+/*
+ * complete_conflicting_writes - wait for any conflicting write requests
+ *
+ * The write_requests tree contains all active write requests which we
+ * currently know about. Wait for any requests to complete which conflict with
+ * the new one.
+ *
+ * Only way out: remove the conflicting intervals from the tree.
+ */
+static void complete_conflicting_writes(struct drbd_request *req)
+{
+ DEFINE_WAIT(wait);
+ struct drbd_conf *mdev = req->w.mdev;
+ struct drbd_interval *i;
+ sector_t sector = req->i.sector;
+ int size = req->i.size;
+
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ return;
+
+ for (;;) {
+ prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+ i = drbd_find_overlap(&mdev->write_requests, sector, size);
+ if (!i)
+ break;
+ /* Indicate to wake up device->misc_wait on progress. */
+ i->waiting = true;
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ schedule();
+ spin_lock_irq(&mdev->tconn->req_lock);
+ }
+ finish_wait(&mdev->misc_wait, &wait);
}
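complete_conflicting_writes() leans on drbd_find_overlap() to spot byte ranges that intersect a pending write. The interval test itself is equivalent to the overlaps() helper that this patch removes from drbd_req.h further down; a standalone sketch for reference (names illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Two requests, each given as (start sector, length in bytes), overlap
 * unless one ends at or before the sector where the other begins. */
static bool intervals_overlap(uint64_t s1, int l1, uint64_t s2, int l2)
{
	return !((s1 + (l1 >> 9) <= s2) || (s1 >= s2 + (l2 >> 9)));
}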
+/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_conf *mdev)
{
- int congested = 0;
+ struct drbd_tconn *tconn = mdev->tconn;
+ struct net_conf *nc;
+ bool congested = false;
+ enum drbd_on_congestion on_congestion;
+
+ nc = rcu_dereference(tconn->net_conf);
+ on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+ if (on_congestion == OC_BLOCK ||
+ tconn->agreed_pro_version < 96)
+ return;
/* If I don't even have good local storage, we can not reasonably try
* to pull ahead of the peer. We also need the local reference to make
* sure mdev->act_log is there.
- * Note: caller has to make sure that net_conf is there.
*/
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
return;
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ if (nc->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
+ congested = true;
}
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ if (mdev->act_log->used >= nc->cong_extents) {
dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
+ congested = true;
}
if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
+ /* start a new epoch for non-mirrored writes */
+ start_new_tl_epoch(mdev->tconn);
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ else /*nc->on_congestion == OC_DISCONNECT */
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
}
put_ldev(mdev);
}
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+/* If this returns false, and req->private_bio is still set,
+ * this should be submitted locally.
+ *
+ * If it returns false, but req->private_bio is not set,
+ * we do not have access to good data :(
+ *
+ * Otherwise, this destroys req->private_bio, if any,
+ * and returns true.
+ */
+static bool do_remote_read(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ enum drbd_read_balancing rbm;
+
+ if (req->private_bio) {
+ if (!drbd_may_do_local_read(mdev,
+ req->i.sector, req->i.size)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ }
+
+ if (mdev->state.pdsk != D_UP_TO_DATE)
+ return false;
+
+ if (req->private_bio == NULL)
+ return true;
+
+ /* TODO: improve read balancing decisions, take into account drbd
+ * protocol, pending requests etc. */
+
+ rcu_read_lock();
+ rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
+ rcu_read_unlock();
+
+ if (rbm == RB_PREFER_LOCAL && req->private_bio)
+ return false; /* submit locally */
+
+ if (remote_due_to_read_balancing(mdev, req->i.sector, rbm)) {
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+/* returns number of connections (== 1, for drbd 8.4)
+ * expected to actually write this data,
+ * which does NOT include those that we are L_AHEAD for. */
+static int drbd_process_write_request(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ int remote, send_oos;
+
+ rcu_read_lock();
+ remote = drbd_should_do_remote(mdev->state);
+ if (remote) {
+ maybe_pull_ahead(mdev);
+ remote = drbd_should_do_remote(mdev->state);
+ }
+ send_oos = drbd_should_send_out_of_sync(mdev->state);
+ rcu_read_unlock();
+
+ /* Need to replicate writes. Unless it is an empty flush,
+ * which is better mapped to a DRBD P_BARRIER packet,
+ * also for drbd wire protocol compatibility reasons.
+ * If this was a flush, just start a new epoch.
+ * Unless the current epoch was empty anyways, or we are not currently
+ * replicating, in which case there is no point. */
+ if (unlikely(req->i.size == 0)) {
+ /* The only size==0 bios we expect are empty flushes. */
+ D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+ if (remote)
+ start_new_tl_epoch(mdev->tconn);
+ return 0;
+ }
+
+ if (!remote && !send_oos)
+ return 0;
+
+ D_ASSERT(!(remote && send_oos));
+
+ if (remote) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_WRITE);
+ } else if (drbd_set_out_of_sync(mdev, req->i.sector, req->i.size))
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
+
+ return remote;
+}
+
+static void
+drbd_submit_req_private_bio(struct drbd_request *req)
+{
+ struct drbd_conf *mdev = req->w.mdev;
+ struct bio *bio = req->private_bio;
+ const int rw = bio_rw(bio);
+
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+
+ /* State may have changed since we grabbed our reference on the
+ * ->ldev member. Double check, and short-circuit to endio.
+ * In case the last activity log transaction failed to get on
+ * stable storage, and this is a WRITE, we may not even submit
+ * this bio. */
+ if (get_ldev(mdev)) {
+ if (drbd_insert_fault(mdev,
+ rw == WRITE ? DRBD_FAULT_DT_WR
+ : rw == READ ? DRBD_FAULT_DT_RD
+ : DRBD_FAULT_DT_RA))
+ bio_endio(bio, -EIO);
+ else
+ generic_make_request(bio);
+ put_ldev(mdev);
+ } else
+ bio_endio(bio, -EIO);
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
- const int size = bio->bi_size;
- const sector_t sector = bio->bi_sector;
- struct drbd_tl_epoch *b = NULL;
+ struct bio_and_error m = { NULL, };
struct drbd_request *req;
- int local, remote, send_oos = 0;
- int err = -EIO;
- int ret = 0;
- union drbd_state s;
+ bool no_remote = false;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -830,55 +1035,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* if user cannot handle io errors, that's not our business. */
dev_err(DEV, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
- return 0;
+ return;
}
req->start_time = start_time;
- local = get_ldev(mdev);
- if (!local) {
- bio_put(req->private_bio); /* or we get a bio leak */
+ if (!get_ldev(mdev)) {
+ bio_put(req->private_bio);
req->private_bio = NULL;
}
- if (rw == WRITE) {
- /* Need to replicate writes. Unless it is an empty flush,
- * which is better mapped to a DRBD P_BARRIER packet,
- * also for drbd wire protocol compatibility reasons. */
- if (unlikely(size == 0)) {
- /* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(bio->bi_rw & REQ_FLUSH);
- remote = 0;
- } else
- remote = 1;
- } else {
- /* READ || READA */
- if (local) {
- if (!drbd_may_do_local_read(mdev, sector, size)) {
- /* we could kick the syncer to
- * sync this extent asap, wait for
- * it, then continue locally.
- * Or just issue the request remotely.
- */
- local = 0;
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
- }
- }
- remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
- }
-
- /* If we have a disk, but a READA request is mapped to remote,
- * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
- * Just fail that READA request right here.
- *
- * THINK: maybe fail all READA when not local?
- * or make this configurable...
- * if network is slow, READA won't do any good.
- */
- if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
- err = -EWOULDBLOCK;
- goto fail_and_free_req;
- }
/* For WRITES going to the local disk, grab a reference on the target
* extent. This waits for any resync activity in the corresponding
@@ -887,348 +1051,131 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* of transactional on-disk meta data updates.
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
- if (rw == WRITE && local && size
+ if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
req->rq_state |= RQ_IN_ACT_LOG;
- drbd_al_begin_io(mdev, sector);
- }
-
- s = mdev->state;
- remote = remote && drbd_should_do_remote(s);
- send_oos = rw == WRITE && drbd_should_send_oos(s);
- D_ASSERT(!(remote && send_oos));
-
- if (!(local || remote) && !is_susp(mdev->state)) {
- if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- goto fail_free_complete;
+ drbd_al_begin_io(mdev, &req->i);
}
- /* For WRITE request, we have to make sure that we have an
- * unused_spare_tle, in case we need to start a new epoch.
- * I try to be smart and avoid to pre-allocate always "just in case",
- * but there is a race between testing the bit and pointer outside the
- * spinlock, and grabbing the spinlock.
- * if we lost that race, we retry. */
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
-allocate_barrier:
- b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
- if (!b) {
- dev_err(DEV, "Failed to alloc barrier.\n");
- err = -ENOMEM;
- goto fail_free_complete;
- }
+ spin_lock_irq(&mdev->tconn->req_lock);
+ if (rw == WRITE) {
+ /* This may temporarily give up the req_lock,
+	 * but will re-acquire it before it returns here.
+ * Needs to be before the check on drbd_suspended() */
+ complete_conflicting_writes(req);
}
- /* GOOD, everything prepared, grab the spin_lock */
- spin_lock_irq(&mdev->req_lock);
-
- if (is_susp(mdev->state)) {
- /* If we got suspended, use the retry mechanism of
- drbd_make_request() to restart processing of this
- bio. In the next call to drbd_make_request
- we sleep in inc_ap_bio() */
- ret = 1;
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
- }
+ /* no more giving up req_lock from now on! */
- if (remote || send_oos) {
- remote = drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
- D_ASSERT(!(remote && send_oos));
-
- if (!(remote || send_oos))
- dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
- if (!(local || remote)) {
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
- spin_unlock_irq(&mdev->req_lock);
- goto fail_free_complete;
+ if (drbd_suspended(mdev)) {
+ /* push back and retry: */
+ req->rq_state |= RQ_POSTPONED;
+ if (req->private_bio) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(mdev);
}
+ goto out;
}
- if (b && mdev->unused_spare_tle == NULL) {
- mdev->unused_spare_tle = b;
- b = NULL;
- }
- if (rw == WRITE && (remote || send_oos) &&
- mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
- /* someone closed the current epoch
- * while we were grabbing the spinlock */
- spin_unlock_irq(&mdev->req_lock);
- goto allocate_barrier;
- }
-
-
/* Update disk stats */
_drbd_start_io_acct(mdev, req, bio);
- /* _maybe_start_new_epoch(mdev);
- * If we need to generate a write barrier packet, we have to add the
- * new epoch (barrier) object, and queue the barrier packet for sending,
- * and queue the req's data after it _within the same lock_, otherwise
- * we have race conditions were the reorder domains could be mixed up.
- *
- * Even read requests may start a new epoch and queue the corresponding
- * barrier packet. To get the write ordering right, we only have to
- * make sure that, if this is a write request and it triggered a
- * barrier packet, this request is queued within the same spinlock. */
- if ((remote || send_oos) && mdev->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
- _tl_add_barrier(mdev, mdev->unused_spare_tle);
- mdev->unused_spare_tle = NULL;
- } else {
- D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+	/* We fail READ/READA early if we cannot serve it.
+ * We must do this before req is registered on any lists.
+ * Otherwise, drbd_req_complete() will queue failed READ for retry. */
+ if (rw != WRITE) {
+ if (!do_remote_read(req) && !req->private_bio)
+ goto nodata;
}
- /* NOTE
- * Actually, 'local' may be wrong here already, since we may have failed
- * to write to the meta data, and may become wrong anytime because of
- * local io-error for some other request, which would lead to us
- * "detaching" the local disk.
- *
- * 'remote' may become wrong any time because the network could fail.
- *
- * This is a harmless race condition, though, since it is handled
- * correctly at the appropriate places; so it just defers the failure
- * of the respective operation.
- */
-
- /* mark them early for readability.
- * this just sets some state flags. */
- if (remote)
- _req_mod(req, to_be_send);
- if (local)
- _req_mod(req, to_be_submitted);
-
- /* check this request on the collision detection hash tables.
- * if we have a conflict, just complete it here.
- * THINK do we want to check reads, too? (I don't think so...) */
- if (rw == WRITE && _req_conflicts(req))
- goto fail_conflicting;
+ /* which transfer log epoch does this belong to? */
+ req->epoch = atomic_read(&mdev->tconn->current_tle_nr);
/* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */
- if (likely(size!=0))
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ if (likely(req->i.size!=0)) {
+ if (rw == WRITE)
+ mdev->tconn->current_tle_writes++;
- /* NOTE remote first: to get the concurrent write detection right,
- * we must register the request before start of local IO. */
- if (remote) {
- /* either WRITE and C_CONNECTED,
- * or READ, and no local disk,
- * or READ, but not in sync.
- */
- _req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ list_add_tail(&req->tl_requests, &mdev->tconn->transfer_log);
}
- if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
- if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
- maybe_pull_ahead(mdev);
-
- /* If this was a flush, queue a drbd barrier/start a new epoch.
- * Unless the current epoch was empty anyways, or we are not currently
- * replicating, in which case there is no point. */
- if (unlikely(bio->bi_rw & REQ_FLUSH)
- && mdev->newest_tle->n_writes
- && drbd_should_do_remote(mdev->state))
- queue_barrier(mdev);
-
- spin_unlock_irq(&mdev->req_lock);
- kfree(b); /* if someone else has beaten us to it... */
-
- if (local) {
- req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
-
- /* State may have changed since we grabbed our reference on the
- * mdev->ldev member. Double check, and short-circuit to endio.
- * In case the last activity log transaction failed to get on
- * stable storage, and this is a WRITE, we may not even submit
- * this bio. */
- if (get_ldev(mdev)) {
- if (drbd_insert_fault(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
- bio_endio(req->private_bio, -EIO);
- else
- generic_make_request(req->private_bio);
- put_ldev(mdev);
+ if (rw == WRITE) {
+ if (!drbd_process_write_request(req))
+ no_remote = true;
+ } else {
+ /* We either have a private_bio, or we can read from remote.
+ * Otherwise we had done the goto nodata above. */
+ if (req->private_bio == NULL) {
+ _req_mod(req, TO_BE_SENT);
+ _req_mod(req, QUEUE_FOR_NET_READ);
} else
- bio_endio(req->private_bio, -EIO);
+ no_remote = true;
}
- return 0;
-
-fail_conflicting:
- /* this is a conflicting request.
- * even though it may have been only _partially_
- * overlapping with one of the currently pending requests,
- * without even submitting or sending it, we will
- * pretend that it was successfully served right now.
- */
- _drbd_end_io_acct(mdev, req);
- spin_unlock_irq(&mdev->req_lock);
- if (remote)
- dec_ap_pending(mdev);
- /* THINK: do we want to fail it (-EIO), or pretend success?
- * this pretends success. */
- err = 0;
-
-fail_free_complete:
- if (req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_complete_io(mdev, sector);
-fail_and_free_req:
- if (local) {
- bio_put(req->private_bio);
- req->private_bio = NULL;
- put_ldev(mdev);
+ if (req->private_bio) {
+ /* needs to be marked within the same spinlock */
+ _req_mod(req, TO_BE_SUBMITTED);
+ /* but we need to give up the spinlock to submit */
+ spin_unlock_irq(&mdev->tconn->req_lock);
+ drbd_submit_req_private_bio(req);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ } else if (no_remote) {
+nodata:
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ (unsigned long long)req->i.sector, req->i.size >> 9);
+ /* A write may have been queued for send_oos, however.
+ * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
}
- if (!ret)
- bio_endio(bio, err);
-
- drbd_req_free(req);
- dec_ap_bio(mdev);
- kfree(b);
-
- return ret;
-}
-/* helper function for drbd_make_request
- * if we can determine just by the mdev (state) that this request will fail,
- * return 1
- * otherwise return 0
- */
-static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
-{
- if (mdev->state.role != R_PRIMARY &&
- (!allow_oos || is_write)) {
- if (__ratelimit(&drbd_ratelimit_state)) {
- dev_err(DEV, "Process %s[%u] tried to %s; "
- "since we are not in Primary state, "
- "we cannot allow this\n",
- current->comm, current->pid,
- is_write ? "WRITE" : "READ");
- }
- return 1;
- }
+out:
+ if (drbd_req_put_completion_ref(req, &m, 1))
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- return 0;
+ if (m.bio)
+ complete_master_bio(mdev, &m);
+ return;
}
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
- unsigned int s_enr, e_enr;
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
unsigned long start_time;
- if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
- bio_endio(bio, -EPERM);
- return;
- }
-
start_time = jiffies;
/*
* what we "blindly" assume:
*/
- D_ASSERT((bio->bi_size & 0x1ff) == 0);
-
- /* to make some things easier, force alignment of requests within the
- * granularity of our hash tables */
- s_enr = bio->bi_sector >> HT_SHIFT;
- e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
-
- if (likely(s_enr == e_enr)) {
- do {
- inc_ap_bio(mdev, 1);
- } while (drbd_make_request_common(mdev, bio, start_time));
- return;
- }
-
- /* can this bio be split generically?
- * Maybe add our own split-arbitrary-bios function. */
- if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
- /* rather error out here than BUG in bio_split */
- dev_err(DEV, "bio would need to, but cannot, be split: "
- "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
- bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- (unsigned long long)bio->bi_sector);
- bio_endio(bio, -EINVAL);
- } else {
- /* This bio crosses some boundary, so we have to split it. */
- struct bio_pair *bp;
- /* works for the "do not cross hash slot boundaries" case
- * e.g. sector 262269, size 4096
- * s_enr = 262269 >> 6 = 4097
- * e_enr = (262269+8-1) >> 6 = 4098
- * HT_SHIFT = 6
- * sps = 64, mask = 63
- * first_sectors = 64 - (262269 & 63) = 3
- */
- const sector_t sect = bio->bi_sector;
- const int sps = 1 << HT_SHIFT; /* sectors per slot */
- const int mask = sps - 1;
- const sector_t first_sectors = sps - (sect & mask);
- bp = bio_split(bio, first_sectors);
+ D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
- /* we need to get a "reference count" (ap_bio_cnt)
- * to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get three references
- * atomically, otherwise we might deadlock when trying to submit the
- * second one! */
- inc_ap_bio(mdev, 3);
-
- D_ASSERT(e_enr == s_enr + 1);
-
- while (drbd_make_request_common(mdev, &bp->bio1, start_time))
- inc_ap_bio(mdev, 1);
-
- while (drbd_make_request_common(mdev, &bp->bio2, start_time))
- inc_ap_bio(mdev, 1);
-
- dec_ap_bio(mdev);
-
- bio_pair_release(bp);
- }
+ inc_ap_bio(mdev);
+ __drbd_make_request(mdev, bio, start_time);
}
-/* This is called by bio_add_page(). With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
- * units (was AL_EXTENTs).
+/* This is called by bio_add_page().
+ *
+ * q->max_hw_sectors and other global limits are already enforced there.
*
- * we do the calculation within the lower 32bit of the byte offsets,
- * since we don't care for actual offset, but only check whether it
- * would cross "activity log extent" boundaries.
+ * We need to call down to our lower level device,
+ * in case it has special restrictions.
+ *
+ * We also may need to enforce configured max-bio-bvecs limits.
*
* As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset. so the resulting bio may still
- * cross extent boundaries. those are dealt with (bio_split) in
- * drbd_make_request.
+ * regardless of size and offset, so no need to ask lower levels.
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
- unsigned int bio_offset =
- (unsigned int)bvm->bi_sector << 9; /* 32 bit */
unsigned int bio_size = bvm->bi_size;
- int limit, backing_limit;
-
- limit = DRBD_MAX_BIO_SIZE
- - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
- if (limit < 0)
- limit = 0;
- if (bio_size == 0) {
- if (limit <= bvec->bv_len)
- limit = bvec->bv_len;
- } else if (limit && get_ldev(mdev)) {
+ int limit = DRBD_MAX_BIO_SIZE;
+ int backing_limit;
+
+ if (bio_size && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn) {
@@ -1240,24 +1187,38 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
return limit;
}
+struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+{
+ /* Walk the transfer log,
+ * and find the oldest not yet completed request */
+ struct drbd_request *r;
+ list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+ if (atomic_read(&r->completion_ref))
+ return r;
+ }
+ return NULL;
+}
+
void request_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
+ struct drbd_tconn *tconn = mdev->tconn;
struct drbd_request *req; /* oldest request */
- struct list_head *le;
+ struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now;
- if (get_net_conf(mdev)) {
- if (mdev->state.conn >= C_WF_REPORT_PARAMS)
- ent = mdev->net_conf->timeout*HZ/10
- * mdev->net_conf->ko_count;
- put_net_conf(mdev);
- }
+ rcu_read_lock();
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = nc->timeout * HZ/10 * nc->ko_count;
+
if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
- dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
put_ldev(mdev);
}
+ rcu_read_unlock();
+
et = min_not_zero(dt, ent);
if (!et)
@@ -1265,17 +1226,14 @@ void request_timer_fn(unsigned long data)
now = jiffies;
- spin_lock_irq(&mdev->req_lock);
- le = &mdev->oldest_tle->requests;
- if (list_empty(le)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&tconn->req_lock);
+ req = find_oldest_request(tconn);
+ if (!req) {
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, now + et);
return;
}
- le = le->prev;
- req = list_entry(le, struct drbd_request, tl_requests);
-
/* The request is considered timed out, if
* - we have some effective timeout from the configuration,
* with above state restrictions applied,
@@ -1294,17 +1252,17 @@ void request_timer_fn(unsigned long data)
*/
if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) &&
- !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
}
- if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.mdev == mdev &&
time_after(now, req->start_time + dt) &&
!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&tconn->req_lock);
mod_timer(&mdev->request_timer, nt);
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 3d2111919486..c08d22964d06 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -77,40 +77,41 @@
*/
enum drbd_req_event {
- created,
- to_be_send,
- to_be_submitted,
+ CREATED,
+ TO_BE_SENT,
+ TO_BE_SUBMITTED,
/* XXX yes, now I am inconsistent...
* these are not "events" but "actions"
* oh, well... */
- queue_for_net_write,
- queue_for_net_read,
- queue_for_send_oos,
-
- send_canceled,
- send_failed,
- handed_over_to_network,
- oos_handed_to_network,
- connection_lost_while_pending,
- read_retry_remote_canceled,
- recv_acked_by_peer,
- write_acked_by_peer,
- write_acked_by_peer_and_sis, /* and set_in_sync */
- conflict_discarded_by_peer,
- neg_acked,
- barrier_acked, /* in protocol A and B */
- data_received, /* (remote read) */
-
- read_completed_with_error,
- read_ahead_completed_with_error,
- write_completed_with_error,
- abort_disk_io,
- completed_ok,
- resend,
- fail_frozen_disk_io,
- restart_frozen_disk_io,
- nothing, /* for tracing only */
+ QUEUE_FOR_NET_WRITE,
+ QUEUE_FOR_NET_READ,
+ QUEUE_FOR_SEND_OOS,
+
+ SEND_CANCELED,
+ SEND_FAILED,
+ HANDED_OVER_TO_NETWORK,
+ OOS_HANDED_TO_NETWORK,
+ CONNECTION_LOST_WHILE_PENDING,
+ READ_RETRY_REMOTE_CANCELED,
+ RECV_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+ CONFLICT_RESOLVED,
+ POSTPONE_WRITE,
+ NEG_ACKED,
+ BARRIER_ACKED, /* in protocol A and B */
+ DATA_RECEIVED, /* (remote read) */
+
+ READ_COMPLETED_WITH_ERROR,
+ READ_AHEAD_COMPLETED_WITH_ERROR,
+ WRITE_COMPLETED_WITH_ERROR,
+ ABORT_DISK_IO,
+ COMPLETED_OK,
+ RESEND,
+ FAIL_FROZEN_DISK_IO,
+ RESTART_FROZEN_DISK_IO,
+ NOTHING,
};
/* encoding of request states for now. we don't actually need that many bits.
@@ -142,8 +143,8 @@ enum drbd_req_state_bits {
* recv_ack (B) or implicit "ack" (A),
* still waiting for the barrier ack.
* master_bio may already be completed and invalidated.
- * 11100: write_acked (C),
- * data_received (for remote read, any protocol)
+ * 11100: write acked (C),
+ * data received (for remote read, any protocol)
* or finally the barrier ack has arrived (B,A)...
* request can be freed
* 01100: neg-acked (write, protocol C)
@@ -198,6 +199,22 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+
+ /* The peer has sent a retry ACK */
+ __RQ_POSTPONED,
+
+ /* would have been completed,
+ * but was not, because of drbd_suspended() */
+ __RQ_COMPLETION_SUSP,
+
+ /* We expect a receive ACK (wire proto B) */
+ __RQ_EXP_RECEIVE_ACK,
+
+	/* We expect a write ACK (wire proto C) */
+ __RQ_EXP_WRITE_ACK,
+
+ /* waiting for a barrier ack, did an extra kref_get */
+ __RQ_EXP_BARR_ACK,
};
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
@@ -219,56 +236,16 @@ enum drbd_req_state_bits {
#define RQ_WRITE (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
+#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
+#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
+#define RQ_EXP_WRITE_ACK (1UL << __RQ_EXP_WRITE_ACK)
+#define RQ_EXP_BARR_ACK (1UL << __RQ_EXP_BARR_ACK)
/* For waking up the frozen transfer log mod_req() has to return if the request
should be counted in the epoch object*/
-#define MR_WRITE_SHIFT 0
-#define MR_WRITE (1 << MR_WRITE_SHIFT)
-#define MR_READ_SHIFT 1
-#define MR_READ (1 << MR_READ_SHIFT)
-
-/* epoch entries */
-static inline
-struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->ee_hash_s == 0);
- return mdev->ee_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
-}
-
-/* transfer log (drbd_request objects) */
-static inline
-struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- BUG_ON(mdev->tl_hash_s == 0);
- return mdev->tl_hash +
- ((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
-}
-
-/* application reads (drbd_request objects) */
-static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
- return mdev->app_reads_hash
- + ((unsigned int)(sector) % APP_R_HSIZE);
-}
-
-/* when we receive the answer for a read request,
- * verify that we actually know about it */
-static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = ar_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, collision) {
- if ((unsigned long)req == (unsigned long)id) {
- D_ASSERT(req->sector == sector);
- return req;
- }
- }
- return NULL;
-}
+#define MR_WRITE 1
+#define MR_READ 2
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
@@ -278,41 +255,10 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
req->private_bio = bio;
bio->bi_private = req;
- bio->bi_end_io = drbd_endio_pri;
+ bio->bi_end_io = drbd_request_endio;
bio->bi_next = NULL;
}
-static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
- struct bio *bio_src)
-{
- struct drbd_request *req =
- mempool_alloc(drbd_request_mempool, GFP_NOIO);
- if (likely(req)) {
- drbd_req_make_private_bio(req, bio_src);
-
- req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->mdev = mdev;
- req->master_bio = bio_src;
- req->epoch = 0;
- req->sector = bio_src->bi_sector;
- req->size = bio_src->bi_size;
- INIT_HLIST_NODE(&req->collision);
- INIT_LIST_HEAD(&req->tl_requests);
- INIT_LIST_HEAD(&req->w.list);
- }
- return req;
-}
-
-static inline void drbd_req_free(struct drbd_request *req)
-{
- mempool_free(req, drbd_request_mempool);
-}
-
-static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
-{
- return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
-}
-
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
* bio->bi_size, or similar. But that would be too ugly. */
@@ -321,6 +267,8 @@ struct bio_and_error {
int error;
};
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
+extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
@@ -328,13 +276,17 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
extern void complete_master_bio(struct drbd_conf *mdev,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
+extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+
+/* this is in drbd_main.c */
+extern void drbd_restart_request(struct drbd_request *req);
/* use this if you don't want to deal with calling complete_master_bio()
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
@@ -354,13 +306,13 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
rv = __req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -368,7 +320,7 @@ static inline int req_mod(struct drbd_request *req,
return rv;
}
-static inline bool drbd_should_do_remote(union drbd_state s)
+static inline bool drbd_should_do_remote(union drbd_dev_state s)
{
return s.pdsk == D_UP_TO_DATE ||
(s.pdsk >= D_INCONSISTENT &&
@@ -378,7 +330,7 @@ static inline bool drbd_should_do_remote(union drbd_state s)
That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
states. */
}
-static inline bool drbd_should_send_oos(union drbd_state s)
+static inline bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
new file mode 100644
index 000000000000..0fe220cfb9e9
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.c
@@ -0,0 +1,1863 @@
+/*
+ drbd_state.c
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
+ from Logicworks, Inc. for making SDP replication support possible.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/drbd_limits.h>
+#include "drbd_int.h"
+#include "drbd_req.h"
+
+/* in drbd_main.c */
+extern void tl_abort_disk_io(struct drbd_conf *mdev);
+
+struct after_state_chg_work {
+ struct drbd_work w;
+ union drbd_state os;
+ union drbd_state ns;
+ enum chg_state_flags flags;
+ struct completion *done;
+};
+
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
+
+static int w_after_state_ch(struct drbd_work *w, int unused);
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags);
+static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
+static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
+static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn);
+
+static inline bool is_susp(union drbd_state s)
+{
+ return s.susp || s.susp_nod || s.susp_fen;
+}
+
+bool conn_all_vols_unconf(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ bool rv = true;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (mdev->state.disk != D_DISKLESS ||
+ mdev->state.conn != C_STANDALONE ||
+ mdev->state.role != R_SECONDARY) {
+ rv = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/* Unfortunately the states were not correctly ordered when
+ they were defined, therefore we cannot use max_t() here. */
+static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_PRIMARY || role2 == R_PRIMARY)
+ return R_PRIMARY;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_UNKNOWN;
+}
+static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
+{
+ if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
+ return R_UNKNOWN;
+ if (role1 == R_SECONDARY || role2 == R_SECONDARY)
+ return R_SECONDARY;
+ return R_PRIMARY;
+}
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
+{
+ enum drbd_role role = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ role = max_role(role, mdev->state.role);
+ rcu_read_unlock();
+
+ return role;
+}
+
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
+{
+ enum drbd_role peer = R_UNKNOWN;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ peer = max_role(peer, mdev->state.peer);
+ rcu_read_unlock();
+
+ return peer;
+}
+
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
+{
+ enum drbd_disk_state ds = D_DISKLESS;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ rcu_read_unlock();
+
+ return ds;
+}
+
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
+{
+ enum drbd_conns conn = C_MASK;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ rcu_read_unlock();
+
+ return conn;
+}
+
+static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
+{
+ struct drbd_conf *mdev;
+ int vnr;
+ bool rv = true;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr)
+ if (mdev->state.conn == C_WF_REPORT_PARAMS) {
+ rv = false;
+ break;
+ }
+ rcu_read_unlock();
+
+ return rv;
+}
+
+
+/**
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
+ * @mdev: DRBD device.
+ * @os: old (current) state.
+ * @ns: new (wanted) state.
+ */
+static int cl_wide_st_chg(struct drbd_conf *mdev,
+ union drbd_state os, union drbd_state ns)
+{
+ return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
+ ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
+ (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
+ (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
+ (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
+ (os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
+}
+
+static union drbd_state
+apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
+{
+ union drbd_state ns;
+ ns.i = (os.i & ~mask.i) | val.i;
+ return ns;
+}
+
+enum drbd_state_rv
+drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+ union drbd_state mask, union drbd_state val)
+{
+ unsigned long flags;
+ union drbd_state ns;
+ enum drbd_state_rv rv;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, NULL);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_force_state() - Impose a change which happens outside our control on our state
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ */
+void drbd_force_state(struct drbd_conf *mdev,
+ union drbd_state mask, union drbd_state val)
+{
+ drbd_change_state(mdev, CS_HARD, mask, val);
+}
+
+static enum drbd_state_rv
+_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val)
+{
+ union drbd_state os, ns;
+ unsigned long flags;
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+
+ if (!cl_wide_st_chg(mdev, os, ns))
+ rv = SS_CW_NO_NEED;
+ if (rv == SS_UNKNOWN_ERROR) {
+ rv = is_valid_state(mdev, ns);
+ if (rv >= SS_SUCCESS) {
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ if (rv >= SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+ }
+ }
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ return rv;
+}
+
+/**
+ * drbd_req_state() - Perform a possibly cluster-wide state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Should not be called directly, use drbd_request_state() or
+ * _drbd_request_state().
+ */
+static enum drbd_state_rv
+drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ struct completion done;
+ unsigned long flags;
+ union drbd_state os, ns;
+ enum drbd_state_rv rv;
+
+ init_completion(&done);
+
+ if (f & CS_SERIALIZE)
+ mutex_lock(mdev->state_mutex);
+
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS) {
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+ goto abort;
+ }
+
+ if (cl_wide_st_chg(mdev, os, ns)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv == SS_SUCCESS)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ if (drbd_send_state_req(mdev, mask, val)) {
+ rv = SS_CW_FAILED_BY_PEER;
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+
+ wait_event(mdev->state_wait,
+ (rv = _req_st_cond(mdev, mask, val)));
+
+ if (rv < SS_SUCCESS) {
+ if (f & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ goto abort;
+ }
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ ns = apply_mask_val(drbd_read_state(mdev), mask, val);
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ } else {
+ rv = _drbd_set_state(mdev, ns, f, &done);
+ }
+
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
+
+ if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
+ D_ASSERT(current != mdev->tconn->worker.task);
+ wait_for_completion(&done);
+ }
+
+abort:
+ if (f & CS_SERIALIZE)
+ mutex_unlock(mdev->state_mutex);
+
+ return rv;
+}
+
+/**
+ * _drbd_request_state() - Request a state change (with flags)
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ * @f: flags
+ *
+ * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
+ * flag, or when logging of failed state change requests is not desired.
+ */
+enum drbd_state_rv
+_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+ union drbd_state val, enum chg_state_flags f)
+{
+ enum drbd_state_rv rv;
+
+ wait_event(mdev->state_wait,
+ (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+
+ return rv;
+}
+
+static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+{
+ dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
+ name,
+ drbd_conn_str(ns.conn),
+ drbd_role_str(ns.role),
+ drbd_role_str(ns.peer),
+ drbd_disk_str(ns.disk),
+ drbd_disk_str(ns.pdsk),
+ is_susp(ns) ? 's' : 'r',
+ ns.aftr_isp ? 'a' : '-',
+ ns.peer_isp ? 'p' : '-',
+ ns.user_isp ? 'u' : '-',
+ ns.susp_fen ? 'F' : '-',
+ ns.susp_nod ? 'N' : '-'
+ );
+}
+
+void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum drbd_state_rv err)
+{
+ if (err == SS_IN_TRANSIENT_STATE)
+ return;
+ dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
+ print_st(mdev, " state", os);
+ print_st(mdev, "wanted", ns);
+}
+
+static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char *pbp;
+ pbp = pb;
+ *pbp = 0;
+
+ if (ns.role != os.role && flags & CS_DC_ROLE)
+ pbp += sprintf(pbp, "role( %s -> %s ) ",
+ drbd_role_str(os.role),
+ drbd_role_str(ns.role));
+ if (ns.peer != os.peer && flags & CS_DC_PEER)
+ pbp += sprintf(pbp, "peer( %s -> %s ) ",
+ drbd_role_str(os.peer),
+ drbd_role_str(ns.peer));
+ if (ns.conn != os.conn && flags & CS_DC_CONN)
+ pbp += sprintf(pbp, "conn( %s -> %s ) ",
+ drbd_conn_str(os.conn),
+ drbd_conn_str(ns.conn));
+ if (ns.disk != os.disk && flags & CS_DC_DISK)
+ pbp += sprintf(pbp, "disk( %s -> %s ) ",
+ drbd_disk_str(os.disk),
+ drbd_disk_str(ns.disk));
+ if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
+ pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
+ drbd_disk_str(os.pdsk),
+ drbd_disk_str(ns.pdsk));
+
+ return pbp - pb;
+}
+
+static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);
+
+ if (ns.aftr_isp != os.aftr_isp)
+ pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
+ os.aftr_isp,
+ ns.aftr_isp);
+ if (ns.peer_isp != os.peer_isp)
+ pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
+ os.peer_isp,
+ ns.peer_isp);
+ if (ns.user_isp != os.user_isp)
+ pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
+ os.user_isp,
+ ns.user_isp);
+
+ if (pbp != pb)
+ dev_info(DEV, "%s\n", pb);
+}
+
+static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
+ enum chg_state_flags flags)
+{
+ char pb[300];
+ char *pbp = pb;
+
+ pbp += print_state_change(pbp, os, ns, flags);
+
+ if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
+ pbp += sprintf(pbp, "susp( %d -> %d ) ",
+ is_susp(os),
+ is_susp(ns));
+
+ if (pbp != pb)
+ conn_info(tconn, "%s\n", pb);
+}
+
+
+/**
+ * is_valid_state() - Returns an SS_ error code if ns is not valid
+ * @mdev: DRBD device.
+ * @ns: State to consider.
+ */
+static enum drbd_state_rv
+is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+{
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ enum drbd_fencing_p fp;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct net_conf *nc;
+
+ rcu_read_lock();
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ put_ldev(mdev);
+ }
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc) {
+ if (!nc->two_primaries && ns.role == R_PRIMARY) {
+ if (ns.peer == R_PRIMARY)
+ rv = SS_TWO_PRIMARIES;
+ else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
+ rv = SS_O_VOL_PEER_PRI;
+ }
+ }
+
+ if (rv <= 0)
+ /* already found a reason to abort */;
+ else if (ns.role == R_SECONDARY && mdev->open_cnt)
+ rv = SS_DEVICE_IN_USE;
+
+ else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (fp >= FP_RESOURCE &&
+ ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
+ rv = SS_PRIMARY_NOP;
+
+ else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
+ rv = SS_NO_LOCAL_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
+ rv = SS_NO_REMOTE_DISK;
+
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
+ else if ((ns.conn == C_CONNECTED ||
+ ns.conn == C_WF_BITMAP_S ||
+ ns.conn == C_SYNC_SOURCE ||
+ ns.conn == C_PAUSED_SYNC_S) &&
+ ns.disk == D_OUTDATED)
+ rv = SS_CONNECTED_OUTDATES;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ (nc->verify_alg[0] == 0))
+ rv = SS_NO_VERIFY_ALG;
+
+ else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ mdev->tconn->agreed_pro_version < 88)
+ rv = SS_NOT_SUPPORTED;
+
+ else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
+ rv = SS_CONNECTED_OUTDATES;
+
+ rcu_read_unlock();
+
+ return rv;
+}
+
+/**
+ * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
+ * This function limits state transitions that may be declined by DRBD, i.e.
+ * user requests (aka soft transitions).
+ * @os: old state.
+ * @ns: new state.
+ * @tconn: DRBD connection.
+ */
+static enum drbd_state_rv
+is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+
+ if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
+ os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
+ rv = SS_ALREADY_STANDALONE;
+
+ if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
+ rv = SS_NO_NET_CONFIG;
+
+ if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
+ rv = SS_LOWER_THAN_OUTDATED;
+
+ if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
+ rv = SS_IN_TRANSIENT_STATE;
+
+ /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
+ rv = SS_IN_TRANSIENT_STATE; */
+
+ /* While establishing a connection only allow cstate to change.
+ Delay/refuse role changes, detach attach etc... */
+ if (test_bit(STATE_SENT, &tconn->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+ ns.conn != os.conn && os.conn > C_CONNECTED)
+ rv = SS_RESYNC_RUNNING;
+
+ if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+ os.conn < C_CONNECTED)
+ rv = SS_NEED_CONNECTION;
+
+ if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
+ && os.conn < C_WF_REPORT_PARAMS)
+ rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+
+ return rv;
+}
+
+static enum drbd_state_rv
+is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
+{
+ /* no change -> nothing to do, at least for the connection part */
+ if (oc == nc)
+ return SS_NOTHING_TO_DO;
+
+ /* disconnect of an unconfigured connection does not make sense */
+ if (oc == C_STANDALONE && nc == C_DISCONNECTING)
+ return SS_ALREADY_STANDALONE;
+
+ /* from C_STANDALONE, we start with C_UNCONNECTED */
+ if (oc == C_STANDALONE && nc != C_UNCONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* When establishing a connection we need to go through WF_REPORT_PARAMS!
+ Necessary to do the right thing upon invalidate-remote on a disconnected resource */
+ if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
+ return SS_NEED_CONNECTION;
+
+ /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
+ if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
+ return SS_IN_TRANSIENT_STATE;
+
+ /* After C_DISCONNECTING only C_STANDALONE may follow */
+ if (oc == C_DISCONNECTING && nc != C_STANDALONE)
+ return SS_IN_TRANSIENT_STATE;
+
+ return SS_SUCCESS;
+}
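
As a quick sanity check of the rules above, a few worked input/output pairs, derived by tracing the checks in order (illustrative only, not part of the patch):

/*
 *   is_valid_conn_transition(C_STANDALONE,    C_UNCONNECTED)    -> SS_SUCCESS
 *   is_valid_conn_transition(C_STANDALONE,    C_DISCONNECTING)  -> SS_ALREADY_STANDALONE
 *   is_valid_conn_transition(C_TIMEOUT,       C_WF_CONNECTION)  -> SS_IN_TRANSIENT_STATE
 *   is_valid_conn_transition(C_DISCONNECTING, C_STANDALONE)     -> SS_SUCCESS
 */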
+
+
+/**
+ * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
+ * This limits hard state transitions. Hard state transitions are facts that are
+ * imposed on DRBD by the environment, e.g. the disk broke or the network broke down.
+ * But even those hard state transitions are not allowed to do everything.
+ * @ns: new state.
+ * @os: old state.
+ */
+static enum drbd_state_rv
+is_valid_transition(union drbd_state os, union drbd_state ns)
+{
+ enum drbd_state_rv rv;
+
+ rv = is_valid_conn_transition(os.conn, ns.conn);
+
+ /* we cannot fail (again) if we already detached */
+ if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ rv = SS_IS_DISKLESS;
+
+ return rv;
+}
+
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
+/**
+ * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
+ * @mdev: DRBD device.
+ * @os: old state.
+ * @ns: new state.
+ * @warn: optional pointer that receives a sanitize_state_warnings value.
+ *
+ * When we lose the connection, we have to set the state of the peer's disk (pdsk)
+ * to D_UNKNOWN. This rule and many more along those lines are in this function.
+ */
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum sanitize_state_warnings *warn)
+{
+ enum drbd_fencing_p fp;
+ enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+
+ if (warn)
+ *warn = NO_WARNING;
+
+ fp = FP_DONT_CARE;
+ if (get_ldev(mdev)) {
+ rcu_read_lock();
+ fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+ rcu_read_unlock();
+ put_ldev(mdev);
+ }
+
+ /* Implications from connection to peer and peer_isp */
+ if (ns.conn < C_CONNECTED) {
+ ns.peer_isp = 0;
+ ns.peer = R_UNKNOWN;
+ if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
+ ns.pdsk = D_UNKNOWN;
+ }
+
+ /* Clear the aftr_isp when becoming unconfigured */
+ if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
+ ns.aftr_isp = 0;
+
+ /* An implication of the disk states onto the connection state */
+ /* Abort resync if a disk fails/detaches */
+ if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
+ if (warn)
+ *warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
+ ns.conn = C_CONNECTED;
+ }
+
+ /* Connection breaks down before we finished "Negotiating" */
+ if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
+ get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
+ ns.disk = mdev->new_state_tmp.disk;
+ ns.pdsk = mdev->new_state_tmp.pdsk;
+ } else {
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
+ ns.disk = D_DISKLESS;
+ ns.pdsk = D_UNKNOWN;
+ }
+ put_ldev(mdev);
+ }
+
+ /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
+ if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
+ if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
+ ns.disk = D_UP_TO_DATE;
+ if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
+ ns.pdsk = D_UP_TO_DATE;
+ }
+
+ /* Implications of the connection state on the disk states */
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_UNKNOWN;
+ switch ((enum drbd_conns)ns.conn) {
+ case C_WF_BITMAP_T:
+ case C_PAUSED_SYNC_T:
+ case C_STARTING_SYNC_T:
+ case C_WF_SYNC_UUID:
+ case C_BEHIND:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_OUTDATED;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_VERIFY_S:
+ case C_VERIFY_T:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_CONNECTED:
+ disk_min = D_DISKLESS;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_DISKLESS;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_WF_BITMAP_S:
+ case C_PAUSED_SYNC_S:
+ case C_STARTING_SYNC_S:
+ case C_AHEAD:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
+ break;
+ case C_SYNC_TARGET:
+ disk_min = D_INCONSISTENT;
+ disk_max = D_INCONSISTENT;
+ pdsk_min = D_UP_TO_DATE;
+ pdsk_max = D_UP_TO_DATE;
+ break;
+ case C_SYNC_SOURCE:
+ disk_min = D_UP_TO_DATE;
+ disk_max = D_UP_TO_DATE;
+ pdsk_min = D_INCONSISTENT;
+ pdsk_max = D_INCONSISTENT;
+ break;
+ case C_STANDALONE:
+ case C_DISCONNECTING:
+ case C_UNCONNECTED:
+ case C_TIMEOUT:
+ case C_BROKEN_PIPE:
+ case C_NETWORK_FAILURE:
+ case C_PROTOCOL_ERROR:
+ case C_TEAR_DOWN:
+ case C_WF_CONNECTION:
+ case C_WF_REPORT_PARAMS:
+ case C_MASK:
+ break;
+ }
+ if (ns.disk > disk_max)
+ ns.disk = disk_max;
+
+ if (ns.disk < disk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
+ ns.disk = disk_min;
+ }
+ if (ns.pdsk > pdsk_max)
+ ns.pdsk = pdsk_max;
+
+ if (ns.pdsk < pdsk_min) {
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
+ ns.pdsk = pdsk_min;
+ }
+
+ if (fp == FP_STONITH &&
+ (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
+ ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
+
+ if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ ns.susp_nod = 1; /* Suspend IO while no accessible data is available */
+
+ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
+ if (ns.conn == C_SYNC_SOURCE)
+ ns.conn = C_PAUSED_SYNC_S;
+ if (ns.conn == C_SYNC_TARGET)
+ ns.conn = C_PAUSED_SYNC_T;
+ } else {
+ if (ns.conn == C_PAUSED_SYNC_S)
+ ns.conn = C_SYNC_SOURCE;
+ if (ns.conn == C_PAUSED_SYNC_T)
+ ns.conn = C_SYNC_TARGET;
+ }
+
+ return ns;
+}
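
A hedged sketch of the first rule in action; the function below is hypothetical and only meant to show what a caller would observe after sanitize_state() when the connection drops:

static void example_connection_loss(struct drbd_conf *mdev)
{
        union drbd_state ns = drbd_read_state(mdev);

        ns.conn = C_TIMEOUT;    /* pretend the network just broke down */
        ns = sanitize_state(mdev, ns, NULL);
        /* ns.peer is now R_UNKNOWN and ns.peer_isp is cleared; ns.pdsk is
         * forced to D_UNKNOWN unless it already was in the
         * D_INCONSISTENT..D_OUTDATED range, because with the connection
         * gone we can no longer make claims about the peer's disk. */
}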
+
+void drbd_resume_al(struct drbd_conf *mdev)
+{
+ if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+ dev_info(DEV, "Resumed AL updates\n");
+}
+
+/* helper for __drbd_set_state */
+static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+{
+ if (mdev->tconn->agreed_pro_version < 90)
+ mdev->ov_start_sector = 0;
+ mdev->rs_total = drbd_bm_bits(mdev);
+ mdev->ov_position = 0;
+ if (cs == C_VERIFY_T) {
+ /* starting online verify from an arbitrary position
+ * does not fit well into the existing protocol.
+ * on C_VERIFY_T, we initialize ov_left and friends
+ * implicitly in receive_DataRequest once the
+ * first P_OV_REQUEST is received */
+ mdev->ov_start_sector = ~(sector_t)0;
+ } else {
+ unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
+ if (bit >= mdev->rs_total) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(mdev->rs_total - 1);
+ mdev->rs_total = 1;
+ } else
+ mdev->rs_total -= bit;
+ mdev->ov_position = mdev->ov_start_sector;
+ }
+ mdev->ov_left = mdev->rs_total;
+}
+
+/**
+ * __drbd_set_state() - Set a new DRBD state
+ * @mdev: DRBD device.
+ * @ns: new state.
+ * @flags: Flags
+ * @done: Optional completion; it will be completed after after_state_ch() has finished
+ *
+ * Caller needs to hold req_lock and global_state_lock. Do not call directly.
+ */
+enum drbd_state_rv
+__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+ enum chg_state_flags flags, struct completion *done)
+{
+ union drbd_state os;
+ enum drbd_state_rv rv = SS_SUCCESS;
+ enum sanitize_state_warnings ssw;
+ struct after_state_chg_work *ascw;
+ bool did_remote, should_do_remote;
+
+ os = drbd_read_state(mdev);
+
+ ns = sanitize_state(mdev, ns, &ssw);
+ if (ns.i == os.i)
+ return SS_NOTHING_TO_DO;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ return rv;
+
+ if (!(flags & CS_HARD)) {
+ /* pre-state-change checks ; only look at ns */
+ /* See drbd_state_sw_errors in drbd_strings.c */
+
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ /* If the old state was illegal as well, then let
+ this happen...*/
+
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, mdev->tconn);
+ }
+
+ if (rv < SS_SUCCESS) {
+ if (flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+ return rv;
+ }
+
+ print_sanitize_warnings(mdev, ssw);
+
+ drbd_pr_state_change(mdev, os, ns, flags);
+
+ /* Display changes to the susp* flags that were caused by the call to
+ sanitize_state(). Only display them here if we were not called from
+ _conn_request_state() */
+ if (!(flags & CS_DC_SUSP))
+ conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
+
+ /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+ * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+ * drbd_ldev_destroy() won't happen before our corresponding
+ * after_state_ch works run, where we put_ldev again. */
+ if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+ (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+ atomic_inc(&mdev->local_cnt);
+
+ did_remote = drbd_should_do_remote(mdev->state);
+ mdev->state.i = ns.i;
+ should_do_remote = drbd_should_do_remote(mdev->state);
+ mdev->tconn->susp = ns.susp;
+ mdev->tconn->susp_nod = ns.susp_nod;
+ mdev->tconn->susp_fen = ns.susp_fen;
+
+ /* put replicated vs not-replicated requests in separate epochs */
+ if (did_remote != should_do_remote)
+ start_new_tl_epoch(mdev->tconn);
+
+ if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
+ drbd_print_uuids(mdev, "attached to UUIDs");
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
+ no_peer_wf_report_params(mdev->tconn))
+ clear_bit(STATE_SENT, &mdev->tconn->flags);
+
+ wake_up(&mdev->misc_wait);
+ wake_up(&mdev->state_wait);
+ wake_up(&mdev->tconn->ping_wait);
+
+ /* Aborted verify run, or we reached the stop sector.
+ * Log the last position, unless end-of-device. */
+ if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
+ ns.conn <= C_CONNECTED) {
+ mdev->ov_start_sector =
+ BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
+ if (mdev->ov_left)
+ dev_info(DEV, "Online Verify reached sector %llu\n",
+ (unsigned long long)mdev->ov_start_sector);
+ }
+
+ if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
+ dev_info(DEV, "Syncer continues.\n");
+ mdev->rs_paused += (long)jiffies
+ -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+ if (ns.conn == C_SYNC_TARGET)
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+
+ if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
+ (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
+ dev_info(DEV, "Resync suspended\n");
+ mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
+ }
+
+ if (os.conn == C_CONNECTED &&
+ (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
+ unsigned long now = jiffies;
+ int i;
+
+ set_ov_position(mdev, ns.conn);
+ mdev->rs_start = now;
+ mdev->rs_last_events = 0;
+ mdev->rs_last_sect_ev = 0;
+ mdev->ov_last_oos_size = 0;
+ mdev->ov_last_oos_start = 0;
+
+ for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+ mdev->rs_mark_left[i] = mdev->ov_left;
+ mdev->rs_mark_time[i] = now;
+ }
+
+ drbd_rs_controller_reset(mdev);
+
+ if (ns.conn == C_VERIFY_S) {
+ dev_info(DEV, "Starting Online Verify from sector %llu\n",
+ (unsigned long long)mdev->ov_position);
+ mod_timer(&mdev->resync_timer, jiffies);
+ }
+ }
+
+ if (get_ldev(mdev)) {
+ u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+ MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
+ MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
+
+ mdf &= ~MDF_AL_CLEAN;
+ if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ mdf |= MDF_CRASHED_PRIMARY;
+ if (mdev->state.role == R_PRIMARY ||
+ (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+ mdf |= MDF_PRIMARY_IND;
+ if (mdev->state.conn > C_WF_REPORT_PARAMS)
+ mdf |= MDF_CONNECTED_IND;
+ if (mdev->state.disk > D_INCONSISTENT)
+ mdf |= MDF_CONSISTENT;
+ if (mdev->state.disk > D_OUTDATED)
+ mdf |= MDF_WAS_UP_TO_DATE;
+ if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+ mdf |= MDF_PEER_OUT_DATED;
+ if (mdf != mdev->ldev->md.flags) {
+ mdev->ldev->md.flags = mdf;
+ drbd_md_mark_dirty(mdev);
+ }
+ if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
+ drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
+ put_ldev(mdev);
+ }
+
+ /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
+ if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
+ os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
+ set_bit(CONSIDER_RESYNC, &mdev->flags);
+
+ /* Receiver should clean up itself */
+ if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Now the receiver finished cleaning up itself, it should die */
+ if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
+ drbd_thread_stop_nowait(&mdev->tconn->receiver);
+
+ /* Upon network failure, we need to restart the receiver. */
+ if (os.conn > C_WF_CONNECTION &&
+ ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
+ drbd_thread_restart_nowait(&mdev->tconn->receiver);
+
+ /* Resume AL writing if we get a connection */
+ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+ drbd_resume_al(mdev);
+
+ /* remember last attach time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
+ ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
+ if (ascw) {
+ ascw->os = os;
+ ascw->ns = ns;
+ ascw->flags = flags;
+ ascw->w.cb = w_after_state_ch;
+ ascw->w.mdev = mdev;
+ ascw->done = done;
+ drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
+ } else {
+ dev_err(DEV, "Could not kmalloc an ascw\n");
+ }
+
+ return rv;
+}
+
+static int w_after_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_state_chg_work *ascw =
+ container_of(w, struct after_state_chg_work, w);
+ struct drbd_conf *mdev = w->mdev;
+
+ after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
+ if (ascw->flags & CS_WAIT_COMPLETE) {
+ D_ASSERT(ascw->done != NULL);
+ complete(ascw->done);
+ }
+ kfree(ascw);
+
+ return 0;
+}
+
+static void abw_start_sync(struct drbd_conf *mdev, int rv)
+{
+ if (rv) {
+ dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
+ _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+ return;
+ }
+
+ switch (mdev->state.conn) {
+ case C_STARTING_SYNC_T:
+ _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+ break;
+ case C_STARTING_SYNC_S:
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ break;
+ }
+}
+
+int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+ int (*io_fn)(struct drbd_conf *),
+ char *why, enum bm_flag flags)
+{
+ int rv;
+
+ D_ASSERT(current == mdev->tconn->worker.task);
+
+ /* open coded non-blocking drbd_suspend_io(mdev); */
+ set_bit(SUSPEND_IO, &mdev->flags);
+
+ drbd_bm_lock(mdev, why, flags);
+ rv = io_fn(mdev);
+ drbd_bm_unlock(mdev);
+
+ drbd_resume_io(mdev);
+
+ return rv;
+}
+
+/**
+ * after_state_ch() - Perform after state change actions that may sleep
+ * @mdev: DRBD device.
+ * @os: old state.
+ * @ns: new state.
+ * @flags: Flags
+ */
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, enum chg_state_flags flags)
+{
+ struct sib_info sib;
+
+ sib.sib_reason = SIB_STATE_CHANGE;
+ sib.os = os;
+ sib.ns = ns;
+
+ if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
+ clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ if (mdev->p_uuid)
+ mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+ }
+
+ /* Inform userspace about the change... */
+ drbd_bcast_event(mdev, &sib);
+
+ if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
+ (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+ drbd_khelper(mdev, "pri-on-incon-degr");
+
+ /* Here we have the actions that are performed after a
+ state change. This function might sleep */
+
+ if (ns.susp_nod) {
+ struct drbd_tconn *tconn = mdev->tconn;
+ enum drbd_req_event what = NOTHING;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
+ what = RESEND;
+
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ conn_lowest_disk(tconn) > D_NEGOTIATING)
+ what = RESTART_FROZEN_DISK_IO;
+
+ if (tconn->susp_nod && what != NOTHING) {
+ _tl_restart(tconn, what);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_nod = 1 } },
+ (union drbd_state) { { .susp_nod = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ if (ns.susp_fen) {
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ spin_lock_irq(&tconn->req_lock);
+ if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
+ /* case2: The connection was established again: */
+ struct drbd_conf *odev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, odev, vnr)
+ clear_bit(NEW_CUR_UUID, &odev->flags);
+ rcu_read_unlock();
+ _tl_restart(tconn, RESEND);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ }
+ spin_unlock_irq(&tconn->req_lock);
+ }
+
+ /* Became sync source. With protocol >= 96, we still need to send out
+ * the sync uuid now. Need to do that before any drbd_send_state, or
+ * the other side may go "paused sync" before receiving the sync uuids,
+ * which is unexpected. */
+ if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+ (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+ mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
+ drbd_gen_and_send_sync_uuid(mdev);
+ put_ldev(mdev);
+ }
+
+ /* Do not change the order of the if above and the two below... */
+ if (os.pdsk == D_DISKLESS &&
+ ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */
+ /* we probably will start a resync soon.
+ * make sure those things are properly reset. */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(mdev);
+
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+ /* No point in queuing send_bitmap if we don't have a connection
+ * anymore, so check also the _current_ state, not only the new state
+ * at the time this work was queued. */
+ if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
+ mdev->state.conn == C_WF_BITMAP_S)
+ drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+ "send_bitmap (WFBitMapS)",
+ BM_LOCKED_TEST_ALLOWED);
+
+ /* Lost contact to peer's copy of the data */
+ if ((os.pdsk >= D_INCONSISTENT &&
+ os.pdsk != D_UNKNOWN &&
+ os.pdsk != D_OUTDATED)
+ && (ns.pdsk < D_INCONSISTENT ||
+ ns.pdsk == D_UNKNOWN ||
+ ns.pdsk == D_OUTDATED)) {
+ if (get_ldev(mdev)) {
+ if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ if (drbd_suspended(mdev)) {
+ set_bit(NEW_CUR_UUID, &mdev->flags);
+ } else {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ }
+ put_ldev(mdev);
+ }
+ }
+
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+ drbd_uuid_new_current(mdev);
+ drbd_send_uuids(mdev);
+ }
+ /* D_DISKLESS Peer becomes secondary */
+ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
+ /* We may still be Primary ourselves.
+ * No harm done if the bitmap still changes,
+ * redirtied pages will follow later. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Write out all changed bits on demote.
+ * Though, there is no need to do that just yet
+ * if there is still a resync going on */
+ if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
+ mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+ /* No changes to the bitmap expected this time, so assert that,
+ * even though no harm would be done if it did change. */
+ drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+ "demote", BM_LOCKED_TEST_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ /* Last part of the attaching process ... */
+ if (ns.conn >= C_CONNECTED &&
+ os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
+ drbd_send_uuids(mdev);
+ drbd_send_state(mdev, ns);
+ }
+
+ /* We want to pause/continue resync, tell peer. */
+ if (ns.conn >= C_CONNECTED &&
+ ((os.aftr_isp != ns.aftr_isp) ||
+ (os.user_isp != ns.user_isp)))
+ drbd_send_state(mdev, ns);
+
+ /* In case one of the isp bits got set, suspend other devices. */
+ if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
+ (ns.aftr_isp || ns.peer_isp || ns.user_isp))
+ suspend_other_sg(mdev);
+
+ /* Make sure the peer gets informed about possible state
+ changes (ISP bits) that happened while we were in WFReportParams. */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
+ drbd_send_state(mdev, ns);
+
+ /* We are in the process of starting a full sync... */
+ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+ (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
+ /* no other bitmap changes expected during this phase */
+ drbd_queue_bitmap_io(mdev,
+ &drbd_bmio_set_n_write, &abw_start_sync,
+ "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
+
+ /* We are invalidating ourselves... */
+ if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
+ os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
+ /* other bitmap operation expected during this phase */
+ drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
+ "set_n_write from invalidate", BM_LOCKED_MASK);
+
+ /* first half of local IO error, failure to attach,
+ * or administrative detach */
+ if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
+ /* corresponding get_ldev was in __drbd_set_state, to serialize
+ * our cleanup here with the transition to D_DISKLESS.
+ * But it is still not safe to dereference ldev here, since
+ * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ rcu_read_lock();
+ eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
+ rcu_read_unlock();
+
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ if (was_io_error && eh == EP_CALL_HELPER)
+ drbd_khelper(mdev, "local-io-error");
+
+ /* Immediately allow completion of all application IO,
+ * that waits for completion from the local disk,
+ * if this was a force-detach due to disk_timeout
+ * or administrator request (drbdsetup detach --force).
+ * Do NOT abort otherwise.
+ * Aborting local requests may cause serious problems,
+ * if requests are completed to upper layers already,
+ * and then later the already submitted local bio completes.
+ * This can cause DMA into former bio pages that meanwhile
+ * have been re-used for other things.
+ * So aborting local requests may cause crashes,
+ * or even worse, silent data corruption.
+ */
+ if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
+ put_ldev(mdev);
+ }
+
+ /* second half of local IO error, failure to attach,
+ * or administrative detach,
+ * after local_cnt references have reached zero again */
+ if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+ /* We must still be diskless,
+ * re-attach has to be serialized with this! */
+ if (mdev->state.disk != D_DISKLESS)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s while going diskless\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+ /* corresponding get_ldev in __drbd_set_state
+ * this may finally trigger drbd_ldev_destroy. */
+ put_ldev(mdev);
+ }
+
+ /* Notify peer that I had a local IO error and did not detach. */
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Disks got bigger while they were detached */
+ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
+ test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ if (ns.conn == C_CONNECTED)
+ resync_after_online_grow(mdev);
+ }
+
+ /* A resync finished or aborted, wake paused devices... */
+ if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
+ (os.peer_isp && !ns.peer_isp) ||
+ (os.user_isp && !ns.user_isp))
+ resume_next_sg(mdev);
+
+ /* sync target done with resync. Explicitly notify peer, even though
+ * it should (at least for non-empty resyncs) already know itself. */
+ if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ /* Verify finished, or reached stop sector. Peer did not know about
+ * the stop sector, and we may even have changed the stop sector during
+ * verify to interrupt/stop early. Send the new state. */
+ if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
+ && verify_can_do_stop_sector(mdev))
+ drbd_send_state(mdev, ns);
+
+ /* This triggers bitmap writeout of potentially still unwritten pages
+ * if the resync finished cleanly, or aborted because of peer disk
+ * failure, or because of connection loss.
+ * For resync aborted because of local disk failure, we cannot do
+ * any bitmap writeout anymore.
+ * No harm done if some bits change during this phase.
+ */
+ if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
+ put_ldev(mdev);
+ }
+
+ if (ns.disk == D_DISKLESS &&
+ ns.conn == C_STANDALONE &&
+ ns.role == R_SECONDARY) {
+ if (os.aftr_isp != ns.aftr_isp)
+ resume_next_sg(mdev);
+ }
+
+ drbd_md_sync(mdev);
+}
+
+struct after_conn_state_chg_work {
+ struct drbd_work w;
+ enum drbd_conns oc;
+ union drbd_state ns_min;
+ union drbd_state ns_max; /* new, max state, over all mdevs */
+ enum chg_state_flags flags;
+};
+
+static int w_after_conn_state_ch(struct drbd_work *w, int unused)
+{
+ struct after_conn_state_chg_work *acscw =
+ container_of(w, struct after_conn_state_chg_work, w);
+ struct drbd_tconn *tconn = w->tconn;
+ enum drbd_conns oc = acscw->oc;
+ union drbd_state ns_max = acscw->ns_max;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ kfree(acscw);
+
+ /* Upon network configuration, we need to start the receiver */
+ if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
+ drbd_thread_start(&tconn->receiver);
+
+ if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
+ struct net_conf *old_conf;
+
+ mutex_lock(&tconn->conf_update);
+ old_conf = tconn->net_conf;
+ tconn->my_addr_len = 0;
+ tconn->peer_addr_len = 0;
+ rcu_assign_pointer(tconn->net_conf, NULL);
+ conn_free_crypto(tconn);
+ mutex_unlock(&tconn->conf_update);
+
+ synchronize_rcu();
+ kfree(old_conf);
+ }
+
+ if (ns_max.susp_fen) {
+ /* case1: The outdate peer handler is successful: */
+ if (ns_max.pdsk <= D_OUTDATED) {
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ drbd_uuid_new_current(mdev);
+ clear_bit(NEW_CUR_UUID, &mdev->flags);
+ }
+ }
+ rcu_read_unlock();
+ spin_lock_irq(&tconn->req_lock);
+ _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+ _conn_request_state(tconn,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE);
+ spin_unlock_irq(&tconn->req_lock);
+ }
+ }
+ kref_put(&tconn->kref, &conn_destroy);
+
+ conn_md_sync(tconn);
+
+ return 0;
+}
+
+void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+{
+ enum chg_state_flags flags = ~0;
+ struct drbd_conf *mdev;
+ int vnr, first_vol = 1;
+ union drbd_dev_state os, cs = {
+ { .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = tconn->cstate,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN,
+ } };
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = mdev->state;
+
+ if (first_vol) {
+ cs = os;
+ first_vol = 0;
+ continue;
+ }
+
+ if (cs.role != os.role)
+ flags &= ~CS_DC_ROLE;
+
+ if (cs.peer != os.peer)
+ flags &= ~CS_DC_PEER;
+
+ if (cs.conn != os.conn)
+ flags &= ~CS_DC_CONN;
+
+ if (cs.disk != os.disk)
+ flags &= ~CS_DC_DISK;
+
+ if (cs.pdsk != os.pdsk)
+ flags &= ~CS_DC_PDSK;
+ }
+ rcu_read_unlock();
+
+ *pf |= CS_DC_MASK;
+ *pf &= flags;
+ (*pcs).i = cs.i;
+}
+
+static enum drbd_state_rv
+conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ union drbd_state ns, os;
+ struct drbd_conf *mdev;
+ int vnr;
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ os = drbd_read_state(mdev);
+ ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ if (ns.i == os.i)
+ continue;
+
+ rv = is_valid_transition(os, ns);
+ if (rv < SS_SUCCESS)
+ break;
+
+ if (!(flags & CS_HARD)) {
+ rv = is_valid_state(mdev, ns);
+ if (rv < SS_SUCCESS) {
+ if (is_valid_state(mdev, os) == rv)
+ rv = is_valid_soft_transition(os, ns, tconn);
+ } else
+ rv = is_valid_soft_transition(os, ns, tconn);
+ }
+ if (rv < SS_SUCCESS)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE)
+ print_st_err(mdev, os, ns, rv);
+
+ return rv;
+}
+
+void
+conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
+{
+ union drbd_state ns, os, ns_max = { };
+ union drbd_state ns_min = {
+ { .role = R_MASK,
+ .peer = R_MASK,
+ .conn = val.conn,
+ .disk = D_MASK,
+ .pdsk = D_MASK
+ } };
+ struct drbd_conf *mdev;
+ enum drbd_state_rv rv;
+ int vnr, number_of_volumes = 0;
+
+ if (mask.conn == C_MASK) {
+ /* remember last connect time so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+ tconn->last_reconnect_jif = jiffies;
+
+ tconn->cstate = val.conn;
+ }
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ number_of_volumes++;
+ os = drbd_read_state(mdev);
+ ns = apply_mask_val(os, mask, val);
+ ns = sanitize_state(mdev, ns, NULL);
+
+ if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
+ ns.disk = os.disk;
+
+ rv = __drbd_set_state(mdev, ns, flags, NULL);
+ if (rv < SS_SUCCESS)
+ BUG();
+
+ ns.i = mdev->state.i;
+ ns_max.role = max_role(ns.role, ns_max.role);
+ ns_max.peer = max_role(ns.peer, ns_max.peer);
+ ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
+ ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
+ ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
+
+ ns_min.role = min_role(ns.role, ns_min.role);
+ ns_min.peer = min_role(ns.peer, ns_min.peer);
+ ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
+ ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
+ ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
+ }
+ rcu_read_unlock();
+
+ if (number_of_volumes == 0) {
+ ns_min = ns_max = (union drbd_state) { {
+ .role = R_SECONDARY,
+ .peer = R_UNKNOWN,
+ .conn = val.conn,
+ .disk = D_DISKLESS,
+ .pdsk = D_UNKNOWN
+ } };
+ }
+
+ ns_min.susp = ns_max.susp = tconn->susp;
+ ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
+ ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+
+ *pns_min = ns_min;
+ *pns_max = ns_max;
+}
+
+static enum drbd_state_rv
+_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+{
+ enum drbd_state_rv rv;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+ return SS_CW_SUCCESS;
+
+ if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+ return SS_CW_FAILED_BY_PEER;
+
+ rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
+
+ if (rv == SS_UNKNOWN_ERROR)
+ rv = conn_is_valid_transition(tconn, mask, val, 0);
+
+ if (rv == SS_SUCCESS)
+ rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
+
+ return rv;
+}
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv = SS_SUCCESS;
+ struct after_conn_state_chg_work *acscw;
+ enum drbd_conns oc = tconn->cstate;
+ union drbd_state ns_max, ns_min, os;
+ bool have_mutex = false;
+
+ if (mask.conn) {
+ rv = is_valid_conn_transition(oc, val.conn);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ rv = conn_is_valid_transition(tconn, mask, val, flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+
+ if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
+ !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
+
+ /* This will be a cluster-wide state change.
+ * Need to give up the spinlock, grab the mutex,
+ * then send the state change request, ... */
+ spin_unlock_irq(&tconn->req_lock);
+ mutex_lock(&tconn->cstate_mutex);
+ have_mutex = true;
+
+ set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (conn_send_state_req(tconn, mask, val)) {
+ /* sending failed. */
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ rv = SS_CW_FAILED_BY_PEER;
+ /* need to re-acquire the spin lock, though */
+ goto abort_unlocked;
+ }
+
+ if (val.conn == C_DISCONNECTING)
+ set_bit(DISCONNECT_SENT, &tconn->flags);
+
+ /* ... and re-acquire the spinlock.
+ * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
+ * conn_set_state() within the same spinlock. */
+ spin_lock_irq(&tconn->req_lock);
+ wait_event_lock_irq(tconn->ping_wait,
+ (rv = _conn_rq_cond(tconn, mask, val)),
+ tconn->req_lock);
+ clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+ if (rv < SS_SUCCESS)
+ goto abort;
+ }
+
+ conn_old_common_state(tconn, &os, &flags);
+ flags |= CS_DC_SUSP;
+ conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
+ conn_pr_state_change(tconn, os, ns_max, flags);
+
+ acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
+ if (acscw) {
+ acscw->oc = os.conn;
+ acscw->ns_min = ns_min;
+ acscw->ns_max = ns_max;
+ acscw->flags = flags;
+ acscw->w.cb = w_after_conn_state_ch;
+ kref_get(&tconn->kref);
+ acscw->w.tconn = tconn;
+ drbd_queue_work(&tconn->sender_work, &acscw->w);
+ } else {
+ conn_err(tconn, "Could not kmalloc an acscw\n");
+ }
+
+ abort:
+ if (have_mutex) {
+ /* mutex_unlock() "... must not be used in interrupt context.",
+ * so give up the spinlock, then re-acquire it */
+ spin_unlock_irq(&tconn->req_lock);
+ abort_unlocked:
+ mutex_unlock(&tconn->cstate_mutex);
+ spin_lock_irq(&tconn->req_lock);
+ }
+ if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
+ conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
+ conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+ conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+ }
+ return rv;
+}
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags)
+{
+ enum drbd_state_rv rv;
+
+ spin_lock_irq(&tconn->req_lock);
+ rv = _conn_request_state(tconn, mask, val, flags);
+ spin_unlock_irq(&tconn->req_lock);
+
+ return rv;
+}
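
The locking contract mirrors the device-level pair above: conn_request_state() takes tconn->req_lock itself, while _conn_request_state() expects the caller to hold it. A hypothetical caller already inside the lock might look roughly like this (sketch only; the function name and the chosen transition are illustrative):

static enum drbd_state_rv example_force_disconnect(struct drbd_tconn *tconn)
{
        enum drbd_state_rv rv;

        spin_lock_irq(&tconn->req_lock);
        /* CS_HARD: this reflects a fact (the link is gone), so the
         * soft-transition checks are skipped */
        rv = _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        spin_unlock_irq(&tconn->req_lock);

        return rv;
}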
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
new file mode 100644
index 000000000000..a3c361bbc4b6
--- /dev/null
+++ b/drivers/block/drbd/drbd_state.h
@@ -0,0 +1,161 @@
+#ifndef DRBD_STATE_H
+#define DRBD_STATE_H
+
+struct drbd_conf;
+struct drbd_tconn;
+
+/**
+ * DOC: DRBD State macros
+ *
+ * These macros are used to express state changes in easily readable form.
+ *
+ * The NS macros expand to a mask and a value that can be bit-ORed onto the
+ * current state once the spinlock (req_lock) has been taken.
+ *
+ * The _NS macros are used for state functions that get called with the
+ * spinlock held. These macros expand directly to the new state value.
+ *
+ * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are
+ * defined to express state changes that affect more than one aspect of the state.
+ *
+ * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
+ * means that the network connection was established and that the peer
+ * is in the secondary role.
+ */
+#define role_MASK R_MASK
+#define peer_MASK R_MASK
+#define disk_MASK D_MASK
+#define pdsk_MASK D_MASK
+#define conn_MASK C_MASK
+#define susp_MASK 1
+#define user_isp_MASK 1
+#define aftr_isp_MASK 1
+#define susp_nod_MASK 1
+#define susp_fen_MASK 1
+
+#define NS(T, S) \
+ ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T = (S); val; })
+#define NS2(T1, S1, T2, S2) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val; })
+#define NS3(T1, S1, T2, S2, T3, S3) \
+ ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+ mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
+ ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+ val.T2 = (S2); val.T3 = (S3); val; })
+
+#define _NS(D, T, S) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T = (S); __ns; })
+#define _NS2(D, T1, S1, T2, S2) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns; })
+#define _NS3(D, T1, S1, T2, S2, T3, S3) \
+ D, ({ union drbd_state __ns; __ns = drbd_read_state(D); __ns.T1 = (S1); \
+ __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
+
+enum chg_state_flags {
+ CS_HARD = 1 << 0,
+ CS_VERBOSE = 1 << 1,
+ CS_WAIT_COMPLETE = 1 << 2,
+ CS_SERIALIZE = 1 << 3,
+ CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
+ CS_LOCAL_ONLY = 1 << 4, /* Do not consider a device pair wide state change */
+ CS_DC_ROLE = 1 << 5, /* DC = display as connection state change */
+ CS_DC_PEER = 1 << 6,
+ CS_DC_CONN = 1 << 7,
+ CS_DC_DISK = 1 << 8,
+ CS_DC_PDSK = 1 << 9,
+ CS_DC_SUSP = 1 << 10,
+ CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
+ CS_IGN_OUTD_FAIL = 1 << 11,
+};
+
+/* drbd_dev_state and drbd_state are different types. This is to stress the
+ small difference. There is no suspended flag (.susp), and no
+ suspended-while-fence-handler-runs flag (susp_fen). */
+union drbd_dev_state {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned _unused:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned peer_isp:1 ;
+ unsigned user_isp:1 ;
+ unsigned _pad:11; /* 0 unused */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned _pad:11;
+ unsigned user_isp:1 ;
+ unsigned peer_isp:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned _unused:1 ;
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+#else
+# error "this endianness is not supported"
+#endif
+ };
+ unsigned int i;
+};
+
+extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+ enum chg_state_flags f,
+ union drbd_state mask,
+ union drbd_state val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+ union drbd_state);
+extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+ union drbd_state,
+ union drbd_state,
+ enum chg_state_flags);
+extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+ enum chg_state_flags,
+ struct completion *done);
+extern void print_st_err(struct drbd_conf *, union drbd_state,
+ union drbd_state, int);
+
+enum drbd_state_rv
+_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+enum drbd_state_rv
+conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+ enum chg_state_flags flags);
+
+extern void drbd_resume_al(struct drbd_conf *mdev);
+extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+
+/**
+ * drbd_request_state() - Request a state change
+ * @mdev: DRBD device.
+ * @mask: mask of state bits to change.
+ * @val: value of new state bits.
+ *
+ * This is the most graceful way of requesting a state change. It is
+ * quite verbose in case the state change is not possible, and all those
+ * state changes are globally serialized.
+ */
+static inline int drbd_request_state(struct drbd_conf *mdev,
+ union drbd_state mask,
+ union drbd_state val)
+{
+ return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+}
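
A short usage sketch for the wrapper above, combining it with the NS() helper defined earlier in this header; the function name is made up for illustration:

static inline int example_try_promote(struct drbd_conf *mdev)
{
        /* ask to become R_PRIMARY; the return value is an
         * enum drbd_state_rv code, values >= SS_SUCCESS mean success */
        return drbd_request_state(mdev, NS(role, R_PRIMARY));
}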
+
+enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
+enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+
+#endif
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index c44a2a602772..9a664bd27404 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@ static const char *drbd_state_sw_errors[] = {
[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+ [-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
};
const char *drbd_conn_str(enum drbd_conns s)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 6bce2cc179d4..424dc7bdf9b7 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -38,16 +38,13 @@
#include "drbd_int.h"
#include "drbd_req.h"
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel);
-
+static int w_make_ov_request(struct drbd_work *w, int cancel);
/* endio handlers:
* drbd_md_io_complete (defined here)
- * drbd_endio_pri (defined here)
- * drbd_endio_sec (defined here)
+ * drbd_request_endio (defined here)
+ * drbd_peer_request_endio (defined here)
* bm_async_io_complete (defined in drbd_bitmap.c)
*
* For all these callbacks, note the following:
@@ -60,7 +57,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
/* About the global_state_lock
Each state transition on an device holds a read lock. In case we have
- to evaluate the sync after dependencies, we grab a write lock, because
+ to evaluate the resync after dependencies, we grab a write lock, because
we need stable states on all devices for that. */
rwlock_t global_state_lock;
@@ -98,97 +95,93 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
-
- D_ASSERT(e->block_id != ID_VACANT);
+ struct drbd_conf *mdev = peer_req->w.mdev;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->read_cnt += e->size >> 9;
- list_del(&e->w.list);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->read_cnt += peer_req->i.size >> 9;
+ list_del(&peer_req->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- drbd_queue_work(&mdev->data.work, &e->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
- struct drbd_conf *mdev = e->mdev;
- sector_t e_sector;
+ struct drbd_conf *mdev = peer_req->w.mdev;
+ struct drbd_interval i;
int do_wake;
- int is_syncer_req;
+ u64 block_id;
int do_al_complete_io;
- D_ASSERT(e->block_id != ID_VACANT);
-
- /* after we moved e to done_ee,
+ /* after we moved peer_req to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
- e_sector = e->sector;
- do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
- is_syncer_req = is_syncer_block_id(e->block_id);
+ i = peer_req->i;
+ do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
+ block_id = peer_req->block_id;
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- list_del(&e->w.list); /* has been on active_ee or sync_ee */
- list_add_tail(&e->w.list, &mdev->done_ee);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
+ mdev->writ_cnt += peer_req->i.size >> 9;
+ list_move_tail(&peer_req->w.list, &mdev->done_ee);
- /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
- * neither did we wake possibly waiting conflicting requests.
- * done from "drbd_process_done_ee" within the appropriate w.cb
- * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
+ /*
+ * Do not remove from the write_requests tree here: we did not send the
+ * Ack yet and did not wake possibly waiting conflicting requests.
+ * Removed from the tree from "drbd_process_done_ee" within the
+ * appropriate w.cb (e_end_block/e_end_resync_block) or from
+ * _drbd_clear_done_ee.
+ */
- do_wake = is_syncer_req
- ? list_empty(&mdev->sync_ee)
- : list_empty(&mdev->active_ee);
+ do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
- if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- if (is_syncer_req)
- drbd_rs_complete_io(mdev, e_sector);
+ if (block_id == ID_SYNCER)
+ drbd_rs_complete_io(mdev, i.sector);
if (do_wake)
wake_up(&mdev->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, e_sector);
+ drbd_al_complete_io(mdev, &i);
- wake_asender(mdev);
+ wake_asender(mdev->tconn);
put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
-void drbd_endio_sec(struct bio *bio, int error)
+void drbd_peer_request_endio(struct bio *bio, int error)
{
- struct drbd_epoch_entry *e = bio->bi_private;
- struct drbd_conf *mdev = e->mdev;
+ struct drbd_peer_request *peer_req = bio->bi_private;
+ struct drbd_conf *mdev = peer_req->w.mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
if (error && __ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: error=%d s=%llus\n",
is_write ? "write" : "read", error,
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
if (!error && !uptodate) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
is_write ? "write" : "read",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?! */
@@ -196,24 +189,24 @@ void drbd_endio_sec(struct bio *bio, int error)
}
if (error)
- set_bit(__EE_WAS_ERROR, &e->flags);
+ set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
- if (atomic_dec_and_test(&e->pending_bios)) {
+ if (atomic_dec_and_test(&peer_req->pending_bios)) {
if (is_write)
- drbd_endio_write_sec_final(e);
+ drbd_endio_write_sec_final(peer_req);
else
- drbd_endio_read_sec_final(e);
+ drbd_endio_read_sec_final(peer_req);
}
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
*/
-void drbd_endio_pri(struct bio *bio, int error)
+void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
- struct drbd_conf *mdev = req->mdev;
+ struct drbd_conf *mdev = req->w.mdev;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
@@ -227,53 +220,72 @@ void drbd_endio_pri(struct bio *bio, int error)
error = -EIO;
}
+
+ /* If this request was aborted locally before,
+ * but now was completed "successfully",
+ * chances are that this caused arbitrary data corruption.
+ *
+ * "aborting" requests, or force-detaching the disk, is intended for
+ * completely blocked/hung local backing devices which no longer
+ * complete requests at all, not even with error completions. In this
+ * situation, usually a hard-reset and failover is the only way out.
+ *
+ * By "aborting", basically faking a local error-completion,
+ * we allow for a more graceful switchover by cleanly migrating services.
+ * Still the affected node has to be rebooted "soon".
+ *
+ * By completing these requests, we allow the upper layers to re-use
+ * the associated data pages.
+ *
+ * If later the local backing device "recovers", and now DMAs some data
+ * from disk into the original request pages, in the best case it will
+ * just put random data into unused pages; but typically it will corrupt
+ * meanwhile completely unrelated data, causing all sorts of damage.
+ *
+ * Which means delayed successful completion,
+ * especially for READ requests,
+ * is a reason to panic().
+ *
+ * We assume that a delayed *error* completion is OK,
+ * though we still will complain noisily about it.
+ */
+ if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+
+ if (!error)
+ panic("possible random memory corruption caused by delayed completion of aborted local request\n");
+ }
+
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
+ ? WRITE_COMPLETED_WITH_ERROR
: (bio_rw(bio) == READ)
- ? read_completed_with_error
- : read_ahead_completed_with_error;
+ ? READ_COMPLETED_WITH_ERROR
+ : READ_AHEAD_COMPLETED_WITH_ERROR;
} else
- what = completed_ok;
+ what = COMPLETED_OK;
bio_put(req->private_bio);
req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */
- spin_lock_irqsave(&mdev->req_lock, flags);
+ spin_lock_irqsave(&mdev->tconn->req_lock, flags);
__req_mod(req, what, &m);
- spin_unlock_irqrestore(&mdev->req_lock, flags);
+ spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
}
-int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- struct drbd_request *req = container_of(w, struct drbd_request, w);
-
- /* We should not detach for read io-error,
- * but try to WRITE the P_DATA_REPLY to the failed location,
- * to give the disk the chance to relocate that block */
-
- spin_lock_irq(&mdev->req_lock);
- if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
- _req_mod(req, read_retry_remote_canceled);
- spin_unlock_irq(&mdev->req_lock);
- return 1;
- }
- spin_unlock_irq(&mdev->req_lock);
-
- return w_send_read_req(mdev, w, 0);
-}
-
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
+ struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
- struct page *page = e->pages;
+ struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
@@ -290,7 +302,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_e
page = tmp;
}
/* and now the last, possibly only partially used page */
- len = e->size & (PAGE_SIZE - 1);
+ len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
crypto_hash_update(&desc, &sg, sg.length);
crypto_hash_final(&desc, digest);
@@ -316,59 +328,58 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
crypto_hash_final(&desc, digest);
}
-/* TODO merge common code with w_e_end_ov_req */
-int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* MAYBE merge common code with w_e_end_ov_req */
+static int w_e_send_csum(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
int digest_size;
void *digest;
- int ok = 1;
-
- D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
+ int err = 0;
if (unlikely(cancel))
goto out;
- if (likely((e->flags & EE_WAS_ERROR) != 0))
+ if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- sector_t sector = e->sector;
- unsigned int size = e->size;
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
- /* Free e and pages before send.
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
+ /* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_CSUM_RS_REQUEST);
+ err = drbd_send_drequest_csum(mdev, sector, size,
+ digest, digest_size,
+ P_CSUM_RS_REQUEST);
kfree(digest);
} else {
dev_err(DEV, "kmalloc() of digest failed.\n");
- ok = 0;
+ err = -ENOMEM;
}
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
- return ok;
+ return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *peer_req;
if (!get_ldev(mdev))
return -EIO;
@@ -378,45 +389,47 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
- e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e)
+ peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
+ size, GFP_TRY);
+ if (!peer_req)
goto defer;
- e->w.cb = w_e_send_csum;
- spin_lock_irq(&mdev->req_lock);
- list_add(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ peer_req->w.cb = w_e_send_csum;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
* because bio_add_page failed (probably broken lower level driver),
* retry may or may not help.
* If it does not, you may need to force disconnect. */
- spin_lock_irq(&mdev->req_lock);
- list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
defer:
put_ldev(mdev);
return -EAGAIN;
}
-int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_resync_timer(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
switch (mdev->state.conn) {
case C_VERIFY_S:
- w_make_ov_request(mdev, w, cancel);
+ w_make_ov_request(w, cancel);
break;
case C_SYNC_TARGET:
- w_make_resync_request(mdev, w, cancel);
+ w_make_resync_request(w, cancel);
break;
}
- return 1;
+ return 0;
}
void resync_timer_fn(unsigned long data)
@@ -424,7 +437,7 @@ void resync_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->data.work, &mdev->resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
@@ -456,8 +469,24 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
fb->values[i] += value;
}
+struct fifo_buffer *fifo_alloc(int fifo_size)
+{
+ struct fifo_buffer *fb;
+
+ fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+ if (!fb)
+ return NULL;
+
+ fb->head_index = 0;
+ fb->size = fifo_size;
+ fb->total = 0;
+
+ return fb;
+}
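A minimal sketch of the layout fifo_alloc() above assumes: header and int ring share the single kzalloc() block, so one kfree() releases both. Field names follow the hunks in this file; the free helper is hypothetical.

        struct fifo_buffer {            /* layout implied by fifo_alloc() */
                unsigned int head_index;
                unsigned int size;      /* number of entries in values[] */
                int total;              /* sum of all queued corrections */
                int values[];           /* flexible array, fifo_size ints */
        };

        static void fifo_free(struct fifo_buffer *fb)  /* hypothetical helper */
        {
                kfree(fb);              /* header and ring are one allocation */
        }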
+
static int drbd_rs_controller(struct drbd_conf *mdev)
{
+ struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
unsigned int want; /* The number of sectors we want in the proxy */
int req_sect; /* Number of sectors to request in this turn */
@@ -466,38 +495,39 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
int steps; /* Number of time steps to plan ahead */
int curr_corr;
int max_sect;
+ struct fifo_buffer *plan;
sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
mdev->rs_in_flight -= sect_in;
- spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+ plan = rcu_dereference(mdev->rs_plan_s);
- steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
+ steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
- want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
+ want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
} else { /* normal path */
- want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
- sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
+ want = dc->c_fill_target ? dc->c_fill_target :
+ sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
}
- correction = want - mdev->rs_in_flight - mdev->rs_planed;
+ correction = want - mdev->rs_in_flight - plan->total;
/* Plan ahead */
cps = correction / steps;
- fifo_add_val(&mdev->rs_plan_s, cps);
- mdev->rs_planed += cps * steps;
+ fifo_add_val(plan, cps);
+ plan->total += cps * steps;
/* What we do in this step */
- curr_corr = fifo_push(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
- mdev->rs_planed -= curr_corr;
+ curr_corr = fifo_push(plan, 0);
+ plan->total -= curr_corr;
req_sect = sect_in + curr_corr;
if (req_sect < 0)
req_sect = 0;
- max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
+ max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
if (req_sect > max_sect)
req_sect = max_sect;
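The controller logic above is easier to follow with the RCU and disk_conf plumbing stripped away. Below is a sketch of a single control step, using the fifo helpers defined earlier in this file; the function and parameter names are assumptions made for illustration, not drbd identifiers.

        static int rs_controller_step_sketch(struct fifo_buffer *plan, int sect_in,
                                             int in_flight, int fill_target,
                                             int delay_target, int max_rate)
        {
                int want, correction, cps, curr_corr, req_sect, max_sect;

                /* how many sectors we want in flight plus planned */
                want = fill_target ? fill_target
                                   : sect_in * delay_target * HZ / (SLEEP_TIME * 10);

                /* distribute the error over the planning horizon */
                correction = want - in_flight - plan->total;
                cps = correction / plan->size;
                fifo_add_val(plan, cps);
                plan->total += cps * plan->size;

                /* take this step's share out of the plan */
                curr_corr = fifo_push(plan, 0);
                plan->total -= curr_corr;

                req_sect = sect_in + curr_corr;
                if (req_sect < 0)
                        req_sect = 0;

                /* never exceed the configured ceiling */
                max_sect = (max_rate * 2 * SLEEP_TIME) / HZ;
                if (req_sect > max_sect)
                        req_sect = max_sect;

                return req_sect;
        }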
@@ -513,22 +543,25 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
int number;
- if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+
+ rcu_read_lock();
+ if (rcu_dereference(mdev->rs_plan_s)->size) {
number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
} else {
- mdev->c_sync_rate = mdev->sync_conf.rate;
+ mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
}
+ rcu_read_unlock();
/* ignore the amount of pending requests, the resync controller should
* throttle down to incoming reply rate soon enough anyways. */
return number;
}
-static int w_make_resync_request(struct drbd_conf *mdev,
- struct drbd_work *w, int cancel)
+int w_make_resync_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
@@ -538,12 +571,12 @@ static int w_make_resync_request(struct drbd_conf *mdev,
int i = 0;
if (unlikely(cancel))
- return 1;
+ return 0;
if (mdev->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
if (!get_ldev(mdev)) {
@@ -552,7 +585,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
- return 1;
+ return 0;
}
max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
@@ -562,15 +595,15 @@ static int w_make_resync_request(struct drbd_conf *mdev,
for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket) {
- queued = mdev->data.socket->sk->sk_wmem_queued;
- sndbuf = mdev->data.socket->sk->sk_sndbuf;
+ mutex_lock(&mdev->tconn->data.mutex);
+ if (mdev->tconn->data.socket) {
+ queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
+ sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
} else {
queued = 1;
sndbuf = 0;
}
- mutex_unlock(&mdev->data.mutex);
+ mutex_unlock(&mdev->tconn->data.mutex);
if (queued > sndbuf / 2)
goto requeue;
@@ -581,7 +614,7 @@ next_sector:
if (bit == DRBD_END_OF_BITMAP) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
put_ldev(mdev);
- return 1;
+ return 0;
}
sector = BM_BIT_TO_SECT(bit);
@@ -640,11 +673,11 @@ next_sector:
/* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9;
- if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
+ if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
switch (read_for_csum(mdev, sector, size)) {
case -EIO: /* Disk failure */
put_ldev(mdev);
- return 0;
+ return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(mdev, sector);
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -657,13 +690,16 @@ next_sector:
BUG();
}
} else {
+ int err;
+
inc_rs_pending(mdev);
- if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
- sector, size, ID_SYNCER)) {
+ err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ sector, size, ID_SYNCER);
+ if (err) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
put_ldev(mdev);
- return 0;
+ return err;
}
}
}
@@ -676,21 +712,23 @@ next_sector:
* until then resync "work" is "inactive" ...
*/
put_ldev(mdev);
- return 1;
+ return 0;
}
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
put_ldev(mdev);
- return 1;
+ return 0;
}
-static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_make_ov_request(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
int number, i, size;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
@@ -699,9 +737,17 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
- if (sector >= capacity) {
+ if (sector >= capacity)
return 1;
- }
+
+ /* We check for "finished" only in the reply path:
+ * w_e_end_ov_reply().
+ * We need to send at least one request out. */
+ stop_sector_reached = i > 0
+ && verify_can_do_stop_sector(mdev)
+ && sector >= mdev->ov_stop_sector;
+ if (stop_sector_reached)
+ break;
size = BM_BLOCK_SIZE;
@@ -715,7 +761,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
size = (capacity-sector)<<9;
inc_rs_pending(mdev);
- if (!drbd_send_ov_request(mdev, sector, size)) {
+ if (drbd_send_ov_request(mdev, sector, size)) {
dec_rs_pending(mdev);
return 0;
}
@@ -725,56 +771,39 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- return 1;
-}
-
-
-void start_resync_timer_fn(unsigned long data)
-{
- struct drbd_conf *mdev = (struct drbd_conf *) data;
-
- drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
-}
-
-int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
- if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
- dev_warn(DEV, "w_start_resync later...\n");
- mdev->start_resync_timer.expires = jiffies + HZ/10;
- add_timer(&mdev->start_resync_timer);
- return 1;
- }
-
- drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ if (i == 0 || !stop_sector_reached)
+ mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
-int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_ov_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
-static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static int w_resync_finished(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
kfree(w);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
static void ping_peer(struct drbd_conf *mdev)
{
- clear_bit(GOT_PING_ACK, &mdev->flags);
- request_ping(mdev);
- wait_event(mdev->misc_wait,
- test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+ struct drbd_tconn *tconn = mdev->tconn;
+
+ clear_bit(GOT_PING_ACK, &tconn->flags);
+ request_ping(tconn);
+ wait_event(tconn->ping_wait,
+ test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
@@ -799,7 +828,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
if (w) {
w->cb = w_resync_finished;
- drbd_queue_work(&mdev->data.work, w);
+ w->mdev = mdev;
+ drbd_queue_work(&mdev->tconn->sender_work, w);
return 1;
}
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -808,7 +838,12 @@ int drbd_resync_finished(struct drbd_conf *mdev)
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
+
db = mdev->rs_total;
+ /* adjust for verify start and stop sectors, or the position actually reached */
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ db -= mdev->ov_left;
+
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
@@ -817,8 +852,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ping_peer(mdev);
- spin_lock_irq(&mdev->req_lock);
- os = mdev->state;
+ spin_lock_irq(&mdev->tconn->req_lock);
+ os = drbd_read_state(mdev);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -831,7 +866,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- verify_done ? "Online verify " : "Resync",
+ verify_done ? "Online verify" : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
@@ -848,7 +883,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target";
- if (mdev->csums_tfm && mdev->rs_total) {
+ if (mdev->tconn->csums_tfm && mdev->rs_total) {
const unsigned long s = mdev->rs_same_csum;
const unsigned long t = mdev->rs_total;
const int ratio =
@@ -906,13 +941,15 @@ int drbd_resync_finished(struct drbd_conf *mdev)
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
put_ldev(mdev);
out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
- if (verify_done)
+
+ /* reset start sector, if we reached end of device */
+ if (verify_done && mdev->ov_left == 0)
mdev->ov_start_sector = 0;
drbd_md_sync(mdev);
@@ -924,19 +961,19 @@ out:
}
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
- if (drbd_ee_has_active_page(e)) {
+ if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
- int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &mdev->pp_in_use_by_net);
atomic_sub(i, &mdev->pp_in_use);
- spin_lock_irq(&mdev->req_lock);
- list_add_tail(&e->w.list, &mdev->net_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
+ list_add_tail(&peer_req->w.list, &mdev->net_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
}
/**
@@ -945,174 +982,177 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_data_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- ok = drbd_send_block(mdev, P_DATA_REPLY, e);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
/**
- * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUESTRS
+ * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @mdev: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- int ok;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev_if_state(mdev, D_FAILED)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
if (mdev->state.conn == C_AHEAD) {
- ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
- } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
"partner DISKLESS!\n");
- ok = 1;
+ err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
- (unsigned long long)e->sector);
+ (unsigned long long)peer_req->i.sector);
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
- drbd_rs_failed_io(mdev, e->sector, e->size);
+ drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
-int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
int digest_size;
void *digest = NULL;
- int ok, eq = 0;
+ int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
- if (mdev->csums_tfm) {
- digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+ if (mdev->tconn->csums_tfm) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
D_ASSERT(digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
if (eq) {
- drbd_set_in_sync(mdev, e->sector, e->size);
+ drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
- mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
- ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
+ mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
+ err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
} else {
inc_rs_pending(mdev);
- e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
- e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
+ peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
+ peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
}
} else {
- ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(mdev);
- move_to_net_ee_or_free(mdev, e);
+ move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block/ack() failed\n");
- return ok;
+ return err;
}
-/* TODO merge common code with w_e_send_csum */
-int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
void *digest;
- int ok = 1;
+ int err = 0;
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
- ok = 0; /* terminate the connection in case the allocation failed */
+ err = 1; /* terminate the connection in case the allocation failed */
goto out;
}
- if (likely(!(e->flags & EE_WAS_ERROR)))
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ if (likely(!(peer_req->flags & EE_WAS_ERROR)))
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
else
memset(digest, 0, digest_size);
@@ -1120,25 +1160,23 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
- e = NULL;
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
+ peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_OV_REPLY);
- if (!ok)
+ err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ if (err)
dec_rs_pending(mdev);
kfree(digest);
out:
- if (e)
- drbd_free_ee(mdev, e);
+ if (peer_req)
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return ok;
+ return err;
}
-void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
{
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
@@ -1149,36 +1187,38 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
drbd_set_out_of_sync(mdev, sector, size);
}
-int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
+ struct drbd_conf *mdev = w->mdev;
struct digest_info *di;
void *digest;
- sector_t sector = e->sector;
- unsigned int size = e->size;
+ sector_t sector = peer_req->i.sector;
+ unsigned int size = peer_req->i.size;
int digest_size;
- int ok, eq = 0;
+ int err, eq = 0;
+ bool stop_sector_reached = false;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, e);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* the resync lru has been cleaned up already */
if (get_ldev(mdev)) {
- drbd_rs_complete_io(mdev, e->sector);
+ drbd_rs_complete_io(mdev, peer_req->i.sector);
put_ldev(mdev);
}
- di = e->digest;
+ di = peer_req->digest;
- if (likely((e->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+ if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
+ digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+ drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
@@ -1186,19 +1226,19 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
}
- /* Free e and pages before send.
- * In case we block on congestion, we could otherwise run into
- * some distributed deadlock, if the other side blocks on
- * congestion as well, because our receiver blocks in
- * drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, e);
+ /* Free peer_req and pages before send.
+ * In case we block on congestion, we could otherwise run into
+ * some distributed deadlock, if the other side blocks on
+ * congestion as well, because our receiver blocks in
+ * drbd_alloc_pages due to pp_in_use > max_buffers. */
+ drbd_free_peer_req(mdev, peer_req);
if (!eq)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
- ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
- eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
+ err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
dec_unacked(mdev);
@@ -1208,73 +1248,102 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if ((mdev->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(mdev, mdev->ov_left);
- if (mdev->ov_left == 0) {
- ov_oos_print(mdev);
+ stop_sector_reached = verify_can_do_stop_sector(mdev) &&
+ (sector + (size>>9)) >= mdev->ov_stop_sector;
+
+ if (mdev->ov_left == 0 || stop_sector_reached) {
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
- return ok;
+ return err;
}
-int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_prev_work_done(struct drbd_work *w, int cancel)
{
struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
+
complete(&b->done);
- return 1;
+ return 0;
}
-int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+/* FIXME
+ * We need to track the number of pending barrier acks,
+ * and to be able to wait for them.
+ * See also comment in drbd_adm_attach before drbd_suspend_io.
+ */
+int drbd_send_barrier(struct drbd_tconn *tconn)
{
- struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
- struct p_barrier *p = &mdev->data.sbuf.barrier;
- int ok = 1;
-
- /* really avoid racing with tl_clear. w.cb may have been referenced
- * just before it was reassigned and re-queued, so double check that.
- * actually, this race was harmless, since we only try to send the
- * barrier packet here, and otherwise do nothing with the object.
- * but compare with the head of w_clear_epoch */
- spin_lock_irq(&mdev->req_lock);
- if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
- cancel = 1;
- spin_unlock_irq(&mdev->req_lock);
- if (cancel)
- return 1;
+ struct p_barrier *p;
+ struct drbd_socket *sock;
- if (!drbd_get_data_sock(mdev))
- return 0;
- p->barrier = b->br_number;
- /* inc_ap_pending was done where this was queued.
- * dec_ap_pending will be done in got_BarrierAck
- * or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
- (struct p_header80 *)p, sizeof(*p), 0);
- drbd_put_data_sock(mdev);
-
- return ok;
+ sock = &tconn->data;
+ p = conn_prepare_command(tconn, sock);
+ if (!p)
+ return -EIO;
+ p->barrier = tconn->send.current_epoch_nr;
+ p->pad = 0;
+ tconn->send.current_epoch_writes = 0;
+
+ return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
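drbd_send_barrier() now uses the prepare/send pairing instead of grabbing the data socket directly; the same pattern appears in w_send_write_hint() below. A minimal sketch of that pairing for a packet without payload follows; the function name is hypothetical.

        static int conn_send_empty_sketch(struct drbd_tconn *tconn, enum drbd_packet cmd)
        {
                struct drbd_socket *sock = &tconn->data;

                if (!conn_prepare_command(tconn, sock))   /* reserve the send buffer */
                        return -EIO;
                /* no payload: size 0, no extra data */
                return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
        }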
-int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_write_hint(struct drbd_work *w, int cancel)
{
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_socket *sock;
+
if (cancel)
- return 1;
- return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
+ return 0;
+ sock = &mdev->tconn->data;
+ if (!drbd_prepare_command(mdev, sock))
+ return -EIO;
+ return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
}
-int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ if (!tconn->send.seen_any_write_yet) {
+ tconn->send.seen_any_write_yet = true;
+ tconn->send.current_epoch_nr = epoch;
+ tconn->send.current_epoch_writes = 0;
+ }
+}
+
+static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+{
+ /* re-init if first write on this connection */
+ if (!tconn->send.seen_any_write_yet)
+ return;
+ if (tconn->send.current_epoch_nr != epoch) {
+ if (tconn->send.current_epoch_writes)
+ drbd_send_barrier(tconn);
+ tconn->send.current_epoch_nr = epoch;
+ }
+}
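The two helpers above are meant to bracket outgoing writes. A sketch of the call order, as used by w_send_dblock() further down; error handling and the req_mod() bookkeeping are omitted here.

        static int send_one_write_sketch(struct drbd_conf *mdev, struct drbd_request *req)
        {
                struct drbd_tconn *tconn = mdev->tconn;

                re_init_if_first_write(tconn, req->epoch); /* adopt the epoch number on the first write */
                maybe_send_barrier(tconn, req->epoch);     /* close the previous epoch if the number changed */
                tconn->send.current_epoch_writes++;        /* account this write to the open epoch */

                return drbd_send_dblock(mdev, req);
        }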
+
+int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_oos(mdev, req);
- req_mod(req, oos_handed_to_network);
+ /* this time, no tconn->send.current_epoch_writes++;
+ * If it was sent, it was the closing barrier for the last
+ * replicated epoch, before we went into AHEAD mode.
+ * No more barriers will be sent, until we leave AHEAD mode again. */
+ maybe_send_barrier(tconn, req->epoch);
+
+ err = drbd_send_out_of_sync(mdev, req);
+ req_mod(req, OOS_HANDED_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1283,20 +1352,26 @@ int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ re_init_if_first_write(tconn, req->epoch);
+ maybe_send_barrier(tconn, req->epoch);
+ tconn->send.current_epoch_writes++;
+
+ err = drbd_send_dblock(mdev, req);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
/**
@@ -1305,57 +1380,61 @@ int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
* @w: work object.
* @cancel: The connection will be closed anyways
*/
-int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
- int ok;
+ struct drbd_conf *mdev = w->mdev;
+ struct drbd_tconn *tconn = mdev->tconn;
+ int err;
if (unlikely(cancel)) {
- req_mod(req, send_canceled);
- return 1;
+ req_mod(req, SEND_CANCELED);
+ return 0;
}
- ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
- (unsigned long)req);
+ /* Even read requests may close a write epoch,
+ * if there was any yet. */
+ maybe_send_barrier(tconn, req->epoch);
- if (!ok) {
- /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
- * so this is probably redundant */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
- }
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ (unsigned long)req);
+
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
-int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
+ struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, req->sector);
- /* Calling drbd_al_begin_io() out of the worker might deadlocks
- theoretically. Practically it can not deadlock, since this is
- only used when unfreezing IOs. All the extents of the requests
- that made it into the TL are already active */
+ drbd_al_begin_io(mdev, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio);
- return 1;
+ return 0;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
struct drbd_conf *odev = mdev;
+ int resync_after;
while (1) {
- if (odev->sync_conf.after == -1)
+ if (!odev->ldev)
+ return 1;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
+ if (resync_after == -1)
+ return 1;
+ odev = minor_to_mdev(resync_after);
+ if (!expect(odev))
return 1;
- odev = minor_to_mdev(odev->sync_conf.after);
- ERR_IF(!odev) return 1;
if ((odev->state.conn >= C_SYNC_SOURCE &&
odev->state.conn <= C_PAUSED_SYNC_T) ||
odev->state.aftr_isp || odev->state.peer_isp ||
@@ -1375,16 +1454,15 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev))
rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
!= SS_NOTHING_TO_DO);
}
+ rcu_read_unlock();
return rv;
}
@@ -1400,10 +1478,8 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
struct drbd_conf *odev;
int i, rv = 0;
- for (i = 0; i < minor_count; i++) {
- odev = minor_to_mdev(i);
- if (!odev)
- continue;
+ rcu_read_lock();
+ idr_for_each_entry(&minors, odev, i) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {
@@ -1413,6 +1489,7 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
!= SS_NOTHING_TO_DO) ;
}
}
+ rcu_read_unlock();
return rv;
}
@@ -1430,57 +1507,86 @@ void suspend_other_sg(struct drbd_conf *mdev)
write_unlock_irq(&global_state_lock);
}
-static int sync_after_error(struct drbd_conf *mdev, int o_minor)
+/* caller must hold global_state_lock */
+enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
{
struct drbd_conf *odev;
+ int resync_after;
if (o_minor == -1)
return NO_ERROR;
if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
- return ERR_SYNC_AFTER;
+ return ERR_RESYNC_AFTER;
/* check for loops */
odev = minor_to_mdev(o_minor);
while (1) {
if (odev == mdev)
- return ERR_SYNC_AFTER_CYCLE;
+ return ERR_RESYNC_AFTER_CYCLE;
+ rcu_read_lock();
+ resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
+ rcu_read_unlock();
/* dependency chain ends here, no cycles. */
- if (odev->sync_conf.after == -1)
+ if (resync_after == -1)
return NO_ERROR;
/* follow the dependency chain */
- odev = minor_to_mdev(odev->sync_conf.after);
+ odev = minor_to_mdev(resync_after);
}
}
-int drbd_alter_sa(struct drbd_conf *mdev, int na)
+/* caller must hold global_state_lock */
+void drbd_resync_after_changed(struct drbd_conf *mdev)
{
int changes;
- int retcode;
- write_lock_irq(&global_state_lock);
- retcode = sync_after_error(mdev, na);
- if (retcode == NO_ERROR) {
- mdev->sync_conf.after = na;
- do {
- changes = _drbd_pause_after(mdev);
- changes |= _drbd_resume_next(mdev);
- } while (changes);
- }
- write_unlock_irq(&global_state_lock);
- return retcode;
+ do {
+ changes = _drbd_pause_after(mdev);
+ changes |= _drbd_resume_next(mdev);
+ } while (changes);
}
void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
+ struct fifo_buffer *plan;
+
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
mdev->rs_in_flight = 0;
- mdev->rs_planed = 0;
- spin_lock(&mdev->peer_seq_lock);
- fifo_set(&mdev->rs_plan_s, 0);
- spin_unlock(&mdev->peer_seq_lock);
+
+ /* Updating the RCU protected object in place is necessary since
+ this function gets called from atomic context.
+ It is valid since all other updates also lead to a completely
+ empty fifo */
+ rcu_read_lock();
+ plan = rcu_dereference(mdev->rs_plan_s);
+ plan->total = 0;
+ fifo_set(plan, 0);
+ rcu_read_unlock();
+}
+
+void start_resync_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_work *w, int cancel)
+{
+ struct drbd_conf *mdev = w->mdev;
+
+ if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+ dev_warn(DEV, "w_start_resync later...\n");
+ mdev->start_resync_timer.expires = jiffies + HZ/10;
+ add_timer(&mdev->start_resync_timer);
+ return 0;
+ }
+
+ drbd_start_resync(mdev, C_SYNC_SOURCE);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ return 0;
}
/**
@@ -1501,43 +1607,58 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (side == C_SYNC_TARGET) {
- /* Since application IO was locked out during C_WF_BITMAP_T and
- C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
- we check that we might make the data inconsistent. */
- r = drbd_khelper(mdev, "before-resync-target");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- dev_info(DEV, "before-resync-target handler returned %d, "
- "dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
- }
- } else /* C_SYNC_SOURCE */ {
- r = drbd_khelper(mdev, "before-resync-source");
- r = (r >> 8) & 0xff;
- if (r > 0) {
- if (r == 3) {
- dev_info(DEV, "before-resync-source handler returned %d, "
- "ignoring. Old userland tools?", r);
- } else {
- dev_info(DEV, "before-resync-source handler returned %d, "
+ if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
+ if (side == C_SYNC_TARGET) {
+ /* Since application IO was locked out during C_WF_BITMAP_T and
+ C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
+ we check that we might make the data inconsistent. */
+ r = drbd_khelper(mdev, "before-resync-target");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
return;
}
+ } else /* C_SYNC_SOURCE */ {
+ r = drbd_khelper(mdev, "before-resync-source");
+ r = (r >> 8) & 0xff;
+ if (r > 0) {
+ if (r == 3) {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "ignoring. Old userland tools?", r);
+ } else {
+ dev_info(DEV, "before-resync-source handler returned %d, "
+ "dropping connection.\n", r);
+ conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+ return;
+ }
+ }
}
}
- drbd_state_lock(mdev);
+ if (current == mdev->tconn->worker.task) {
+ /* The worker should not sleep waiting for state_mutex,
+ * that can take a long time */
+ if (!mutex_trylock(mdev->state_mutex)) {
+ set_bit(B_RS_H_DONE, &mdev->flags);
+ mdev->start_resync_timer.expires = jiffies + HZ/5;
+ add_timer(&mdev->start_resync_timer);
+ return;
+ }
+ } else {
+ mutex_lock(mdev->state_mutex);
+ }
+ clear_bit(B_RS_H_DONE, &mdev->flags);
+
write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
write_unlock_irq(&global_state_lock);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
return;
}
- ns.i = mdev->state.i;
+ ns = drbd_read_state(mdev);
ns.aftr_isp = !_drbd_may_sync_now(mdev);
@@ -1549,7 +1670,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
ns.pdsk = D_INCONSISTENT;
r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
+ ns = drbd_read_state(mdev);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
@@ -1575,6 +1696,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
write_unlock_irq(&global_state_lock);
if (r == SS_SUCCESS) {
+ /* reset rs_last_bcast when a resync or verify is started,
+ * to deal with potential jiffies wrap. */
+ mdev->rs_last_bcast = jiffies - HZ;
+
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
drbd_conn_str(ns.conn),
(unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
@@ -1589,10 +1714,10 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */
- if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+ if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
drbd_gen_and_send_sync_uuid(mdev);
- if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
+ if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
/* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit
@@ -1603,10 +1728,16 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
* detect connection loss, then waiting for a ping
* response (implicit in drbd_resync_finished) reduces
* the race considerably, but does not solve it. */
- if (side == C_SYNC_SOURCE)
- schedule_timeout_interruptible(
- mdev->net_conf->ping_int * HZ +
- mdev->net_conf->ping_timeo*HZ/9);
+ if (side == C_SYNC_SOURCE) {
+ struct net_conf *nc;
+ int timeo;
+
+ rcu_read_lock();
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
+ }
drbd_resync_finished(mdev);
}
@@ -1621,114 +1752,180 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_md_sync(mdev);
}
put_ldev(mdev);
- drbd_state_unlock(mdev);
+ mutex_unlock(mdev->state_mutex);
}
-int drbd_worker(struct drbd_thread *thi)
+/* If the resource already closed the current epoch, but we did not
+ * (because we have not yet seen new requests), we should send the
+ * corresponding barrier now. Must be checked within the same spinlock
+ * that is used to check for new requests. */
+bool need_to_send_barrier(struct drbd_tconn *connection)
{
- struct drbd_conf *mdev = thi->mdev;
- struct drbd_work *w = NULL;
- LIST_HEAD(work_list);
- int intr = 0, i;
+ if (!connection->send.seen_any_write_yet)
+ return false;
+
+ /* Skip barriers that do not contain any writes.
+ * This may happen during AHEAD mode. */
+ if (!connection->send.current_epoch_writes)
+ return false;
+
+ /* ->req_lock is held when requests are queued on
+ * connection->sender_work, and put into ->transfer_log.
+ * It is also held when ->current_tle_nr is increased.
+ * So either there are already new requests queued,
+ * and corresponding barriers will be sent there.
+ * Or nothing new is queued yet, so the difference will be 1.
+ */
+ if (atomic_read(&connection->current_tle_nr) !=
+ connection->send.current_epoch_nr + 1)
+ return false;
+
+ return true;
+}
+
+bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ list_splice_init(&queue->q, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
- sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
+bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
+{
+ spin_lock_irq(&queue->q_lock);
+ if (!list_empty(&queue->q))
+ list_move(queue->q.next, work_list);
+ spin_unlock_irq(&queue->q_lock);
+ return !list_empty(work_list);
+}
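dequeue_work_batch() and dequeue_work_item() hide the q_lock handling from their callers. A minimal consumer sketch is given below; the real worker further down additionally waits for new work and checks the connection state before invoking callbacks. The function name is hypothetical.

        static void drain_work_queue_sketch(struct drbd_work_queue *queue)
        {
                LIST_HEAD(work_list);
                struct drbd_work *w;

                while (dequeue_work_item(queue, &work_list)) {
                        w = list_first_entry(&work_list, struct drbd_work, list);
                        list_del_init(&w->list);
                        w->cb(w, 0);    /* cancel == 0: regular processing */
                }
        }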
- while (get_t_state(thi) == Running) {
- drbd_thread_current_set_cpu(mdev);
+void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+{
+ DEFINE_WAIT(wait);
+ struct net_conf *nc;
+ int uncork, cork;
- if (down_trylock(&mdev->data.work.s)) {
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_uncork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ dequeue_work_item(&connection->sender_work, work_list);
+ if (!list_empty(work_list))
+ return;
- intr = down_interruptible(&mdev->data.work.s);
+ /* Still nothing to do?
+ * Maybe we still need to close the current epoch,
+ * even if no new requests are queued yet.
+ *
+ * Also, poke TCP, just in case.
+ * Then wait for new work (or signal). */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ uncork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ if (uncork) {
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket)
+ drbd_tcp_uncork(connection->data.socket);
+ mutex_unlock(&connection->data.mutex);
+ }
- mutex_lock(&mdev->data.mutex);
- if (mdev->data.socket && !mdev->net_conf->no_cork)
- drbd_tcp_cork(mdev->data.socket);
- mutex_unlock(&mdev->data.mutex);
+ for (;;) {
+ int send_barrier;
+ prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
+ spin_lock_irq(&connection->req_lock);
+ spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ /* dequeue single item only,
+ * we still use drbd_queue_work_front() in some places */
+ if (!list_empty(&connection->sender_work.q))
+ list_move(connection->sender_work.q.next, work_list);
+ spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
+ if (!list_empty(work_list) || signal_pending(current)) {
+ spin_unlock_irq(&connection->req_lock);
+ break;
}
+ send_barrier = need_to_send_barrier(connection);
+ spin_unlock_irq(&connection->req_lock);
+ if (send_barrier) {
+ drbd_send_barrier(connection);
+ connection->send.current_epoch_nr++;
+ }
+ schedule();
+ /* may be woken up for things other than new work, too,
+ * e.g. if the current epoch got closed,
+ * in which case we send the barrier above. */
+ }
+ finish_wait(&connection->sender_work.q_wait, &wait);
+
+ /* someone may have changed the config while we have been waiting above. */
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ cork = nc ? nc->tcp_cork : 0;
+ rcu_read_unlock();
+ mutex_lock(&connection->data.mutex);
+ if (connection->data.socket) {
+ if (cork)
+ drbd_tcp_cork(connection->data.socket);
+ else if (!uncork)
+ drbd_tcp_uncork(connection->data.socket);
+ }
+ mutex_unlock(&connection->data.mutex);
+}
- if (intr) {
- D_ASSERT(intr == -EINTR);
+int drbd_worker(struct drbd_thread *thi)
+{
+ struct drbd_tconn *tconn = thi->tconn;
+ struct drbd_work *w = NULL;
+ struct drbd_conf *mdev;
+ LIST_HEAD(work_list);
+ int vnr;
+
+ while (get_t_state(thi) == RUNNING) {
+ drbd_thread_current_set_cpu(thi);
+
+ /* as long as we use drbd_queue_work_front(),
+ * we may only dequeue single work items here, not batches. */
+ if (list_empty(&work_list))
+ wait_for_work(tconn, &work_list);
+
+ if (signal_pending(current)) {
flush_signals(current);
- ERR_IF (get_t_state(thi) == Running)
+ if (get_t_state(thi) == RUNNING) {
+ conn_warn(tconn, "Worker got an unexpected signal\n");
continue;
+ }
break;
}
- if (get_t_state(thi) != Running)
+ if (get_t_state(thi) != RUNNING)
break;
- /* With this break, we have done a down() but not consumed
- the entry from the list. The cleanup code takes care of
- this... */
-
- w = NULL;
- spin_lock_irq(&mdev->data.work.q_lock);
- ERR_IF(list_empty(&mdev->data.work.q)) {
- /* something terribly wrong in our logic.
- * we were able to down() the semaphore,
- * but the list is empty... doh.
- *
- * what is the best thing to do now?
- * try again from scratch, restarting the receiver,
- * asender, whatnot? could break even more ugly,
- * e.g. when we are primary, but no good local data.
- *
- * I'll try to get away just starting over this loop.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
- continue;
- }
- w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
- list_del_init(&w->list);
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
- /* dev_warn(DEV, "worker: a callback failed! \n"); */
- if (mdev->state.conn >= C_CONNECTED)
- drbd_force_state(mdev,
- NS(conn, C_NETWORK_FAILURE));
+
+ while (!list_empty(&work_list)) {
+ w = list_first_entry(&work_list, struct drbd_work, list);
+ list_del_init(&w->list);
+ if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+ continue;
+ if (tconn->cstate >= C_WF_REPORT_PARAMS)
+ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
}
}
- D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
- D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
-
- spin_lock_irq(&mdev->data.work.q_lock);
- i = 0;
- while (!list_empty(&mdev->data.work.q)) {
- list_splice_init(&mdev->data.work.q, &work_list);
- spin_unlock_irq(&mdev->data.work.q_lock);
+ do {
while (!list_empty(&work_list)) {
- w = list_entry(work_list.next, struct drbd_work, list);
+ w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list);
- w->cb(mdev, w, 1);
- i++; /* dead debugging code */
+ w->cb(w, 1);
}
-
- spin_lock_irq(&mdev->data.work.q_lock);
+ dequeue_work_batch(&tconn->sender_work, &work_list);
+ } while (!list_empty(&work_list));
+
+ rcu_read_lock();
+ idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+ D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
+ kref_get(&mdev->kref);
+ rcu_read_unlock();
+ drbd_mdev_cleanup(mdev);
+ kref_put(&mdev->kref, &drbd_minor_destroy);
+ rcu_read_lock();
}
- sema_init(&mdev->data.work.s, 0);
- /* DANGEROUS race: if someone did queue his work within the spinlock,
- * but up() ed outside the spinlock, we could get an up() on the
- * semaphore without corresponding list entry.
- * So don't do that.
- */
- spin_unlock_irq(&mdev->data.work.q_lock);
-
- D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
- /* _drbd_set_state only uses stop_nowait.
- * wait here for the Exiting receiver. */
- drbd_thread_stop(&mdev->receiver);
- drbd_mdev_cleanup(mdev);
-
- dev_info(DEV, "worker terminated\n");
-
- clear_bit(DEVICE_DYING, &mdev->flags);
- clear_bit(CONFIG_PENDING, &mdev->flags);
- wake_up(&mdev->state_wait);
+ rcu_read_unlock();
return 0;
}
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 151f1a37478f..328f18e4b4ee 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/mm.h>
+#include "drbd_int.h"
/* see get_sb_bdev and bd_claim */
extern char *drbd_sec_holder;
@@ -20,8 +21,8 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_sec(struct bio *bio, int error);
-extern void drbd_endio_pri(struct bio *bio, int error);
+extern void drbd_peer_request_endio(struct bio *bio, int error);
+extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
@@ -45,12 +46,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio);
}
-static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
-{
- return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
- == CRYPTO_ALG_TYPE_HASH;
-}
-
#ifndef __CHECKER__
# undef __cond_lock
# define __cond_lock(x,c) (c)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 54046e51160a..ae1251270624 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -463,6 +463,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
+ lo->lo_bio_count++;
bio_list_add(&lo->lo_bio_list, bio);
}
@@ -471,6 +472,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
+ lo->lo_bio_count--;
return bio_list_pop(&lo->lo_bio_list);
}
@@ -489,6 +491,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto out;
if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
goto out;
+ if (lo->lo_bio_count >= q->nr_congestion_on)
+ wait_event_lock_irq(lo->lo_req_wait,
+ lo->lo_bio_count < q->nr_congestion_off,
+ lo->lo_lock);
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
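This hunk introduces a simple two-watermark throttle: a submitter that finds lo_bio_count at or above the queue's congestion-on threshold blocks until the worker thread drains the backlog below the congestion-off threshold. A minimal user-space sketch of the same idea, with pthreads standing in for wait_event_lock_irq() and arbitrary placeholder watermark values:

    #include <pthread.h>

    #define HIGH_WM 112   /* stands in for q->nr_congestion_on */
    #define LOW_WM  103   /* stands in for q->nr_congestion_off */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  can_submit = PTHREAD_COND_INITIALIZER;
    static int in_flight;

    void submit_one(void)                        /* analogous to loop_make_request() */
    {
            pthread_mutex_lock(&lock);
            if (in_flight >= HIGH_WM)            /* over the "congestion on" mark ... */
                    while (in_flight >= LOW_WM)  /* ... so wait until below "congestion off" */
                            pthread_cond_wait(&can_submit, &lock);
            in_flight++;                         /* analogous to loop_add_bio() */
            pthread_mutex_unlock(&lock);
    }

    void complete_one(void)                      /* analogous to loop_thread() draining */
    {
            pthread_mutex_lock(&lock);
            in_flight--;                         /* analogous to loop_get_bio() */
            if (in_flight < LOW_WM)              /* hysteresis: wake only below the low mark */
                    pthread_cond_broadcast(&can_submit);
            pthread_mutex_unlock(&lock);
    }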
@@ -546,6 +552,8 @@ static int loop_thread(void *data)
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
+ if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
+ wake_up(&lo->lo_req_wait);
spin_unlock_irq(&lo->lo_lock);
BUG_ON(!bio);
@@ -873,6 +881,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
+ lo->lo_bio_count = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
@@ -1673,6 +1682,7 @@ static int loop_add(struct loop_device **l, int i)
lo->lo_number = i;
lo->lo_thread = NULL;
init_waitqueue_head(&lo->lo_event);
+ init_waitqueue_head(&lo->lo_req_wait);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9694dd99bbbc..3fd100990453 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
}
}
- if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
- mtip_restart_port(port);
+ if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ mtip_restart_port(port);
+ wake_up_interruptible(&port->svc_wait);
+ }
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
* Delete our gendisk structure. This also removes the device
* from /dev
*/
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
"Shutting down %s ...\n", dd->disk->disk_name);
/* Delete our gendisk structure, and cleanup the blk queue. */
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
+
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 931769e133e5..07fb2dfaae13 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -975,8 +975,8 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}
-static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
- int qid, int cq_size, int vector)
+static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
+ int cq_size, int vector)
{
int result;
struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
@@ -1011,7 +1011,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
return ERR_PTR(result);
}
-static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result = 0;
u32 aqa;
@@ -1408,7 +1408,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
return min(result & 0xffff, result >> 16) + 1;
}
-static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+static int nvme_setup_io_queues(struct nvme_dev *dev)
{
int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
@@ -1481,7 +1481,7 @@ static void nvme_free_queues(struct nvme_dev *dev)
nvme_free_queue(dev, i);
}
-static int __devinit nvme_dev_add(struct nvme_dev *dev)
+static int nvme_dev_add(struct nvme_dev *dev)
{
int res, nn, i;
struct nvme_ns *ns, *next;
@@ -1619,8 +1619,7 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
-static int __devinit nvme_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int bars, result = -ENOMEM;
struct nvme_dev *dev;
@@ -1702,7 +1701,7 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
return result;
}
-static void __devexit nvme_remove(struct pci_dev *pdev)
+static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
nvme_dev_remove(dev);
@@ -1747,7 +1746,7 @@ static struct pci_driver nvme_driver = {
.name = "nvme",
.id_table = nvme_id_table,
.probe = nvme_probe,
- .remove = __devexit_p(nvme_remove),
+ .remove = nvme_remove,
.suspend = nvme_suspend,
.resume = nvme_resume,
.err_handler = &nvme_err_handler,
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index da0abc1838c1..d754a88d7585 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -401,7 +401,7 @@ static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
-static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
+static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index f58cdcfb305f..75e112d66006 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -536,7 +536,7 @@ static const struct file_operations ps3vram_proc_fops = {
.release = single_release,
};
-static void __devinit ps3vram_proc_init(struct ps3_system_bus_device *dev)
+static void ps3vram_proc_init(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
struct proc_dir_entry *pde;
@@ -618,7 +618,7 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
} while (bio);
}
-static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
+static int ps3vram_probe(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv;
int error, status;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bb3d9be3b1b4..89576a0b3f2e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -61,15 +61,29 @@
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
-#define RBD_MAX_SNAP_NAME_LEN 32
+#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
+#define RBD_MAX_SNAP_NAME_LEN \
+ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
+
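With the usual Linux NAME_MAX of 255 and the 5-character "snap_" prefix above, RBD_MAX_SNAP_NAME_LEN works out to 255 - (6 - 1) = 250, so a snapshot's sysfs device name ("snap_" plus the snapshot name) always fits within NAME_MAX bytes.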
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
+/* This allows a single page to hold an image name sent by OSD */
+#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
+
#define RBD_OBJ_PREFIX_LEN_MAX 64
+/* Feature bits */
+
+#define RBD_FEATURE_LAYERING 1
+
+/* Features supported by this (client software) implementation. */
+
+#define RBD_FEATURES_ALL (0)
+
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
@@ -101,6 +115,27 @@ struct rbd_image_header {
u64 obj_version;
};
+/*
+ * An rbd image specification.
+ *
+ * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
+ * identify an image.
+ */
+struct rbd_spec {
+ u64 pool_id;
+ char *pool_name;
+
+ char *image_id;
+ size_t image_id_len;
+ char *image_name;
+ size_t image_name_len;
+
+ u64 snap_id;
+ char *snap_name;
+
+ struct kref kref;
+};
+
struct rbd_options {
bool read_only;
};
@@ -155,11 +190,8 @@ struct rbd_snap {
};
struct rbd_mapping {
- char *snap_name;
- u64 snap_id;
u64 size;
u64 features;
- bool snap_exists;
bool read_only;
};
@@ -173,7 +205,6 @@ struct rbd_device {
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
- struct rbd_options rbd_opts;
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
@@ -181,17 +212,17 @@ struct rbd_device {
spinlock_t lock; /* queue lock */
struct rbd_image_header header;
- char *image_id;
- size_t image_id_len;
- char *image_name;
- size_t image_name_len;
+ bool exists;
+ struct rbd_spec *spec;
+
char *header_name;
- char *pool_name;
- int pool_id;
struct ceph_osd_event *watch_event;
struct ceph_osd_request *watch_request;
+ struct rbd_spec *parent_spec;
+ u64 parent_overlap;
+
/* protects updating the header */
struct rw_semaphore header_rwsem;
@@ -204,6 +235,7 @@ struct rbd_device {
/* sysfs related */
struct device dev;
+ unsigned long open_count;
};
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
@@ -218,7 +250,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static void __rbd_remove_snap_dev(struct rbd_snap *snap);
+static void rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
@@ -258,17 +290,8 @@ static struct device rbd_root_dev = {
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
-static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
-{
- return get_device(&rbd_dev->dev);
-}
-
-static void rbd_put_dev(struct rbd_device *rbd_dev)
-{
- put_device(&rbd_dev->dev);
-}
-
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -277,8 +300,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
- rbd_get_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ (void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
+ rbd_dev->open_count++;
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -287,7 +313,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
- rbd_put_dev(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ rbd_assert(rbd_dev->open_count > 0);
+ rbd_dev->open_count--;
+ put_device(&rbd_dev->dev);
+ mutex_unlock(&ctl_mutex);
return 0;
}
@@ -388,7 +418,7 @@ enum {
static match_table_t rbd_opts_tokens = {
/* int args above */
/* string args above */
- {Opt_read_only, "mapping.read_only"},
+ {Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
@@ -441,33 +471,17 @@ static int parse_rbd_opts_token(char *c, void *private)
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
-static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
- size_t mon_addr_len, char *options)
+static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
- struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
- struct ceph_options *ceph_opts;
struct rbd_client *rbdc;
- rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
-
- ceph_opts = ceph_parse_options(options, mon_addr,
- mon_addr + mon_addr_len,
- parse_rbd_opts_token, rbd_opts);
- if (IS_ERR(ceph_opts))
- return PTR_ERR(ceph_opts);
-
rbdc = rbd_client_find(ceph_opts);
- if (rbdc) {
- /* using an existing client */
+ if (rbdc) /* using an existing client */
ceph_destroy_options(ceph_opts);
- } else {
+ else
rbdc = rbd_client_create(ceph_opts);
- if (IS_ERR(rbdc))
- return PTR_ERR(rbdc);
- }
- rbd_dev->rbd_client = rbdc;
- return 0;
+ return rbdc;
}
/*
@@ -492,10 +506,10 @@ static void rbd_client_release(struct kref *kref)
* Drop reference to ceph client node. If it's not referenced anymore, release
* it.
*/
-static void rbd_put_client(struct rbd_device *rbd_dev)
+static void rbd_put_client(struct rbd_client *rbdc)
{
- kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- rbd_dev->rbd_client = NULL;
+ if (rbdc)
+ kref_put(&rbdc->kref, rbd_client_release);
}
/*
@@ -524,6 +538,16 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
return false;
+ /* The bio layer requires at least sector-sized I/O */
+
+ if (ondisk->options.order < SECTOR_SHIFT)
+ return false;
+
+ /* If we use u64 in a few spots we may be able to loosen this */
+
+ if (ondisk->options.order > 8 * sizeof (int) - 1)
+ return false;
+
/*
* The size of a snapshot header has to fit in a size_t, and
* that limits the number of snapshots.
@@ -635,6 +659,20 @@ out_err:
return -ENOMEM;
}
+static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
+{
+ struct rbd_snap *snap;
+
+ if (snap_id == CEPH_NOSNAP)
+ return RBD_SNAP_HEAD_NAME;
+
+ list_for_each_entry(snap, &rbd_dev->snaps, node)
+ if (snap_id == snap->id)
+ return snap->name;
+
+ return NULL;
+}
+
static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
@@ -642,7 +680,7 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
list_for_each_entry(snap, &rbd_dev->snaps, node) {
if (!strcmp(snap_name, snap->name)) {
- rbd_dev->mapping.snap_id = snap->id;
+ rbd_dev->spec->snap_id = snap->id;
rbd_dev->mapping.size = snap->size;
rbd_dev->mapping.features = snap->features;
@@ -653,26 +691,23 @@ static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
return -ENOENT;
}
-static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
+static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
int ret;
- if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
+ if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME))) {
- rbd_dev->mapping.snap_id = CEPH_NOSNAP;
+ rbd_dev->spec->snap_id = CEPH_NOSNAP;
rbd_dev->mapping.size = rbd_dev->header.image_size;
rbd_dev->mapping.features = rbd_dev->header.features;
- rbd_dev->mapping.snap_exists = false;
- rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
ret = 0;
} else {
- ret = snap_by_name(rbd_dev, snap_name);
+ ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
if (ret < 0)
goto done;
- rbd_dev->mapping.snap_exists = true;
rbd_dev->mapping.read_only = true;
}
- rbd_dev->mapping.snap_name = snap_name;
+ rbd_dev->exists = true;
done:
return ret;
}
@@ -695,13 +730,13 @@ static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
u64 segment;
int ret;
- name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
- ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
+ ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
rbd_dev->header.object_prefix, segment);
- if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
+ if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
@@ -800,77 +835,144 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
}
/*
- * bio_chain_clone - clone a chain of bios up to a certain length.
- * might return a bio_pair that will need to be released.
+ * Clone a portion of a bio, starting at the given byte offset
+ * and continuing for the number of bytes indicated.
*/
-static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
- struct bio_pair **bp,
- int len, gfp_t gfpmask)
-{
- struct bio *old_chain = *old;
- struct bio *new_chain = NULL;
- struct bio *tail;
- int total = 0;
-
- if (*bp) {
- bio_pair_release(*bp);
- *bp = NULL;
- }
+static struct bio *bio_clone_range(struct bio *bio_src,
+ unsigned int offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio_vec *bv;
+ unsigned int resid;
+ unsigned short idx;
+ unsigned int voff;
+ unsigned short end_idx;
+ unsigned short vcnt;
+ struct bio *bio;
- while (old_chain && (total < len)) {
- struct bio *tmp;
+ /* Handle the easy case for the caller */
- tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
- if (!tmp)
- goto err_out;
- gfpmask &= ~__GFP_WAIT; /* can't wait after the first */
+ if (!offset && len == bio_src->bi_size)
+ return bio_clone(bio_src, gfpmask);
- if (total + old_chain->bi_size > len) {
- struct bio_pair *bp;
+ if (WARN_ON_ONCE(!len))
+ return NULL;
+ if (WARN_ON_ONCE(len > bio_src->bi_size))
+ return NULL;
+ if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+ return NULL;
- /*
- * this split can only happen with a single paged bio,
- * split_bio will BUG_ON if this is not the case
- */
- dout("bio_chain_clone split! total=%d remaining=%d"
- "bi_size=%u\n",
- total, len - total, old_chain->bi_size);
+ /* Find first affected segment... */
- /* split the bio. We'll release it either in the next
- call, or it will have to be released outside */
- bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
- if (!bp)
- goto err_out;
+ resid = offset;
+ __bio_for_each_segment(bv, bio_src, idx, 0) {
+ if (resid < bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ voff = resid;
- __bio_clone(tmp, &bp->bio1);
+ /* ...and the last affected segment */
- *next = &bp->bio2;
- } else {
- __bio_clone(tmp, old_chain);
- *next = old_chain->bi_next;
- }
+ resid += len;
+ __bio_for_each_segment(bv, bio_src, end_idx, idx) {
+ if (resid <= bv->bv_len)
+ break;
+ resid -= bv->bv_len;
+ }
+ vcnt = end_idx - idx + 1;
+
+ /* Build the clone */
- tmp->bi_bdev = NULL;
- tmp->bi_next = NULL;
- if (new_chain)
- tail->bi_next = tmp;
- else
- new_chain = tmp;
- tail = tmp;
- old_chain = old_chain->bi_next;
+ bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+ if (!bio)
+ return NULL; /* ENOMEM */
- total += tmp->bi_size;
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_flags |= 1 << BIO_CLONED;
+
+ /*
+ * Copy over our part of the bio_vec, then update the first
+ * and last (or only) entries.
+ */
+ memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
+ vcnt * sizeof (struct bio_vec));
+ bio->bi_io_vec[0].bv_offset += voff;
+ if (vcnt > 1) {
+ bio->bi_io_vec[0].bv_len -= voff;
+ bio->bi_io_vec[vcnt - 1].bv_len = resid;
+ } else {
+ bio->bi_io_vec[0].bv_len = len;
}
- rbd_assert(total == len);
+ bio->bi_vcnt = vcnt;
+ bio->bi_size = len;
+ bio->bi_idx = 0;
+
+ return bio;
+}
+
+/*
+ * Clone a portion of a bio chain, starting at the given byte offset
+ * into the first bio in the source chain and continuing for the
+ * number of bytes indicated. The result is another bio chain of
+ * exactly the given length, or a null pointer on error.
+ *
+ * The bio_src and offset parameters are both in-out. On entry they
+ * refer to the first source bio and the offset into that bio where
+ * the start of data to be cloned is located.
+ *
+ * On return, bio_src is updated to refer to the bio in the source
+ * chain that contains the first un-cloned byte, and *offset will
+ * contain the offset of that byte within that bio.
+ */
+static struct bio *bio_chain_clone_range(struct bio **bio_src,
+ unsigned int *offset,
+ unsigned int len,
+ gfp_t gfpmask)
+{
+ struct bio *bi = *bio_src;
+ unsigned int off = *offset;
+ struct bio *chain = NULL;
+ struct bio **end;
+
+ /* Build up a chain of clone bios up to the limit */
+
+ if (!bi || off >= bi->bi_size || !len)
+ return NULL; /* Nothing to clone */
- *old = old_chain;
+ end = &chain;
+ while (len) {
+ unsigned int bi_size;
+ struct bio *bio;
+
+ if (!bi)
+ goto out_err; /* EINVAL; ran out of bio's */
+ bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bio = bio_clone_range(bi, off, bi_size, gfpmask);
+ if (!bio)
+ goto out_err; /* ENOMEM */
+
+ *end = bio;
+ end = &bio->bi_next;
+
+ off += bi_size;
+ if (off == bi->bi_size) {
+ bi = bi->bi_next;
+ off = 0;
+ }
+ len -= bi_size;
+ }
+ *bio_src = bi;
+ *offset = off;
- return new_chain;
+ return chain;
+out_err:
+ bio_chain_put(chain);
-err_out:
- dout("bio_chain_clone with err\n");
- bio_chain_put(new_chain);
return NULL;
}
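To make the in/out behaviour described above concrete: assuming a source chain of two 8 KiB bios, a call with *offset == 2048 and len == 12288 clones the last 6 KiB of the first bio plus the first 6 KiB of the second; on return *bio_src points at the second bio and *offset is 6144, which is exactly where the next chunk of the request would continue.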
@@ -988,8 +1090,9 @@ static int rbd_do_request(struct request *rq,
req_data->coll_index = coll_index;
}
- dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
- (unsigned long long) ofs, (unsigned long long) len);
+ dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
+ object_name, (unsigned long long) ofs,
+ (unsigned long long) len, coll, coll_index);
osdc = &rbd_dev->rbd_client->client->osdc;
req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
@@ -1019,7 +1122,7 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
+ layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
rbd_assert(ret == 0);
@@ -1154,8 +1257,6 @@ done:
static int rbd_do_op(struct request *rq,
struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
- u64 snapid,
- int opcode, int flags,
u64 ofs, u64 len,
struct bio *bio,
struct rbd_req_coll *coll,
@@ -1167,6 +1268,9 @@ static int rbd_do_op(struct request *rq,
int ret;
struct ceph_osd_req_op *ops;
u32 payload_len;
+ int opcode;
+ int flags;
+ u64 snapid;
seg_name = rbd_segment_name(rbd_dev, ofs);
if (!seg_name)
@@ -1174,7 +1278,18 @@ static int rbd_do_op(struct request *rq,
seg_len = rbd_segment_length(rbd_dev, ofs, len);
seg_ofs = rbd_segment_offset(rbd_dev, ofs);
- payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
+ if (rq_data_dir(rq) == WRITE) {
+ opcode = CEPH_OSD_OP_WRITE;
+ flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
+ snapid = CEPH_NOSNAP;
+ payload_len = seg_len;
+ } else {
+ opcode = CEPH_OSD_OP_READ;
+ flags = CEPH_OSD_FLAG_READ;
+ snapc = NULL;
+ snapid = rbd_dev->spec->snap_id;
+ payload_len = 0;
+ }
ret = -ENOMEM;
ops = rbd_create_rw_ops(1, opcode, payload_len);
@@ -1202,41 +1317,6 @@ done:
}
/*
- * Request async osd write
- */
-static int rbd_req_write(struct request *rq,
- struct rbd_device *rbd_dev,
- struct ceph_snap_context *snapc,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
- * Request async osd read
- */
-static int rbd_req_read(struct request *rq,
- struct rbd_device *rbd_dev,
- u64 snapid,
- u64 ofs, u64 len,
- struct bio *bio,
- struct rbd_req_coll *coll,
- int coll_index)
-{
- return rbd_do_op(rq, rbd_dev, NULL,
- snapid,
- CEPH_OSD_OP_READ,
- CEPH_OSD_FLAG_READ,
- ofs, len, bio, coll, coll_index);
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_read(struct rbd_device *rbd_dev,
@@ -1304,7 +1384,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
rbd_dev->header_name, (unsigned long long) notify_id,
(unsigned int) opcode);
- rc = rbd_refresh_header(rbd_dev, &hver);
+ rc = rbd_dev_refresh(rbd_dev, &hver);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
" update snaps: %d\n", rbd_dev->major, rc);
@@ -1460,18 +1540,16 @@ static void rbd_rq_fn(struct request_queue *q)
{
struct rbd_device *rbd_dev = q->queuedata;
struct request *rq;
- struct bio_pair *bp = NULL;
while ((rq = blk_fetch_request(q))) {
struct bio *bio;
- struct bio *rq_bio, *next_bio = NULL;
bool do_write;
unsigned int size;
- u64 op_size = 0;
u64 ofs;
int num_segs, cur_seg = 0;
struct rbd_req_coll *coll;
struct ceph_snap_context *snapc;
+ unsigned int bio_offset;
dout("fetched request\n");
@@ -1483,10 +1561,6 @@ static void rbd_rq_fn(struct request_queue *q)
/* deduce our operation (read, write) */
do_write = (rq_data_dir(rq) == WRITE);
-
- size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * SECTOR_SIZE;
- rq_bio = rq->bio;
if (do_write && rbd_dev->mapping.read_only) {
__blk_end_request_all(rq, -EROFS);
continue;
@@ -1496,8 +1570,8 @@ static void rbd_rq_fn(struct request_queue *q)
down_read(&rbd_dev->header_rwsem);
- if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
- !rbd_dev->mapping.snap_exists) {
+ if (!rbd_dev->exists) {
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
up_read(&rbd_dev->header_rwsem);
dout("request for non-existent snapshot");
spin_lock_irq(q->queue_lock);
@@ -1509,6 +1583,10 @@ static void rbd_rq_fn(struct request_queue *q)
up_read(&rbd_dev->header_rwsem);
+ size = blk_rq_bytes(rq);
+ ofs = blk_rq_pos(rq) * SECTOR_SIZE;
+ bio = rq->bio;
+
dout("%s 0x%x bytes at 0x%llx\n",
do_write ? "write" : "read",
size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
@@ -1528,45 +1606,37 @@ static void rbd_rq_fn(struct request_queue *q)
continue;
}
+ bio_offset = 0;
do {
- /* a bio clone to be passed down to OSD req */
+ u64 limit = rbd_segment_length(rbd_dev, ofs, size);
+ unsigned int chain_size;
+ struct bio *bio_chain;
+
+ BUG_ON(limit > (u64) UINT_MAX);
+ chain_size = (unsigned int) limit;
dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
- op_size = rbd_segment_length(rbd_dev, ofs, size);
+
kref_get(&coll->kref);
- bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
- op_size, GFP_ATOMIC);
- if (!bio) {
- rbd_coll_end_req_index(rq, coll, cur_seg,
- -ENOMEM, op_size);
- goto next_seg;
- }
+ /* Pass a cloned bio chain via an osd request */
- /* init OSD command: write or read */
- if (do_write)
- rbd_req_write(rq, rbd_dev,
- snapc,
- ofs,
- op_size, bio,
- coll, cur_seg);
+ bio_chain = bio_chain_clone_range(&bio,
+ &bio_offset, chain_size,
+ GFP_ATOMIC);
+ if (bio_chain)
+ (void) rbd_do_op(rq, rbd_dev, snapc,
+ ofs, chain_size,
+ bio_chain, coll, cur_seg);
else
- rbd_req_read(rq, rbd_dev,
- rbd_dev->mapping.snap_id,
- ofs,
- op_size, bio,
- coll, cur_seg);
-
-next_seg:
- size -= op_size;
- ofs += op_size;
+ rbd_coll_end_req_index(rq, coll, cur_seg,
+ -ENOMEM, chain_size);
+ size -= chain_size;
+ ofs += chain_size;
cur_seg++;
- rq_bio = next_bio;
} while (size > 0);
kref_put(&coll->kref, rbd_coll_release);
- if (bp)
- bio_pair_release(bp);
spin_lock_irq(q->queue_lock);
ceph_put_snap_context(snapc);
@@ -1576,28 +1646,47 @@ next_seg:
/*
* a queue callback. Makes sure that we don't create a bio that spans across
* multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone
+ * which we handle later at bio_chain_clone_range()
*/
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct rbd_device *rbd_dev = q->queuedata;
- unsigned int chunk_sectors;
- sector_t sector;
- unsigned int bio_sectors;
- int max;
+ sector_t sector_offset;
+ sector_t sectors_per_obj;
+ sector_t obj_sector_offset;
+ int ret;
- chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
- sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
- bio_sectors = bmd->bi_size >> SECTOR_SHIFT;
+ /*
+ * Convert the partition-relative bio start sector to an offset
+ * relative to the enclosing device, then find how far into its
+ * rbd object that sector falls.
+ */
+ sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
+ sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
+ obj_sector_offset = sector_offset & (sectors_per_obj - 1);
+
+ /*
+ * Compute the number of bytes from that offset to the end
+ * of the object. Account for what's already used by the bio.
+ */
+ ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
+ if (ret > bmd->bi_size)
+ ret -= bmd->bi_size;
+ else
+ ret = 0;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << SECTOR_SHIFT;
- if (max < 0)
- max = 0; /* bio_add cannot handle a negative return */
- if (max <= bvec->bv_len && bio_sectors == 0)
- return bvec->bv_len;
- return max;
+ /*
+ * Don't send back more than was asked for. And if the bio
+ * was empty, let the whole thing through because: "Note
+ * that a block device *must* allow a single page to be
+ * added to an empty bio."
+ */
+ rbd_assert(bvec->bv_len <= PAGE_SIZE);
+ if (ret > (int) bvec->bv_len || !bmd->bi_size)
+ ret = (int) bvec->bv_len;
+
+ return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
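As a worked example of the arithmetic above (assuming the default 4 MiB rbd object, obj_order 22, and 512-byte sectors): sectors_per_obj is 1 << (22 - 9) = 8192. For a device-relative start sector of 8100, obj_sector_offset is 8100, leaving (8192 - 8100) << 9 = 47104 bytes to the object boundary. If the bio already holds 40960 bytes, the callback returns 47104 - 40960 = 6144, so at most another 6 KiB may be merged before the bio would span two objects, with the result further capped at bvec->bv_len.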
@@ -1663,13 +1752,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
ret = -ENXIO;
pr_warning("short header read for image %s"
" (want %zd got %d)\n",
- rbd_dev->image_name, size, ret);
+ rbd_dev->spec->image_name, size, ret);
goto out_err;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
pr_warning("invalid header for image %s\n",
- rbd_dev->image_name);
+ rbd_dev->spec->image_name);
goto out_err;
}
@@ -1707,19 +1796,32 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
return ret;
}
-static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
+static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
struct rbd_snap *snap;
struct rbd_snap *next;
list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
- __rbd_remove_snap_dev(snap);
+ rbd_remove_snap_dev(snap);
+}
+
+static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
+{
+ sector_t size;
+
+ if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+ return;
+
+ size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long) size);
+ rbd_dev->mapping.size = (u64) size;
+ set_capacity(rbd_dev->disk, size);
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
struct rbd_image_header h;
@@ -1730,17 +1832,9 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
down_write(&rbd_dev->header_rwsem);
- /* resized? */
- if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
- sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
-
- if (size != (sector_t) rbd_dev->mapping.size) {
- dout("setting size to %llu sectors",
- (unsigned long long) size);
- rbd_dev->mapping.size = (u64) size;
- set_capacity(rbd_dev->disk, size);
- }
- }
+ /* Update image size, and check for resize of mapped image */
+ rbd_dev->header.image_size = h.image_size;
+ rbd_update_mapping_size(rbd_dev);
/* rbd_dev->header.object_prefix shouldn't change */
kfree(rbd_dev->header.snap_sizes);
@@ -1768,12 +1862,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
return ret;
}
-static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- ret = __rbd_refresh_header(rbd_dev, hver);
+ if (rbd_dev->image_format == 1)
+ ret = rbd_dev_v1_refresh(rbd_dev, hver);
+ else
+ ret = rbd_dev_v2_refresh(rbd_dev, hver);
mutex_unlock(&ctl_mutex);
return ret;
@@ -1885,7 +1983,7 @@ static ssize_t rbd_pool_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->pool_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
@@ -1893,7 +1991,8 @@ static ssize_t rbd_pool_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%d\n", rbd_dev->pool_id);
+ return sprintf(buf, "%llu\n",
+ (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
@@ -1901,7 +2000,10 @@ static ssize_t rbd_name_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_name);
+ if (rbd_dev->spec->image_name)
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
+
+ return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
@@ -1909,7 +2011,7 @@ static ssize_t rbd_image_id_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->image_id);
+ return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
@@ -1922,7 +2024,50 @@ static ssize_t rbd_snap_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
+ return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
+}
+
+/*
+ * For an rbd v2 image, shows the pool id, image id, and snapshot id
+ * for the parent image. If there is no parent, simply shows
+ * "(no parent image)".
+ */
+static ssize_t rbd_parent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ struct rbd_spec *spec = rbd_dev->parent_spec;
+ int count;
+ char *bufp = buf;
+
+ if (!spec)
+ return sprintf(buf, "(no parent image)\n");
+
+ count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
+ (unsigned long long) spec->pool_id, spec->pool_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
+ spec->image_name ? spec->image_name : "(unknown)");
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
+ (unsigned long long) spec->snap_id, spec->snap_name);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
+ if (count < 0)
+ return count;
+ bufp += count;
+
+ return (ssize_t) (bufp - buf);
}
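Reading the new sysfs attribute for a mapped clone would therefore produce output of the form (illustrative values only):

  pool_id 2
  pool_name rbd
  image_id 1014b76b8b4567
  image_name parent-image
  snap_id 4
  snap_name base-snap
  overlap 10485760

while an unlayered image simply reports "(no parent image)".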
static ssize_t rbd_image_refresh(struct device *dev,
@@ -1933,7 +2078,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
- ret = rbd_refresh_header(rbd_dev, NULL);
+ ret = rbd_dev_refresh(rbd_dev, NULL);
return ret < 0 ? ret : size;
}
@@ -1948,6 +2093,7 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1959,6 +2105,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
+ &dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
};
@@ -2047,6 +2194,74 @@ static struct device_type rbd_snap_device_type = {
.release = rbd_snap_dev_release,
};
+static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
+{
+ kref_get(&spec->kref);
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref);
+static void rbd_spec_put(struct rbd_spec *spec)
+{
+ if (spec)
+ kref_put(&spec->kref, rbd_spec_free);
+}
+
+static struct rbd_spec *rbd_spec_alloc(void)
+{
+ struct rbd_spec *spec;
+
+ spec = kzalloc(sizeof (*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+ kref_init(&spec->kref);
+
+ rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
+
+ return spec;
+}
+
+static void rbd_spec_free(struct kref *kref)
+{
+ struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
+
+ kfree(spec->pool_name);
+ kfree(spec->image_id);
+ kfree(spec->image_name);
+ kfree(spec->snap_name);
+ kfree(spec);
+}
+
+struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ struct rbd_spec *spec)
+{
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
+ if (!rbd_dev)
+ return NULL;
+
+ spin_lock_init(&rbd_dev->lock);
+ INIT_LIST_HEAD(&rbd_dev->node);
+ INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header_rwsem);
+
+ rbd_dev->spec = spec;
+ rbd_dev->rbd_client = rbdc;
+
+ return rbd_dev;
+}
+
+static void rbd_dev_destroy(struct rbd_device *rbd_dev)
+{
+ rbd_spec_put(rbd_dev->parent_spec);
+ kfree(rbd_dev->header_name);
+ rbd_put_client(rbd_dev->rbd_client);
+ rbd_spec_put(rbd_dev->spec);
+ kfree(rbd_dev);
+}
+
static bool rbd_snap_registered(struct rbd_snap *snap)
{
bool ret = snap->dev.type == &rbd_snap_device_type;
@@ -2057,7 +2272,7 @@ static bool rbd_snap_registered(struct rbd_snap *snap)
return ret;
}
-static void __rbd_remove_snap_dev(struct rbd_snap *snap)
+static void rbd_remove_snap_dev(struct rbd_snap *snap)
{
list_del(&snap->node);
if (device_is_registered(&snap->dev))
@@ -2073,7 +2288,7 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
dev->type = &rbd_snap_device_type;
dev->parent = parent;
dev->release = rbd_snap_dev_release;
- dev_set_name(dev, "snap_%s", snap->name);
+ dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
dout("%s: registering device for snapshot %s\n", __func__, snap->name);
ret = device_register(dev);
@@ -2189,6 +2404,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2216,6 +2432,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} features_buf = { 0 };
+ u64 incompat;
int ret;
ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
@@ -2226,6 +2443,11 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
return ret;
+
+ incompat = le64_to_cpu(features_buf.incompat);
+ if (incompat & ~RBD_FEATURES_ALL)
+ return -ENXIO;
+
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
@@ -2242,6 +2464,183 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
&rbd_dev->header.features);
}
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *parent_spec;
+ size_t size;
+ void *reply_buf = NULL;
+ __le64 snapid;
+ void *p;
+ void *end;
+ char *image_id;
+ u64 overlap;
+ size_t len = 0;
+ int ret;
+
+ parent_spec = rbd_spec_alloc();
+ if (!parent_spec)
+ return -ENOMEM;
+
+ size = sizeof (__le64) + /* pool_id */
+ sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
+ sizeof (__le64) + /* snap_id */
+ sizeof (__le64); /* overlap */
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ snapid = cpu_to_le64(CEPH_NOSNAP);
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "get_parent",
+ (char *) &snapid, sizeof (snapid),
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+ if (ret < 0)
+ goto out_err;
+
+ ret = -ERANGE;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
+ if (parent_spec->pool_id == CEPH_NOPOOL)
+ goto out; /* No parent? No problem. */
+
+ image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_id)) {
+ ret = PTR_ERR(image_id);
+ goto out_err;
+ }
+ parent_spec->image_id = image_id;
+ parent_spec->image_id_len = len;
+ ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
+ ceph_decode_64_safe(&p, end, overlap, out_err);
+
+ rbd_dev->parent_overlap = overlap;
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
+out:
+ ret = 0;
+out_err:
+ kfree(reply_buf);
+ rbd_spec_put(parent_spec);
+
+ return ret;
+}
+
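For reference, the reply buffer sized above decodes, in order, as a __le64 pool_id (CEPH_NOPOOL meaning no parent), a length-prefixed image id of at most RBD_IMAGE_ID_LEN_MAX bytes, a __le64 snap_id and a __le64 overlap, i.e. 8 + (4 + 64) + 8 + 8 = 92 bytes in the worst case.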
+static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
+{
+ size_t image_id_size;
+ char *image_id;
+ void *p;
+ void *end;
+ size_t size;
+ void *reply_buf = NULL;
+ size_t len = 0;
+ char *image_name = NULL;
+ int ret;
+
+ rbd_assert(!rbd_dev->spec->image_name);
+
+ image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+ image_id = kmalloc(image_id_size, GFP_KERNEL);
+ if (!image_id)
+ return NULL;
+
+ p = image_id;
+ end = (char *) image_id + image_id_size;
+ ceph_encode_string(&p, end, rbd_dev->spec->image_id,
+ (u32) rbd_dev->spec->image_id_len);
+
+ size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
+ reply_buf = kmalloc(size, GFP_KERNEL);
+ if (!reply_buf)
+ goto out;
+
+ ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+ "rbd", "dir_get_name",
+ image_id, image_id_size,
+ (char *) reply_buf, size,
+ CEPH_OSD_FLAG_READ, NULL);
+ if (ret < 0)
+ goto out;
+ p = reply_buf;
+ end = (char *) reply_buf + size;
+ image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ if (IS_ERR(image_name))
+ image_name = NULL;
+ else
+ dout("%s: name is %s len is %zd\n", __func__, image_name, len);
+out:
+ kfree(reply_buf);
+ kfree(image_id);
+
+ return image_name;
+}
+
+/*
+ * When a parent image gets probed, we only have the pool, image,
+ * and snapshot ids but not the names of any of them. This call
+ * is made later to fill in those names. It has to be done after
+ * rbd_dev_snaps_update() has completed because some of the
+ * information (in particular, snapshot name) is not available
+ * until then.
+ */
+static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc;
+ const char *name;
+ void *reply_buf = NULL;
+ int ret;
+
+ if (rbd_dev->spec->pool_name)
+ return 0; /* Already have the names */
+
+ /* Look up the pool name */
+
+ osdc = &rbd_dev->rbd_client->client->osdc;
+ name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
+ if (!name)
+ return -EIO; /* pool id too large (>= 2^31) */
+
+ rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->pool_name)
+ return -ENOMEM;
+
+ /* Fetch the image name; tolerate failure here */
+
+ name = rbd_dev_image_name(rbd_dev);
+ if (name) {
+ rbd_dev->spec->image_name_len = strlen(name);
+ rbd_dev->spec->image_name = (char *) name;
+ } else {
+ pr_warning(RBD_DRV_NAME "%d "
+ "unable to get image name for image id %s\n",
+ rbd_dev->major, rbd_dev->spec->image_id);
+ }
+
+ /* Look up the snapshot name. */
+
+ name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
+ if (!name) {
+ ret = -EIO;
+ goto out_err;
+ }
+ rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
+ if (!rbd_dev->spec->snap_name) {
+         ret = -ENOMEM;
+         goto out_err;
+ }
+
+ return 0;
+out_err:
+ kfree(reply_buf);
+ kfree(rbd_dev->spec->pool_name);
+ rbd_dev->spec->pool_name = NULL;
+
+ return ret;
+}
+
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
size_t size;
@@ -2328,7 +2727,6 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
int ret;
void *p;
void *end;
- size_t snap_name_len;
char *snap_name;
size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
@@ -2348,9 +2746,7 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
p = reply_buf;
end = (char *) reply_buf + size;
- snap_name_len = 0;
- snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
- GFP_KERNEL);
+ snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out;
@@ -2397,6 +2793,41 @@ static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
return ERR_PTR(-EINVAL);
}
+static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
+{
+ int ret;
+ __u8 obj_order;
+
+ down_write(&rbd_dev->header_rwsem);
+
+ /* Grab old order first, to see if it changes */
+
+ obj_order = rbd_dev->header.obj_order;
+ ret = rbd_dev_v2_image_size(rbd_dev);
+ if (ret)
+ goto out;
+ if (rbd_dev->header.obj_order != obj_order) {
+ ret = -EIO;
+ goto out;
+ }
+ rbd_update_mapping_size(rbd_dev);
+
+ ret = rbd_dev_v2_snap_context(rbd_dev, hver);
+ dout("rbd_dev_v2_snap_context returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_update(rbd_dev);
+ dout("rbd_dev_snaps_update returned %d\n", ret);
+ if (ret)
+ goto out;
+ ret = rbd_dev_snaps_register(rbd_dev);
+ dout("rbd_dev_snaps_register returned %d\n", ret);
+out:
+ up_write(&rbd_dev->header_rwsem);
+
+ return ret;
+}
+
/*
* Scan the rbd device's current snapshot list and compare it to the
* newly-received snapshot context. Remove any existing snapshots
@@ -2436,12 +2867,12 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
/* Existing snapshot not in the new snap context */
- if (rbd_dev->mapping.snap_id == snap->id)
- rbd_dev->mapping.snap_exists = false;
- __rbd_remove_snap_dev(snap);
+ if (rbd_dev->spec->snap_id == snap->id)
+ rbd_dev->exists = false;
+ rbd_remove_snap_dev(snap);
dout("%ssnap id %llu has been removed\n",
- rbd_dev->mapping.snap_id == snap->id ?
- "mapped " : "",
+ rbd_dev->spec->snap_id == snap->id ?
+ "mapped " : "",
(unsigned long long) snap->id);
/* Done with this list entry; advance */
@@ -2559,7 +2990,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
do {
ret = rbd_req_sync_watch(rbd_dev);
if (ret == -ERANGE) {
- rc = rbd_refresh_header(rbd_dev, NULL);
+ rc = rbd_dev_refresh(rbd_dev, NULL);
if (rc < 0)
return rc;
}
@@ -2621,8 +3052,8 @@ static void rbd_dev_id_put(struct rbd_device *rbd_dev)
struct rbd_device *rbd_dev;
rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_id > max_id)
- max_id = rbd_id;
+ if (rbd_dev->dev_id > max_id)
+ max_id = rbd_dev->dev_id;
}
spin_unlock(&rbd_dev_list_lock);
@@ -2722,73 +3153,140 @@ static inline char *dup_token(const char **buf, size_t *lenp)
}
/*
- * This fills in the pool_name, image_name, image_name_len, rbd_dev,
- * rbd_md_name, and name fields of the given rbd_dev, based on the
- * list of monitor addresses and other options provided via
- * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
- * copy of the snapshot name to map if successful, or a
- * pointer-coded error otherwise.
+ * Parse the options provided for an "rbd add" (i.e., rbd image
+ * mapping) request. These arrive via a write to /sys/bus/rbd/add,
+ * and the data written is passed here via a NUL-terminated buffer.
+ * Returns 0 if successful or an error code otherwise.
+ *
+ * The information extracted from these options is recorded in
+ * the other parameters which return dynamically-allocated
+ * structures:
+ * ceph_opts
+ * The address of a pointer that will refer to a ceph options
+ * structure. Caller must release the returned pointer using
+ * ceph_destroy_options() when it is no longer needed.
+ * rbd_opts
+ * Address of an rbd options pointer. Fully initialized by
+ * this function; caller must release with kfree().
+ * spec
+ * Address of an rbd image specification pointer. Fully
+ * initialized by this function based on parsed options.
+ * Caller must release with rbd_spec_put().
*
- * Note: rbd_dev is assumed to have been initially zero-filled.
+ * The options passed take this form:
+ * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
+ * where:
+ * <mon_addrs>
+ * A comma-separated list of one or more monitor addresses.
+ * A monitor address is an ip address, optionally followed
+ * by a port number (separated by a colon).
+ * I.e.: ip1[:port1][,ip2[:port2]...]
+ * <options>
+ * A comma-separated list of ceph and/or rbd options.
+ * <pool_name>
+ * The name of the rados pool containing the rbd image.
+ * <image_name>
+ * The name of the image in that pool to map.
+ * <snap_id>
+ * An optional snapshot id. If provided, the mapping will
+ * present data from the image at the time that snapshot was
+ * created. The image head is used if no snapshot id is
+ * provided. Snapshot mappings are always read-only.
*/
-static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
- const char *buf,
- const char **mon_addrs,
- size_t *mon_addrs_size,
- char *options,
- size_t options_size)
+static int rbd_add_parse_args(const char *buf,
+ struct ceph_options **ceph_opts,
+ struct rbd_options **opts,
+ struct rbd_spec **rbd_spec)
{
size_t len;
- char *err_ptr = ERR_PTR(-EINVAL);
- char *snap_name;
+ char *options;
+ const char *mon_addrs;
+ size_t mon_addrs_size;
+ struct rbd_spec *spec = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct ceph_options *copts;
+ int ret;
/* The first four tokens are required */
len = next_token(&buf);
if (!len)
- return err_ptr;
- *mon_addrs_size = len + 1;
- *mon_addrs = buf;
-
+ return -EINVAL; /* Missing monitor address(es) */
+ mon_addrs = buf;
+ mon_addrs_size = len + 1;
buf += len;
- len = copy_token(&buf, options, options_size);
- if (!len || len >= options_size)
- return err_ptr;
+ ret = -EINVAL;
+ options = dup_token(&buf, NULL);
+ if (!options)
+ return -ENOMEM;
+ if (!*options)
+ goto out_err; /* Missing options */
- err_ptr = ERR_PTR(-ENOMEM);
- rbd_dev->pool_name = dup_token(&buf, NULL);
- if (!rbd_dev->pool_name)
- goto out_err;
+ spec = rbd_spec_alloc();
+ if (!spec)
+ goto out_mem;
- rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
- if (!rbd_dev->image_name)
- goto out_err;
+ spec->pool_name = dup_token(&buf, NULL);
+ if (!spec->pool_name)
+ goto out_mem;
+ if (!*spec->pool_name)
+ goto out_err; /* Missing pool name */
- /* Snapshot name is optional */
+ spec->image_name = dup_token(&buf, &spec->image_name_len);
+ if (!spec->image_name)
+ goto out_mem;
+ if (!*spec->image_name)
+ goto out_err; /* Missing image name */
+
+ /*
+ * Snapshot name is optional; default is to use "-"
+ * (indicating the head/no snapshot).
+ */
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
- }
- snap_name = kmalloc(len + 1, GFP_KERNEL);
- if (!snap_name)
+ } else if (len > RBD_MAX_SNAP_NAME_LEN) {
+ ret = -ENAMETOOLONG;
goto out_err;
- memcpy(snap_name, buf, len);
- *(snap_name + len) = '\0';
+ }
+ spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+ if (!spec->snap_name)
+ goto out_mem;
+ memcpy(spec->snap_name, buf, len);
+ *(spec->snap_name + len) = '\0';
-dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
+ /* Initialize all rbd options to the defaults */
- return snap_name;
+ rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
+ if (!rbd_opts)
+ goto out_mem;
+
+ rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
+
+ copts = ceph_parse_options(options, mon_addrs,
+ mon_addrs + mon_addrs_size - 1,
+ parse_rbd_opts_token, rbd_opts);
+ if (IS_ERR(copts)) {
+ ret = PTR_ERR(copts);
+ goto out_err;
+ }
+ kfree(options);
+ *ceph_opts = copts;
+ *opts = rbd_opts;
+ *rbd_spec = spec;
+
+ return 0;
+out_mem:
+ ret = -ENOMEM;
out_err:
- kfree(rbd_dev->image_name);
- rbd_dev->image_name = NULL;
- rbd_dev->image_name_len = 0;
- kfree(rbd_dev->pool_name);
- rbd_dev->pool_name = NULL;
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+ kfree(options);
- return err_ptr;
+ return ret;
}
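Putting the format documented above together, one plausible mapping request (placeholder monitor address, key and names) would be:

  echo "192.168.0.1:6789 name=admin,secret=<key> rbd myimage mysnap" > /sys/bus/rbd/add

where "rbd" is the pool, "myimage" the image, and the trailing token maps that snapshot read-only; leave it off to map the image head.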
/*
@@ -2814,14 +3312,22 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
void *p;
/*
+ * When probing a parent image, the image id is already
+ * known (and the image name likely is not). There's no
+ * need to fetch the image id again in this case.
+ */
+ if (rbd_dev->spec->image_id)
+ return 0;
+
+ /*
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
- size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
+ size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
- sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
+ sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
dout("rbd id object name is %s\n", object_name);
/* Response will be an encoded string, which includes a length */
@@ -2841,17 +3347,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
if (ret < 0)
goto out;
+ ret = 0; /* rbd_req_sync_exec() can return positive */
p = response;
- rbd_dev->image_id = ceph_extract_encoded_string(&p,
+ rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
p + RBD_IMAGE_ID_LEN_MAX,
- &rbd_dev->image_id_len,
+ &rbd_dev->spec->image_id_len,
GFP_NOIO);
- if (IS_ERR(rbd_dev->image_id)) {
- ret = PTR_ERR(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ if (IS_ERR(rbd_dev->spec->image_id)) {
+ ret = PTR_ERR(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
} else {
- dout("image_id is %s\n", rbd_dev->image_id);
+ dout("image_id is %s\n", rbd_dev->spec->image_id);
}
out:
kfree(response);
@@ -2867,26 +3374,33 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
/* Version 1 images have no id; empty string is used */
- rbd_dev->image_id = kstrdup("", GFP_KERNEL);
- if (!rbd_dev->image_id)
+ rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
+ if (!rbd_dev->spec->image_id)
return -ENOMEM;
- rbd_dev->image_id_len = 0;
+ rbd_dev->spec->image_id_len = 0;
/* Record the header object name for this rbd image. */
- size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
+ size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name) {
ret = -ENOMEM;
goto out_err;
}
- sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
+ sprintf(rbd_dev->header_name, "%s%s",
+ rbd_dev->spec->image_name, RBD_SUFFIX);
/* Populate rbd image metadata */
ret = rbd_read_header(rbd_dev, &rbd_dev->header);
if (ret < 0)
goto out_err;
+
+ /* Version 1 images have no parent (no layering) */
+
+ rbd_dev->parent_spec = NULL;
+ rbd_dev->parent_overlap = 0;
+
rbd_dev->image_format = 1;
dout("discovered version 1 image, header name is %s\n",
@@ -2897,8 +3411,8 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
out_err:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
- kfree(rbd_dev->image_id);
- rbd_dev->image_id = NULL;
+ kfree(rbd_dev->spec->image_id);
+ rbd_dev->spec->image_id = NULL;
return ret;
}
@@ -2913,12 +3427,12 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
* Image id was filled in by the caller. Record the header
* object name for this rbd image.
*/
- size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
+ size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, rbd_dev->image_id);
+ RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
/* Get the size and object order for the image */
@@ -2932,12 +3446,20 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
if (ret < 0)
goto out_err;
- /* Get the features for the image */
+ /* Get and check the features for the image */
ret = rbd_dev_v2_features(rbd_dev);
if (ret < 0)
goto out_err;
+ /* If the image supports layering, get the parent info */
+
+ if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret < 0)
+ goto out_err;
+ }
+
/* crypto and compression type aren't (yet) supported for v2 images */
rbd_dev->header.crypt_type = 0;
@@ -2955,8 +3477,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
dout("discovered version 2 image, header name is %s\n",
rbd_dev->header_name);
- return -ENOTSUPP;
+ return 0;
out_err:
+ rbd_dev->parent_overlap = 0;
+ rbd_spec_put(rbd_dev->parent_spec);
+ rbd_dev->parent_spec = NULL;
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
kfree(rbd_dev->header.object_prefix);
@@ -2965,91 +3490,22 @@ out_err:
return ret;
}
-/*
- * Probe for the existence of the header object for the given rbd
- * device. For format 2 images this includes determining the image
- * id.
- */
-static int rbd_dev_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
int ret;
- /*
- * Get the id from the image id object. If it's not a
- * format 2 image, we'll get ENOENT back, and we'll assume
- * it's a format 1 image.
- */
- ret = rbd_dev_image_id(rbd_dev);
- if (ret)
- ret = rbd_dev_v1_probe(rbd_dev);
- else
- ret = rbd_dev_v2_probe(rbd_dev);
+ /* no need to lock here, as rbd_dev is not registered yet */
+ ret = rbd_dev_snaps_update(rbd_dev);
if (ret)
- dout("probe failed, returning %d\n", ret);
-
- return ret;
-}
-
-static ssize_t rbd_add(struct bus_type *bus,
- const char *buf,
- size_t count)
-{
- char *options;
- struct rbd_device *rbd_dev = NULL;
- const char *mon_addrs = NULL;
- size_t mon_addrs_size = 0;
- struct ceph_osd_client *osdc;
- int rc = -ENOMEM;
- char *snap_name;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- options = kmalloc(count, GFP_KERNEL);
- if (!options)
- goto err_out_mem;
- rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
- if (!rbd_dev)
- goto err_out_mem;
-
- /* static rbd_device initialization */
- spin_lock_init(&rbd_dev->lock);
- INIT_LIST_HEAD(&rbd_dev->node);
- INIT_LIST_HEAD(&rbd_dev->snaps);
- init_rwsem(&rbd_dev->header_rwsem);
-
- /* parse add command */
- snap_name = rbd_add_parse_args(rbd_dev, buf,
- &mon_addrs, &mon_addrs_size, options, count);
- if (IS_ERR(snap_name)) {
- rc = PTR_ERR(snap_name);
- goto err_out_mem;
- }
-
- rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
- if (rc < 0)
- goto err_out_args;
-
- /* pick the pool */
- osdc = &rbd_dev->rbd_client->client->osdc;
- rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
- if (rc < 0)
- goto err_out_client;
- rbd_dev->pool_id = rc;
-
- rc = rbd_dev_probe(rbd_dev);
- if (rc < 0)
- goto err_out_client;
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ return ret;
- /* no need to lock here, as rbd_dev is not registered yet */
- rc = rbd_dev_snaps_update(rbd_dev);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_probe_update_spec(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
- rc = rbd_dev_set_mapping(rbd_dev, snap_name);
- if (rc)
- goto err_out_header;
+ ret = rbd_dev_set_mapping(rbd_dev);
+ if (ret)
+ goto err_out_snaps;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
@@ -3061,34 +3517,33 @@ static ssize_t rbd_add(struct bus_type *bus,
/* Get our block major device number. */
- rc = register_blkdev(0, rbd_dev->name);
- if (rc < 0)
+ ret = register_blkdev(0, rbd_dev->name);
+ if (ret < 0)
goto err_out_id;
- rbd_dev->major = rc;
+ rbd_dev->major = ret;
/* Set up the blkdev mapping. */
- rc = rbd_init_disk(rbd_dev);
- if (rc)
+ ret = rbd_init_disk(rbd_dev);
+ if (ret)
goto err_out_blkdev;
- rc = rbd_bus_add_dev(rbd_dev);
- if (rc)
+ ret = rbd_bus_add_dev(rbd_dev);
+ if (ret)
goto err_out_disk;
/*
* At this point cleanup in the event of an error is the job
* of the sysfs code (initiated by rbd_bus_del_dev()).
*/
-
down_write(&rbd_dev->header_rwsem);
- rc = rbd_dev_snaps_register(rbd_dev);
+ ret = rbd_dev_snaps_register(rbd_dev);
up_write(&rbd_dev->header_rwsem);
- if (rc)
+ if (ret)
goto err_out_bus;
- rc = rbd_init_watch_dev(rbd_dev);
- if (rc)
+ ret = rbd_init_watch_dev(rbd_dev);
+ if (ret)
goto err_out_bus;
/* Everything's ready. Announce the disk to the world. */
@@ -3098,37 +3553,119 @@ static ssize_t rbd_add(struct bus_type *bus,
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
- return count;
-
+ return ret;
err_out_bus:
/* this will also clean up rest of rbd_dev stuff */
rbd_bus_del_dev(rbd_dev);
- kfree(options);
- return rc;
+ return ret;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
-err_out_header:
- rbd_header_free(&rbd_dev->header);
+err_out_snaps:
+ rbd_remove_all_snaps(rbd_dev);
+
+ return ret;
+}
+
+/*
+ * Probe for the existence of the header object for the given rbd
+ * device. For format 2 images this includes determining the image
+ * id.
+ */
+static int rbd_dev_probe(struct rbd_device *rbd_dev)
+{
+ int ret;
+
+ /*
+ * Get the id from the image id object. If it's not a
+ * format 2 image, we'll get ENOENT back, and we'll assume
+ * it's a format 1 image.
+ */
+ ret = rbd_dev_image_id(rbd_dev);
+ if (ret)
+ ret = rbd_dev_v1_probe(rbd_dev);
+ else
+ ret = rbd_dev_v2_probe(rbd_dev);
+ if (ret) {
+ dout("probe failed, returning %d\n", ret);
+
+ return ret;
+ }
+
+ ret = rbd_dev_probe_finish(rbd_dev);
+ if (ret)
+ rbd_header_free(&rbd_dev->header);
+
+ return ret;
+}
+
+static ssize_t rbd_add(struct bus_type *bus,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ struct ceph_options *ceph_opts = NULL;
+ struct rbd_options *rbd_opts = NULL;
+ struct rbd_spec *spec = NULL;
+ struct rbd_client *rbdc;
+ struct ceph_osd_client *osdc;
+ int rc = -ENOMEM;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ /* parse add command */
+ rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
+ if (rc < 0)
+ goto err_out_module;
+
+ rbdc = rbd_get_client(ceph_opts);
+ if (IS_ERR(rbdc)) {
+ rc = PTR_ERR(rbdc);
+ goto err_out_args;
+ }
+ ceph_opts = NULL; /* rbd_dev client now owns this */
+
+ /* pick the pool */
+ osdc = &rbdc->client->osdc;
+ rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
+ if (rc < 0)
+ goto err_out_client;
+ spec->pool_id = (u64) rc;
+
+ rbd_dev = rbd_dev_create(rbdc, spec);
+ if (!rbd_dev)
+ goto err_out_client;
+ rbdc = NULL; /* rbd_dev now owns this */
+ spec = NULL; /* rbd_dev now owns this */
+
+ rbd_dev->mapping.read_only = rbd_opts->read_only;
+ kfree(rbd_opts);
+ rbd_opts = NULL; /* done with this */
+
+ rc = rbd_dev_probe(rbd_dev);
+ if (rc < 0)
+ goto err_out_rbd_dev;
+
+ return count;
+err_out_rbd_dev:
+ rbd_dev_destroy(rbd_dev);
err_out_client:
- kfree(rbd_dev->header_name);
- rbd_put_client(rbd_dev);
- kfree(rbd_dev->image_id);
+ rbd_put_client(rbdc);
err_out_args:
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_name);
- kfree(rbd_dev->pool_name);
-err_out_mem:
- kfree(rbd_dev);
- kfree(options);
+ if (ceph_opts)
+ ceph_destroy_options(ceph_opts);
+ kfree(rbd_opts);
+ rbd_spec_put(spec);
+err_out_module:
+ module_put(THIS_MODULE);
dout("Error adding device %s\n", buf);
- module_put(THIS_MODULE);
return (ssize_t) rc;
}
@@ -3163,7 +3700,6 @@ static void rbd_dev_release(struct device *dev)
if (rbd_dev->watch_event)
rbd_req_sync_unwatch(rbd_dev);
- rbd_put_client(rbd_dev);
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
@@ -3173,13 +3709,9 @@ static void rbd_dev_release(struct device *dev)
rbd_header_free(&rbd_dev->header);
/* done with the id, and with the rbd_dev */
- kfree(rbd_dev->mapping.snap_name);
- kfree(rbd_dev->image_id);
- kfree(rbd_dev->header_name);
- kfree(rbd_dev->pool_name);
- kfree(rbd_dev->image_name);
rbd_dev_id_put(rbd_dev);
- kfree(rbd_dev);
+ rbd_assert(rbd_dev->rbd_client != NULL);
+ rbd_dev_destroy(rbd_dev);
/* release module ref */
module_put(THIS_MODULE);
@@ -3211,7 +3743,12 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- __rbd_remove_all_snaps(rbd_dev);
+ if (rbd_dev->open_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
done:
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index cbe77fa105ba..49d77cbcf8bd 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -46,8 +46,6 @@
#define RBD_MIN_OBJ_ORDER 16
#define RBD_MAX_OBJ_ORDER 30
-#define RBD_MAX_SEG_NAME_LEN 128
-
#define RBD_COMP_NONE 0
#define RBD_CRYPT_NONE 0
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 9dcf76a10bb6..564156a8e572 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -25,7 +25,7 @@
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
@@ -592,7 +592,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
return err;
}
-static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
+static int vdc_alloc_tx_ring(struct vdc_port *port)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
unsigned long len, entry_size;
@@ -725,7 +725,7 @@ static struct vio_driver_ops vdc_vio_ops = {
.handshake_complete = vdc_handshake_complete,
};
-static void __devinit print_version(void)
+static void print_version(void)
{
static int version_printed;
@@ -733,8 +733,7 @@ static void __devinit print_version(void)
printk(KERN_INFO "%s", version);
}
-static int __devinit vdc_port_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
+static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vdc_port *port;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 6d5a914b9619..765fa2b3d337 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -788,8 +788,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
return get_disk(swd->unit[drive].disk);
}
-static int __devinit swim_add_floppy(struct swim_priv *swd,
- enum drive_location location)
+static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
{
struct floppy_state *fs = &swd->unit[swd->floppy_count];
struct swim __iomem *base = swd->base;
@@ -812,7 +811,7 @@ static int __devinit swim_add_floppy(struct swim_priv *swd,
return 0;
}
-static int __devinit swim_floppy_init(struct swim_priv *swd)
+static int swim_floppy_init(struct swim_priv *swd)
{
int err;
int drive;
@@ -875,7 +874,7 @@ exit_put_disks:
return err;
}
-static int __devinit swim_probe(struct platform_device *dev)
+static int swim_probe(struct platform_device *dev)
{
struct resource *res;
struct swim __iomem *swim_base;
@@ -936,7 +935,7 @@ out:
return ret;
}
-static int __devexit swim_remove(struct platform_device *dev)
+static int swim_remove(struct platform_device *dev)
{
struct swim_priv *swd = platform_get_drvdata(dev);
int drive;
@@ -972,7 +971,7 @@ static int __devexit swim_remove(struct platform_device *dev)
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove = __devexit_p(swim_remove),
+ .remove = swim_remove,
.driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 89ddab127e33..57763c54363a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1194,7 +1194,8 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
return rc;
}
-static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int swim3_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct gendisk *disk;
int index, rc;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index eb0d8216f557..ad70868f8a96 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -789,8 +789,7 @@ static const struct block_device_operations mm_fops = {
.revalidate_disk = mm_revalidate,
};
-static int __devinit mm_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret = -ENODEV;
struct cardinfo *card = &cards[num_cards];
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0bdde8fba397..8ad21a25bc0d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -696,7 +696,7 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
-static int __devinit virtblk_probe(struct virtio_device *vdev)
+static int virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
struct request_queue *q;
@@ -885,10 +885,11 @@ out:
return err;
}
-static void __devexit virtblk_remove(struct virtio_device *vdev)
+static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
+ int refc;
/* Prevent config work handler from accessing the device. */
mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
+ refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
kfree(vblk);
- ida_simple_remove(&vd_index_ida, index);
+
+ /* Only free device id if we don't have any users */
+ if (refc == 1)
+ ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM
@@ -961,19 +966,14 @@ static unsigned int features[] = {
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};
-/*
- * virtio_blk causes spurious section mismatch warning by
- * simultaneously referring to a __devinit and a __devexit function.
- * Use __refdata to avoid this warning.
- */
-static struct virtio_driver __refdata virtio_blk = {
+static struct virtio_driver virtio_blk = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .remove = virtblk_remove,
.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM
.freeze = virtblk_freeze,
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 280a13846e6c..5ac841ff6cc7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,7 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/bitmap.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -79,6 +80,7 @@ struct pending_req {
unsigned short operation;
int status;
struct list_head free_list;
+ DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
};
#define BLKBACK_INVALID_HANDLE (~0)
@@ -99,6 +101,36 @@ struct xen_blkbk {
static struct xen_blkbk *blkbk;
/*
+ * Maximum number of grant pages that can be mapped in blkback.
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
+ * pages that blkback will persistently map.
+ * Currently, this is:
+ * RING_SIZE = 32 (for all known ring types)
+ * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
+ * sizeof(struct persistent_gnt) = 48
+ * So the maximum memory used to store the grants is:
+ * 32 * 11 * 48 = 16896 bytes
+ */
+static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
+{
+ switch (protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_32:
+ return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ case BLKIF_PROTOCOL_X86_64:
+ return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ default:
+ BUG();
+ }
+ return 0;
+}
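
For concreteness, the bound described in the comment above can be checked with simple arithmetic; the sketch below is purely illustrative (the ring size of 32 and the 48-byte struct size are the values quoted in that comment, not derived here):

	#include <stdio.h>

	/* Illustrative only: reproduces the worst-case persistent-grant accounting
	 * quoted in the comment above max_mapped_grant_pages(). */
	int main(void)
	{
		unsigned int ring_size = 32;	/* RING_SIZE for all known ring types */
		unsigned int max_segs  = 11;	/* BLKIF_MAX_SEGMENTS_PER_REQUEST */
		unsigned int gnt_size  = 48;	/* sizeof(struct persistent_gnt) */

		printf("grants per interface: %u\n", ring_size * max_segs);		/* 352 */
		printf("tracking bytes: %u\n", ring_size * max_segs * gnt_size);	/* 16896 */
		return 0;
	}
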
+
+
+/*
* Little helpful macro to figure out the index and virtual address of the
* pending_pages[..]. For each 'pending_req' we have up to
* BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
@@ -129,6 +161,94 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
+#define foreach_grant_safe(pos, n, rbtree, node) \
+ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+ (n) = rb_next(&(pos)->node); \
+ &(pos)->node != NULL; \
+ (pos) = container_of(n, typeof(*(pos)), node), \
+ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
+
+
+static void add_persistent_gnt(struct rb_root *root,
+ struct persistent_gnt *persistent_gnt)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct persistent_gnt *this;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ this = container_of(*new, struct persistent_gnt, node);
+
+ parent = *new;
+ if (persistent_gnt->gnt < this->gnt)
+ new = &((*new)->rb_left);
+ else if (persistent_gnt->gnt > this->gnt)
+ new = &((*new)->rb_right);
+ else {
+ pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
+ BUG();
+ }
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&(persistent_gnt->node), parent, new);
+ rb_insert_color(&(persistent_gnt->node), root);
+}
+
+static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
+ grant_ref_t gref)
+{
+ struct persistent_gnt *data;
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ data = container_of(node, struct persistent_gnt, node);
+
+ if (gref < data->gnt)
+ node = node->rb_left;
+ else if (gref > data->gnt)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt;
+ struct rb_node *n;
+ int ret = 0;
+ int segs_to_unmap = 0;
+
+ foreach_grant_safe(persistent_gnt, n, root, node) {
+ BUG_ON(persistent_gnt->handle ==
+ BLKBACK_INVALID_HANDLE);
+ gnttab_set_unmap_op(&unmap[segs_to_unmap],
+ (unsigned long) pfn_to_kaddr(page_to_pfn(
+ persistent_gnt->page)),
+ GNTMAP_host_map,
+ persistent_gnt->handle);
+
+ pages[segs_to_unmap] = persistent_gnt->page;
+
+ if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
+ !rb_next(&persistent_gnt->node)) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
+ BUG_ON(ret);
+ segs_to_unmap = 0;
+ }
+
+ rb_erase(&persistent_gnt->node, root);
+ kfree(persistent_gnt);
+ num--;
+ }
+ BUG_ON(num != 0);
+}
+
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
@@ -302,6 +422,14 @@ int xen_blkif_schedule(void *arg)
print_stats(blkif);
}
+ /* Free all persistent grant pages */
+ if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
+ free_persistent_gnts(&blkif->persistent_gnts,
+ blkif->persistent_gnt_c);
+
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+ blkif->persistent_gnt_c = 0;
+
if (log_stats)
print_stats(blkif);
@@ -328,6 +456,8 @@ static void xen_blkbk_unmap(struct pending_req *req)
int ret;
for (i = 0; i < req->nr_pages; i++) {
+ if (!test_bit(i, req->unmap_seg))
+ continue;
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
@@ -344,12 +474,26 @@ static void xen_blkbk_unmap(struct pending_req *req)
static int xen_blkbk_map(struct blkif_request *req,
struct pending_req *pending_req,
- struct seg_buf seg[])
+ struct seg_buf seg[],
+ struct page *pages[])
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- int i;
+ struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt = NULL;
+ struct xen_blkif *blkif = pending_req->blkif;
+ phys_addr_t addr = 0;
+ int i, j;
+ bool new_map;
int nseg = req->u.rw.nr_segments;
+ int segs_to_map = 0;
int ret = 0;
+ int use_persistent_gnts;
+
+ use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
+
+ BUG_ON(blkif->persistent_gnt_c >
+ max_mapped_grant_pages(pending_req->blkif->blk_protocol));
/*
* Fill out preq.nr_sects with proper amount of sectors, and setup
@@ -359,36 +503,146 @@ static int xen_blkbk_map(struct blkif_request *req,
for (i = 0; i < nseg; i++) {
uint32_t flags;
- flags = GNTMAP_host_map;
- if (pending_req->operation != BLKIF_OP_READ)
- flags |= GNTMAP_readonly;
- gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
- req->u.rw.seg[i].gref,
- pending_req->blkif->domid);
+ if (use_persistent_gnts)
+ persistent_gnt = get_persistent_gnt(
+ &blkif->persistent_gnts,
+ req->u.rw.seg[i].gref);
+
+ if (persistent_gnt) {
+ /*
+ * We are using persistent grants and
+ * the grant is already mapped
+ */
+ new_map = false;
+ } else if (use_persistent_gnts &&
+ blkif->persistent_gnt_c <
+ max_mapped_grant_pages(blkif->blk_protocol)) {
+ /*
+ * We are using persistent grants, the grant is
+ * not mapped but we have room for it
+ */
+ new_map = true;
+ persistent_gnt = kmalloc(
+ sizeof(struct persistent_gnt),
+ GFP_KERNEL);
+ if (!persistent_gnt)
+ return -ENOMEM;
+ persistent_gnt->page = alloc_page(GFP_KERNEL);
+ if (!persistent_gnt->page) {
+ kfree(persistent_gnt);
+ return -ENOMEM;
+ }
+ persistent_gnt->gnt = req->u.rw.seg[i].gref;
+ persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
+
+ pages_to_gnt[segs_to_map] =
+ persistent_gnt->page;
+ addr = (unsigned long) pfn_to_kaddr(
+ page_to_pfn(persistent_gnt->page));
+
+ add_persistent_gnt(&blkif->persistent_gnts,
+ persistent_gnt);
+ blkif->persistent_gnt_c++;
+ pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ persistent_gnt->gnt, blkif->persistent_gnt_c,
+ max_mapped_grant_pages(blkif->blk_protocol));
+ } else {
+ /*
+ * We are either using persistent grants and
+ * hit the maximum limit of grants mapped,
+ * or we are not using persistent grants.
+ */
+ if (use_persistent_gnts &&
+ !blkif->vbd.overflow_max_grants) {
+ blkif->vbd.overflow_max_grants = 1;
+ pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ blkif->domid, blkif->vbd.handle);
+ }
+ new_map = true;
+ pages[i] = blkbk->pending_page(pending_req, i);
+ addr = vaddr(pending_req, i);
+ pages_to_gnt[segs_to_map] =
+ blkbk->pending_page(pending_req, i);
+ }
+
+ if (persistent_gnt) {
+ pages[i] = persistent_gnt->page;
+ persistent_gnts[i] = persistent_gnt;
+ } else {
+ persistent_gnts[i] = NULL;
+ }
+
+ if (new_map) {
+ flags = GNTMAP_host_map;
+ if (!persistent_gnt &&
+ (pending_req->operation != BLKIF_OP_READ))
+ flags |= GNTMAP_readonly;
+ gnttab_set_map_op(&map[segs_to_map++], addr,
+ flags, req->u.rw.seg[i].gref,
+ blkif->domid);
+ }
}
- ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
- BUG_ON(ret);
+ if (segs_to_map) {
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+ BUG_ON(ret);
+ }
/*
* Now swizzle the MFN in our domain with the MFN from the other domain
* so that when we access vaddr(pending_req,i) it has the contents of
* the page from the other domain.
*/
- for (i = 0; i < nseg; i++) {
- if (unlikely(map[i].status != 0)) {
- pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
- map[i].handle = BLKBACK_INVALID_HANDLE;
- ret |= 1;
+ bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0, j = 0; i < nseg; i++) {
+ if (!persistent_gnts[i] ||
+ persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
+ /* This is a newly mapped grant */
+ BUG_ON(j >= segs_to_map);
+ if (unlikely(map[j].status != 0)) {
+ pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+ map[j].handle = BLKBACK_INVALID_HANDLE;
+ ret |= 1;
+ if (persistent_gnts[i]) {
+ rb_erase(&persistent_gnts[i]->node,
+ &blkif->persistent_gnts);
+ blkif->persistent_gnt_c--;
+ kfree(persistent_gnts[i]);
+ persistent_gnts[i] = NULL;
+ }
+ }
+ }
+ if (persistent_gnts[i]) {
+ if (persistent_gnts[i]->handle ==
+ BLKBACK_INVALID_HANDLE) {
+ /*
+ * If this is a new persistent grant,
+ * save the handle
+ */
+ persistent_gnts[i]->handle = map[j].handle;
+ persistent_gnts[i]->dev_bus_addr =
+ map[j++].dev_bus_addr;
+ }
+ pending_handle(pending_req, i) =
+ persistent_gnts[i]->handle;
+
+ if (ret)
+ continue;
+
+ seg[i].buf = persistent_gnts[i]->dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
+ } else {
+ pending_handle(pending_req, i) = map[j].handle;
+ bitmap_set(pending_req->unmap_seg, i, 1);
+
+ if (ret) {
+ j++;
+ continue;
+ }
+
+ seg[i].buf = map[j++].dev_bus_addr |
+ (req->u.rw.seg[i].first_sect << 9);
}
-
- pending_handle(pending_req, i) = map[i].handle;
-
- if (ret)
- continue;
-
- seg[i].buf = map[i].dev_bus_addr |
- (req->u.rw.seg[i].first_sect << 9);
}
return ret;
}
@@ -591,6 +845,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
int operation;
struct blk_plug plug;
bool drain = false;
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
switch (req->operation) {
case BLKIF_OP_READ:
@@ -677,7 +932,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (xen_blkbk_map(req, pending_req, seg))
+ if (xen_blkbk_map(req, pending_req, seg, pages))
goto fail_flush;
/*
@@ -689,7 +944,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
- blkbk->pending_page(pending_req, i),
+ pages[i],
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9a54623e52d7..6072390c7f57 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
+#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
@@ -160,10 +161,21 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ unsigned int feature_gnt_persistent:1;
+ unsigned int overflow_max_grants:1;
};
struct backend_info;
+
+struct persistent_gnt {
+ struct page *page;
+ grant_ref_t gnt;
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+ struct rb_node node;
+};
+
struct xen_blkif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -190,6 +202,10 @@ struct xen_blkif {
struct task_struct *xenblkd;
unsigned int waiting_reqs;
+ /* tree to store persistent grants */
+ struct rb_root persistent_gnts;
+ unsigned int persistent_gnt_c;
+
/* statistics */
unsigned long st_print;
int st_rd_req;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f58434c2617c..63980722db41 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -117,6 +117,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
atomic_set(&blkif->drain, 0);
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
+ blkif->persistent_gnts.rb_node = NULL;
return blkif;
}
@@ -672,6 +673,13 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
+ dev->nodename);
+ goto abort;
+ }
+
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
if (err) {
@@ -720,6 +728,7 @@ static int connect_ring(struct backend_info *be)
struct xenbus_device *dev = be->dev;
unsigned long ring_ref;
unsigned int evtchn;
+ unsigned int pers_grants;
char protocol[64] = "";
int err;
@@ -749,8 +758,18 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -1;
}
- pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
- ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "feature-persistent", "%u",
+ &pers_grants, NULL);
+ if (err)
+ pers_grants = 0;
+
+ be->blkif->vbd.feature_gnt_persistent = pers_grants;
+ be->blkif->vbd.overflow_max_grants = 0;
+
+ pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
+ ring_ref, evtchn, be->blkif->blk_protocol, protocol,
+ pers_grants ? "persistent grants" : "");
/* Map the shared frame, irq etc. */
err = xen_blkif_map(be->blkif, ring_ref, evtchn);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 007db8986e84..11043c18ac5a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -44,6 +44,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
+#include <linux/llist.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -64,10 +65,17 @@ enum blkif_state {
BLKIF_STATE_SUSPENDED,
};
+struct grant {
+ grant_ref_t gref;
+ unsigned long pfn;
+ struct llist_node node;
+};
+
struct blk_shadow {
struct blkif_request req;
struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
@@ -97,6 +105,8 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
+ struct llist_head persistent_gnts;
+ unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
@@ -104,6 +114,7 @@ struct blkfront_info
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int feature_persistent:1;
int is_ready;
};
@@ -287,21 +298,36 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref;
+
+ /*
+ * Used to store if we are able to queue the request by just using
+ * existing persistent grants, or if we have to get new grants,
+ * as there are not sufficiently many free.
+ */
+ bool new_persistent_gnts;
grant_ref_t gref_head;
+ struct page *granted_page;
+ struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (gnttab_alloc_grant_references(
- BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
- gnttab_request_free_callback(
- &info->callback,
- blkif_restart_queue_callback,
- info,
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
- return 1;
- }
+ /* Check if we have enough grants to allocate a request */
+ if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ new_persistent_gnts = 1;
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c,
+ &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+ } else
+ new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
@@ -341,18 +367,73 @@ static int blkif_queue_request(struct request *req)
BLKIF_MAX_SEGMENTS_PER_REQUEST);
for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
- buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
- gnttab_grant_foreign_access_ref(
- ref,
+ if (info->persistent_gnts_c) {
+ BUG_ON(llist_empty(&info->persistent_gnts));
+ gnt_list_entry = llist_entry(
+ llist_del_first(&info->persistent_gnts),
+ struct grant, node);
+
+ ref = gnt_list_entry->gref;
+ buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+ info->persistent_gnts_c--;
+ } else {
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnt_list_entry =
+ kmalloc(sizeof(struct grant),
+ GFP_ATOMIC);
+ if (!gnt_list_entry)
+ return -ENOMEM;
+
+ granted_page = alloc_page(GFP_ATOMIC);
+ if (!granted_page) {
+ kfree(gnt_list_entry);
+ return -ENOMEM;
+ }
+
+ gnt_list_entry->pfn =
+ page_to_pfn(granted_page);
+ gnt_list_entry->gref = ref;
+
+ buffer_mfn = pfn_to_mfn(page_to_pfn(
+ granted_page));
+ gnttab_grant_foreign_access_ref(ref,
info->xbdev->otherend_id,
- buffer_mfn,
- rq_data_dir(req));
+ buffer_mfn, 0);
+ }
+
+ info->shadow[id].grants_used[i] = gnt_list_entry;
+
+ if (rq_data_dir(req)) {
+ char *bvec_data;
+ void *shared_data;
+
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+
+ shared_data = kmap_atomic(
+ pfn_to_page(gnt_list_entry->pfn));
+ bvec_data = kmap_atomic(sg_page(sg));
+
+ /*
+ * this does not wipe data stored outside the
+ * range sg->offset..sg->offset+sg->length.
+ * Therefore, blkback *could* see data from
+ * previous requests. This is OK as long as
+ * persistent grants are shared with just one
+ * domain. It may need refactoring if this
+ * changes
+ */
+ memcpy(shared_data + sg->offset,
+ bvec_data + sg->offset,
+ sg->length);
+
+ kunmap_atomic(bvec_data);
+ kunmap_atomic(shared_data);
+ }
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
ring_req->u.rw.seg[i] =
@@ -368,7 +449,8 @@ static int blkif_queue_request(struct request *req)
/* Keep a private copy so we can reissue requests when recovering. */
info->shadow[id].req = *ring_req;
- gnttab_free_grant_references(gref_head);
+ if (new_persistent_gnts)
+ gnttab_free_grant_references(gref_head);
return 0;
}
@@ -480,12 +562,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_flush(info->rq, info->feature_flush);
- printk(KERN_INFO "blkfront: %s: %s: %s\n",
+ printk(KERN_INFO "blkfront: %s: %s: %s %s\n",
info->gd->disk_name,
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
"flush diskcache" : "barrier or flush"),
- info->feature_flush ? "enabled" : "disabled");
+ info->feature_flush ? "enabled" : "disabled",
+ info->feature_persistent ? "using persistent grants" : "");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -707,6 +790,10 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
+ struct llist_node *all_gnts;
+ struct grant *persistent_gnt;
+ struct llist_node *n;
+
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
info->connected = suspend ?
@@ -714,6 +801,18 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* No more blkif_request(). */
if (info->rq)
blk_stop_queue(info->rq);
+
+ /* Remove all persistent grants */
+ if (info->persistent_gnts_c) {
+ all_gnts = llist_del_all(&info->persistent_gnts);
+ llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ __free_page(pfn_to_page(persistent_gnt->pfn));
+ kfree(persistent_gnt);
+ }
+ info->persistent_gnts_c = 0;
+ }
+
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock);
@@ -734,13 +833,44 @@ static void blkif_free(struct blkfront_info *info, int suspend)
}
-static void blkif_completion(struct blk_shadow *s)
+static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ struct blkif_response *bret)
{
- int i;
- /* Do not let BLKIF_OP_DISCARD as nr_segment is in the same place
- * flag. */
- for (i = 0; i < s->req.u.rw.nr_segments; i++)
- gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
+ int i = 0;
+ struct bio_vec *bvec;
+ struct req_iterator iter;
+ unsigned long flags;
+ char *bvec_data;
+ void *shared_data;
+ unsigned int offset = 0;
+
+ if (bret->operation == BLKIF_OP_READ) {
+ /*
+ * Copy the data received from the backend into the bvec.
+ * Since bv_offset can be different than 0, and bv_len different
+ * than PAGE_SIZE, we have to keep track of the current offset,
+ * to be sure we are copying the data from the right shared page.
+ */
+ rq_for_each_segment(bvec, s->request, iter) {
+ BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
+ if (bvec->bv_offset < offset)
+ i++;
+ BUG_ON(i >= s->req.u.rw.nr_segments);
+ shared_data = kmap_atomic(
+ pfn_to_page(s->grants_used[i]->pfn));
+ bvec_data = bvec_kmap_irq(bvec, &flags);
+ memcpy(bvec_data, shared_data + bvec->bv_offset,
+ bvec->bv_len);
+ bvec_kunmap_irq(bvec_data, &flags);
+ kunmap_atomic(shared_data);
+ offset = bvec->bv_offset + bvec->bv_len;
+ }
+ }
+ /* Add the persistent grant into the list of free grants */
+ for (i = 0; i < s->req.u.rw.nr_segments; i++) {
+ llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ }
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -783,7 +913,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
- blkif_completion(&info->shadow[id]);
+ blkif_completion(&info->shadow[id], info, bret);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
@@ -942,6 +1072,11 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-persistent", "%u", 1);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing persistent grants feature to xenbus");
err = xenbus_transaction_end(xbt, 0);
if (err) {
@@ -1029,6 +1164,8 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
+ init_llist_head(&info->persistent_gnts);
+ info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@@ -1093,7 +1230,7 @@ static int blkif_recover(struct blkfront_info *info)
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
- rq_data_dir(info->shadow[req->u.rw.id].request));
+ 0);
}
info->shadow[req->u.rw.id].req = *req;
@@ -1225,7 +1362,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard;
+ int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1303,6 +1440,14 @@ static void blkfront_connect(struct blkfront_info *info)
if (!err && discard)
blkfront_setup_discard(info);
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1a17e338735e..1f38643173ca 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -961,7 +961,7 @@ static const struct block_device_operations ace_fops = {
/* --------------------------------------------------------------------
* SystemACE device setup/teardown code
*/
-static int __devinit ace_setup(struct ace_device *ace)
+static int ace_setup(struct ace_device *ace)
{
u16 version;
u16 val;
@@ -1074,7 +1074,7 @@ err_ioremap:
return -ENOMEM;
}
-static void __devexit ace_teardown(struct ace_device *ace)
+static void ace_teardown(struct ace_device *ace)
{
if (ace->gd) {
del_gendisk(ace->gd);
@@ -1092,9 +1092,8 @@ static void __devexit ace_teardown(struct ace_device *ace)
iounmap(ace->baseaddr);
}
-static int __devinit
-ace_alloc(struct device *dev, int id, resource_size_t physaddr,
- int irq, int bus_width)
+static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
+ int irq, int bus_width)
{
struct ace_device *ace;
int rc;
@@ -1135,7 +1134,7 @@ err_noreg:
return rc;
}
-static void __devexit ace_free(struct device *dev)
+static void ace_free(struct device *dev)
{
struct ace_device *ace = dev_get_drvdata(dev);
dev_dbg(dev, "ace_free(%p)\n", dev);
@@ -1151,7 +1150,7 @@ static void __devexit ace_free(struct device *dev)
* Platform Bus Support
*/
-static int __devinit ace_probe(struct platform_device *dev)
+static int ace_probe(struct platform_device *dev)
{
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
@@ -1182,7 +1181,7 @@ static int __devinit ace_probe(struct platform_device *dev)
/*
* Platform bus remove() method
*/
-static int __devexit ace_remove(struct platform_device *dev)
+static int ace_remove(struct platform_device *dev)
{
ace_free(&dev->dev);
return 0;
@@ -1190,7 +1189,7 @@ static int __devexit ace_remove(struct platform_device *dev)
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static const struct of_device_id ace_of_match[] __devinitconst = {
+static const struct of_device_id ace_of_match[] = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
{ .compatible = "xlnx,opb-sysace-1.00.c", },
{ .compatible = "xlnx,xps-sysace-1.00.a", },
@@ -1204,7 +1203,7 @@ MODULE_DEVICE_TABLE(of, ace_of_match);
static struct platform_driver ace_platform_driver = {
.probe = ace_probe,
- .remove = __devexit_p(ace_remove),
+ .remove = ace_remove,
.driver = {
.owner = THIS_MODULE,
.name = "xsysace",
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b00000e8aef6..33c9a44a9678 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
+ { USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe056) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 3f4bfc814dc7..9959d4cb23dc 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -492,7 +492,7 @@ done:
static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
{
u16 buf_len = 0;
- int ret, buf_block_len, blksz;
+ int ret, num_blocks, blksz;
struct sk_buff *skb = NULL;
u32 type;
u8 *payload = NULL;
@@ -514,18 +514,17 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
}
blksz = SDIO_BLOCK_SIZE;
- buf_block_len = (buf_len + blksz - 1) / blksz;
+ num_blocks = DIV_ROUND_UP(buf_len, blksz);
if (buf_len <= SDIO_HEADER_LEN
- || (buf_block_len * blksz) > ALLOC_BUF_SIZE) {
+ || (num_blocks * blksz) > ALLOC_BUF_SIZE) {
BT_ERR("invalid packet length: %d", buf_len);
ret = -EINVAL;
goto exit;
}
/* Allocate buffer */
- skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN,
- GFP_ATOMIC);
+ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
goto exit;
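
The hunk above swaps the open-coded rounding for the kernel's DIV_ROUND_UP() helper; the two forms are equivalent. A minimal user-space sketch of that equivalence (the macro body is written out here only for illustration):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* same shape the old code spelled out */

	int main(void)
	{
		unsigned int buf_len = 513, blksz = 256;

		/* 513 bytes need 3 blocks of 256 bytes; both expressions agree. */
		printf("%u %u\n", DIV_ROUND_UP(buf_len, blksz),
		       (buf_len + blksz - 1) / blksz);
		return 0;
	}
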
@@ -541,7 +540,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
payload = skb->data;
ret = sdio_readsb(card->func, payload, card->ioport,
- buf_block_len * blksz);
+ num_blocks * blksz);
if (ret < 0) {
BT_ERR("readsb failed: %d", ret);
ret = -EIO;
@@ -553,7 +552,16 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
*/
buf_len = payload[0];
- buf_len |= (u16) payload[1] << 8;
+ buf_len |= payload[1] << 8;
+ buf_len |= payload[2] << 16;
+
+ if (buf_len > blksz * num_blocks) {
+ BT_ERR("Skip incorrect packet: hdrlen %d buffer %d",
+ buf_len, blksz * num_blocks);
+ ret = -EIO;
+ goto exit;
+ }
+
type = payload[3];
switch (type) {
@@ -589,8 +597,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
default:
BT_ERR("Unknown packet type:%d", type);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload,
- blksz * buf_block_len);
+ BT_ERR("hex: %*ph", blksz * num_blocks, payload);
kfree_skb(skb);
skb = NULL;
@@ -849,8 +856,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
if (ret < 0) {
i++;
BT_ERR("i=%d writesb failed: %d", i, ret);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- payload, nb);
+ BT_ERR("hex: %*ph", nb, payload);
ret = -EIO;
if (i > MAX_WRITE_IOMEM_RETRY)
goto exit;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ee82f2fb65f0..7e351e345476 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -96,6 +96,7 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0b05, 0x17b5) },
{ USB_DEVICE(0x04ca, 0x2003) },
{ USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x413c, 0x8197) },
@@ -134,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c8abce3d2d9c..ed0fade46aed 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -270,15 +270,10 @@ static int hci_uart_send_frame(struct sk_buff *skb)
*/
static int hci_uart_tty_open(struct tty_struct *tty)
{
- struct hci_uart *hu = (void *) tty->disc_data;
+ struct hci_uart *hu;
BT_DBG("tty %p", tty);
- /* FIXME: This btw is bogus, nothing requires the old ldisc to clear
- the pointer */
- if (hu)
- return -EEXIST;
-
/* Error if the tty has no write op instead of leaving an exploitable
hole */
if (tty->ops->write == NULL)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index bbec35d21fe5..0f51ed687dc8 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -6,6 +6,7 @@ menu "Bus devices"
config OMAP_OCP2SCP
tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
help
Driver to enable ocp2scp module which transforms ocp interface
protocol to scp protocol. In OMAP4, USB PHY is connected via
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 0c48b0e05ed6..fe7191663bbd 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -52,7 +52,7 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
return 0;
}
-static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
+static int omap_ocp2scp_probe(struct platform_device *pdev)
{
int ret;
unsigned res_cnt, i;
@@ -116,7 +116,7 @@ err0:
return ret;
}
-static int __devexit omap_ocp2scp_remove(struct platform_device *pdev)
+static int omap_ocp2scp_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove = __devexit_p(omap_ocp2scp_remove),
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.owner = THIS_MODULE,
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index ab911a33f8a8..feeecae623f6 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -128,7 +128,7 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
return IRQ_HANDLED;
}
-static int __devinit omap4_l3_probe(struct platform_device *pdev)
+static int omap4_l3_probe(struct platform_device *pdev)
{
static struct omap4_l3 *l3;
struct resource *res;
@@ -219,7 +219,7 @@ err0:
return ret;
}
-static int __devexit omap4_l3_remove(struct platform_device *pdev)
+static int omap4_l3_remove(struct platform_device *pdev)
{
struct omap4_l3 *l3 = platform_get_drvdata(pdev);
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, l3_noc_match);
static struct platform_driver omap4_l3_driver = {
.probe = omap4_l3_probe,
- .remove = __devexit_p(omap4_l3_remove),
+ .remove = omap4_l3_remove,
.driver = {
.name = "omap_l3_noc",
.owner = THIS_MODULE,
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 75d485afe56c..d59cdcb8fe39 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -557,7 +557,7 @@ static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit gdrom_set_interrupt_handlers(void)
+static int gdrom_set_interrupt_handlers(void)
{
int err;
@@ -681,7 +681,7 @@ static void gdrom_request(struct request_queue *rq)
}
/* Print string identifying GD ROM device */
-static int __devinit gdrom_outputversion(void)
+static int gdrom_outputversion(void)
{
struct gdrom_id *id;
char *model_name, *manuf_name, *firmw_ver;
@@ -715,7 +715,7 @@ free_id:
}
/* set the default mode for DMA transfer */
-static int __devinit gdrom_init_dma_mode(void)
+static int gdrom_init_dma_mode(void)
{
__raw_writeb(0x13, GDROM_ERROR_REG);
__raw_writeb(0x22, GDROM_INTSEC_REG);
@@ -736,7 +736,7 @@ static int __devinit gdrom_init_dma_mode(void)
return 0;
}
-static void __devinit probe_gdrom_setupcd(void)
+static void probe_gdrom_setupcd(void)
{
gd.cd_info->ops = &gdrom_ops;
gd.cd_info->capacity = 1;
@@ -745,7 +745,7 @@ static void __devinit probe_gdrom_setupcd(void)
CDC_SELECT_DISC;
}
-static void __devinit probe_gdrom_setupdisk(void)
+static void probe_gdrom_setupdisk(void)
{
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
@@ -753,7 +753,7 @@ static void __devinit probe_gdrom_setupdisk(void)
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
-static int __devinit probe_gdrom_setupqueue(void)
+static int probe_gdrom_setupqueue(void)
{
blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
@@ -768,7 +768,7 @@ static int __devinit probe_gdrom_setupqueue(void)
* register this as a block device and as compliant with the
* universal CD Rom driver interface
*/
-static int __devinit probe_gdrom(struct platform_device *devptr)
+static int probe_gdrom(struct platform_device *devptr)
{
int err;
/* Start the device */
@@ -838,7 +838,7 @@ probe_fail_no_mem:
return err;
}
-static int __devexit remove_gdrom(struct platform_device *devptr)
+static int remove_gdrom(struct platform_device *devptr)
{
flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
@@ -854,7 +854,7 @@ static int __devexit remove_gdrom(struct platform_device *devptr)
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove = __devexit_p(remove_gdrom),
+ .remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index fd793519ea2b..443cd6751ca2 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -249,7 +249,7 @@ static const struct agp_bridge_driver ali_m1541_bridge = {
};
-static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
+static struct agp_device_ids ali_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_AL_M1541,
@@ -299,8 +299,7 @@ static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_ali_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ali_agp_device_ids;
struct agp_bridge_data *bridge;
@@ -374,7 +373,7 @@ found:
return agp_add_bridge(bridge);
}
-static void __devexit agp_ali_remove(struct pci_dev *pdev)
+static void agp_ali_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index f7e88787af97..779f0ab845a9 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -388,7 +388,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
+static struct agp_device_ids amd_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
@@ -405,8 +405,8 @@ static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_amdk7_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -480,7 +480,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
+static void agp_amdk7_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 444f8b6ab411..d79d692d05b8 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -240,7 +240,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
};
/* Some basic sanity checks for the aperture. */
-static int __devinit agp_aperture_valid(u64 aper, u32 size)
+static int agp_aperture_valid(u64 aper, u32 size)
{
if (!aperture_valid(aper, size, 32*1024*1024))
return 0;
@@ -267,8 +267,7 @@ static int __devinit agp_aperture_valid(u64 aper, u32 size)
* to allocate that much memory. But at least error out cleanly instead of
* crashing.
*/
-static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
- u16 cap)
+static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
u32 aper_low, aper_hi;
u64 aper, nb_aper;
@@ -326,7 +325,7 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
return 0;
}
-static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
+static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
int i;
@@ -352,7 +351,7 @@ static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
}
/* Handle AMD 8151 quirks */
-static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
+static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
char *revstring;
@@ -390,7 +389,7 @@ static const struct aper_size_info_32 uli_sizes[7] =
{8, 2048, 1, 4},
{4, 1024, 0, 3}
};
-static int __devinit uli_agp_init(struct pci_dev *pdev)
+static int uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
@@ -513,8 +512,8 @@ put:
return ret;
}
-static int __devinit agp_amd64_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_amd64_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -579,7 +578,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
return 0;
}
-static void __devexit agp_amd64_remove(struct pci_dev *pdev)
+static void agp_amd64_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index dc30e2243494..0628d7b65c71 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -445,7 +445,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
};
-static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
+static struct agp_device_ids ati_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_ATI_RS100,
@@ -490,8 +490,7 @@ static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
{ }, /* dummy final entry, always present */
};
-static int __devinit agp_ati_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ati_agp_device_ids;
struct agp_bridge_data *bridge;
@@ -533,7 +532,7 @@ found:
return agp_add_bridge(bridge);
}
-static void __devexit agp_ati_remove(struct pci_dev *pdev)
+static void agp_ati_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index d607f53d8afc..6974d5032053 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -343,8 +343,8 @@ static const struct agp_bridge_driver efficeon_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_efficeon_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -407,7 +407,7 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
+static void agp_efficeon_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 75b763cb3ea1..15b240ea4848 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -587,8 +587,8 @@ const struct agp_bridge_driver intel_i460_driver = {
.cant_use_aperture = true,
};
-static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_intel_i460_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -611,7 +611,7 @@ static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
+static void agp_intel_i460_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
@@ -637,7 +637,7 @@ static struct pci_driver agp_intel_i460_pci_driver = {
.name = "agpgart-intel-i460",
.id_table = agp_intel_i460_pci_table,
.probe = agp_intel_i460_probe,
- .remove = __devexit_p(agp_intel_i460_remove),
+ .remove = agp_intel_i460_remove,
};
static int __init agp_intel_i460_init(void)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index b130df0a1958..a426ee1f57a6 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -732,8 +732,8 @@ static const struct intel_agp_driver_description {
{ 0, NULL, NULL }
};
-static int __devinit agp_intel_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_intel_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
@@ -819,7 +819,7 @@ found_gmch:
return err;
}
-static void __devexit agp_intel_remove(struct pci_dev *pdev)
+static void agp_intel_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
@@ -912,7 +912,7 @@ static struct pci_driver agp_intel_pci_driver = {
.name = "agpgart-intel",
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
- .remove = __devexit_p(agp_intel_remove),
+ .remove = agp_intel_remove,
#ifdef CONFIG_PM
.resume = agp_intel_resume,
#endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc2..1042c1b90376 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED 0x00000002
-#define HSW_PTE_UNCACHED 0x00000000
-#define GEN6_PTE_LLC 0x00000004
-#define GEN6_PTE_LLC_MLC 0x00000006
-#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..dbd901e94ea6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
- } else if (INTEL_GTT_GEN == 6) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen_size = MB(192);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen_size = MB(288);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen_size = MB(320);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen_size = MB(384);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen_size = MB(416);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen_size = MB(448);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen_size = MB(480);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen_size = MB(512);
- break;
- }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
- int size;
-
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else if (INTEL_GTT_GEN == 6) {
- u16 snb_gmch_ctl;
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- size = MB(2);
- break;
- }
- return size/4;
- } else {
+ else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
- if (INTEL_GTT_GEN >= 6)
- return true;
-
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool gen6_check_flags(unsigned int flags)
-{
- return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else {
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-
- writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr;
+ u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- if (INTEL_GTT_GEN >= 7)
- size = MB(2);
-
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- if (INTEL_GTT_GEN == 3) {
- u32 gtt_addr;
-
+ switch (INTEL_GTT_GEN) {
+ case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- } else {
- u32 gtt_offset;
-
- switch (INTEL_GTT_GEN) {
- case 5:
- case 6:
- case 7:
- gtt_offset = MB(2);
- break;
- case 4:
- default:
- gtt_offset = KB(512);
- break;
- }
- intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ break;
+ case 5:
+ intel_private.gtt_bus_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_bus_addr = reg_addr + KB(512);
+ break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = gen6_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = haswell_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
- .gen = 7,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = valleyview_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
-};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
- "ValleyView", &valleyview_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index b9734a978186..62be3ec0da4b 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -332,8 +332,8 @@ static const struct agp_bridge_driver nvidia_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_nvidia_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -388,7 +388,7 @@ static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_nvidia_remove(struct pci_dev *pdev)
+static void agp_nvidia_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 3a5af2f9b015..05b8d0241bde 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -270,7 +270,7 @@ const struct agp_bridge_driver sgi_tioca_driver = {
.num_aperture_sizes = 1,
};
-static int __devinit agp_sgi_init(void)
+static int agp_sgi_init(void)
{
unsigned int j;
struct tioca_kernel *info;
@@ -327,7 +327,7 @@ static int __devinit agp_sgi_init(void)
return 0;
}
-static void __devexit agp_sgi_cleanup(void)
+static void agp_sgi_cleanup(void)
{
kfree(sgi_tioca_agp_bridges);
sgi_tioca_agp_bridges = NULL;
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 08704ae53956..79c838c434bc 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -17,8 +17,8 @@
#define PCI_DEVICE_ID_SI_662 0x0662
#define PCI_DEVICE_ID_SI_671 0x0671
-static bool __devinitdata agp_sis_force_delay = 0;
-static int __devinitdata agp_sis_agp_spec = -1;
+static bool agp_sis_force_delay = 0;
+static int agp_sis_agp_spec = -1;
static int sis_fetch_size(void)
{
@@ -148,13 +148,13 @@ static struct agp_bridge_driver sis_driver = {
};
// chipsets that require the 'delay hack'
-static int sis_broken_chipsets[] __devinitdata = {
+static int sis_broken_chipsets[] = {
PCI_DEVICE_ID_SI_648,
PCI_DEVICE_ID_SI_746,
0 // terminator
};
-static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
+static void sis_get_driver(struct agp_bridge_data *bridge)
{
int i;
@@ -180,8 +180,7 @@ static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
}
-static int __devinit agp_sis_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
@@ -211,7 +210,7 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_sis_remove(struct pci_dev *pdev)
+static void agp_sis_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index f02f9b07fd4c..9b163b49d976 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -445,8 +445,8 @@ static const struct agp_bridge_driver sworks_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_serverworks_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
struct pci_dev *bridge_dev;
@@ -518,7 +518,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
+static void agp_serverworks_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index a32c492baf5c..a56ee9bedd11 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -557,7 +557,7 @@ const struct agp_bridge_driver u3_agp_driver = {
.needs_scratch_page = true,
};
-static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
+static struct agp_device_ids uninorth_agp_device_ids[] = {
{
.device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
.chipset_name = "UniNorth",
@@ -592,8 +592,8 @@ static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
},
};
-static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_uninorth_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct agp_device_ids *devs = uninorth_agp_device_ids;
struct agp_bridge_data *bridge;
@@ -663,7 +663,7 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
+static void agp_uninorth_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 8bc384937401..74d3aa3773bf 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -224,7 +224,7 @@ static const struct agp_bridge_driver via_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static struct agp_device_ids via_agp_device_ids[] __devinitdata =
+static struct agp_device_ids via_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_VIA_82C597_0,
@@ -438,8 +438,7 @@ static void check_via_agp3 (struct agp_bridge_data *bridge)
}
-static int __devinit agp_via_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = via_agp_device_ids;
struct agp_bridge_data *bridge;
@@ -485,7 +484,7 @@ static int __devinit agp_via_probe(struct pci_dev *pdev,
return agp_add_bridge(bridge);
}
-static void __devexit agp_via_remove(struct pci_dev *pdev)
+static void agp_via_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index dfd7876f127c..fe6d4be48296 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -816,7 +816,7 @@ static unsigned long __hpet_calibrate(struct hpets *hpetp)
static unsigned long hpet_calibrate(struct hpets *hpetp)
{
- unsigned long ret = -1;
+ unsigned long ret = ~0UL;
unsigned long tmp;
/*
@@ -1001,6 +1001,9 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
irqp = &res->data.extended_irq;
for (i = 0; i < irqp->interrupt_count; i++) {
+ if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
+ break;
+
irq = acpi_register_gsi(NULL, irqp->interrupts[i],
irqp->triggering, irqp->polarity);
if (irq < 0)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c58ea9b80b1a..c5a0262251bc 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -216,7 +216,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
- depends on HW_RANDOM && PLAT_NOMADIK
+ depends on HW_RANDOM && ARCH_NOMADIK
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 731c9046cf7b..7c73d4aca36b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -98,7 +98,7 @@ err_enable:
return ret;
}
-static int __devexit atmel_trng_remove(struct platform_device *pdev)
+static int atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
@@ -138,7 +138,7 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove = __devexit_p(atmel_trng_remove),
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index aec6a4277caa..f343b7d0dfa1 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -61,7 +61,7 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
return 4;
}
-static int __devinit bcm63xx_rng_probe(struct platform_device *pdev)
+static int bcm63xx_rng_probe(struct platform_device *pdev)
{
struct resource *r;
struct clk *clk;
@@ -145,7 +145,7 @@ out:
return ret;
}
-static int __devexit bcm63xx_rng_remove(struct platform_device *pdev)
+static int bcm63xx_rng_remove(struct platform_device *pdev)
{
struct hwrng *rng = platform_get_drvdata(pdev);
struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
@@ -161,7 +161,7 @@ static int __devexit bcm63xx_rng_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_rng_driver = {
.probe = bcm63xx_rng_probe,
- .remove = __devexit_p(bcm63xx_rng_remove),
+ .remove = bcm63xx_rng_remove,
.driver = {
.name = "bcm63xx-rng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 232ba9ce579c..48bbfeca4b5d 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -101,7 +101,7 @@ static int exynos_read(struct hwrng *rng, void *buf,
return 4;
}
-static int __devinit exynos_rng_probe(struct platform_device *pdev)
+static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
@@ -134,7 +134,7 @@ static int __devinit exynos_rng_probe(struct platform_device *pdev)
return hwrng_register(&exynos_rng->rng);
}
-static int __devexit exynos_rng_remove(struct platform_device *pdev)
+static int exynos_rng_remove(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -172,7 +172,7 @@ static struct platform_driver exynos_rng_driver = {
.pm = &exynos_rng_pm_ops,
},
.probe = exynos_rng_probe,
- .remove = __devexit_p(exynos_rng_remove),
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ebd48f0135da..20b962e1d832 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -25,7 +25,7 @@
#define DRV_MODULE_VERSION "0.2"
#define DRV_MODULE_RELDATE "July 27, 2011"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -611,7 +611,7 @@ static void n2rng_work(struct work_struct *work)
schedule_delayed_work(&np->work, HZ * 2);
}
-static void __devinit n2rng_driver_version(void)
+static void n2rng_driver_version(void)
{
static int n2rng_version_printed;
@@ -620,7 +620,7 @@ static void __devinit n2rng_driver_version(void)
}
static const struct of_device_id n2rng_match[];
-static int __devinit n2rng_probe(struct platform_device *op)
+static int n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
int multi_capable;
@@ -719,7 +719,7 @@ out:
return err;
}
-static int __devexit n2rng_remove(struct platform_device *op)
+static int n2rng_remove(struct platform_device *op)
{
struct n2rng *np = dev_get_drvdata(&op->dev);
@@ -767,7 +767,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove = __devexit_p(n2rng_remove),
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 5c34c092af71..1eada566ca70 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -56,7 +56,7 @@ static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
return sizeof(u32);
}
-static int __devinit octeon_rng_probe(struct platform_device *pdev)
+static int octeon_rng_probe(struct platform_device *pdev)
{
struct resource *res_ports;
struct resource *res_result;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index a5effd813abd..d8c54e253761 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -27,8 +27,6 @@
#include <asm/io.h>
-#include <plat/cpu.h>
-
#define RNG_OUT_REG 0x00 /* Output register */
#define RNG_STAT_REG 0x04 /* Status register
[0] = STAT_BUSY */
@@ -106,7 +104,7 @@ static struct hwrng omap_rng_ops = {
.data_read = omap_rng_data_read,
};
-static int __devinit omap_rng_probe(struct platform_device *pdev)
+static int omap_rng_probe(struct platform_device *pdev)
{
struct omap_rng_private_data *priv;
int ret;
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 3a632673aed5..c6df5b29af08 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -94,7 +94,7 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
-static int __devinit rng_probe(struct platform_device *ofdev)
+static int rng_probe(struct platform_device *ofdev)
{
void __iomem *rng_regs;
struct device_node *rng_np = ofdev->dev.of_node;
@@ -122,7 +122,7 @@ static int __devinit rng_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit rng_remove(struct platform_device *dev)
+static int rng_remove(struct platform_device *dev)
{
void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 97bd891422c7..973b95113edf 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -151,7 +151,7 @@ err_enable:
return ret;
}
-static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
+static int picoxcell_trng_remove(struct platform_device *pdev)
{
hwrng_unregister(&picoxcell_trng);
clk_disable(rng_clk);
@@ -181,7 +181,7 @@ static const struct dev_pm_ops picoxcell_trng_pm_ops = {
static struct platform_driver picoxcell_trng_driver = {
.probe = picoxcell_trng_probe,
- .remove = __devexit_p(picoxcell_trng_remove),
+ .remove = picoxcell_trng_remove,
.driver = {
.name = "picoxcell-trng",
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index c51762c13031..732c330805fd 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -90,7 +90,7 @@ static struct hwrng ppc4xx_rng = {
.data_read = ppc4xx_rng_data_read,
};
-static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
+static int ppc4xx_rng_probe(struct platform_device *dev)
{
void __iomem *rng_regs;
int err = 0;
@@ -111,7 +111,7 @@ static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
return err;
}
-static int __devexit ppc4xx_rng_remove(struct platform_device *dev)
+static int ppc4xx_rng_remove(struct platform_device *dev)
{
void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index f1a1618db1fb..849db199c02c 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,7 +88,7 @@ static struct hwrng timeriomem_rng_ops = {
.priv = 0,
};
-static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
+static int timeriomem_rng_probe(struct platform_device *pdev)
{
struct resource *res;
int ret;
@@ -130,7 +130,7 @@ failed:
return ret;
}
-static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
+static int timeriomem_rng_remove(struct platform_device *pdev)
{
del_timer_sync(&timeriomem_rng_timer);
hwrng_unregister(&timeriomem_rng_ops);
@@ -146,7 +146,7 @@ static struct platform_driver timeriomem_rng_driver = {
.owner = THIS_MODULE,
},
.probe = timeriomem_rng_probe,
- .remove = __devexit_p(timeriomem_rng_remove),
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 5708299507d0..b65c10395959 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -119,7 +119,7 @@ static int virtrng_probe(struct virtio_device *vdev)
return probe_common(vdev);
}
-static void __devexit virtrng_remove(struct virtio_device *vdev)
+static void virtrng_remove(struct virtio_device *vdev)
{
remove_common(vdev);
}
@@ -147,7 +147,7 @@ static struct virtio_driver virtio_rng_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtrng_probe,
- .remove = __devexit_p(virtrng_remove),
+ .remove = virtrng_remove,
#ifdef CONFIG_PM
.freeze = virtrng_freeze,
.restore = virtrng_restore,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a0c84bb30856..053201b062a4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3789,7 +3789,7 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
- /* It's an asyncronous event. */
+ /* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 32a6c7e256bd..1c7fdcd22a98 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -155,7 +155,7 @@ enum si_stat_indexes {
/* Number of watchdog pretimeouts. */
SI_STAT_watchdog_pretimeouts,
- /* Number of asyncronous messages received. */
+ /* Number of asynchronous messages received. */
SI_STAT_incoming_messages,
@@ -1836,7 +1836,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
return rv;
}
-static int __devinit hardcode_find_bmc(void)
+static int hardcode_find_bmc(void)
{
int ret = -ENODEV;
int i;
@@ -2023,7 +2023,7 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static int __devinit try_init_spmi(struct SPMITable *spmi)
+static int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
@@ -2106,7 +2106,7 @@ static int __devinit try_init_spmi(struct SPMITable *spmi)
return 0;
}
-static void __devinit spmi_find_bmc(void)
+static void spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2128,7 +2128,7 @@ static void __devinit spmi_find_bmc(void)
}
}
-static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
+static int ipmi_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
{
struct acpi_device *acpi_dev;
@@ -2228,7 +2228,7 @@ err_free:
return -EINVAL;
}
-static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
+static void ipmi_pnp_remove(struct pnp_dev *dev)
{
struct smi_info *info = pnp_get_drvdata(dev);
@@ -2243,7 +2243,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
static struct pnp_driver ipmi_pnp_driver = {
.name = DEVICE_NAME,
.probe = ipmi_pnp_probe,
- .remove = __devexit_p(ipmi_pnp_remove),
+ .remove = ipmi_pnp_remove,
.id_table = pnp_dev_table,
};
#endif
@@ -2258,7 +2258,7 @@ struct dmi_ipmi_data {
u8 slave_addr;
};
-static int __devinit decode_dmi(const struct dmi_header *dm,
+static int decode_dmi(const struct dmi_header *dm,
struct dmi_ipmi_data *dmi)
{
const u8 *data = (const u8 *)dm;
@@ -2320,7 +2320,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
return 0;
}
-static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
struct smi_info *info;
@@ -2388,7 +2388,7 @@ static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
kfree(info);
}
-static void __devinit dmi_find_bmc(void)
+static void dmi_find_bmc(void)
{
const struct dmi_device *dev = NULL;
struct dmi_ipmi_data data;
@@ -2424,7 +2424,7 @@ static void ipmi_pci_cleanup(struct smi_info *info)
pci_disable_device(pdev);
}
-static int __devinit ipmi_pci_probe_regspacing(struct smi_info *info)
+static int ipmi_pci_probe_regspacing(struct smi_info *info)
{
if (info->si_type == SI_KCS) {
unsigned char status;
@@ -2456,7 +2456,7 @@ static int __devinit ipmi_pci_probe_regspacing(struct smi_info *info)
return DEFAULT_REGSPACING;
}
-static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
+static int ipmi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rv;
@@ -2529,7 +2529,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
return 0;
}
-static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
+static void ipmi_pci_remove(struct pci_dev *pdev)
{
struct smi_info *info = pci_get_drvdata(pdev);
cleanup_one_si(info);
@@ -2546,12 +2546,12 @@ static struct pci_driver ipmi_pci_driver = {
.name = DEVICE_NAME,
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
- .remove = __devexit_p(ipmi_pci_remove),
+ .remove = ipmi_pci_remove,
};
#endif /* CONFIG_PCI */
static struct of_device_id ipmi_match[];
-static int __devinit ipmi_probe(struct platform_device *dev)
+static int ipmi_probe(struct platform_device *dev)
{
#ifdef CONFIG_OF
const struct of_device_id *match;
@@ -2635,7 +2635,7 @@ static int __devinit ipmi_probe(struct platform_device *dev)
return 0;
}
-static int __devexit ipmi_remove(struct platform_device *dev)
+static int ipmi_remove(struct platform_device *dev)
{
#ifdef CONFIG_OF
cleanup_one_si(dev_get_drvdata(&dev->dev));
@@ -2661,7 +2661,7 @@ static struct platform_driver ipmi_driver = {
.of_match_table = ipmi_match,
},
.probe = ipmi_probe,
- .remove = __devexit_p(ipmi_remove),
+ .remove = ipmi_remove,
};
static int wait_for_msg_done(struct smi_info *smi_info)
@@ -3047,7 +3047,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
}
}
-static __devinitdata struct ipmi_default_vals
+static struct ipmi_default_vals
{
int type;
int port;
@@ -3059,7 +3059,7 @@ static __devinitdata struct ipmi_default_vals
{ .port = 0 }
};
-static void __devinit default_find_bmc(void)
+static void default_find_bmc(void)
{
struct smi_info *info;
int i;
@@ -3359,7 +3359,7 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
-static int __devinit init_ipmi_si(void)
+static int init_ipmi_si(void)
{
int i;
char *str;
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index f74e892711dd..e5d3e3f7a49b 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
return 0;
}
-static const struct cx_device_id __devinitconst mbcs_id_table[] = {
+static const struct cx_device_id mbcs_id_table[] = {
{
.part_num = MBCS_PART_NUM,
.mfg_num = MBCS_MFG_NUM,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0537903c985b..c6fa3bc2baa8 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -48,7 +48,7 @@ static inline unsigned long size_inside_page(unsigned long start,
}
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
-static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
return addr + count <= __pa(high_memory);
}
@@ -96,7 +96,7 @@ void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
static ssize_t read_mem(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long p = *ppos;
+ phys_addr_t p = *ppos;
ssize_t read, sz;
char *ptr;
@@ -153,7 +153,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
static ssize_t write_mem(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- unsigned long p = *ppos;
+ phys_addr_t p = *ppos;
ssize_t written, sz;
unsigned long copied;
void *ptr;
@@ -226,7 +226,7 @@ int __weak phys_mem_access_prot_allowed(struct file *file,
*
*/
#ifdef pgprot_noncached
-static int uncached_access(struct file *file, unsigned long addr)
+static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
/*
@@ -258,7 +258,7 @@ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
- unsigned long offset = pfn << PAGE_SHIFT;
+ phys_addr_t offset = pfn << PAGE_SHIFT;
if (uncached_access(file, offset))
return pgprot_noncached(vma_prot);
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index b304ec052501..3f79a9fb6b1b 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -345,8 +345,7 @@ static void __exit pc8736x_gpio_cleanup(void)
unregister_chrdev_region(MKDEV(major,0), PC8736X_GPIO_CT);
release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ platform_device_unregister(pdev);
}
module_init(pc8736x_gpio_init);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 21721d25e388..b66eaa04f8cb 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -549,8 +549,10 @@ static int mgslpc_probe(struct pcmcia_device *link)
/* Initialize the struct pcmcia_device structure */
ret = mgslpc_config(link);
- if (ret)
+ if (ret) {
+ tty_port_destroy(&info->port);
return ret;
+ }
mgslpc_add_device(info);
@@ -2757,6 +2759,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
hdlcdev_exit(info);
#endif
release_resources(info);
+ tty_port_destroy(&info->port);
kfree(info);
mgslpc_device_count--;
return;
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d0d824ebf2c1..1cd49241e60e 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -251,12 +251,8 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
break;
}
- if (signal_pending (current)) {
- if (!bytes_written) {
- bytes_written = -EINTR;
- }
+ if (signal_pending (current))
break;
- }
cond_resched();
}
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 6abdde4da2b7..588063ac9517 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -363,7 +363,7 @@ static struct miscdevice ps3flash_misc = {
.fops = &ps3flash_fops,
};
-static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
+static int ps3flash_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3flash_private *priv;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b86eae9b77df..85e81ec1451e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -399,7 +399,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
-#if 0
static bool debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
@@ -410,9 +409,6 @@ module_param(debug, bool, 0644);
blocking_pool.entropy_count,\
nonblocking_pool.entropy_count,\
## arg); } while (0)
-#else
-#define DEBUG_ENT(fmt, arg...) do {} while (0)
-#endif
/**********************************************************************
*
@@ -437,6 +433,7 @@ struct entropy_store {
int entropy_count;
int entropy_total;
unsigned int initialized:1;
+ bool last_data_init;
__u8 last_data[EXTRACT_SIZE];
};
@@ -829,7 +826,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = min_t(int, bytes, sizeof(tmp));
DEBUG_ENT("going to reseed %s with %d bits "
- "(%d of %d requested)\n",
+ "(%zu of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes = extract_entropy(r->pull, tmp, bytes,
@@ -860,7 +857,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
spin_lock_irqsave(&r->lock, flags);
BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
- DEBUG_ENT("trying to extract %d bits from %s\n",
+ DEBUG_ENT("trying to extract %zu bits from %s\n",
nbytes * 8, r->name);
/* Can we pull enough? */
@@ -882,7 +879,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
}
}
- DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+ DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
spin_unlock_irqrestore(&r->lock, flags);
@@ -957,6 +954,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
+ if (fips_enabled && !r->last_data_init)
+ nbytes += EXTRACT_SIZE;
+
trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -967,6 +968,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
if (fips_enabled) {
unsigned long flags;
+
+ /* prime last_data value if need be, per fips 140-2 */
+ if (!r->last_data_init) {
+ spin_lock_irqsave(&r->lock, flags);
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ r->last_data_init = true;
+ nbytes -= EXTRACT_SIZE;
+ spin_unlock_irqrestore(&r->lock, flags);
+ extract_buf(r, tmp);
+ }
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -1086,6 +1098,7 @@ static void init_std_data(struct entropy_store *r)
r->entropy_count = 0;
r->entropy_total = 0;
+ r->last_data_init = false;
mix_pool_bytes(r, &now, sizeof(now), NULL);
for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_long(&rv))
@@ -1142,11 +1155,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
if (n > SEC_XFER_SIZE)
n = SEC_XFER_SIZE;
- DEBUG_ENT("reading %d bits\n", n*8);
+ DEBUG_ENT("reading %zu bits\n", n*8);
n = extract_entropy_user(&blocking_pool, buf, n);
- DEBUG_ENT("read got %d bits (%d still needed)\n",
+ if (n < 0) {
+ retval = n;
+ break;
+ }
+
+ DEBUG_ENT("read got %zd bits (%zd still needed)\n",
n*8, (nbytes-n)*8);
if (n == 0) {
@@ -1171,10 +1189,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
continue;
}
- if (n < 0) {
- retval = n;
- break;
- }
count += n;
buf += n;
nbytes -= n;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 9b4f0116ff21..d780295a1473 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1164,7 +1164,7 @@ static struct acpi_driver sonypi_acpi_driver = {
};
#endif
-static int __devinit sonypi_create_input_devices(struct platform_device *pdev)
+static int sonypi_create_input_devices(struct platform_device *pdev)
{
struct input_dev *jog_dev;
struct input_dev *key_dev;
@@ -1225,7 +1225,7 @@ static int __devinit sonypi_create_input_devices(struct platform_device *pdev)
return error;
}
-static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
+static int sonypi_setup_ioports(struct sonypi_device *dev,
const struct sonypi_ioport_list *ioport_list)
{
/* try to detect if sony-laptop is being used and thus
@@ -1265,7 +1265,7 @@ static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
return -EBUSY;
}
-static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
+static int sonypi_setup_irq(struct sonypi_device *dev,
const struct sonypi_irq_list *irq_list)
{
while (irq_list->irq) {
@@ -1282,7 +1282,7 @@ static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
return -EBUSY;
}
-static void __devinit sonypi_display_info(void)
+static void sonypi_display_info(void)
{
printk(KERN_INFO "sonypi: detected type%d model, "
"verbose = %d, fnkeyinit = %s, camera = %s, "
@@ -1304,7 +1304,7 @@ static void __devinit sonypi_display_info(void)
sonypi_misc_device.minor);
}
-static int __devinit sonypi_probe(struct platform_device *dev)
+static int sonypi_probe(struct platform_device *dev)
{
const struct sonypi_ioport_list *ioport_list;
const struct sonypi_irq_list *irq_list;
@@ -1428,7 +1428,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
return error;
}
-static int __devexit sonypi_remove(struct platform_device *dev)
+static int sonypi_remove(struct platform_device *dev)
{
sonypi_disable();
@@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = {
.pm = SONYPI_PM,
},
.probe = sonypi_probe,
- .remove = __devexit_p(sonypi_remove),
+ .remove = sonypi_remove,
.shutdown = sonypi_shutdown,
};
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index ad264185eb10..34c63f85104d 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -284,7 +284,7 @@ static void tb0219_pci_irq_init(void)
vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
}
-static int __devinit tb0219_probe(struct platform_device *dev)
+static int tb0219_probe(struct platform_device *dev)
{
int retval;
@@ -318,7 +318,7 @@ static int __devinit tb0219_probe(struct platform_device *dev)
return 0;
}
-static int __devexit tb0219_remove(struct platform_device *dev)
+static int tb0219_remove(struct platform_device *dev)
{
_machine_restart = old_machine_restart;
@@ -334,7 +334,7 @@ static struct platform_device *tb0219_platform_device;
static struct platform_driver tb0219_device_driver = {
.probe = tb0219_probe,
- .remove = __devexit_p(tb0219_remove),
+ .remove = tb0219_remove,
.driver = {
.name = "TB0219",
.owner = THIS_MODULE,
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 5a831aec9d4b..fb447bd0cb61 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -555,7 +555,7 @@ static struct tpm_vendor_specific tpm_tis_i2c = {
.miscdev.fops = &tis_ops,
};
-static int __devinit tpm_tis_i2c_init(struct device *dev)
+static int tpm_tis_i2c_init(struct device *dev)
{
u32 vendor;
int rc = 0;
@@ -632,7 +632,7 @@ static const struct i2c_device_id tpm_tis_i2c_table[] = {
MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
-static int __devinit tpm_tis_i2c_probe(struct i2c_client *client,
+static int tpm_tis_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int rc;
@@ -656,7 +656,7 @@ static int __devinit tpm_tis_i2c_probe(struct i2c_client *client,
return rc;
}
-static int __devexit tpm_tis_i2c_remove(struct i2c_client *client)
+static int tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = tpm_dev.chip;
release_locality(chip, chip->vendor.locality, 1);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index efc4ab36a9d6..9978609d93b2 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -32,14 +32,12 @@
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
-static struct vio_device_id tpm_ibmvtpm_device_table[] __devinitdata = {
+static struct vio_device_id tpm_ibmvtpm_device_table[] = {
{ "IBM,vtpm", "IBM,vtpm"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
-DECLARE_WAIT_QUEUE_HEAD(wq);
-
/**
* ibmvtpm_send_crq - Send a CRQ request
* @vdev: vio device struct
@@ -83,6 +81,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm;
u16 len;
+ int sig;
ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
@@ -91,22 +90,23 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return 0;
}
- wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+ sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+ if (sig)
+ return -EINTR;
+
+ len = ibmvtpm->res_len;
- if (count < ibmvtpm->crq_res.len) {
+ if (count < len) {
dev_err(ibmvtpm->dev,
"Invalid size in recv: count=%ld, crq_size=%d\n",
- count, ibmvtpm->crq_res.len);
+ count, len);
return -EIO;
}
spin_lock(&ibmvtpm->rtce_lock);
- memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
- memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
- ibmvtpm->crq_res.valid = 0;
- ibmvtpm->crq_res.msg = 0;
- len = ibmvtpm->crq_res.len;
- ibmvtpm->crq_res.len = 0;
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
spin_unlock(&ibmvtpm->rtce_lock);
return len;
}
@@ -267,13 +267,12 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
* Return value:
* 0
*/
-static int __devexit tpm_ibmvtpm_remove(struct vio_dev *vdev)
+static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
int rc = 0;
free_irq(vdev->irq, ibmvtpm);
- tasklet_kill(&ibmvtpm->tasklet);
do {
if (rc)
@@ -372,7 +371,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
- unsigned long flags;
int rc = 0;
do {
@@ -387,10 +385,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
@@ -467,7 +466,7 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
if (crq->valid & VTPM_MSG_RES) {
if (++crq_q->index == crq_q->num_entry)
crq_q->index = 0;
- rmb();
+ smp_rmb();
} else
crq = NULL;
return crq;
@@ -535,11 +534,9 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
ibmvtpm->vtpm_version = crq->data;
return;
case VTPM_TPM_COMMAND_RES:
- ibmvtpm->crq_res.valid = crq->valid;
- ibmvtpm->crq_res.msg = crq->msg;
- ibmvtpm->crq_res.len = crq->len;
- ibmvtpm->crq_res.data = crq->data;
- wake_up_interruptible(&wq);
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = crq->len;
+ wake_up_interruptible(&ibmvtpm->wq);
return;
default:
return;
@@ -559,38 +556,19 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data: ibm vtpm device struct
- *
- * Returns:
- * Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
- struct ibmvtpm_dev *ibmvtpm = data;
struct ibmvtpm_crq *crq;
- unsigned long flags;
- spin_lock_irqsave(&ibmvtpm->lock, flags);
+ /* while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
crq->valid = 0;
- wmb();
+ smp_wmb();
}
- vio_enable_interrupts(ibmvtpm->vdev);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ return IRQ_HANDLED;
}
/**
@@ -602,7 +580,7 @@ static void ibmvtpm_tasklet(void *data)
* 0 - Success
* Non-zero - Failure
*/
-static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
const struct vio_device_id *id)
{
struct ibmvtpm_dev *ibmvtpm;
@@ -650,9 +628,6 @@ static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto reg_crq_cleanup;
}
- tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
- (unsigned long)ibmvtpm);
-
rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
tpm_ibmvtpm_driver_name, ibmvtpm);
if (rc) {
@@ -666,13 +641,14 @@ static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
+ init_waitqueue_head(&ibmvtpm->wq);
+
crq_q->index = 0;
ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
chip->vendor.data = (void *)ibmvtpm;
- spin_lock_init(&ibmvtpm->lock);
spin_lock_init(&ibmvtpm->rtce_lock);
rc = ibmvtpm_crq_send_init(ibmvtpm);
@@ -689,7 +665,6 @@ static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
return rc;
init_irq_cleanup:
- tasklet_kill(&ibmvtpm->tasklet);
do {
rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index 4296eb4b4d82..bd82a791f995 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -38,13 +38,12 @@ struct ibmvtpm_dev {
struct vio_dev *vdev;
struct ibmvtpm_crq_queue crq_queue;
dma_addr_t crq_dma_handle;
- spinlock_t lock;
- struct tasklet_struct tasklet;
u32 rtce_size;
void __iomem *rtce_buf;
dma_addr_t rtce_dma_handle;
spinlock_t rtce_lock;
- struct ibmvtpm_crq crq_res;
+ wait_queue_head_t wq;
+ u16 res_len;
u32 vtpm_version;
};
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 3251a44e8ceb..2b480c2960bb 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -415,7 +415,7 @@ static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
-static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
+static int tpm_inf_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
{
int rc = 0;
@@ -594,7 +594,7 @@ err_last:
return rc;
}
-static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
+static void tpm_inf_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
@@ -655,7 +655,7 @@ static struct pnp_driver tpm_inf_pnp_driver = {
.probe = tpm_inf_pnp_probe,
.suspend = tpm_inf_pnp_suspend,
.resume = tpm_inf_pnp_resume,
- .remove = __devexit_p(tpm_inf_pnp_remove)
+ .remove = tpm_inf_pnp_remove
};
static int __init init_inf(void)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 6bdf2671254f..ea31dafbcac2 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -729,7 +729,7 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
#endif
#ifdef CONFIG_PNP
-static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
{
resource_size_t start, len;
@@ -769,7 +769,7 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
return ret;
}
-static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
+static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
{"IFX0102", 0}, /* Infineon */
@@ -783,7 +783,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
-static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
+static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index af98f6d6509b..4945bd3d18d0 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -179,7 +179,6 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;
- tty_port_init(&tpk_port.port);
tpk_port.port.ops = &null_ops;
mutex_init(&tpk_port.port_write_mutex);
@@ -190,6 +189,8 @@ static int __init ttyprintk_init(void)
if (IS_ERR(ttyprintk_driver))
return PTR_ERR(ttyprintk_driver);
+ tty_port_init(&tpk_port.port);
+
ttyprintk_driver->driver_name = "ttyprintk";
ttyprintk_driver->name = "ttyprintk";
ttyprintk_driver->major = TTYAUX_MAJOR;
@@ -211,6 +212,7 @@ static int __init ttyprintk_init(void)
error:
tty_unregister_driver(ttyprintk_driver);
put_tty_driver(ttyprintk_driver);
+ tty_port_destroy(&tpk_port.port);
ttyprintk_driver = NULL;
return ret;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8ab9c3d4bf13..ee4dbeafb377 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
#include "../tty/hvc/hvc_console.h"
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -111,6 +115,21 @@ struct port_buffer {
size_t len;
/* offset in the buf from which to consume data */
size_t offset;
+
+ /* DMA address of buffer */
+ dma_addr_t dma;
+
+ /* Device we got DMA memory from */
+ struct device *dev;
+
+ /* List of pending dma buffers to free */
+ struct list_head list;
+
+ /* If sgpages == 0 then buf is used */
+ unsigned int sgpages;
+
+ /* sg is used if sgpages > 0. sg must be the last field in this struct */
+ struct scatterlist sg[0];
};
/*
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
return false;
}
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+ return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
static inline bool use_multiport(struct ports_device *portdev)
{
/*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
}
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
{
- kfree(buf->buf);
+ unsigned int i;
+
+ for (i = 0; i < buf->sgpages; i++) {
+ struct page *page = sg_page(&buf->sg[i]);
+ if (!page)
+ break;
+ put_page(page);
+ }
+
+ if (!buf->dev) {
+ kfree(buf->buf);
+ } else if (is_rproc_enabled) {
+ unsigned long flags;
+
+ /* dma_free_coherent requires interrupts to be enabled. */
+ if (!can_sleep) {
+ /* queue up dma-buffers to be freed later */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_add_tail(&buf->list, &pending_free_dma_bufs);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+ return;
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+ /* Release device refcnt and allow it to be freed */
+ put_device(buf->dev);
+ }
+
kfree(buf);
}
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+ unsigned long flags;
+ struct port_buffer *buf, *tmp;
+ LIST_HEAD(tmp_list);
+
+ if (list_empty(&pending_free_dma_bufs))
+ return;
+
+ /* Create a copy of the pending_free_dma_bufs while holding the lock */
+ spin_lock_irqsave(&dma_bufs_lock, flags);
+ list_cut_position(&tmp_list, &pending_free_dma_bufs,
+ pending_free_dma_bufs.prev);
+ spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+ /* Release the dma buffers, without irqs enabled */
+ list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+ list_del(&buf->list);
+ free_buf(buf, true);
+ }
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+ int pages)
{
struct port_buffer *buf;
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ reclaim_dma_bufs();
+
+ /*
+ * Allocate buffer and the sg list. The sg list array is allocated
+ * directly after the port_buffer struct.
+ */
+ buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+ GFP_KERNEL);
if (!buf)
goto fail;
- buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+ buf->sgpages = pages;
+ if (pages > 0) {
+ buf->dev = NULL;
+ buf->buf = NULL;
+ return buf;
+ }
+
+ if (is_rproc_serial(vq->vdev)) {
+ /*
+ * Allocate DMA memory from ancestor. When a virtio
+ * device is created by remoteproc, the DMA memory is
+ * associated with the grandparent device:
+ * vdev => rproc => platform-dev.
+ * The code here would have been less quirky if
+ * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+ * in dma-coherent.c
+ */
+ if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+ goto free_buf;
+ buf->dev = vq->vdev->dev.parent->parent;
+
+ /* Increase device refcnt to avoid freeing it */
+ get_device(buf->dev);
+ buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+ GFP_KERNEL);
+ } else {
+ buf->dev = NULL;
+ buf->buf = kmalloc(buf_size, GFP_KERNEL);
+ }
+
if (!buf->buf)
goto free_buf;
buf->len = 0;
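
free_buf() above cannot call dma_free_coherent() from atomic context, so it parks
rproc buffers on pending_free_dma_bufs and lets reclaim_dma_bufs() release them
later, once sleeping is allowed. A small sketch of that defer-and-reclaim idiom,
with malloc/free standing in for the coherent DMA allocator and can_sleep modelling
the calling context (names are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct buf {
            struct buf *next;
            void *data;
    };

    static struct buf *pending;             /* like pending_free_dma_bufs */

    static void free_buf(struct buf *b, bool can_sleep)
    {
            if (!can_sleep) {               /* atomic context: defer the free */
                    b->next = pending;
                    pending = b;
                    return;
            }
            free(b->data);
            free(b);
    }

    static void reclaim_bufs(void)          /* like reclaim_dma_bufs() */
    {
            while (pending) {
                    struct buf *b = pending;

                    pending = b->next;
                    free(b->data);
                    free(b);
            }
    }

    int main(void)
    {
            struct buf *b = malloc(sizeof(*b));

            b->data = malloc(64);
            free_buf(b, false);             /* deferred */
            reclaim_bufs();                 /* actually released here */
            puts("reclaimed");
            return 0;
    }
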
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
+ if (!ret)
+ ret = vq->num_free;
return ret;
}
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
port->stats.bytes_discarded += buf->len - buf->offset;
if (add_inbuf(port->in_vq, buf) < 0) {
err++;
- free_buf(buf);
+ free_buf(buf, false);
}
port->inbuf = NULL;
buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
return 0;
}
-struct buffer_token {
- union {
- void *buf;
- struct scatterlist *sg;
- } u;
- /* If sgpages == 0 then buf is used, else sg is used */
- unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
- int i;
- struct page *page;
-
- for (i = 0; i < nrpages; i++) {
- page = sg_page(&sg[i]);
- if (!page)
- break;
- put_page(page);
- }
- kfree(sg);
-}
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
- struct buffer_token *tok;
+ struct port_buffer *buf;
unsigned int len;
if (!port->portdev) {
/* Device has been unplugged. vqs are already gone. */
return;
}
- while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
- if (tok->sgpages)
- reclaim_sg_pages(tok->u.sg, tok->sgpages);
- else
- kfree(tok->u.buf);
- kfree(tok);
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ free_buf(buf, false);
port->outvq_full = false;
}
}
static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
int nents, size_t in_count,
- struct buffer_token *tok, bool nonblock)
+ void *data, bool nonblock)
{
struct virtqueue *out_vq;
- ssize_t ret;
+ int err;
unsigned long flags;
unsigned int len;
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
reclaim_consumed_buffers(port);
- ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+ err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
- if (ret < 0) {
+ if (err) {
in_count = 0;
goto done;
}
- if (ret == 0)
+ if (out_vq->num_free == 0)
port->outvq_full = true;
if (nonblock)
@@ -572,37 +662,6 @@ done:
return in_count;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
- bool nonblock)
-{
- struct scatterlist sg[1];
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = 0;
- tok->u.buf = in_buf;
-
- sg_init_one(sg, in_buf, in_count);
-
- return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
- size_t in_count, bool nonblock)
-{
- struct buffer_token *tok;
-
- tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
- if (!tok)
- return -ENOMEM;
- tok->sgpages = nents;
- tok->u.sg = sg;
-
- return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
/*
* Give out the data that's requested from the buffer that we have
* queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct port *port;
- char *buf;
+ struct port_buffer *buf;
ssize_t ret;
bool nonblock;
+ struct scatterlist sg[1];
/* Userspace could be out to fool us */
if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
count = min((size_t)(32 * 1024), count);
- buf = kmalloc(count, GFP_KERNEL);
+ buf = alloc_buf(port->out_vq, count, 0);
if (!buf)
return -ENOMEM;
- ret = copy_from_user(buf, ubuf, count);
+ ret = copy_from_user(buf->buf, ubuf, count);
if (ret) {
ret = -EFAULT;
goto free_buf;
@@ -784,13 +844,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
* through to the host.
*/
nonblock = true;
- ret = send_buf(port, buf, count, nonblock);
+ sg_init_one(sg, buf->buf, count);
+ ret = __send_to_port(port, sg, 1, count, buf, nonblock);
if (nonblock && ret > 0)
goto out;
free_buf:
- kfree(buf);
+ free_buf(buf, true);
out:
return ret;
}
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
struct port *port = filp->private_data;
struct sg_list sgl;
ssize_t ret;
+ struct port_buffer *buf;
struct splice_desc sd = {
.total_len = len,
.flags = flags,
@@ -863,22 +925,34 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
.u.data = &sgl,
};
+ /*
+ * Rproc_serial does not yet support splice. To support splice
+ * pipe_to_sg() must allocate dma-buffers and copy content from
+ * regular pages to dma pages. And alloc_buf and free_buf must
+ * support allocating and freeing such a list of dma-buffers.
+ */
+ if (is_rproc_serial(port->out_vq->vdev))
+ return -EINVAL;
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
return ret;
+ buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+ if (!buf)
+ return -ENOMEM;
+
sgl.n = 0;
sgl.len = 0;
sgl.size = pipe->nrbufs;
- sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
- if (unlikely(!sgl.sg))
- return -ENOMEM;
-
+ sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
if (likely(ret > 0))
- ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+ ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
+ if (unlikely(ret <= 0))
+ free_buf(buf, true);
return ret;
}
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+ reclaim_dma_bufs();
/*
* Locks aren't necessary here as a port can't be opened after
* unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
+ struct scatterlist sg[1];
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
if (!port)
return -EPIPE;
- return send_buf(port, (void *)buf, count, false);
+ sg_init_one(sg, buf, count);
+ return __send_to_port(port, sg, 1, count, (void *)buf, false);
}
/*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+ /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
hvc_resize(port->cons.hvc, port->cons.ws);
}
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
nr_added_bufs = 0;
do {
- buf = alloc_buf(PAGE_SIZE);
+ buf = alloc_buf(vq, PAGE_SIZE, 0);
if (!buf)
break;
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
- free_buf(buf);
+ free_buf(buf, true);
break;
}
nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
goto free_device;
}
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
+ if (is_rproc_serial(port->portdev->vdev))
+ /*
+ * For rproc_serial assume remote processor is connected.
+ * rproc_serial does not want the console port, only
+ * the generic port implementation.
+ */
+ port->host_connected = true;
+ else if (!use_multiport(port->portdev)) {
+ /*
+ * If we're not using multiport support,
+ * this has to be a console port.
+ */
err = init_port_console(port);
if (err)
goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
free_inbufs:
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
/* Remove buffers we queued up for the Host to send us data in. */
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ free_buf(buf, true);
+
+ /* Free pending buffers from the out-queue. */
+ while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+ free_buf(buf, true);
}
/*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
if (add_inbuf(portdev->c_ivq, buf) < 0) {
dev_warn(&portdev->vdev->dev,
"Error adding buffer to queue\n");
- free_buf(buf);
+ free_buf(buf, false);
}
}
spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
return;
while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
+ free_buf(buf, true);
while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
+ free_buf(buf, true);
}
/*
@@ -1846,7 +1938,7 @@ static void remove_controlq_data(struct ports_device *portdev)
* config space to see how many ports the host has spawned. We
* initialize each port found.
*/
-static int __devinit virtcons_probe(struct virtio_device *vdev)
+static int virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
int err;
@@ -1882,11 +1974,15 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
multiport = false;
portdev->config.max_nr_ports = 1;
- if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
- offsetof(struct virtio_console_config,
- max_nr_ports),
- &portdev->config.max_nr_ports) == 0)
+
+ /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+ if (!is_rproc_serial(vdev) &&
+ virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+ offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports) == 0) {
multiport = true;
+ }
err = init_vqs(portdev);
if (err < 0) {
@@ -1966,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);
@@ -1996,6 +2093,16 @@ static unsigned int features[] = {
VIRTIO_CONSOLE_F_MULTIPORT,
};
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+ { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+ { 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
#ifdef CONFIG_PM
static int virtcons_freeze(struct virtio_device *vdev)
{
@@ -2080,6 +2187,16 @@ static struct virtio_driver virtio_console = {
#endif
};
+static struct virtio_driver virtio_rproc_serial = {
+ .feature_table = rproc_serial_features,
+ .feature_table_size = ARRAY_SIZE(rproc_serial_features),
+ .driver.name = "virtio_rproc_serial",
+ .driver.owner = THIS_MODULE,
+ .id_table = rproc_serial_id_table,
+ .probe = virtcons_probe,
+ .remove = virtcons_remove,
+};
+
static int __init init(void)
{
int err;
@@ -2104,7 +2221,15 @@ static int __init init(void)
pr_err("Error %d registering virtio driver\n", err);
goto free;
}
+ err = register_virtio_driver(&virtio_rproc_serial);
+ if (err < 0) {
+ pr_err("Error %d registering virtio rproc serial driver\n",
+ err);
+ goto unregister;
+ }
return 0;
+unregister:
+ unregister_virtio_driver(&virtio_console);
free:
if (pdrvdata.debugfs_dir)
debugfs_remove_recursive(pdrvdata.debugfs_dir);
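
init() above now registers a second virtio driver and unwinds the first
registration when the second one fails. A generic sketch of that
register-then-unwind-on-error pattern (the helpers below are stand-ins, not
kernel APIs):

    #include <stdio.h>

    static int register_console(void) { return 0; }
    static int register_rproc_serial(void) { return -1; } /* pretend it fails */
    static void unregister_console(void) { puts("console unregistered"); }

    static int init(void)
    {
            int err;

            err = register_console();
            if (err)
                    return err;

            err = register_rproc_serial();
            if (err) {
                    unregister_console();   /* undo the earlier step */
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            printf("init: %d\n", init());
            return 0;
    }
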
@@ -2114,7 +2239,10 @@ free:
static void __exit fini(void)
{
+ reclaim_dma_bufs();
+
unregister_virtio_driver(&virtio_console);
+ unregister_virtio_driver(&virtio_rproc_serial);
class_destroy(pdrvdata.class);
if (pdrvdata.debugfs_dir)
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 2c5d15beea35..5224da5202d3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -595,7 +595,7 @@ static const struct file_operations hwicap_fops = {
.llseek = noop_llseek,
};
-static int __devinit hwicap_setup(struct device *dev, int id,
+static int hwicap_setup(struct device *dev, int id,
const struct resource *regs_res,
const struct hwicap_driver_config *config,
const struct config_registers *config_regs)
@@ -717,7 +717,7 @@ static struct hwicap_driver_config fifo_icap_config = {
.reset = fifo_icap_reset,
};
-static int __devexit hwicap_remove(struct device *dev)
+static int hwicap_remove(struct device *dev)
{
struct hwicap_drvdata *drvdata;
@@ -740,7 +740,7 @@ static int __devexit hwicap_remove(struct device *dev)
}
#ifdef CONFIG_OF
-static int __devinit hwicap_of_probe(struct platform_device *op,
+static int hwicap_of_probe(struct platform_device *op,
const struct hwicap_driver_config *config)
{
struct resource res;
@@ -785,8 +785,8 @@ static inline int hwicap_of_probe(struct platform_device *op,
}
#endif /* CONFIG_OF */
-static const struct of_device_id __devinitconst hwicap_of_match[];
-static int __devinit hwicap_drv_probe(struct platform_device *pdev)
+static const struct of_device_id hwicap_of_match[];
+static int hwicap_drv_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct resource *res;
@@ -822,14 +822,14 @@ static int __devinit hwicap_drv_probe(struct platform_device *pdev)
&buffer_icap_config, regs);
}
-static int __devexit hwicap_drv_remove(struct platform_device *pdev)
+static int hwicap_drv_remove(struct platform_device *pdev)
{
return hwicap_remove(&pdev->dev);
}
#ifdef CONFIG_OF
/* Match table for device tree binding */
-static const struct of_device_id __devinitconst hwicap_of_match[] = {
+static const struct of_device_id hwicap_of_match[] = {
{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
{ .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
{},
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 823f62d900ba..a47e6ee98b8c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -64,3 +64,5 @@ config CLK_TWL6040
as functional clock.
endmenu
+
+source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 2701235d5757..ee90e87e7675 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -13,12 +13,15 @@ obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o
+obj-$(CONFIG_PLAT_ORION) += mvebu/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
+obj-$(CONFIG_ARCH_SUNXI) += clk-sunxi.o
+obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
# Chip specific
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index b61ee2c5af84..e69991aab43a 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -55,5 +55,5 @@ void __init bcm2835_init_clocks(void)
pr_err("uart1_pclk not registered\n");
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
- pr_err("uart0_pclk alias not registered\n");
+ pr_err("uart1_pclk alias not registered\n");
}
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index ac5f5434cb9a..d098f72e1d5f 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -143,7 +143,7 @@ static int max77686_clk_register(struct device *dev,
return 0;
}
-static __devinit int max77686_clk_probe(struct platform_device *pdev)
+static int max77686_clk_probe(struct platform_device *pdev)
{
struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max77686_clk **max77686_clks;
@@ -199,7 +199,7 @@ out:
return ret;
}
-static int __devexit max77686_clk_remove(struct platform_device *pdev)
+static int max77686_clk_remove(struct platform_device *pdev)
{
struct max77686_clk **max77686_clks = platform_get_drvdata(pdev);
int i;
@@ -223,7 +223,7 @@ static struct platform_driver max77686_clk_driver = {
.owner = THIS_MODULE,
},
.probe = max77686_clk_probe,
- .remove = __devexit_p(max77686_clk_remove),
+ .remove = max77686_clk_remove,
.id_table = max77686_clk_id,
};
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 517a8ff7121e..6b4c70f7d23d 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -20,6 +20,7 @@ void __init nomadik_clk_init(void)
clk_register_clkdev(clk, NULL, "gpio.2");
clk_register_clkdev(clk, NULL, "gpio.3");
clk_register_clkdev(clk, NULL, "rng");
+ clk_register_clkdev(clk, NULL, "fsmc-nand");
/*
* The 2.4 MHz TIMCLK reference clock is active at boot time, this is
diff --git a/drivers/clk/clk-sunxi.c b/drivers/clk/clk-sunxi.c
new file mode 100644
index 000000000000..0e831b584ba7
--- /dev/null
+++ b/drivers/clk/clk-sunxi.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/sunxi.h>
+#include <linux/of.h>
+
+static const __initconst struct of_device_id clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ {}
+};
+
+void __init sunxi_init_clocks(void)
+{
+ of_clk_init(clk_match);
+}
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index bc1e713e7b9c..3af729b1b89d 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -78,7 +78,7 @@ static struct clk_init_data wm831x_clkout_init = {
.flags = CLK_IS_ROOT,
};
-static int __devinit twl6040_clk_probe(struct platform_device *pdev)
+static int twl6040_clk_probe(struct platform_device *pdev)
{
struct twl6040 *twl6040 = dev_get_drvdata(pdev->dev.parent);
struct twl6040_clk *clkdata;
@@ -100,7 +100,7 @@ static int __devinit twl6040_clk_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit twl6040_clk_remove(struct platform_device *pdev)
+static int twl6040_clk_remove(struct platform_device *pdev)
{
struct twl6040_clk *clkdata = dev_get_drvdata(&pdev->dev);
@@ -115,7 +115,7 @@ static struct platform_driver twl6040_clk_driver = {
.owner = THIS_MODULE,
},
.probe = twl6040_clk_probe,
- .remove = __devexit_p(twl6040_clk_remove),
+ .remove = twl6040_clk_remove,
};
module_platform_driver(twl6040_clk_driver);
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index db4fbf20ffd7..16ed06808554 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -350,7 +350,7 @@ static struct clk_init_data wm831x_clkout_init = {
.flags = CLK_SET_RATE_PARENT,
};
-static __devinit int wm831x_clk_probe(struct platform_device *pdev)
+static int wm831x_clk_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_clk *clkdata;
@@ -389,14 +389,14 @@ static __devinit int wm831x_clk_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit wm831x_clk_remove(struct platform_device *pdev)
+static int wm831x_clk_remove(struct platform_device *pdev)
{
return 0;
}
static struct platform_driver wm831x_clk_driver = {
.probe = wm831x_clk_probe,
- .remove = __devexit_p(wm831x_clk_remove),
+ .remove = wm831x_clk_remove,
.driver = {
.name = "wm831x-clk",
.owner = THIS_MODULE,
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
new file mode 100644
index 000000000000..37a30514fd66
--- /dev/null
+++ b/drivers/clk/clk-zynq.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2012 National Instruments
+ *
+ * Josh Cartwright <josh.cartwright@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+
+static void __iomem *slcr_base;
+
+struct zynq_pll_clk {
+ struct clk_hw hw;
+ void __iomem *pll_ctrl;
+ void __iomem *pll_cfg;
+};
+
+#define to_zynq_pll_clk(hw) container_of(hw, struct zynq_pll_clk, hw)
+
+#define CTRL_PLL_FDIV(x) ((x) >> 12)
+
+static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
+ return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
+}
+
+static const struct clk_ops zynq_pll_clk_ops = {
+ .recalc_rate = zynq_pll_recalc_rate,
+};
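
zynq_pll_recalc_rate() above multiplies the parent rate by the feedback divider
read from PLL_CTRL (CTRL_PLL_FDIV() just shifts the register right by 12 without
masking the field). A quick worked example of that arithmetic; the 33.333 MHz
PS_CLK input and the FDIV value of 40 are assumptions for illustration only:

    #include <stdio.h>

    #define CTRL_PLL_FDIV(x)        ((x) >> 12)

    int main(void)
    {
            unsigned long parent_rate = 33333333;   /* PS_CLK, assumed */
            unsigned int pll_ctrl = 40 << 12;       /* PLL_FDIV = 40 */

            /* 33.333 MHz * 40 = ~1333 MHz */
            printf("pll rate = %lu Hz\n",
                   parent_rate * CTRL_PLL_FDIV(pll_ctrl));
            return 0;
    }
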
+
+static void __init zynq_pll_clk_setup(struct device_node *np)
+{
+ struct clk_init_data init;
+ struct zynq_pll_clk *pll;
+ const char *parent_name;
+ struct clk *clk;
+ u32 regs[2];
+ int ret;
+
+ ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
+ if (WARN_ON(ret))
+ return;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (WARN_ON(!pll))
+ return;
+
+ pll->pll_ctrl = slcr_base + regs[0];
+ pll->pll_cfg = slcr_base + regs[1];
+
+ of_property_read_string(np, "clock-output-names", &init.name);
+
+ init.ops = &zynq_pll_clk_ops;
+ parent_name = of_clk_get_parent_name(np, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(ret))
+ return;
+}
+
+struct zynq_periph_clk {
+ struct clk_hw hw;
+ struct clk_onecell_data onecell_data;
+ struct clk *gates[2];
+ void __iomem *clk_ctrl;
+ spinlock_t clkact_lock;
+};
+
+#define to_zynq_periph_clk(hw) container_of(hw, struct zynq_periph_clk, hw)
+
+static const u8 periph_clk_parent_map[] = {
+ 0, 0, 1, 2
+};
+#define PERIPH_CLK_CTRL_SRC(x) (periph_clk_parent_map[((x) & 0x30) >> 4])
+#define PERIPH_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
+
+static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
+ return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
+}
+
+static u8 zynq_periph_get_parent(struct clk_hw *hw)
+{
+ struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
+ return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
+}
+
+static const struct clk_ops zynq_periph_clk_ops = {
+ .recalc_rate = zynq_periph_recalc_rate,
+ .get_parent = zynq_periph_get_parent,
+};
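
The two macros above pick the parent mux setting out of bits [5:4] (remapped
through periph_clk_parent_map) and the divisor out of bits [13:8] of the
peripheral clock control register. A worked decode with an invented register
value and an assumed 1 GHz parent:

    #include <stdio.h>

    static const unsigned char periph_clk_parent_map[] = { 0, 0, 1, 2 };
    #define PERIPH_CLK_CTRL_SRC(x)  (periph_clk_parent_map[((x) & 0x30) >> 4])
    #define PERIPH_CLK_CTRL_DIV(x)  (((x) & 0x3F00) >> 8)

    int main(void)
    {
            unsigned int ctrl = (20 << 8) | (2 << 4); /* DIVISOR=20, SRCSEL=2 */
            unsigned long parent_rate = 1000000000;   /* assumed IO PLL rate */

            printf("parent index %u, rate %lu Hz\n",
                   PERIPH_CLK_CTRL_SRC(ctrl),
                   parent_rate / PERIPH_CLK_CTRL_DIV(ctrl));
            return 0;
    }
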
+
+static void __init zynq_periph_clk_setup(struct device_node *np)
+{
+ struct zynq_periph_clk *periph;
+ const char *parent_names[3];
+ struct clk_init_data init;
+ int clk_num = 0, err;
+ const char *name;
+ struct clk *clk;
+ u32 reg;
+ int i;
+
+ err = of_property_read_u32(np, "reg", &reg);
+ if (WARN_ON(err))
+ return;
+
+ periph = kzalloc(sizeof(*periph), GFP_KERNEL);
+ if (WARN_ON(!periph))
+ return;
+
+ periph->clk_ctrl = slcr_base + reg;
+ spin_lock_init(&periph->clkact_lock);
+
+ init.name = np->name;
+ init.ops = &zynq_periph_clk_ops;
+ for (i = 0; i < ARRAY_SIZE(parent_names); i++)
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ init.parent_names = parent_names;
+ init.num_parents = ARRAY_SIZE(parent_names);
+
+ periph->hw.init = &init;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(err))
+ return;
+
+ err = of_property_read_string_index(np, "clock-output-names", 0,
+ &name);
+ if (WARN_ON(err))
+ return;
+
+ periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
+ periph->clk_ctrl, 0, 0,
+ &periph->clkact_lock);
+ if (WARN_ON(IS_ERR(periph->gates[0])))
+ return;
+ clk_num++;
+
+ /* some periph clks have 2 downstream gates */
+ err = of_property_read_string_index(np, "clock-output-names", 1,
+ &name);
+ if (err != -ENODATA) {
+ periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
+ periph->clk_ctrl, 1, 0,
+ &periph->clkact_lock);
+ if (WARN_ON(IS_ERR(periph->gates[1])))
+ return;
+ clk_num++;
+ }
+
+ periph->onecell_data.clks = periph->gates;
+ periph->onecell_data.clk_num = clk_num;
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &periph->onecell_data);
+ if (WARN_ON(err))
+ return;
+}
+
+/* CPU Clock domain is modelled as a mux with 4 child subclks, whose
+ * derived rates depend on CLK_621_TRUE
+ */
+
+struct zynq_cpu_clk {
+ struct clk_hw hw;
+ struct clk_onecell_data onecell_data;
+ struct clk *subclks[4];
+ void __iomem *clk_ctrl;
+ spinlock_t clkact_lock;
+};
+
+#define to_zynq_cpu_clk(hw) container_of(hw, struct zynq_cpu_clk, hw)
+
+static const u8 zynq_cpu_clk_parent_map[] = {
+ 1, 1, 2, 0
+};
+#define CPU_CLK_SRCSEL(x) (zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
+#define CPU_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
+
+static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
+{
+ struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
+ return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
+}
+
+static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
+ return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
+}
+
+static const struct clk_ops zynq_cpu_clk_ops = {
+ .get_parent = zynq_cpu_clk_get_parent,
+ .recalc_rate = zynq_cpu_clk_recalc_rate,
+};
+
+struct zynq_cpu_subclk {
+ struct clk_hw hw;
+ void __iomem *clk_621;
+ enum {
+ CPU_SUBCLK_6X4X,
+ CPU_SUBCLK_3X2X,
+ CPU_SUBCLK_2X,
+ CPU_SUBCLK_1X,
+ } which;
+};
+
+#define CLK_621_TRUE(x) ((x) & 1)
+
+#define to_zynq_cpu_subclk(hw) container_of(hw, struct zynq_cpu_subclk, hw)
+
+static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long uninitialized_var(rate);
+ struct zynq_cpu_subclk *subclk;
+ bool is_621;
+
+ subclk = to_zynq_cpu_subclk(hw);
+ is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));
+
+ switch (subclk->which) {
+ case CPU_SUBCLK_6X4X:
+ rate = parent_rate;
+ break;
+ case CPU_SUBCLK_3X2X:
+ rate = parent_rate / 2;
+ break;
+ case CPU_SUBCLK_2X:
+ rate = parent_rate / (is_621 ? 3 : 2);
+ break;
+ case CPU_SUBCLK_1X:
+ rate = parent_rate / (is_621 ? 6 : 4);
+ break;
+ }
+
+ return rate;
+}
+
+static const struct clk_ops zynq_cpu_subclk_ops = {
+ .recalc_rate = zynq_cpu_subclk_recalc_rate,
+};
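
zynq_cpu_subclk_recalc_rate() above implements the Zynq 6:2:1 versus 4:2:1 clock
scheme: with CLK_621_TRUE set, the 2x and 1x domains run at 1/3 and 1/6 of the
6x4x rate, otherwise at 1/2 and 1/4. A worked example (the 1.333 GHz parent rate
is an assumption):

    #include <stdio.h>

    int main(void)
    {
            unsigned long cpu_6x4x = 1333333333;
            int is_621 = 1;                 /* CLK_621_TRUE set */

            printf("6x4x: %lu\n", cpu_6x4x);
            printf("3x2x: %lu\n", cpu_6x4x / 2);
            printf("2x:   %lu\n", cpu_6x4x / (is_621 ? 3 : 2));
            printf("1x:   %lu\n", cpu_6x4x / (is_621 ? 6 : 4));
            return 0;
    }
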
+
+static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
+ void __iomem *clk_621)
+{
+ struct zynq_cpu_subclk *subclk;
+ struct clk_init_data init;
+ struct clk *clk;
+ int err;
+
+ err = of_property_read_string_index(np, "clock-output-names",
+ which, &init.name);
+ if (WARN_ON(err))
+ goto err_read_output_name;
+
+ subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
+ if (!subclk)
+ goto err_subclk_alloc;
+
+ subclk->clk_621 = clk_621;
+ subclk->which = which;
+
+ init.ops = &zynq_cpu_subclk_ops;
+ init.parent_names = &np->name;
+ init.num_parents = 1;
+
+ subclk->hw.init = &init;
+
+ clk = clk_register(NULL, &subclk->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ goto err_clk_register;
+
+ return clk;
+
+err_clk_register:
+ kfree(subclk);
+err_subclk_alloc:
+err_read_output_name:
+ return ERR_PTR(-EINVAL);
+}
+
+static void __init zynq_cpu_clk_setup(struct device_node *np)
+{
+ struct zynq_cpu_clk *cpuclk;
+ const char *parent_names[3];
+ struct clk_init_data init;
+ void __iomem *clk_621;
+ struct clk *clk;
+ u32 reg[2];
+ int err;
+ int i;
+
+ err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (WARN_ON(err))
+ return;
+
+ cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
+ if (WARN_ON(!cpuclk))
+ return;
+
+ cpuclk->clk_ctrl = slcr_base + reg[0];
+ clk_621 = slcr_base + reg[1];
+ spin_lock_init(&cpuclk->clkact_lock);
+
+ init.name = np->name;
+ init.ops = &zynq_cpu_clk_ops;
+ for (i = 0; i < ARRAY_SIZE(parent_names); i++)
+ parent_names[i] = of_clk_get_parent_name(np, i);
+ init.parent_names = parent_names;
+ init.num_parents = ARRAY_SIZE(parent_names);
+
+ cpuclk->hw.init = &init;
+
+ clk = clk_register(NULL, &cpuclk->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
+ err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (WARN_ON(err))
+ return;
+
+ for (i = 0; i < 4; i++) {
+ cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
+ if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
+ return;
+ }
+
+ cpuclk->onecell_data.clks = cpuclk->subclks;
+ cpuclk->onecell_data.clk_num = i;
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &cpuclk->onecell_data);
+ if (WARN_ON(err))
+ return;
+}
+
+static const __initconst struct of_device_id zynq_clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
+ { .compatible = "xlnx,zynq-periph-clock",
+ .data = zynq_periph_clk_setup, },
+ { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
+ {}
+};
+
+void __init xilinx_zynq_clocks_init(void __iomem *slcr)
+{
+ slcr_base = slcr;
+ of_clk_init(zynq_clk_match);
+}
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
new file mode 100644
index 000000000000..57323fd15ec9
--- /dev/null
+++ b/drivers/clk/mvebu/Kconfig
@@ -0,0 +1,8 @@
+config MVEBU_CLK_CORE
+ bool
+
+config MVEBU_CLK_CPU
+ bool
+
+config MVEBU_CLK_GATING
+ bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
new file mode 100644
index 000000000000..58df3dc49363
--- /dev/null
+++ b/drivers/clk/mvebu/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MVEBU_CLK_CORE) += clk.o clk-core.o
+obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
+obj-$(CONFIG_MVEBU_CLK_GATING) += clk-gating-ctrl.o
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
new file mode 100644
index 000000000000..69056a7479e8
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.c
@@ -0,0 +1,675 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include "clk-core.h"
+
+struct core_ratio {
+ int id;
+ const char *name;
+};
+
+struct core_clocks {
+ u32 (*get_tclk_freq)(void __iomem *sar);
+ u32 (*get_cpu_freq)(void __iomem *sar);
+ void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
+ const struct core_ratio *ratios;
+ int num_ratios;
+};
+
+static struct clk_onecell_data clk_data;
+
+static void __init mvebu_clk_core_setup(struct device_node *np,
+ struct core_clocks *coreclk)
+{
+ const char *tclk_name = "tclk";
+ const char *cpuclk_name = "cpuclk";
+ void __iomem *base;
+ unsigned long rate;
+ int n;
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ /*
+ * Allocate struct for TCLK, cpu clk, and core ratio clocks
+ */
+ clk_data.clk_num = 2 + coreclk->num_ratios;
+ clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_data.clks))
+ return;
+
+ /*
+ * Register TCLK
+ */
+ of_property_read_string_index(np, "clock-output-names", 0,
+ &tclk_name);
+ rate = coreclk->get_tclk_freq(base);
+ clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[0]));
+
+ /*
+ * Register CPU clock
+ */
+ of_property_read_string_index(np, "clock-output-names", 1,
+ &cpuclk_name);
+ rate = coreclk->get_cpu_freq(base);
+ clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
+ CLK_IS_ROOT, rate);
+ WARN_ON(IS_ERR(clk_data.clks[1]));
+
+ /*
+ * Register fixed-factor clocks derived from CPU clock
+ */
+ for (n = 0; n < coreclk->num_ratios; n++) {
+ const char *rclk_name = coreclk->ratios[n].name;
+ int mult, div;
+
+ of_property_read_string_index(np, "clock-output-names",
+ 2+n, &rclk_name);
+ coreclk->get_clk_ratio(base, coreclk->ratios[n].id,
+ &mult, &div);
+ clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
+ cpuclk_name, 0, mult, div);
+ WARN_ON(IS_ERR(clk_data.clks[2+n]));
+ }
+
+ /*
+ * SAR register isn't needed anymore
+ */
+ iounmap(base);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+#ifdef CONFIG_MACH_ARMADA_370_XP
+/*
+ * Armada 370/XP Sample At Reset is a 64-bit bitfield split across two
+ * 32-bit registers
+ */
+
+#define SARL 0 /* Low part [0:31] */
+#define SARL_AXP_PCLK_FREQ_OPT 21
+#define SARL_AXP_PCLK_FREQ_OPT_MASK 0x7
+#define SARL_A370_PCLK_FREQ_OPT 11
+#define SARL_A370_PCLK_FREQ_OPT_MASK 0xF
+#define SARL_AXP_FAB_FREQ_OPT 24
+#define SARL_AXP_FAB_FREQ_OPT_MASK 0xF
+#define SARL_A370_FAB_FREQ_OPT 15
+#define SARL_A370_FAB_FREQ_OPT_MASK 0x1F
+#define SARL_A370_TCLK_FREQ_OPT 20
+#define SARL_A370_TCLK_FREQ_OPT_MASK 0x1
+#define SARH 4 /* High part [32:63] */
+#define SARH_AXP_PCLK_FREQ_OPT (52-32)
+#define SARH_AXP_PCLK_FREQ_OPT_MASK 0x1
+#define SARH_AXP_PCLK_FREQ_OPT_SHIFT 3
+#define SARH_AXP_FAB_FREQ_OPT (51-32)
+#define SARH_AXP_FAB_FREQ_OPT_MASK 0x1
+#define SARH_AXP_FAB_FREQ_OPT_SHIFT 4
+
+static const u32 __initconst armada_370_tclk_frequencies[] = {
+ 16600000,
+ 20000000,
+};
+
+static u32 __init armada_370_get_tclk_freq(void __iomem *sar)
+{
+ u8 tclk_freq_select = 0;
+
+ tclk_freq_select = ((readl(sar) >> SARL_A370_TCLK_FREQ_OPT) &
+ SARL_A370_TCLK_FREQ_OPT_MASK);
+ return armada_370_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 __initconst armada_370_cpu_frequencies[] = {
+ 400000000,
+ 533000000,
+ 667000000,
+ 800000000,
+ 1000000000,
+ 1067000000,
+ 1200000000,
+};
+
+static u32 __init armada_370_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) &
+ SARL_A370_PCLK_FREQ_OPT_MASK);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_370_cpu_frequencies)) {
+ pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_370_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
+
+enum { A370_XP_NBCLK, A370_XP_HCLK, A370_XP_DRAMCLK };
+
+static const struct core_ratio __initconst armada_370_xp_core_ratios[] = {
+ { .id = A370_XP_NBCLK, .name = "nbclk" },
+ { .id = A370_XP_HCLK, .name = "hclk" },
+ { .id = A370_XP_DRAMCLK, .name = "dramclk" },
+};
+
+static const int __initconst armada_370_xp_nbclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 2}, {2, 2},
+ {1, 2}, {1, 2}, {1, 1}, {2, 3},
+ {0, 1}, {1, 2}, {2, 4}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {2, 2},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_hclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 6}, {2, 3},
+ {1, 3}, {1, 4}, {1, 2}, {2, 6},
+ {0, 1}, {1, 6}, {2, 10}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 2},
+ {2, 6}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_370_xp_dramclk_ratios[32][2] = {
+ {0, 1}, {1, 2}, {2, 3}, {2, 3},
+ {1, 3}, {1, 2}, {1, 2}, {2, 6},
+ {0, 1}, {1, 3}, {2, 5}, {0, 1},
+ {1, 4}, {0, 1}, {0, 1}, {2, 5},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {2, 3}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {1, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_370_xp_get_clk_ratio(u32 opt,
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case A370_XP_NBCLK:
+ *mult = armada_370_xp_nbclk_ratios[opt][0];
+ *div = armada_370_xp_nbclk_ratios[opt][1];
+ break;
+ case A370_XP_HCLK:
+ *mult = armada_370_xp_hclk_ratios[opt][0];
+ *div = armada_370_xp_hclk_ratios[opt][1];
+ break;
+ case A370_XP_DRAMCLK:
+ *mult = armada_370_xp_dramclk_ratios[opt][0];
+ *div = armada_370_xp_dramclk_ratios[opt][1];
+ break;
+ }
+}
+
+static void __init armada_370_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ u32 opt = ((readl(sar) >> SARL_A370_FAB_FREQ_OPT) &
+ SARL_A370_FAB_FREQ_OPT_MASK);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+
+static const struct core_clocks armada_370_core_clocks = {
+ .get_tclk_freq = armada_370_get_tclk_freq,
+ .get_cpu_freq = armada_370_get_cpu_freq,
+ .get_clk_ratio = armada_370_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+static const u32 __initconst armada_xp_cpu_frequencies[] = {
+ 1000000000,
+ 1066000000,
+ 1200000000,
+ 1333000000,
+ 1500000000,
+ 1666000000,
+ 1800000000,
+ 2000000000,
+ 667000000,
+ 0,
+ 800000000,
+ 1600000000,
+};
+
+/* For Armada XP the TCLK frequency is fixed at 250 MHz */
+static u32 __init armada_xp_get_tclk_freq(void __iomem *sar)
+{
+ return 250 * 1000 * 1000;
+}
+
+static u32 __init armada_xp_get_cpu_freq(void __iomem *sar)
+{
+ u32 cpu_freq;
+ u8 cpu_freq_select = 0;
+
+ cpu_freq_select = ((readl(sar) >> SARL_AXP_PCLK_FREQ_OPT) &
+ SARL_AXP_PCLK_FREQ_OPT_MASK);
+ /*
+ * The upper bit is not contiguous with the others and is
+ * located in the high part of the SAR registers
+ */
+ cpu_freq_select |= (((readl(sar+4) >> SARH_AXP_PCLK_FREQ_OPT) &
+ SARH_AXP_PCLK_FREQ_OPT_MASK)
+ << SARH_AXP_PCLK_FREQ_OPT_SHIFT);
+ if (cpu_freq_select >= ARRAY_SIZE(armada_xp_cpu_frequencies)) {
+ pr_err("CPU freq select unsupported: %d\n", cpu_freq_select);
+ cpu_freq = 0;
+ } else
+ cpu_freq = armada_xp_cpu_frequencies[cpu_freq_select];
+
+ return cpu_freq;
+}
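
On Armada XP the CPU frequency selector is four bits wide but split across the two
SAR words: the low three bits live in SARL and the top bit in SARH, so
armada_xp_get_cpu_freq() reassembles them before indexing the frequency table.
A worked example with invented register contents:

    #include <stdio.h>

    #define SARL_AXP_PCLK_FREQ_OPT          21
    #define SARL_AXP_PCLK_FREQ_OPT_MASK     0x7
    #define SARH_AXP_PCLK_FREQ_OPT          (52-32)
    #define SARH_AXP_PCLK_FREQ_OPT_MASK     0x1
    #define SARH_AXP_PCLK_FREQ_OPT_SHIFT    3

    int main(void)
    {
            unsigned int sarl = 0x3u << SARL_AXP_PCLK_FREQ_OPT; /* low bits = 3 */
            unsigned int sarh = 0x1u << SARH_AXP_PCLK_FREQ_OPT; /* top bit  = 1 */
            unsigned int sel;

            sel = (sarl >> SARL_AXP_PCLK_FREQ_OPT) & SARL_AXP_PCLK_FREQ_OPT_MASK;
            sel |= ((sarh >> SARH_AXP_PCLK_FREQ_OPT) & SARH_AXP_PCLK_FREQ_OPT_MASK)
                    << SARH_AXP_PCLK_FREQ_OPT_SHIFT;

            /* 8 + 3 = 11, which the table above maps to 1600 MHz */
            printf("cpu_freq_select = %u\n", sel);
            return 0;
    }
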
+
+static void __init armada_xp_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+
+ u32 opt = ((readl(sar) >> SARL_AXP_FAB_FREQ_OPT) &
+ SARL_AXP_FAB_FREQ_OPT_MASK);
+ /*
+ * The upper bit is not contiguous to the other ones and
+ * located in the high part of the SAR registers
+ */
+ opt |= (((readl(sar+4) >> SARH_AXP_FAB_FREQ_OPT) &
+ SARH_AXP_FAB_FREQ_OPT_MASK)
+ << SARH_AXP_FAB_FREQ_OPT_SHIFT);
+
+ armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
+}
+
+static const struct core_clocks armada_xp_core_clocks = {
+ .get_tclk_freq = armada_xp_get_tclk_freq,
+ .get_cpu_freq = armada_xp_get_cpu_freq,
+ .get_clk_ratio = armada_xp_get_clk_ratio,
+ .ratios = armada_370_xp_core_ratios,
+ .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
+};
+
+#endif /* CONFIG_MACH_ARMADA_370_XP */
+
+/*
+ * Dove PLL sample-at-reset configuration
+ *
+ * SAR0[8:5] : CPU frequency
+ * 5 = 1000 MHz
+ * 6 = 933 MHz
+ * 7 = 933 MHz
+ * 8 = 800 MHz
+ * 9 = 800 MHz
+ * 10 = 800 MHz
+ * 11 = 1067 MHz
+ * 12 = 667 MHz
+ * 13 = 533 MHz
+ * 14 = 400 MHz
+ * 15 = 333 MHz
+ * others reserved.
+ *
+ * SAR0[11:9] : CPU to L2 Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[15:12] : CPU to DDR DRAM Clock divider ratio
+ * 0 = (1/1) * CPU
+ * 2 = (1/2) * CPU
+ * 3 = (2/5) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 8 = (1/5) * CPU
+ * 10 = (1/6) * CPU
+ * 12 = (1/7) * CPU
+ * 14 = (1/8) * CPU
+ * 15 = (1/10) * CPU
+ * others reserved.
+ *
+ * SAR0[24:23] : TCLK frequency
+ * 0 = 166 MHz
+ * 1 = 125 MHz
+ * others reserved.
+ */
+#ifdef CONFIG_ARCH_DOVE
+#define SAR_DOVE_CPU_FREQ 5
+#define SAR_DOVE_CPU_FREQ_MASK 0xf
+#define SAR_DOVE_L2_RATIO 9
+#define SAR_DOVE_L2_RATIO_MASK 0x7
+#define SAR_DOVE_DDR_RATIO 12
+#define SAR_DOVE_DDR_RATIO_MASK 0xf
+#define SAR_DOVE_TCLK_FREQ 23
+#define SAR_DOVE_TCLK_FREQ_MASK 0x3
+
+static const u32 __initconst dove_tclk_frequencies[] = {
+ 166666667,
+ 125000000,
+ 0, 0
+};
+
+static u32 __init dove_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_TCLK_FREQ) &
+ SAR_DOVE_TCLK_FREQ_MASK;
+ return dove_tclk_frequencies[opt];
+}
+
+static const u32 __initconst dove_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 1000000000,
+ 933333333, 933333333,
+ 800000000, 800000000, 800000000,
+ 1066666667,
+ 666666667,
+ 533333333,
+ 400000000,
+ 333333333
+};
+
+static u32 __init dove_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_DOVE_CPU_FREQ) &
+ SAR_DOVE_CPU_FREQ_MASK;
+ return dove_cpu_frequencies[opt];
+}
+
+enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
+
+static const struct core_ratio __initconst dove_core_ratios[] = {
+ { .id = DOVE_CPU_TO_L2, .name = "l2clk", },
+ { .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static const int __initconst dove_cpu_l2_ratios[8][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
+};
+
+static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+ { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
+ { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
+ { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 }
+};
+
+static void __init dove_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case DOVE_CPU_TO_L2:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_L2_RATIO) &
+ SAR_DOVE_L2_RATIO_MASK;
+ *mult = dove_cpu_l2_ratios[opt][0];
+ *div = dove_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case DOVE_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_DOVE_DDR_RATIO) &
+ SAR_DOVE_DDR_RATIO_MASK;
+ *mult = dove_cpu_ddr_ratios[opt][0];
+ *div = dove_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks dove_core_clocks = {
+ .get_tclk_freq = dove_get_tclk_freq,
+ .get_cpu_freq = dove_get_cpu_freq,
+ .get_clk_ratio = dove_get_clk_ratio,
+ .ratios = dove_core_ratios,
+ .num_ratios = ARRAY_SIZE(dove_core_ratios),
+};
+#endif /* CONFIG_ARCH_DOVE */
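
Using the SAR0 field layout documented above, the Dove callbacks extract the CPU
frequency option from bits [8:5] and the CPU-to-DDR ratio option from bits [15:12]
and look them up in the tables. A worked decode with an invented SAR0 value:

    #include <stdio.h>

    #define SAR_DOVE_CPU_FREQ       5
    #define SAR_DOVE_CPU_FREQ_MASK  0xf
    #define SAR_DOVE_DDR_RATIO      12
    #define SAR_DOVE_DDR_RATIO_MASK 0xf

    static const unsigned int dove_cpu_frequencies[] = {
            0, 0, 0, 0, 0, 1000000000, 933333333, 933333333,
            800000000, 800000000, 800000000, 1066666667,
            666666667, 533333333, 400000000, 333333333
    };

    static const int dove_cpu_ddr_ratios[16][2] = {
            { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
            { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
            { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
            { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 }
    };

    int main(void)
    {
            unsigned int sar0 = (5u << SAR_DOVE_CPU_FREQ) |  /* CPU opt 5 */
                                (2u << SAR_DOVE_DDR_RATIO);  /* DDR opt 2 */
            unsigned int cpu = (sar0 >> SAR_DOVE_CPU_FREQ) & SAR_DOVE_CPU_FREQ_MASK;
            unsigned int ddr = (sar0 >> SAR_DOVE_DDR_RATIO) & SAR_DOVE_DDR_RATIO_MASK;

            printf("cpu = %u Hz, ddrclk = cpu * %d/%d\n",
                   dove_cpu_frequencies[cpu],
                   dove_cpu_ddr_ratios[ddr][0], dove_cpu_ddr_ratios[ddr][1]);
            return 0;
    }
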
+
+/*
+ * Kirkwood PLL sample-at-reset configuration
+ * (6180 has different SAR layout than other Kirkwood SoCs)
+ *
+ * SAR0[4:3,22,1] : CPU frequency (6281,6292,6282)
+ * 4 = 600 MHz
+ * 6 = 800 MHz
+ * 7 = 1000 MHz
+ * 9 = 1200 MHz
+ * 12 = 1500 MHz
+ * 13 = 1600 MHz
+ * 14 = 1800 MHz
+ * 15 = 2000 MHz
+ * others reserved.
+ *
+ * SAR0[19,10:9] : CPU to L2 Clock divider ratio (6281,6292,6282)
+ * 1 = (1/2) * CPU
+ * 3 = (1/3) * CPU
+ * 5 = (1/4) * CPU
+ * others reserved.
+ *
+ * SAR0[8:5] : CPU to DDR DRAM Clock divider ratio (6281,6292,6282)
+ * 2 = (1/2) * CPU
+ * 4 = (1/3) * CPU
+ * 6 = (1/4) * CPU
+ * 7 = (2/9) * CPU
+ * 8 = (1/5) * CPU
+ * 9 = (1/6) * CPU
+ * others reserved.
+ *
+ * SAR0[4:2] : Kirkwood 6180 cpu/l2/ddr clock configuration (6180 only)
+ * 5 = [CPU = 600 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/3) * CPU]
+ * 6 = [CPU = 800 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/4) * CPU]
+ * 7 = [CPU = 1000 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/5) * CPU]
+ * others reserved.
+ *
+ * SAR0[21] : TCLK frequency
+ * 0 = 200 MHz
+ * 1 = 166 MHz
+ * others reserved.
+ */
+#ifdef CONFIG_ARCH_KIRKWOOD
+#define SAR_KIRKWOOD_CPU_FREQ(x) \
+ (((x & (1 << 1)) >> 1) | \
+ ((x & (1 << 22)) >> 21) | \
+ ((x & (3 << 3)) >> 1))
+#define SAR_KIRKWOOD_L2_RATIO(x) \
+ (((x & (3 << 9)) >> 9) | \
+ (((x & (1 << 19)) >> 17)))
+#define SAR_KIRKWOOD_DDR_RATIO 5
+#define SAR_KIRKWOOD_DDR_RATIO_MASK 0xf
+#define SAR_MV88F6180_CLK 2
+#define SAR_MV88F6180_CLK_MASK 0x7
+#define SAR_KIRKWOOD_TCLK_FREQ 21
+#define SAR_KIRKWOOD_TCLK_FREQ_MASK 0x1
+
+enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
+
+static const struct core_ratio __initconst kirkwood_core_ratios[] = {
+ { .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
+ { .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
+};
+
+static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_TCLK_FREQ) &
+ SAR_KIRKWOOD_TCLK_FREQ_MASK;
+ return (opt) ? 166666667 : 200000000;
+}
+
+static const u32 __initconst kirkwood_cpu_frequencies[] = {
+ 0, 0, 0, 0,
+ 600000000,
+ 0,
+ 800000000,
+ 1000000000,
+ 0,
+ 1200000000,
+ 0, 0,
+ 1500000000,
+ 1600000000,
+ 1800000000,
+ 2000000000
+};
+
+static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = SAR_KIRKWOOD_CPU_FREQ(readl(sar));
+ return kirkwood_cpu_frequencies[opt];
+}
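
SAR_KIRKWOOD_CPU_FREQ() above folds three non-contiguous SAR fields (bit 1,
bits [4:3] and bit 22) into a single 4-bit index for the frequency table.
A worked example with an invented SAR value:

    #include <stdio.h>

    #define SAR_KIRKWOOD_CPU_FREQ(x) \
            (((x & (1 << 1)) >> 1) | \
             ((x & (1 << 22)) >> 21) | \
             ((x & (3 << 3)) >> 1))

    static const unsigned int kirkwood_cpu_frequencies[] = {
            0, 0, 0, 0, 600000000, 0, 800000000, 1000000000,
            0, 1200000000, 0, 0, 1500000000, 1600000000,
            1800000000, 2000000000
    };

    int main(void)
    {
            /* bit1=1 -> 1, bit3=1 -> 4, bit22=1 -> 2, so index 7 */
            unsigned int sar = (1u << 1) | (1u << 3) | (1u << 22);

            printf("opt %u -> %u Hz\n",
                   SAR_KIRKWOOD_CPU_FREQ(sar),
                   kirkwood_cpu_frequencies[SAR_KIRKWOOD_CPU_FREQ(sar)]);
            return 0;
    }
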
+
+static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+ { 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
+ { 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
+};
+
+static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+ { 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
+ { 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
+ { 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
+};
+
+static void __init kirkwood_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ u32 opt = SAR_KIRKWOOD_L2_RATIO(readl(sar));
+ *mult = kirkwood_cpu_l2_ratios[opt][0];
+ *div = kirkwood_cpu_l2_ratios[opt][1];
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_KIRKWOOD_DDR_RATIO) &
+ SAR_KIRKWOOD_DDR_RATIO_MASK;
+ *mult = kirkwood_cpu_ddr_ratios[opt][0];
+ *div = kirkwood_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks kirkwood_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = kirkwood_get_cpu_freq,
+ .get_clk_ratio = kirkwood_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+
+static const u32 __initconst mv88f6180_cpu_frequencies[] = {
+ 0, 0, 0, 0, 0,
+ 600000000,
+ 800000000,
+ 1000000000
+};
+
+static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & SAR_MV88F6180_CLK_MASK;
+ return mv88f6180_cpu_frequencies[opt];
+}
+
+static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
+ { 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
+};
+
+static void __init mv88f6180_get_clk_ratio(
+ void __iomem *sar, int id, int *mult, int *div)
+{
+ switch (id) {
+ case KIRKWOOD_CPU_TO_L2:
+ {
+ /* mv88f6180 has a fixed 1:2 CPU-to-L2 ratio */
+ *mult = 1;
+ *div = 2;
+ break;
+ }
+ case KIRKWOOD_CPU_TO_DDR:
+ {
+ u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) &
+ SAR_MV88F6180_CLK_MASK;
+ *mult = mv88f6180_cpu_ddr_ratios[opt][0];
+ *div = mv88f6180_cpu_ddr_ratios[opt][1];
+ break;
+ }
+ }
+}
+
+static const struct core_clocks mv88f6180_core_clocks = {
+ .get_tclk_freq = kirkwood_get_tclk_freq,
+ .get_cpu_freq = mv88f6180_get_cpu_freq,
+ .get_clk_ratio = mv88f6180_get_clk_ratio,
+ .ratios = kirkwood_core_ratios,
+ .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
+};
+#endif /* CONFIG_ARCH_KIRKWOOD */
+
+static const __initdata struct of_device_id clk_core_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370_XP
+ {
+ .compatible = "marvell,armada-370-core-clock",
+ .data = &armada_370_core_clocks,
+ },
+ {
+ .compatible = "marvell,armada-xp-core-clock",
+ .data = &armada_xp_core_clocks,
+ },
+#endif
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-core-clock",
+ .data = &dove_core_clocks,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-core-clock",
+ .data = &kirkwood_core_clocks,
+ },
+ {
+ .compatible = "marvell,mv88f6180-core-clock",
+ .data = &mv88f6180_core_clocks,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_core_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_core_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_core_match, np);
+ mvebu_clk_core_setup(np, (struct core_clocks *)match->data);
+ }
+}
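
A quick worked sketch of how the Kirkwood sample-at-reset (SAR) decoding above composes into a derived clock rate; how mvebu_clk_core_setup() consumes these callbacks is not part of this hunk, so the helper below is purely illustrative and its name is hypothetical:

/* Illustrative only: derive the Kirkwood DDR clock rate from the SAR
 * register using the callbacks defined above. For example, a SAR value
 * selecting entry 7 of kirkwood_cpu_frequencies (1 GHz) together with a
 * CPU-to-DDR ratio of { 1, 3 } yields roughly 333 MHz.
 */
static unsigned long __init kirkwood_ddrclk_rate_sketch(void __iomem *sar)
{
	int mult, div;
	u32 cpu_freq = kirkwood_get_cpu_freq(sar);

	kirkwood_get_clk_ratio(sar, KIRKWOOD_CPU_TO_DDR, &mult, &div);
	return (unsigned long)cpu_freq * mult / div;
}
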
diff --git a/drivers/clk/mvebu/clk-core.h b/drivers/clk/mvebu/clk-core.h
new file mode 100644
index 000000000000..28b5e02e9885
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.h
@@ -0,0 +1,18 @@
+/*
+ * Marvell EBU clock core handling defined at reset
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CORE_H
+#define __MVEBU_CLK_CORE_H
+
+void __init mvebu_core_clk_init(void);
+
+#endif
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
new file mode 100644
index 000000000000..9dd2551a0a41
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -0,0 +1,189 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include "clk-cpu.h"
+
+#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
+#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
+
+#define MAX_CPU 4
+struct cpu_clk {
+ struct clk_hw hw;
+ int cpu;
+ const char *clk_name;
+ const char *parent_name;
+ void __iomem *reg_base;
+};
+
+static struct clk **clks;
+
+static struct clk_onecell_data clk_data;
+
+#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
+
+static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
+ return parent_rate / div;
+}
+
+static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Valid ratios are 1:1, 1:2 and 1:3 */
+ u32 div;
+
+ div = *parent_rate / rate;
+ if (div == 0)
+ div = 1;
+ else if (div > 3)
+ div = 3;
+
+ return *parent_rate / div;
+}
+
+static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+ u32 reload_mask;
+
+ div = parent_rate / rate;
+ reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
+ & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
+ | (div << (cpuclk->cpu * 8));
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ /* Set clock divider reload smooth bit mask */
+ reload_mask = 1 << (20 + cpuclk->cpu);
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | reload_mask;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Now trigger the clock update */
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ | 1 << 24;
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ /* Wait for clocks to settle down then clear reload request */
+ udelay(1000);
+ reg &= ~(reload_mask | 1 << 24);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ udelay(1000);
+
+ return 0;
+}
+
+static const struct clk_ops cpu_ops = {
+ .recalc_rate = clk_cpu_recalc_rate,
+ .round_rate = clk_cpu_round_rate,
+ .set_rate = clk_cpu_set_rate,
+};
+
+void __init of_cpu_clk_setup(struct device_node *node)
+{
+ struct cpu_clk *cpuclk;
+ void __iomem *clock_complex_base = of_iomap(node, 0);
+ int ncpus = 0;
+ struct device_node *dn;
+
+ if (clock_complex_base == NULL) {
+ pr_err("%s: clock-complex base register not set\n",
+ __func__);
+ return;
+ }
+
+ for_each_node_by_type(dn, "cpu")
+ ncpus++;
+
+ cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
+ if (WARN_ON(!cpuclk))
+ return;
+
+ clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
+ if (WARN_ON(!clks))
+ goto clks_out;
+
+ for_each_node_by_type(dn, "cpu") {
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk *parent_clk;
+ char *clk_name = kzalloc(5, GFP_KERNEL);
+ int cpu, err;
+
+ if (WARN_ON(!clk_name))
+ goto bail_out;
+
+ err = of_property_read_u32(dn, "reg", &cpu);
+ if (WARN_ON(err))
+ goto bail_out;
+
+ sprintf(clk_name, "cpu%d", cpu);
+ parent_clk = of_clk_get(node, 0);
+
+ cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+ cpuclk[cpu].clk_name = clk_name;
+ cpuclk[cpu].cpu = cpu;
+ cpuclk[cpu].reg_base = clock_complex_base;
+ cpuclk[cpu].hw.init = &init;
+
+ init.name = cpuclk[cpu].clk_name;
+ init.ops = &cpu_ops;
+ init.flags = 0;
+ init.parent_names = &cpuclk[cpu].parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &cpuclk[cpu].hw);
+ if (WARN_ON(IS_ERR(clk)))
+ goto bail_out;
+ clks[cpu] = clk;
+ }
+ clk_data.clk_num = MAX_CPU;
+ clk_data.clks = clks;
+ of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+
+ return;
+bail_out:
+ kfree(clks);
+ while (ncpus--)
+ kfree(cpuclk[ncpus].clk_name);
+clks_out:
+ kfree(cpuclk);
+}
+
+static const __initconst struct of_device_id clk_cpu_match[] = {
+ {
+ .compatible = "marvell,armada-xp-cpu-clock",
+ .data = of_cpu_clk_setup,
+ },
+ {
+ /* sentinel */
+ },
+};
+
+void __init mvebu_cpu_clk_init(void)
+{
+ of_clk_init(clk_cpu_match);
+}
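
For context, a minimal consumer sketch (an assumption about typical usage, not part of this series) of the per-CPU clocks that of_cpu_clk_setup() registers under the names cpu0, cpu1, and so on; the helper name below is hypothetical:

/* Hypothetical consumer: halve the rate of an already-obtained CPU clock.
 * clk_get_rate() ends up in clk_cpu_recalc_rate(); clk_set_rate() goes
 * through clk_cpu_round_rate() (only 1:1, 1:2 and 1:3 dividers exist)
 * and then clk_cpu_set_rate(), which reprograms the divider register.
 */
#include <linux/clk.h>

static int scale_cpu_clk_sketch(struct clk *cpu_clk)
{
	unsigned long cur = clk_get_rate(cpu_clk);

	return clk_set_rate(cpu_clk, cur / 2);
}
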
diff --git a/drivers/clk/mvebu/clk-cpu.h b/drivers/clk/mvebu/clk-cpu.h
new file mode 100644
index 000000000000..08e2affba4e6
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell MVEBU CPU clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_CPU_H
+#define __MVEBU_CLK_CPU_H
+
+#ifdef CONFIG_MVEBU_CLK_CPU
+void __init mvebu_cpu_clk_init(void);
+#else
+static inline void mvebu_cpu_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
new file mode 100644
index 000000000000..8fa5408b6c7d
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -0,0 +1,249 @@
+/*
+ * Marvell MVEBU clock gating control.
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct mvebu_gating_ctrl {
+ spinlock_t lock;
+ struct clk **gates;
+ int num_gates;
+};
+
+struct mvebu_soc_descr {
+ const char *name;
+ const char *parent;
+ int bit_idx;
+};
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static struct clk *mvebu_clk_gating_get_src(
+ struct of_phandle_args *clkspec, void *data)
+{
+ struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
+ int n;
+
+ if (clkspec->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ struct clk_gate *gate =
+ to_clk_gate(__clk_get_hw(ctrl->gates[n]));
+ if (clkspec->args[0] == gate->bit_idx)
+ return ctrl->gates[n];
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static void __init mvebu_clk_gating_setup(
+ struct device_node *np, const struct mvebu_soc_descr *descr)
+{
+ struct mvebu_gating_ctrl *ctrl;
+ struct clk *clk;
+ void __iomem *base;
+ const char *default_parent = NULL;
+ int n;
+
+ base = of_iomap(np, 0);
+
+ clk = of_clk_get(np, 0);
+ if (!IS_ERR(clk)) {
+ default_parent = __clk_get_name(clk);
+ clk_put(clk);
+ }
+
+ ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
+ if (WARN_ON(!ctrl))
+ return;
+
+ spin_lock_init(&ctrl->lock);
+
+ /*
+ * Count, allocate, and register clock gates
+ */
+ for (n = 0; descr[n].name;)
+ n++;
+
+ ctrl->num_gates = n;
+ ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
+ GFP_KERNEL);
+ if (WARN_ON(!ctrl->gates)) {
+ kfree(ctrl);
+ return;
+ }
+
+ for (n = 0; n < ctrl->num_gates; n++) {
+ u8 flags = 0;
+ const char *parent =
+ (descr[n].parent) ? descr[n].parent : default_parent;
+
+ /*
+ * On Armada 370, the DDR clock is a special case: it
+ * isn't taken by any driver, but should anyway be
+ * kept enabled, so we mark it as IGNORE_UNUSED for
+ * now.
+ */
+ if (!strcmp(descr[n].name, "ddr"))
+ flags |= CLK_IGNORE_UNUSED;
+
+ ctrl->gates[n] = clk_register_gate(NULL, descr[n].name, parent,
+ flags, base, descr[n].bit_idx, 0, &ctrl->lock);
+ WARN_ON(IS_ERR(ctrl->gates[n]));
+ }
+ of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
+}
+
+/*
+ * SoC specific clock gating control
+ */
+
+#ifdef CONFIG_MACH_ARMADA_370
+static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "pex0_en", NULL, 1 },
+ { "pex1_en", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 9 },
+ { "sata0", NULL, 15 },
+ { "sdio", NULL, 17 },
+ { "tdm", NULL, 25 },
+ { "ddr", NULL, 28 },
+ { "sata1", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
+ { "audio", NULL, 0 },
+ { "ge3", NULL, 1 },
+ { "ge2", NULL, 2 },
+ { "ge1", NULL, 3 },
+ { "ge0", NULL, 4 },
+ { "pex0", NULL, 5 },
+ { "pex1", NULL, 6 },
+ { "pex2", NULL, 7 },
+ { "pex3", NULL, 8 },
+ { "bp", NULL, 13 },
+ { "sata0lnk", NULL, 14 },
+ { "sata0", "sata0lnk", 15 },
+ { "lcd", NULL, 16 },
+ { "sdio", NULL, 17 },
+ { "usb0", NULL, 18 },
+ { "usb1", NULL, 19 },
+ { "usb2", NULL, 20 },
+ { "xor0", NULL, 22 },
+ { "crypto", NULL, 23 },
+ { "tdm", NULL, 25 },
+ { "xor1", NULL, 28 },
+ { "sata1lnk", NULL, 29 },
+ { "sata1", "sata1lnk", 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+static const struct mvebu_soc_descr __initconst dove_gating_descr[] = {
+ { "usb0", NULL, 0 },
+ { "usb1", NULL, 1 },
+ { "ge", "gephy", 2 },
+ { "sata", NULL, 3 },
+ { "pex0", NULL, 4 },
+ { "pex1", NULL, 5 },
+ { "sdio0", NULL, 8 },
+ { "sdio1", NULL, 9 },
+ { "nand", NULL, 10 },
+ { "camera", NULL, 11 },
+ { "i2s0", NULL, 12 },
+ { "i2s1", NULL, 13 },
+ { "crypto", NULL, 15 },
+ { "ac97", NULL, 21 },
+ { "pdma", NULL, 22 },
+ { "xor0", NULL, 23 },
+ { "xor1", NULL, 24 },
+ { "gephy", NULL, 30 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
+ { "ge0", NULL, 0 },
+ { "pex0", NULL, 2 },
+ { "usb0", NULL, 3 },
+ { "sdio", NULL, 4 },
+ { "tsu", NULL, 5 },
+ { "runit", NULL, 7 },
+ { "xor0", NULL, 8 },
+ { "audio", NULL, 9 },
+ { "sata0", NULL, 14 },
+ { "sata1", NULL, 15 },
+ { "xor1", NULL, 16 },
+ { "crypto", NULL, 17 },
+ { "pex1", NULL, 18 },
+ { "ge1", NULL, 19 },
+ { "tdm", NULL, 20 },
+ { }
+};
+#endif
+
+static const __initdata struct of_device_id clk_gating_match[] = {
+#ifdef CONFIG_MACH_ARMADA_370
+ {
+ .compatible = "marvell,armada-370-gating-clock",
+ .data = armada_370_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_XP
+ {
+ .compatible = "marvell,armada-xp-gating-clock",
+ .data = armada_xp_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_DOVE
+ {
+ .compatible = "marvell,dove-gating-clock",
+ .data = dove_gating_descr,
+ },
+#endif
+
+#ifdef CONFIG_ARCH_KIRKWOOD
+ {
+ .compatible = "marvell,kirkwood-gating-clock",
+ .data = kirkwood_gating_descr,
+ },
+#endif
+
+ { }
+};
+
+void __init mvebu_gating_clk_init(void)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, clk_gating_match) {
+ const struct of_device_id *match =
+ of_match_node(clk_gating_match, np);
+ mvebu_clk_gating_setup(np,
+ (const struct mvebu_soc_descr *)match->data);
+ }
+}
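
A short note on how the one-cell specifier handled by mvebu_clk_gating_get_src() is meant to be used (assumed consumer code, not part of the patch): a device node that references the gating controller with, say, a specifier of 17 resolves to the gate registered with bit_idx == 17 ("sdio" on Armada 370/XP, per the tables above). The helper name is hypothetical:

/* Hypothetical consumer: look up the first clock in this node's "clocks"
 * property; the specifier cell is matched against gate->bit_idx by
 * mvebu_clk_gating_get_src() above.
 */
#include <linux/clk.h>
#include <linux/of.h>

static struct clk *get_gated_clk_sketch(struct device_node *np)
{
	return of_clk_get(np, 0);
}
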
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.h b/drivers/clk/mvebu/clk-gating-ctrl.h
new file mode 100644
index 000000000000..9275d1e51f1b
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.h
@@ -0,0 +1,22 @@
+/*
+ * Marvell EBU gating clock handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_CLK_GATING_H
+#define __MVEBU_CLK_GATING_H
+
+#ifdef CONFIG_MVEBU_CLK_GATING
+void __init mvebu_gating_clk_init(void);
+#else
+static inline void mvebu_gating_clk_init(void) {}
+#endif
+
+#endif
diff --git a/drivers/clk/mvebu/clk.c b/drivers/clk/mvebu/clk.c
new file mode 100644
index 000000000000..855681b8a9dc
--- /dev/null
+++ b/drivers/clk/mvebu/clk.c
@@ -0,0 +1,27 @@
+/*
+ * Marvell EBU SoC clock handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include "clk-core.h"
+#include "clk-cpu.h"
+#include "clk-gating-ctrl.h"
+
+void __init mvebu_clocks_init(void)
+{
+ mvebu_core_clk_init();
+ mvebu_gating_clk_init();
+ mvebu_cpu_clk_init();
+}
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 147e25f00405..ed9af4278619 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -20,6 +20,7 @@
#include <mach/spear.h>
#include "clk.h"
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
/* PLL related registers and bit values */
#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
/* PLL_CFG bit values */
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index e27c52317ffe..9f7400d74fa7 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -34,7 +34,7 @@ static int ab9540_reg_clks(struct device *dev)
return 0;
}
-static int __devinit abx500_clk_probe(struct platform_device *pdev)
+static int abx500_clk_probe(struct platform_device *pdev)
{
struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
int ret;
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
index 7d0e0258f204..6b889a0e90b3 100644
--- a/drivers/clk/ux500/u8500_clk.c
+++ b/drivers/clk/ux500/u8500_clk.c
@@ -12,7 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/clk-ux500.h>
-
+#include <mach/db8500-regs.h>
#include "clk.h"
void u8500_clk_init(void)
@@ -160,12 +160,6 @@ void u8500_clk_init(void)
clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "uicc");
- /*
- * FIXME: The MTU clocks might need some kind of "parent muxed join"
- * and these have no K-clocks. For now, we ignore the missing
- * connection to the corresponding P-clocks, p6_mtu0_clk and
- * p6_mtu1_clk. Instead timclk is used which is the valid parent.
- */
clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
clk_register_clkdev(clk, NULL, "mtu0");
clk_register_clkdev(clk, NULL, "mtu1");
@@ -405,8 +399,11 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", U8500_CLKRST6_BASE,
BIT(6), 0);
+ clk_register_clkdev(clk, "apb_pclk", "mtu0");
+
clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", U8500_CLKRST6_BASE,
BIT(7), 0);
+ clk_register_clkdev(clk, "apb_pclk", "mtu1");
/* PRCC K-clocks
*
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 6a78073c3808..7fdcbd3f4da5 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -22,6 +22,24 @@ config DW_APB_TIMER_OF
config ARMADA_370_XP_TIMER
bool
+config SUNXI_TIMER
+ bool
+
+config CLKSRC_NOMADIK_MTU
+ bool
+ depends on (ARCH_NOMADIK || ARCH_U8500)
+ select CLKSRC_MMIO
+ help
+ Support for the Multi Timer Unit. The MTU provides access
+ to multiple interrupt-generating, programmable, 32-bit
+ free-running decrementing counters.
+
+config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
+ bool
+ depends on CLKSRC_NOMADIK_MTU
+ help
+ Use the Multi Timer Unit as the sched_clock.
+
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB8500
@@ -31,7 +49,7 @@ config CLKSRC_DBX500_PRCMU
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
- depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
default y
help
Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 603be366f762..f93453d01673 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -11,8 +11,10 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
+obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 6b5cf02c35c8..6efe4d1ab3aa 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -73,7 +73,7 @@ static struct clocksource clocksource_acpi_pm = {
#ifdef CONFIG_PCI
-static int __devinitdata acpi_pm_good;
+static int acpi_pm_good;
static int __init acpi_pm_good_setup(char *__str)
{
acpi_pm_good = 1;
@@ -102,7 +102,7 @@ static inline void acpi_pm_need_workaround(void)
* incorrect when read). As a result, the ACPI free running count up
* timer specification is violated due to erroneous reads.
*/
-static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
+static void acpi_pm_check_blacklist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
@@ -120,7 +120,7 @@ static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
acpi_pm_check_blacklist);
-static void __devinit acpi_pm_check_graylist(struct pci_dev *dev)
+static void acpi_pm_check_graylist(struct pci_dev *dev)
{
if (acpi_pm_good)
return;
@@ -233,16 +233,15 @@ fs_initcall(init_acpi_pm_clocksource);
*/
static int __init parse_pmtmr(char *arg)
{
- unsigned long base;
+ unsigned int base;
+ int ret;
- if (strict_strtoul(arg, 16, &base))
- return -EINVAL;
-#ifdef CONFIG_X86_64
- if (base > UINT_MAX)
- return -ERANGE;
-#endif
- printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
- pmtmr_ioport, base);
+ ret = kstrtouint(arg, 16, &base);
+ if (ret)
+ return ret;
+
+ pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport,
+ base);
pmtmr_ioport = base;
return 1;
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
index c4d9f9566c64..8ae1a61523ff 100644
--- a/drivers/clocksource/arm_generic.c
+++ b/drivers/clocksource/arm_generic.c
@@ -109,7 +109,7 @@ static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
enable_percpu_irq(clk->irq, 0);
- /* Ensure the physical counter is visible to userspace for the vDSO. */
+ /* Ensure the virtual counter is visible to userspace for the vDSO. */
arch_counter_enable_user_access();
}
@@ -127,7 +127,7 @@ static void __init arch_timer_calibrate(void)
/* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
+ sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
pr_info("Architected local timer running at %u.%02uMHz.\n",
arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
@@ -221,10 +221,10 @@ int __init arm_generic_timer_init(void)
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
/* Calibrate the delay loop directly */
- lpj_fine = arch_timer_rate / HZ;
+ lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
/* Immediately configure the timer on the boot CPU */
- arch_timer_setup(per_cpu_ptr(&arch_timer_evt, smp_processor_id()));
+ arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
register_cpu_notifier(&arch_timer_cpu_nb);
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 372051d1bba8..e6a553cb73e8 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -311,7 +311,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
clockevents_config_and_register(ced, 1, 2, 0xffffffff);
}
-static int __devinit em_sti_probe(struct platform_device *pdev)
+static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
struct resource *res;
@@ -379,12 +379,12 @@ err0:
return ret;
}
-static int __devexit em_sti_remove(struct platform_device *pdev)
+static int em_sti_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
-static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+static const struct of_device_id em_sti_dt_ids[] = {
{ .compatible = "renesas,em-sti", },
{},
};
@@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
static struct platform_driver em_sti_device_driver = {
.probe = em_sti_probe,
- .remove = __devexit_p(em_sti_remove),
+ .remove = em_sti_remove,
.driver = {
.name = "em_sti",
.of_match_table = em_sti_dt_ids,
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index e7cab2da910f..14ee3efcc404 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -35,7 +35,7 @@ static cycle_t i8253_read(struct clocksource *cs)
raw_spin_lock_irqsave(&i8253_lock, flags);
/*
- * Although our caller may have the read side of xtime_lock,
+ * Although our caller may have the read side of jiffies_lock,
* this is now a seqlock, and we are cheating in this routine
* by having side effects on state that we cannot undo if
* there is a collision on the seqlock and our caller has to
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
new file mode 100644
index 000000000000..8914c3c1c88b
--- /dev/null
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2008 STMicroelectronics
+ * Copyright (C) 2010 Alessandro Rubini
+ * Copyright (C) 2010 Linus Walleij for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/platform_data/clocksource-nomadik-mtu.h>
+#include <asm/mach/time.h>
+#include <asm/sched_clock.h>
+
+/*
+ * The MTU device hosts four different counters, with four sets of
+ * registers. These are the register names.
+ */
+
+#define MTU_IMSC 0x00 /* Interrupt mask set/clear */
+#define MTU_RIS 0x04 /* Raw interrupt status */
+#define MTU_MIS 0x08 /* Masked interrupt status */
+#define MTU_ICR 0x0C /* Interrupt clear register */
+
+/* per-timer registers take 0..3 as argument */
+#define MTU_LR(x) (0x10 + 0x10 * (x) + 0x00) /* Load value */
+#define MTU_VAL(x) (0x10 + 0x10 * (x) + 0x04) /* Current value */
+#define MTU_CR(x) (0x10 + 0x10 * (x) + 0x08) /* Control reg */
+#define MTU_BGLR(x) (0x10 + 0x10 * (x) + 0x0c) /* At next overflow */
+
+/* bits for the control register */
+#define MTU_CRn_ENA 0x80
+#define MTU_CRn_PERIODIC 0x40 /* if 0 = free-running */
+#define MTU_CRn_PRESCALE_MASK 0x0c
+#define MTU_CRn_PRESCALE_1 0x00
+#define MTU_CRn_PRESCALE_16 0x04
+#define MTU_CRn_PRESCALE_256 0x08
+#define MTU_CRn_32BITS 0x02
+#define MTU_CRn_ONESHOT 0x01 /* if 0 = wraps reloading from BGLR */
+
+/* Other registers are usual amba/primecell registers, currently not used */
+#define MTU_ITCR 0xff0
+#define MTU_ITOP 0xff4
+
+#define MTU_PERIPH_ID0 0xfe0
+#define MTU_PERIPH_ID1 0xfe4
+#define MTU_PERIPH_ID2 0xfe8
+#define MTU_PERIPH_ID3 0xfeC
+
+#define MTU_PCELL0 0xff0
+#define MTU_PCELL1 0xff4
+#define MTU_PCELL2 0xff8
+#define MTU_PCELL3 0xffC
+
+static void __iomem *mtu_base;
+static bool clkevt_periodic;
+static u32 clk_prescale;
+static u32 nmdk_cycle; /* write-once */
+
+#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel.
+ */
+static u32 notrace nomadik_read_sched_clock(void)
+{
+ if (unlikely(!mtu_base))
+ return 0;
+
+ return -readl(mtu_base + MTU_VAL(0));
+}
+#endif
+
+/* Clockevent device: use one-shot mode */
+static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
+{
+ writel(1 << 1, mtu_base + MTU_IMSC);
+ writel(evt, mtu_base + MTU_LR(1));
+ /* Load highest value, enable device, enable interrupts */
+ writel(MTU_CRn_ONESHOT | clk_prescale |
+ MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(1));
+
+ return 0;
+}
+
+void nmdk_clkevt_reset(void)
+{
+ if (clkevt_periodic) {
+ /* Timer: configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(1));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(1));
+
+ writel(MTU_CRn_PERIODIC | clk_prescale |
+ MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(1));
+ writel(1 << 1, mtu_base + MTU_IMSC);
+ } else {
+ /* Generate an interrupt to start the clockevent again */
+ (void) nmdk_clkevt_next(nmdk_cycle, NULL);
+ }
+}
+
+static void nmdk_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ clkevt_periodic = true;
+ nmdk_clkevt_reset();
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ clkevt_periodic = false;
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ writel(0, mtu_base + MTU_IMSC);
+ /* disable timer */
+ writel(0, mtu_base + MTU_CR(1));
+ /* load some high default value */
+ writel(0xffffffff, mtu_base + MTU_LR(1));
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device nmdk_clkevt = {
+ .name = "mtu_1",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 200,
+ .set_mode = nmdk_clkevt_mode,
+ .set_next_event = nmdk_clkevt_next,
+};
+
+/*
+ * IRQ Handler for timer 1 of the MTU block.
+ */
+static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evdev = dev_id;
+
+ writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
+ evdev->event_handler(evdev);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction nmdk_timer_irq = {
+ .name = "Nomadik Timer Tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = nmdk_timer_interrupt,
+ .dev_id = &nmdk_clkevt,
+};
+
+void nmdk_clksrc_reset(void)
+{
+ /* Disable */
+ writel(0, mtu_base + MTU_CR(0));
+
+ /* ClockSource: configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(0));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+ writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(0));
+}
+
+void __init nmdk_timer_init(void __iomem *base, int irq)
+{
+ unsigned long rate;
+ struct clk *clk0, *pclk0;
+
+ mtu_base = base;
+
+ pclk0 = clk_get_sys("mtu0", "apb_pclk");
+ BUG_ON(IS_ERR(pclk0));
+ BUG_ON(clk_prepare(pclk0) < 0);
+ BUG_ON(clk_enable(pclk0) < 0);
+
+ clk0 = clk_get_sys("mtu0", NULL);
+ BUG_ON(IS_ERR(clk0));
+ BUG_ON(clk_prepare(clk0) < 0);
+ BUG_ON(clk_enable(clk0) < 0);
+
+ /*
+ * The tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or
+ * 133 MHz for ux500.
+ * Use a divide-by-16 prescaler if the tick rate is above 32 MHz.
+ * At 32 MHz, the timer (with its 32-bit counter) can be programmed
+ * to wake up at most ~127 s ahead in time. Dividing a 2.4 MHz timer
+ * by 16 would give too low a timer resolution.
+ */
+ rate = clk_get_rate(clk0);
+ if (rate > 32000000) {
+ rate /= 16;
+ clk_prescale = MTU_CRn_PRESCALE_16;
+ } else {
+ clk_prescale = MTU_CRn_PRESCALE_1;
+ }
+
+ /* Cycles for periodic mode */
+ nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ);
+
+
+ /* Timer 0 is the free running clocksource */
+ nmdk_clksrc_reset();
+
+ if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
+ rate, 200, 32, clocksource_mmio_readl_down))
+ pr_err("timer: failed to initialize clock source %s\n",
+ "mtu_0");
+
+#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
+ setup_sched_clock(nomadik_read_sched_clock, 32, rate);
+#endif
+
+ /* Timer 1 is used for events, register irq and clockevents */
+ setup_irq(irq, &nmdk_timer_irq);
+ nmdk_clkevt.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+}
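
To make the prescaler comment above concrete (editorial arithmetic, not part of the patch): with a 32-bit down-counter the longest programmable one-shot interval is 0xffffffff / rate seconds, about 134 s at exactly 32 MHz (the driver comment quotes a slightly more conservative 127 s) but only about 32 s at an undivided 133 MHz, while dividing a 2.4 MHz input by 16 would leave ticks of roughly 6.7 us. The helper below is a hypothetical sketch of that bound:

/* Editorial sketch: longest interval a 32-bit MTU counter can encode at
 * a given input rate, in whole seconds.
 */
#include <linux/types.h>

static inline u64 mtu_max_interval_secs(unsigned long rate)
{
	return 0xffffffffULL / rate;	/* ~134 at 32 MHz, ~32 at 133 MHz */
}
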
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a5f7829f2799..488c14cc8dbf 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -726,7 +726,7 @@ err0:
return ret;
}
-static int __devinit sh_cmt_probe(struct platform_device *pdev)
+static int sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -767,14 +767,14 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_cmt_remove(struct platform_device *pdev)
+static int sh_cmt_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
- .remove = __devexit_p(sh_cmt_remove),
+ .remove = sh_cmt_remove,
.driver = {
.name = "sh_cmt",
}
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index c5eea858054a..83943e27cfac 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -321,7 +321,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
return ret;
}
-static int __devinit sh_mtu2_probe(struct platform_device *pdev)
+static int sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -362,14 +362,14 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_mtu2_remove(struct platform_device *pdev)
+static int sh_mtu2_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent */
}
static struct platform_driver sh_mtu2_device_driver = {
.probe = sh_mtu2_probe,
- .remove = __devexit_p(sh_mtu2_remove),
+ .remove = sh_mtu2_remove,
.driver = {
.name = "sh_mtu2",
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0cc4add88279..b4502edce2a1 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -484,7 +484,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
return ret;
}
-static int __devinit sh_tmu_probe(struct platform_device *pdev)
+static int sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
struct sh_timer_config *cfg = pdev->dev.platform_data;
@@ -525,14 +525,14 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit sh_tmu_remove(struct platform_device *pdev)
+static int sh_tmu_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
- .remove = __devexit_p(sh_tmu_remove),
+ .remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
}
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
new file mode 100644
index 000000000000..3cd1bd3d7aee
--- /dev/null
+++ b/drivers/clocksource/sunxi_timer.c
@@ -0,0 +1,171 @@
+/*
+ * Allwinner A1X SoCs timer handling.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Benn Huang <benn@allwinnertech.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sunxi_timer.h>
+#include <linux/clk/sunxi.h>
+
+#define TIMER_CTL_REG 0x00
+#define TIMER_CTL_ENABLE (1 << 0)
+#define TIMER_IRQ_ST_REG 0x04
+#define TIMER0_CTL_REG 0x10
+#define TIMER0_CTL_ENABLE (1 << 0)
+#define TIMER0_CTL_AUTORELOAD (1 << 1)
+#define TIMER0_CTL_ONESHOT (1 << 7)
+#define TIMER0_INTVAL_REG 0x14
+#define TIMER0_CNTVAL_REG 0x18
+
+#define TIMER_SCAL 16
+
+static void __iomem *timer_base;
+
+static void sunxi_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ u32 u = readl(timer_base + TIMER0_CTL_REG);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ u &= ~(TIMER0_CTL_ONESHOT);
+ writel(u | TIMER0_CTL_ENABLE, timer_base + TIMER0_CTL_REG);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel(u | TIMER0_CTL_ONESHOT, timer_base + TIMER0_CTL_REG);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ writel(u & ~(TIMER0_CTL_ENABLE), timer_base + TIMER0_CTL_REG);
+ break;
+ }
+}
+
+static int sunxi_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ u32 u = readl(timer_base + TIMER0_CTL_REG);
+ writel(evt, timer_base + TIMER0_CNTVAL_REG);
+ writel(u | TIMER0_CTL_ENABLE | TIMER0_CTL_AUTORELOAD,
+ timer_base + TIMER0_CTL_REG);
+
+ return 0;
+}
+
+static struct clock_event_device sunxi_clockevent = {
+ .name = "sunxi_tick",
+ .shift = 32,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sunxi_clkevt_mode,
+ .set_next_event = sunxi_clkevt_next_event,
+};
+
+
+static irqreturn_t sunxi_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+ writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction sunxi_timer_irq = {
+ .name = "sunxi_timer0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sunxi_timer_interrupt,
+ .dev_id = &sunxi_clockevent,
+};
+
+static struct of_device_id sunxi_timer_dt_ids[] = {
+ { .compatible = "allwinner,sunxi-timer" },
+ { }
+};
+
+static void __init sunxi_timer_init(void)
+{
+ struct device_node *node;
+ unsigned long rate = 0;
+ struct clk *clk;
+ int ret, irq;
+ u32 val;
+
+ node = of_find_matching_node(NULL, sunxi_timer_dt_ids);
+ if (!node)
+ panic("No sunxi timer node");
+
+ timer_base = of_iomap(node, 0);
+ if (!timer_base)
+ panic("Can't map registers");
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0)
+ panic("Can't parse IRQ");
+
+ sunxi_init_clocks();
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk))
+ panic("Can't get timer clock");
+
+ rate = clk_get_rate(clk);
+
+ writel(rate / (TIMER_SCAL * HZ),
+ timer_base + TIMER0_INTVAL_REG);
+
+ /* set clock source to HOSC, 16 pre-division */
+ val = readl(timer_base + TIMER0_CTL_REG);
+ val &= ~(0x07 << 4);
+ val &= ~(0x03 << 2);
+ val |= (4 << 4) | (1 << 2);
+ writel(val, timer_base + TIMER0_CTL_REG);
+
+ /* set mode to auto reload */
+ val = readl(timer_base + TIMER0_CTL_REG);
+ writel(val | TIMER0_CTL_AUTORELOAD, timer_base + TIMER0_CTL_REG);
+
+ ret = setup_irq(irq, &sunxi_timer_irq);
+ if (ret)
+ pr_warn("failed to setup irq %d\n", irq);
+
+ /* Enable timer0 interrupt */
+ val = readl(timer_base + TIMER_CTL_REG);
+ writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
+
+ sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
+ NSEC_PER_SEC,
+ sunxi_clockevent.shift);
+ sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0xff,
+ &sunxi_clockevent);
+ sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x1,
+ &sunxi_clockevent);
+ sunxi_clockevent.cpumask = cpumask_of(0);
+
+ clockevents_register_device(&sunxi_clockevent);
+}
+
+struct sys_timer sunxi_timer = {
+ .init = sunxi_timer_init,
+};
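
For reference, a small editorial sketch of the fixed-point setup above, assuming a 24 MHz oscillator (the input rate is not stated in this hunk): div_sc() computes mult so that clockevent_delta2ns(cycles) reduces to cycles * NSEC_PER_SEC / (rate / TIMER_SCAL). The function name is hypothetical:

/* Editorial sketch (assumes a 24 MHz HOSC, not stated in this hunk):
 * the cycles-to-nanoseconds conversion that the mult/shift pair above
 * encodes, written out directly. 0xff timer cycles at 24 MHz / 16 is
 * about 170 us, which is what bounds max_delta_ns as configured above.
 */
#include <linux/time.h>
#include <linux/types.h>

static inline u64 sunxi_cycles_to_ns_sketch(u64 cycles)
{
	const u64 tick_hz = 24000000 / 16;	/* HOSC / TIMER_SCAL */

	return cycles * NSEC_PER_SEC / tick_hz;
}
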
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4674f94957cd..a4605fd7e303 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -167,7 +168,6 @@ void __init armada_370_xp_timer_init(void)
u32 u;
struct device_node *np;
unsigned int timer_clk;
- int ret;
np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
@@ -179,13 +179,14 @@ void __init armada_370_xp_timer_init(void)
timer_base + TIMER_CTRL_OFF);
timer_clk = 25000000;
} else {
- u32 clk = 0;
- ret = of_property_read_u32(np, "clock-frequency", &clk);
- WARN_ON(!clk || ret < 0);
+ unsigned long rate = 0;
+ struct clk *clk = of_clk_get(np, 0);
+ WARN_ON(IS_ERR(clk));
+ rate = clk_get_rate(clk);
u = readl(timer_base + TIMER_CTRL_OFF);
writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
timer_base + TIMER_CTRL_OFF);
- timer_clk = clk / TIMER_DIVIDER;
+ timer_clk = rate / TIMER_DIVIDER;
}
/* We use timer 0 as clocksource, and timer 1 for
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 965b7811e04f..7b695913cb30 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -256,7 +256,7 @@ static struct cn_dev cdev = {
.input = cn_rx_skb,
};
-static int __devinit cn_init(void)
+static int cn_init(void)
{
struct cn_dev *dev = &cdev;
struct netlink_kernel_cfg cfg = {
@@ -281,7 +281,7 @@ static int __devinit cn_init(void)
return 0;
}
-static void __devexit cn_fini(void)
+static void cn_fini(void)
{
struct cn_dev *dev = &cdev;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index ea512f47b789..e0a899f25e37 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -20,6 +20,9 @@ if CPU_FREQ
config CPU_FREQ_TABLE
tristate
+config CPU_FREQ_GOV_COMMON
+ bool
+
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
select CPU_FREQ_TABLE
@@ -141,6 +144,7 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
select CPU_FREQ_TABLE
+ select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor does a periodic polling and
@@ -159,6 +163,7 @@ config CPU_FREQ_GOV_ONDEMAND
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
+ select CPU_FREQ_GOV_COMMON
help
'conservative' - this driver is rather similar to the 'ondemand'
governor both in its source code and its purpose, the difference is
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5961e6415f08..a0b3661d90b0 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+
+config ARM_SPEAR_CPUFREQ
+ bool "SPEAr CPUFreq support"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This adds the CPUFreq driver support for SPEAr SoCs.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 934854ae5eb4..7227cd734042 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -106,7 +106,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
select CPU_FREQ_TABLE
- depends on ACPI && ACPI_PROCESSOR
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1bc90e1306d8..fadc4d496e2f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0d048f6a2b23..7b0d49d78c61 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -1030,4 +1030,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+ X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index e9158278c71d..debc5a7c8db6 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
if (cpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
return PTR_ERR(opp);
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
@@ -174,7 +177,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
-static int __devinit cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_driver_init(void)
{
struct device_node *np;
int ret;
@@ -236,12 +239,14 @@ static int __devinit cpu0_cpufreq_driver_init(void)
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
+ rcu_read_lock();
opp = opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = opp_get_voltage(opp);
opp = opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = opp_get_voltage(opp);
+ rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fb8a5279c5d8..1f93dbd72355 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
-int cpufreq_disabled(void)
+static int cpufreq_disabled(void)
{
return off;
}
@@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
- unsigned int ret = -EINVAL; \
+ unsigned int ret; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
policy->governor->name);
return -EINVAL;
}
@@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- unsigned int ret = -EINVAL;
+ unsigned int ret;
char str_governor[16];
struct cpufreq_policy new_policy;
@@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
*/
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
@@ -512,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
out:
i += sprintf(&buf[i], "\n");
@@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
}
/**
- * show_scaling_driver - show the current cpufreq HW/BIOS limitation
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
*/
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
@@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
int retval = -EINVAL;
+ unsigned int old_target_freq = target_freq;
if (cpufreq_disabled())
return -ENODEV;
- pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
- target_freq, relation);
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+
+ if (target_freq == policy->cur)
+ return 0;
+
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
@@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
+ if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ return 0;
+
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
return -EINVAL;
- if (cpu_online(cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy, cpu);
+ ret = cpufreq_driver->getavg(policy, cpu);
cpufreq_cpu_put(policy);
return ret;
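
The hunk above adds input sanitising to __cpufreq_driver_target(). As a worked illustration with hypothetical numbers: a request of 2400000 kHz against an 800000..2000000 kHz policy is reduced to 2000000 kHz, and becomes a no-op if that already equals policy->cur. A minimal standalone sketch of the clamp:

/* Editorial sketch of the clamping the hunk above introduces. */
static unsigned int clamp_target_freq_sketch(unsigned int target,
					     unsigned int min, unsigned int max)
{
	if (target > max)
		target = max;
	if (target < min)
		target = min;
	return target;
}
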
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index a152af7e1991..64ef737e7e72 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,83 +11,30 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"
+/* Conservative governor macors */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-
-struct cpu_dbs_info_s {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_nice;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- unsigned int down_skip;
- unsigned int requested_freq;
- int cpu;
- unsigned int enable:1;
- /*
- * percpu mutex that serializes governor limit change with
- * do_dbs_timer invocation. We do not want do_dbs_timer to run
- * when user is changing the governor or limits.
- */
- struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
-static unsigned int dbs_enable; /* number of CPUs using this policy */
+static struct dbs_data cs_dbs_data;
+static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int down_threshold;
- unsigned int ignore_nice;
- unsigned int freq_step;
-} dbs_tuners_ins = {
+static struct cs_dbs_tuners cs_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -95,95 +42,121 @@ static struct dbs_tuners {
.freq_step = 5,
};
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+/*
+ * Every sampling_rate, we check if the current idle time is less than 20%
+ * (default); if it is, we try to increase the frequency. Every
+ * sampling_rate * sampling_down_factor, we check if the current idle time
+ * is more than 80%; if it is, we try to decrease the frequency.
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency
+ * reduction happens at minimum steps of 5% (default) of the maximum frequency.
+ */
+static void cs_check_cpu(int cpu, unsigned int load)
{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
+ struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ unsigned int freq_target;
+
+ /*
+ * break out if we 'cannot' reduce the speed as the user might
+ * want freq_step to be zero
+ */
+ if (cs_tuners.freq_step == 0)
+ return;
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ /* Check for frequency increase */
+ if (load > cs_tuners.up_threshold) {
+ dbs_info->down_skip = 0;
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ /* if we are already at full speed then break out early */
+ if (dbs_info->requested_freq == policy->max)
+ return;
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
+ freq_target = (cs_tuners.freq_step * policy->max) / 100;
- return jiffies_to_usecs(idle_time);
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_target == 0))
+ freq_target = 5;
+
+ dbs_info->requested_freq += freq_target;
+ if (dbs_info->requested_freq > policy->max)
+ dbs_info->requested_freq = policy->max;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /*
+ * The optimal frequency is the frequency that is the lowest that can
+ * support the current CPU usage without triggering the up policy. To be
+ * safe, we focus 10 points under the threshold.
+ */
+ if (load < (cs_tuners.down_threshold - 10)) {
+ freq_target = (cs_tuners.freq_step * policy->max) / 100;
+
+ dbs_info->requested_freq -= freq_target;
+ if (dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = policy->min;
+
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+ if (policy->cur == policy->min)
+ return;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
}
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+static void cs_dbs_timer(struct work_struct *work)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+ struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+ struct cs_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cpu;
+ int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else
- idle_time += get_cpu_iowait_time_us(cpu, wall);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- return idle_time;
+ dbs_check_cpu(&cs_dbs_data, cpu);
+
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
-/* keep track of frequency transitions */
-static int
-dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
- freq->cpu);
-
+ struct cs_cpu_dbs_info_s *dbs_info =
+ &per_cpu(cs_cpu_dbs_info, freq->cpu);
struct cpufreq_policy *policy;
- if (!this_dbs_info->enable)
+ if (!dbs_info->enable)
return 0;
- policy = this_dbs_info->cur_policy;
+ policy = dbs_info->cdbs.cur_policy;
/*
- * we only care if our internally tracked freq moves outside
- * the 'valid' ranges of freqency available to us otherwise
- * we do not change it
+ * we only care if our internally tracked freq moves outside the 'valid'
+ * ranges of frequency available to us, otherwise we do not change it
*/
- if (this_dbs_info->requested_freq > policy->max
- || this_dbs_info->requested_freq < policy->min)
- this_dbs_info->requested_freq = freq->new;
+ if (dbs_info->requested_freq > policy->max
+ || dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = freq->new;
return 0;
}
-static struct notifier_block dbs_cpufreq_notifier_block = {
- .notifier_call = dbs_cpufreq_notifier
-};
-
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", min_sampling_rate);
+ return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
}
-define_one_global_ro(sampling_rate_min);
-
-/* cpufreq_conservative Governor Tunables */
-#define show_one(file_name, object) \
-static ssize_t show_##file_name \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
-}
-show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
-show_one(ignore_nice_load, ignore_nice);
-show_one(freq_step, freq_step);
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- dbs_tuners_ins.sampling_down_factor = input;
+ cs_tuners.sampling_down_factor = input;
return count;
}
@@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+ cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
return count;
}
@@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold)
+ if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
return -EINVAL;
- dbs_tuners_ins.up_threshold = input;
+ cs_tuners.up_threshold = input;
return count;
}
@@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold)
+ input >= cs_tuners.up_threshold)
return -EINVAL;
- dbs_tuners_ins.down_threshold = input;
+ cs_tuners.down_threshold = input;
return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
- unsigned int input;
+ unsigned int input, j;
int ret;
- unsigned int j;
-
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
+ if (input == cs_tuners.ignore_nice) /* nothing to do */
return count;
- dbs_tuners_ins.ignore_nice = input;
+ cs_tuners.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
+ struct cs_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall);
+ if (cs_tuners.ignore_nice)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
@@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
if (input > 100)
input = 100;
- /* no need to test here if freq_step is zero as the user might actually
- * want this, they would be crazy though :) */
- dbs_tuners_ins.freq_step = input;
+ /*
+ * no need to test here if freq_step is zero, as the user might actually
+ * want this; they would be crazy though :)
+ */
+ cs_tuners.freq_step = input;
return count;
}
+show_one(cs, sampling_rate, sampling_rate);
+show_one(cs, sampling_down_factor, sampling_down_factor);
+show_one(cs, up_threshold, up_threshold);
+show_one(cs, down_threshold, down_threshold);
+show_one(cs, ignore_nice_load, ignore_nice);
+show_one(cs, freq_step, freq_step);
+
define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
+define_one_global_ro(sampling_rate_min);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = {
NULL
};
-static struct attribute_group dbs_attr_group = {
+static struct attribute_group cs_attr_group = {
.attrs = dbs_attributes,
.name = "conservative",
};
/************************** sysfs end ************************/
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
- unsigned int load = 0;
- unsigned int max_load = 0;
- unsigned int freq_target;
-
- struct cpufreq_policy *policy;
- unsigned int j;
-
- policy = this_dbs_info->cur_policy;
-
- /*
- * Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
- * Every sampling_rate*sampling_down_factor, we check, if current
- * idle time is more than 80%, then we try to decrease frequency
- *
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of maximum frequency
- */
-
- /* Get Absolute Load */
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
-
- j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
-
- wall_time = (unsigned int)
- (cur_wall_time - j_dbs_info->prev_cpu_wall);
- j_dbs_info->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = cur_idle_time;
-
- if (dbs_tuners_ins.ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- j_dbs_info->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
-
- j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
- }
+define_get_cpu_dbs_routines(cs_cpu_dbs_info);
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- load = 100 * (wall_time - idle_time) / wall_time;
-
- if (load > max_load)
- max_load = load;
- }
-
- /*
- * break out if we 'cannot' reduce the speed as the user might
- * want freq_step to be zero
- */
- if (dbs_tuners_ins.freq_step == 0)
- return;
-
- /* Check for frequency increase */
- if (max_load > dbs_tuners_ins.up_threshold) {
- this_dbs_info->down_skip = 0;
-
- /* if we are already at full speed then break out early */
- if (this_dbs_info->requested_freq == policy->max)
- return;
-
- freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_target == 0))
- freq_target = 5;
-
- this_dbs_info->requested_freq += freq_target;
- if (this_dbs_info->requested_freq > policy->max)
- this_dbs_info->requested_freq = policy->max;
-
- __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
- return;
- }
-
- /*
- * The optimal frequency is the frequency that is the lowest that
- * can support the current CPU usage without triggering the up
- * policy. To be safe, we focus 10 points under the threshold.
- */
- if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
- freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
- this_dbs_info->requested_freq -= freq_target;
- if (this_dbs_info->requested_freq < policy->min)
- this_dbs_info->requested_freq = policy->min;
-
- /*
- * if we cannot reduce the frequency anymore, break out early
- */
- if (policy->cur == policy->min)
- return;
-
- __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
- return;
- }
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
-
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
- delay -= jiffies % delay;
-
- mutex_lock(&dbs_info->timer_mutex);
-
- dbs_check_cpu(dbs_info);
-
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
- mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
- delay -= jiffies % delay;
+static struct notifier_block cs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier,
+};
- dbs_info->enable = 1;
- INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct cs_ops cs_ops = {
+ .notifier_block = &cs_cpufreq_notifier_block,
+};
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
-{
- dbs_info->enable = 0;
- cancel_delayed_work_sync(&dbs_info->work);
-}
+static struct dbs_data cs_dbs_data = {
+ .governor = GOV_CONSERVATIVE,
+ .attr_group = &cs_attr_group,
+ .tuners = &cs_tuners,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = cs_dbs_timer,
+ .gov_check_cpu = cs_check_cpu,
+ .gov_ops = &cs_ops,
+};
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info_s *this_dbs_info;
- unsigned int j;
- int rc;
-
- this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
- return -EINVAL;
-
- mutex_lock(&dbs_mutex);
-
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- j_dbs_info->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- this_dbs_info->cpu = cpu;
- this_dbs_info->down_skip = 0;
- this_dbs_info->requested_freq = policy->cur;
-
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_enable++;
- /*
- * Start the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 1) {
- unsigned int latency;
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &dbs_attr_group);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
- /*
- * conservative does not implement micro like ondemand
- * governor, thus we are bound to jiffes/HZ
- */
- min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
- /* Bring kernel and HW constraints together */
- min_sampling_rate = max(min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_tuners_ins.sampling_rate =
- max(min_sampling_rate,
- latency * LATENCY_MULTIPLIER);
-
- cpufreq_register_notifier(
- &dbs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
- mutex_unlock(&dbs_mutex);
-
- dbs_timer_init(this_dbs_info);
-
- break;
-
- case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
-
- mutex_lock(&dbs_mutex);
- dbs_enable--;
- mutex_destroy(&this_dbs_info->timer_mutex);
-
- /*
- * Stop the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 0)
- cpufreq_unregister_notifier(
- &dbs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
- sysfs_remove_group(cpufreq_global_kobject,
- &dbs_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&this_dbs_info->timer_mutex);
- if (policy->max < this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(
- this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(
- this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(this_dbs_info);
- mutex_unlock(&this_dbs_info->timer_mutex);
-
- break;
- }
- return 0;
+ return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -597,13 +333,14 @@ static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
.name = "conservative",
- .governor = cpufreq_governor_dbs,
+ .governor = cs_cpufreq_governor_dbs,
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
.owner = THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
+ mutex_init(&cs_dbs_data.mutex);
return cpufreq_register_governor(&cpufreq_gov_conservative);
}
@@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
-
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors "
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
new file mode 100644
index 000000000000..6c5f1d383cdc
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.c
+ *
+ * CPUFREQ governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cputime.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/tick.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "cpufreq_governor.h"
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
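A minimal sketch (editorial, not part of the patch) of how a sampling routine consumes this helper; cpu, prev_wall and prev_idle are illustrative names for per-CPU state a governor would keep:

u64 cur_wall, cur_idle;
unsigned int wall_us, idle_us;

cur_idle = get_cpu_idle_time(cpu, &cur_wall);     /* both in microseconds */
wall_us  = (unsigned int)(cur_wall - prev_wall);  /* interval length */
idle_us  = (unsigned int)(cur_idle - prev_idle);  /* idle time within it */
prev_wall = cur_wall;
prev_idle = cur_idle;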
+
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
+{
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct cpufreq_policy *policy;
+ unsigned int max_load = 0;
+ unsigned int ignore_nice;
+ unsigned int j;
+
+ if (dbs_data->governor == GOV_ONDEMAND)
+ ignore_nice = od_tuners->ignore_nice;
+ else
+ ignore_nice = cs_tuners->ignore_nice;
+
+ policy = cdbs->cur_policy;
+
+ /* Get Absolute Load (in terms of freq for ondemand gov) */
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+ u64 cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
+ unsigned int load;
+
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ wall_time = (unsigned int)
+ (cur_wall_time - j_cdbs->prev_cpu_wall);
+ j_cdbs->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int)
+ (cur_idle_time - j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_cpu_idle = cur_idle_time;
+
+ if (ignore_nice) {
+ u64 cur_nice;
+ unsigned long cur_nice_jiffies;
+
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_cdbs->prev_cpu_nice;
+ /*
+ * Assumption: nice time between sampling periods will
+ * be less than 2^32 jiffies on a 32-bit system
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
+
+ j_cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
+ if (dbs_data->governor == GOV_ONDEMAND) {
+ struct od_cpu_dbs_info_s *od_j_dbs_info =
+ dbs_data->get_cpu_dbs_info_s(j);
+
+ cur_iowait_time = get_cpu_iowait_time_us(j,
+ &cur_wall_time);
+ if (cur_iowait_time == -1ULL)
+ cur_iowait_time = 0;
+
+ iowait_time = (unsigned int) (cur_iowait_time -
+ od_j_dbs_info->prev_cpu_iowait);
+ od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
+ /*
+ * For the purpose of ondemand, waiting for disk IO is
+ * an indication that you're performance critical, and
+ * not that the system is actually idle. So subtract the
+ * iowait time from the cpu idle time.
+ */
+ if (od_tuners->io_is_busy && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+ }
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ load = 100 * (wall_time - idle_time) / wall_time;
+
+ if (dbs_data->governor == GOV_ONDEMAND) {
+ int freq_avg = __cpufreq_driver_getavg(policy, j);
+ if (freq_avg <= 0)
+ freq_avg = policy->cur;
+
+ load *= freq_avg;
+ }
+
+ if (load > max_load)
+ max_load = load;
+ }
+
+ dbs_data->gov_check_cpu(cpu, max_load);
+}
+EXPORT_SYMBOL_GPL(dbs_check_cpu);
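A worked example of the per-CPU load computation above (editorial, not part of the patch), with made-up numbers for one CPU j in the policy:

/*
 *   wall_time = 10000 us elapsed, idle_time = 4000 us (after the nice and
 *   iowait adjustments):
 *     load = 100 * (10000 - 4000) / 10000   = 60
 *   For GOV_ONDEMAND the load is then weighted by the average frequency,
 *   e.g. freq_avg = 1600000 kHz:
 *     load = 60 * 1600000                   = 96000000
 *   The largest such value across policy->cpus is passed to gov_check_cpu().
 */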
+
+static inline void dbs_timer_init(struct dbs_data *dbs_data,
+ struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+{
+ int delay = delay_for_sampling_rate(sampling_rate);
+
+ INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
+ schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+{
+ cancel_delayed_work_sync(&cdbs->work);
+}
+
+int cpufreq_governor_dbs(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy, unsigned int event)
+{
+ struct od_cpu_dbs_info_s *od_dbs_info = NULL;
+ struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct cpu_dbs_common_info *cpu_cdbs;
+ unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+ int rc;
+
+ cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);
+
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
+ sampling_rate = &cs_tuners->sampling_rate;
+ ignore_nice = cs_tuners->ignore_nice;
+ } else {
+ od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
+ sampling_rate = &od_tuners->sampling_rate;
+ ignore_nice = od_tuners->ignore_nice;
+ }
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ mutex_lock(&dbs_data->mutex);
+
+ dbs_data->enable++;
+ cpu_cdbs->cpu = cpu;
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+
+ j_cdbs->cur_policy = policy;
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_cdbs->prev_cpu_wall);
+ if (ignore_nice)
+ j_cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
+
+ /*
+ * Start the timer schedule work when this governor is used for the
+ * first time
+ */
+ if (dbs_data->enable != 1)
+ goto second_time;
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_data->mutex);
+ return rc;
+ }
+
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
+ /*
+ * conservative does not implement micro-idle accounting like the
+ * ondemand governor, thus we are bound to jiffies/HZ
+ */
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ struct cs_ops *ops = dbs_data->gov_ops;
+
+ cpufreq_register_notifier(ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ } else {
+ struct od_ops *ops = dbs_data->gov_ops;
+
+ od_tuners->io_is_busy = ops->io_busy();
+ }
+
+ /* Bring kernel and HW constraints together */
+ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ *sampling_rate = max(dbs_data->min_sampling_rate, latency *
+ LATENCY_MULTIPLIER);
+
+second_time:
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
+ } else {
+ struct od_ops *ops = dbs_data->gov_ops;
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ ops->powersave_bias_init_cpu(cpu);
+ }
+ mutex_unlock(&dbs_data->mutex);
+
+ mutex_init(&cpu_cdbs->timer_mutex);
+ dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ if (dbs_data->governor == GOV_CONSERVATIVE)
+ cs_dbs_info->enable = 0;
+
+ dbs_timer_exit(cpu_cdbs);
+
+ mutex_lock(&dbs_data->mutex);
+ mutex_destroy(&cpu_cdbs->timer_mutex);
+ dbs_data->enable--;
+ if (!dbs_data->enable) {
+ struct cs_ops *ops = dbs_data->gov_ops;
+
+ sysfs_remove_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (dbs_data->governor == GOV_CONSERVATIVE)
+ cpufreq_unregister_notifier(ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ mutex_unlock(&dbs_data->mutex);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&cpu_cdbs->timer_mutex);
+ if (policy->max < cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ dbs_check_cpu(dbs_data, cpu);
+ mutex_unlock(&cpu_cdbs->timer_mutex);
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
new file mode 100644
index 000000000000..f6616540c53d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -0,0 +1,176 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.h
+ *
+ * Header file for CPUFreq governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
+
+#include <linux/cpufreq.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * governor will work on any processor with transition latency <= 10 ms, using
+ * appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10 ms (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in us.
+ */
+#define MIN_SAMPLING_RATE_RATIO (2)
+#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (100)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
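For orientation (an editorial worked example, not part of the patch), taking the jiffy-based path that the conservative governor uses in GOV_START and assuming HZ = 100 and a driver transition_latency of 500000 ns:

/*
 *   latency           = 500000 / 1000                   =    500 us
 *   min_sampling_rate = max(2 * jiffies_to_usecs(10),
 *                           100 * 500)                  = 200000 us
 *   sampling_rate     = max(200000, 1000 * 500)         = 500000 us
 */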
+
+/* Ondemand Sampling types */
+enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
+
+/* Macro creating sysfs show routines */
+#define show_one(_gov, file_name, object) \
+static ssize_t show_##file_name \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", _gov##_tuners.object); \
+}
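For instance, the show_one(cs, sampling_rate, sampling_rate) invocation in cpufreq_conservative.c expands to roughly the following (editorial expansion shown for clarity, not an addition to the patch):

static ssize_t show_sampling_rate
(struct kobject *kobj, struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", cs_tuners.sampling_rate);
}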
+
+#define define_get_cpu_dbs_routines(_dbs_info) \
+static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu).cdbs; \
+} \
+ \
+static void *get_cpu_dbs_info_s(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu); \
+}
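Likewise, define_get_cpu_dbs_routines(cs_cpu_dbs_info), as used by the conservative governor, expands to (editorial expansion):

static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)
{
        return &per_cpu(cs_cpu_dbs_info, cpu).cdbs;
}

static void *get_cpu_dbs_info_s(int cpu)
{
        return &per_cpu(cs_cpu_dbs_info, cpu);
}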
+
+/*
+ * Abbreviations:
+ * dbs: used as a short form for demand based switching. It helps to keep
+ * variable names smaller and simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
+ */
+
+/* Per cpu structures */
+struct cpu_dbs_common_info {
+ int cpu;
+ u64 prev_cpu_idle;
+ u64 prev_cpu_wall;
+ u64 prev_cpu_nice;
+ struct cpufreq_policy *cur_policy;
+ struct delayed_work work;
+ /*
+ * percpu mutex that serializes governor limit change with gov_dbs_timer
+ * invocation. We do not want gov_dbs_timer to run when the user is changing
+ * the governor or limits.
+ */
+ struct mutex timer_mutex;
+};
+
+struct od_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ u64 prev_cpu_iowait;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_jiffies;
+ unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
+ unsigned int sample_type:1;
+};
+
+struct cs_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+ unsigned int enable:1;
+};
+
+/* Governors' sysfs tunables */
+struct od_dbs_tuners {
+ unsigned int ignore_nice;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_differential;
+ unsigned int powersave_bias;
+ unsigned int io_is_busy;
+};
+
+struct cs_dbs_tuners {
+ unsigned int ignore_nice;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
+/* Per-governor data */
+struct dbs_data {
+ /* Common across governors */
+ #define GOV_ONDEMAND 0
+ #define GOV_CONSERVATIVE 1
+ int governor;
+ unsigned int min_sampling_rate;
+ unsigned int enable; /* number of CPUs using this policy */
+ struct attribute_group *attr_group;
+ void *tuners;
+
+ /* This mutex protects the enable count in governor start/stop */
+ struct mutex mutex;
+
+ struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+ void *(*get_cpu_dbs_info_s)(int cpu);
+ void (*gov_dbs_timer)(struct work_struct *work);
+ void (*gov_check_cpu)(int cpu, unsigned int load);
+
+ /* Governor specific ops, see below */
+ void *gov_ops;
+};
+
+/* Governor specific ops, will be passed to dbs_data->gov_ops */
+struct od_ops {
+ int (*io_busy)(void);
+ void (*powersave_bias_init_cpu)(int cpu);
+ unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation);
+ void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
+};
+
+struct cs_ops {
+ struct notifier_block *notifier_block;
+};
+
+static inline int delay_for_sampling_rate(unsigned int sampling_rate)
+{
+ int delay = usecs_to_jiffies(sampling_rate);
+
+ /* We want all CPUs to do sampling nearly on same jiffy */
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
+
+ return delay;
+}
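A quick editorial illustration of the helper above (assuming HZ = 100, which the patch does not state):

/*
 *   delay_for_sampling_rate(80000):
 *     usecs_to_jiffies(80000) = 8 jiffies
 *   With more than one CPU online, 'jiffies % 8' is subtracted so every
 *   CPU's next expiry lands on the same 8-jiffy boundary and all CPUs
 *   sample at nearly the same time.
 */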
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+int cpufreq_governor_dbs(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy, unsigned int event);
+#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 396322f2a83f..7731f7c7e79a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -10,24 +10,23 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/hrtimer.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/types.h>
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -38,80 +37,14 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event);
+static struct dbs_data od_dbs_data;
+static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
+static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-struct cpu_dbs_info_s {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_iowait;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_nice;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_lo;
- unsigned int freq_lo_jiffies;
- unsigned int freq_hi_jiffies;
- unsigned int rate_mult;
- int cpu;
- unsigned int sample_type:1;
- /*
- * percpu mutex that serializes governor limit change with
- * do_dbs_timer invocation. We do not want do_dbs_timer to run
- * when user is changing the governor or limits.
- */
- struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
-
-static unsigned int dbs_enable; /* number of CPUs using this policy */
-
-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int up_threshold;
- unsigned int down_differential;
- unsigned int ignore_nice;
- unsigned int sampling_down_factor;
- unsigned int powersave_bias;
- unsigned int io_is_busy;
-} dbs_tuners_ins = {
+static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -119,48 +52,35 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
-
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
-
- return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+static void ondemand_powersave_bias_init_cpu(int cpu)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else
- idle_time += get_cpu_iowait_time_us(cpu, wall);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- return idle_time;
+ dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ dbs_info->freq_lo = 0;
}
-static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known (series) of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
-
- if (iowait_time == -1ULL)
- return 0;
-
- return iowait_time;
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
}
/*
@@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal
* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
*/
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
- unsigned int freq_next,
- unsigned int relation)
+ unsigned int freq_next, unsigned int relation)
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
policy->cpu);
if (!dbs_info->freq_table) {
@@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
relation, &index);
freq_req = dbs_info->freq_table[index].frequency;
- freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+ freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
@@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
dbs_info->freq_lo_jiffies = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
jiffies_hi += ((freq_hi - freq_lo) / 2);
jiffies_hi /= (freq_hi - freq_lo);
@@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
return freq_hi;
}
-static void ondemand_powersave_bias_init_cpu(int cpu)
-{
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
- dbs_info->freq_lo = 0;
-}
-
static void ondemand_powersave_bias_init(void)
{
int i;
@@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void)
}
}
-/************************** sysfs interface ************************/
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+ if (od_tuners.powersave_bias)
+ freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ else if (p->cur == p->max)
+ return;
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
+/*
+ * Every sampling_rate, we check if the current idle time is less than 20%
+ * (default); if it is, we try to increase the frequency. Every sampling_rate,
+ * we also look for the lowest frequency which can sustain the load while
+ * keeping idle time over 30%. If such a frequency exists, we try to decrease
+ * to this frequency.
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency
+ * reduction happens at minimum steps of 5% (default) of the current frequency.
+ */
+static void od_check_cpu(int cpu, unsigned int load_freq)
{
- return sprintf(buf, "%u\n", min_sampling_rate);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+
+ dbs_info->freq_lo = 0;
+
+ /* Check for frequency increase */
+ if (load_freq > od_tuners.up_threshold * policy->cur) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ dbs_info->rate_mult =
+ od_tuners.sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ /* if we cannot reduce the frequency anymore, break out early */
+ if (policy->cur == policy->min)
+ return;
+
+ /*
+ * The optimal frequency is the lowest frequency that can support the
+ * current CPU usage without triggering the up policy. To be safe, we
+ * focus 10 points under the threshold.
+ */
+ if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
+ policy->cur) {
+ unsigned int freq_next;
+ freq_next = load_freq / (od_tuners.up_threshold -
+ od_tuners.down_differential);
+
+ /* No longer fully busy, reset rate_mult */
+ dbs_info->rate_mult = 1;
+
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
+ if (!od_tuners.powersave_bias) {
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ } else {
+ int freq = powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq,
+ CPUFREQ_RELATION_L);
+ }
+ }
}
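A worked example of the thresholds used above (editorial, not part of the patch), assuming the non-micro defaults up_threshold = 80 and down_differential = 10, with a hypothetical policy->cur of 2000000 kHz:

/*
 *   load_freq = load * freq_avg, e.g. 30 * 2000000  =  60000000
 *   up_threshold * policy->cur   = 80 * 2000000     = 160000000  (no increase)
 *   (80 - 10) * policy->cur      = 70 * 2000000     = 140000000  (decrease)
 *   freq_next = 60000000 / 70                       = ~857142 kHz,
 *   clamped to policy->min and handed to __cpufreq_driver_target().
 */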
-define_one_global_ro(sampling_rate_min);
+static void od_dbs_timer(struct work_struct *work)
+{
+ struct od_cpu_dbs_info_s *dbs_info =
+ container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cpu;
+ int delay, sample_type = dbs_info->sample_type;
+
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+ /* Common NORMAL_SAMPLE setup */
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ if (sample_type == OD_SUB_SAMPLE) {
+ delay = dbs_info->freq_lo_jiffies;
+ __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
+ dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ } else {
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = dbs_info->freq_hi_jiffies;
+ } else {
+ delay = delay_for_sampling_rate(od_tuners.sampling_rate
+ * dbs_info->rate_mult);
+ }
+ }
+
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+}
+
+/************************** sysfs interface ************************/
-/* cpufreq_ondemand Governor Tunables */
-#define show_one(file_name, object) \
-static ssize_t show_##file_name \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
}
-show_one(sampling_rate, sampling_rate);
-show_one(io_is_busy, io_is_busy);
-show_one(up_threshold, up_threshold);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(ignore_nice_load, ignore_nice);
-show_one(powersave_bias, powersave_bias);
/**
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
* If new rate is smaller than the old, simply updaing
- * dbs_tuners_int.sampling_rate might not be appropriate. For example,
- * if the original sampling_rate was 1 second and the requested new sampling
- * rate is 10 ms because the user needs immediate reaction from ondemand
- * governor, but not sure if higher frequency will be required or not,
- * then, the governor may change the sampling rate too late; up to 1 second
- * later. Thus, if we are reducing the sampling rate, we need to make the
- * new value effective immediately.
+ * od_tuners.sampling_rate might not be appropriate. For example, if the
+ * original sampling_rate was 1 second and the requested new sampling rate is
+ * 10 ms because the user needs an immediate reaction from the ondemand
+ * governor, but is not sure whether a higher frequency will be required,
+ * then the governor may change the sampling rate too late; up to 1 second
+ * later. Thus, if we are reducing the sampling rate, we need to make the new
+ * value effective immediately.
*/
static void update_sampling_rate(unsigned int new_rate)
{
int cpu;
- dbs_tuners_ins.sampling_rate = new_rate
- = max(new_rate, min_sampling_rate);
+ od_tuners.sampling_rate = new_rate = max(new_rate,
+ od_dbs_data.min_sampling_rate);
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy;
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
unsigned long next_sampling, appointed_at;
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
+ if (policy->governor != &cpufreq_gov_ondemand) {
+ cpufreq_cpu_put(policy);
+ continue;
+ }
dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
cpufreq_cpu_put(policy);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- if (!delayed_work_pending(&dbs_info->work)) {
- mutex_unlock(&dbs_info->timer_mutex);
+ if (!delayed_work_pending(&dbs_info->cdbs.work)) {
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
continue;
}
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->work.timer.expires;
-
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->cdbs.work.timer.expires;
if (time_before(next_sampling, appointed_at)) {
- mutex_unlock(&dbs_info->timer_mutex);
- cancel_delayed_work_sync(&dbs_info->work);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->cdbs.work);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
- usecs_to_jiffies(new_rate));
+ schedule_delayed_work_on(dbs_info->cdbs.cpu,
+ &dbs_info->cdbs.work,
+ usecs_to_jiffies(new_rate));
}
- mutex_unlock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
}
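To make the timing concern from the comment above concrete (editorial example with made-up numbers):

/*
 *   old sampling_rate = 1000000 us, pending expiry ~900 ms away,
 *   new_rate = 10000 us:
 *     next_sampling = jiffies + usecs_to_jiffies(10000)   (~10 ms away)
 *     appointed_at  = dbs_info->cdbs.work.timer.expires   (~900 ms away)
 *   time_before(next_sampling, appointed_at) is true, so the pending work
 *   is cancelled and rescheduled ~10 ms out instead of ~900 ms later.
 */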
@@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.io_is_busy = !!input;
+ od_tuners.io_is_busy = !!input;
return count;
}
@@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
- dbs_tuners_ins.up_threshold = input;
+ od_tuners.up_threshold = input;
return count;
}
@@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- dbs_tuners_ins.sampling_down_factor = input;
+ od_tuners.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
dbs_info->rate_mult = 1;
}
return count;
@@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ if (input == od_tuners.ignore_nice) { /* nothing to do */
return count;
}
- dbs_tuners_ins.ignore_nice = input;
+ od_tuners.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(od_cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall);
+ if (od_tuners.ignore_nice)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- dbs_tuners_ins.powersave_bias = input;
+ od_tuners.powersave_bias = input;
ondemand_powersave_bias_init();
return count;
}
+show_one(od, sampling_rate, sampling_rate);
+show_one(od, io_is_busy, io_is_busy);
+show_one(od, up_threshold, up_threshold);
+show_one(od, sampling_down_factor, sampling_down_factor);
+show_one(od, ignore_nice_load, ignore_nice);
+show_one(od, powersave_bias, powersave_bias);
+
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
+define_one_global_ro(sampling_rate_min);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = {
NULL
};
-static struct attribute_group dbs_attr_group = {
+static struct attribute_group od_attr_group = {
.attrs = dbs_attributes,
.name = "ondemand",
};
/************************** sysfs end ************************/
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
-{
- if (dbs_tuners_ins.powersave_bias)
- freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
- else if (p->cur == p->max)
- return;
-
- __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
- CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
-}
-
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
- unsigned int max_load_freq;
-
- struct cpufreq_policy *policy;
- unsigned int j;
-
- this_dbs_info->freq_lo = 0;
- policy = this_dbs_info->cur_policy;
-
- /*
- * Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
- * Every sampling_rate, we look for a the lowest
- * frequency which can sustain the load while keeping idle time over
- * 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of current frequency
- */
-
- /* Get Absolute Load - in terms of freq */
- max_load_freq = 0;
-
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
- unsigned int idle_time, wall_time, iowait_time;
- unsigned int load, load_freq;
- int freq_avg;
-
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
-
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
-
- wall_time = (unsigned int)
- (cur_wall_time - j_dbs_info->prev_cpu_wall);
- j_dbs_info->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = cur_idle_time;
-
- iowait_time = (unsigned int)
- (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
- j_dbs_info->prev_cpu_iowait = cur_iowait_time;
-
- if (dbs_tuners_ins.ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- j_dbs_info->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
-
- j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
- }
-
- /*
- * For the purpose of ondemand, waiting for disk IO is an
- * indication that you're performance critical, and not that
- * the system is actually idle. So subtract the iowait time
- * from the cpu idle time.
- */
-
- if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
- idle_time -= iowait_time;
-
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- load = 100 * (wall_time - idle_time) / wall_time;
-
- freq_avg = __cpufreq_driver_getavg(policy, j);
- if (freq_avg <= 0)
- freq_avg = policy->cur;
-
- load_freq = load * freq_avg;
- if (load_freq > max_load_freq)
- max_load_freq = load_freq;
- }
+define_get_cpu_dbs_routines(od_cpu_dbs_info);
- /* Check for frequency increase */
- if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
- /* If switching to max speed, apply sampling_down_factor */
- if (policy->cur < policy->max)
- this_dbs_info->rate_mult =
- dbs_tuners_ins.sampling_down_factor;
- dbs_freq_increase(policy, policy->max);
- return;
- }
-
- /* Check for frequency decrease */
- /* if we cannot reduce the frequency anymore, break out early */
- if (policy->cur == policy->min)
- return;
-
- /*
- * The optimal frequency is the frequency that is the lowest that
- * can support the current CPU usage without triggering the up
- * policy. To be safe, we focus 10 points under the threshold.
- */
- if (max_load_freq <
- (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
- policy->cur) {
- unsigned int freq_next;
- freq_next = max_load_freq /
- (dbs_tuners_ins.up_threshold -
- dbs_tuners_ins.down_differential);
-
- /* No longer fully busy, reset rate_mult */
- this_dbs_info->rate_mult = 1;
-
- if (freq_next < policy->min)
- freq_next = policy->min;
-
- if (!dbs_tuners_ins.powersave_bias) {
- __cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- } else {
- int freq = powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
- }
- }
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
- int sample_type = dbs_info->sample_type;
-
- int delay;
-
- mutex_lock(&dbs_info->timer_mutex);
-
- /* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- if (!dbs_tuners_ins.powersave_bias ||
- sample_type == DBS_NORMAL_SAMPLE) {
- dbs_check_cpu(dbs_info);
- if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = DBS_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
- } else {
- /* We want all CPUs to do sampling nearly on
- * same jiffy
- */
- delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
- }
- } else {
- __cpufreq_driver_target(dbs_info->cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
- delay = dbs_info->freq_lo_jiffies;
- }
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
- mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+static struct od_ops od_ops = {
+ .io_busy = should_io_be_busy,
+ .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
+ .powersave_bias_target = powersave_bias_target,
+ .freq_increase = dbs_freq_increase,
+};
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct dbs_data od_dbs_data = {
+ .governor = GOV_ONDEMAND,
+ .attr_group = &od_attr_group,
+ .tuners = &od_tuners,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = od_dbs_timer,
+ .gov_check_cpu = od_check_cpu,
+ .gov_ops = &od_ops,
+};
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
{
- cancel_delayed_work_sync(&dbs_info->work);
+ return cpufreq_governor_dbs(&od_dbs_data, policy, event);
}
-/*
- * Not all CPUs want IO time to be accounted as busy; this dependson how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
#endif
- return 0;
-}
-
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
-{
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info_s *this_dbs_info;
- unsigned int j;
- int rc;
-
- this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
- return -EINVAL;
-
- mutex_lock(&dbs_mutex);
-
- dbs_enable++;
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- j_dbs_info->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- this_dbs_info->cpu = cpu;
- this_dbs_info->rate_mult = 1;
- ondemand_powersave_bias_init_cpu(cpu);
- /*
- * Start the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 1) {
- unsigned int latency;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &dbs_attr_group);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
- /* Bring kernel and HW constraints together */
- min_sampling_rate = max(min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_tuners_ins.sampling_rate =
- max(min_sampling_rate,
- latency * LATENCY_MULTIPLIER);
- dbs_tuners_ins.io_is_busy = should_io_be_busy();
- }
- mutex_unlock(&dbs_mutex);
-
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_timer_init(this_dbs_info);
- break;
-
- case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
-
- mutex_lock(&dbs_mutex);
- mutex_destroy(&this_dbs_info->timer_mutex);
- dbs_enable--;
- mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
- sysfs_remove_group(cpufreq_global_kobject,
- &dbs_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&this_dbs_info->timer_mutex);
- if (policy->max < this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(this_dbs_info);
- mutex_unlock(&this_dbs_info->timer_mutex);
- break;
- }
- return 0;
-}
+struct cpufreq_governor cpufreq_gov_ondemand = {
+ .name = "ondemand",
+ .governor = od_cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
static int __init cpufreq_gov_dbs_init(void)
{
u64 idle_time;
int cpu = get_cpu();
+ mutex_init(&od_dbs_data.mutex);
idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
- dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- dbs_tuners_ins.down_differential =
- MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
* timer might skip some samples if idle/sleeping as needed.
*/
- min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
/* For correct statistics, we need 10 ticks for each measure */
- min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+ od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
}
return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
-
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index f13a8a9af6a1..ceee06849b91 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 4c2eb512f2bc..2d948a171155 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 399831690fed..9d7732b81044 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -37,7 +37,7 @@ struct cpufreq_stats {
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
- cputime64_t *time_in_state;
+ u64 *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
@@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
count++;
}
- alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
+ alloc_size = count * sizeof(int) + count * sizeof(u64);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
@@ -364,18 +364,21 @@ static int __init cpufreq_stats_init(void)
if (ret)
return ret;
+ register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_update_policy(cpu);
+
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
cpufreq_unregister_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
+ unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_free_table(cpu);
return ret;
}
- register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu) {
- cpufreq_update_policy(cpu);
- }
return 0;
}
static void __exit cpufreq_stats_exit(void)
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index bedac1aa9be3..c8c3d293cc57 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index af2d81e10f71..7012ea8bf1e7 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -31,13 +31,13 @@ static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-int exynos_verify_speed(struct cpufreq_policy *policy)
+static int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
exynos_info->freq_table);
}
-unsigned int exynos_getspeed(unsigned int cpu)
+static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
@@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy,
}
arm_volt = volt_table[index];
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy,
if (freqs.new != freqs.old)
exynos_info->set_freq(old_index, index);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
@@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_online_mask);
} else {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 90431cb92804..49cda256efb2 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 53ddbc760af7..1180d536d1eb 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -77,7 +77,7 @@ static unsigned int longhaul_index;
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
-
+static int enable;
/* Clock ratios multiplied by 10 */
static int mults[32];
@@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
+static int longhaul_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
@@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = {
.target = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = __devexit_p(longhaul_cpu_exit),
+ .exit = longhaul_cpu_exit,
.name = "longhaul",
.owner = THIS_MODULE,
.attr = longhaul_attr,
@@ -965,6 +965,10 @@ static int __init longhaul_init(void)
if (!x86_match_cpu(longhaul_id))
return -ENODEV;
+ if (!enable) {
+ printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ return -ENODEV;
+ }
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
@@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
* such. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* By default the driver is disabled, to prevent freezes on
+ * incompatible systems. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1f3417a8322d..97102b05843f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
freq = ret;
if (mpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, freqs.new);
return -EINVAL;
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e3ebb4fa2c3e..056faf6af1a9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1186,7 +1186,7 @@ err_out:
return -ENODEV;
}
-static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
+static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
@@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.target = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
- .exit = __devexit_p(powernowk8_cpu_exit),
+ .exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
.owner = THIS_MODULE,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
new file mode 100644
index 000000000000..4575cfe41755
--- /dev/null
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -0,0 +1,291 @@
+/*
+ * drivers/cpufreq/spear-cpufreq.c
+ *
+ * CPU Frequency Scaling for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* SPEAr CPUFreq driver data structure */
+static struct {
+ struct clk *clk;
+ unsigned int transition_latency;
+ struct cpufreq_frequency_table *freq_tbl;
+ u32 cnt;
+} spear_cpufreq;
+
+static int spear_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
+}
+
+static unsigned int spear_cpufreq_get(unsigned int cpu)
+{
+ return clk_get_rate(spear_cpufreq.clk) / 1000;
+}
+
+static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
+{
+ struct clk *sys_pclk;
+ int pclk;
+ /*
+ * In SPEAr1340, cpu clk's parent sys clk can take input from
+ * following sources
+	 * the following sources
+ const char *sys_clk_src[] = {
+ "sys_syn_clk",
+ "pll1_clk",
+ "pll2_clk",
+ "pll3_clk",
+ };
+
+ /*
+	 * As sys clk can have multiple sources, each with its own range
+	 * limitation, we choose the possible source accordingly
+ */
+ if (newfreq <= 300000000)
+ pclk = 0; /* src is sys_syn_clk */
+ else if (newfreq > 300000000 && newfreq <= 500000000)
+ pclk = 3; /* src is pll3_clk */
+ else if (newfreq == 600000000)
+ pclk = 1; /* src is pll1_clk */
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Get parent to sys clock */
+ sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
+ if (IS_ERR(sys_pclk))
+ pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
+
+ return sys_pclk;
+}
+
+/*
+ * In SPEAr1340, we cannot use newfreq directly because we need to actually
+ * access a source clock (clk) which might not be an ancestor of the cpu
+ * clock at present. Hence, on SPEAr1340, we operate on the source clock
+ * directly before switching the cpu clock over to it.
+ */
+static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
+{
+ struct clk *sys_clk;
+ int ret = 0;
+
+ sys_clk = clk_get_parent(spear_cpufreq.clk);
+ if (IS_ERR(sys_clk)) {
+ pr_err("failed to get cpu's parent (sys) clock\n");
+ return PTR_ERR(sys_clk);
+ }
+
+ /* Set the rate of the source clock before changing the parent */
+ ret = clk_set_rate(sys_pclk, newfreq);
+ if (ret) {
+ pr_err("Failed to set sys clk rate to %lu\n", newfreq);
+ return ret;
+ }
+
+ ret = clk_set_parent(sys_clk, sys_pclk);
+ if (ret) {
+ pr_err("Failed to set sys clk parent\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned long newfreq;
+ struct clk *srcclk;
+ int index, ret, mult = 1;
+
+ if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ freqs.cpu = policy->cpu;
+ freqs.old = spear_cpufreq_get(0);
+
+ newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+ if (of_machine_is_compatible("st,spear1340")) {
+ /*
+		 * SPEAr1340 is special: because the cpu clk's parent can take
+		 * multiple clock sources, a different source may be needed for
+		 * different cpu clk frequencies. Hence we need to choose one
+		 * from amongst these possible clock sources.
+ */
+ srcclk = spear1340_cpu_get_possible_parent(newfreq);
+ if (IS_ERR(srcclk)) {
+ pr_err("Failed to get src clk\n");
+ return PTR_ERR(srcclk);
+ }
+
+ /* SPEAr1340: src clk is always 2 * intended cpu clk */
+ mult = 2;
+ } else {
+ /*
+		 * The src clock to be altered is an ancestor of the cpu clock,
+		 * hence we can work on the cpu clk directly
+ */
+ srcclk = spear_cpufreq.clk;
+ }
+
+ newfreq = clk_round_rate(srcclk, newfreq * mult);
+ if (newfreq < 0) {
+ pr_err("clk_round_rate failed for cpu src clock\n");
+ return newfreq;
+ }
+
+ freqs.new = newfreq / 1000;
+ freqs.new /= mult;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ if (mult == 2)
+ ret = spear1340_set_cpu_rate(srcclk, newfreq);
+ else
+ ret = clk_set_rate(spear_cpufreq.clk, newfreq);
+
+ /* Get current rate after clk_set_rate, in case of failure */
+ if (ret) {
+ pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
+ freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ return ret;
+}
+
+static int spear_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
+ if (ret) {
+ pr_err("cpufreq_frequency_table_cpuinfo() failed");
+ return ret;
+ }
+
+ cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
+ policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
+ policy->cur = spear_cpufreq_get(0);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ return 0;
+}
+
+static int spear_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *spear_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver spear_cpufreq_driver = {
+ .name = "cpufreq-spear",
+ .flags = CPUFREQ_STICKY,
+ .verify = spear_cpufreq_verify,
+ .target = spear_cpufreq_target,
+ .get = spear_cpufreq_get,
+ .init = spear_cpufreq_init,
+ .exit = spear_cpufreq_exit,
+ .attr = spear_cpufreq_attr,
+};
+
+static int spear_cpufreq_driver_init(void)
+{
+ struct device_node *np;
+ const struct property *prop;
+ struct cpufreq_frequency_table *freq_tbl;
+ const __be32 *val;
+ int cnt, i, ret;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ pr_err("No cpu node found");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "clock-latency",
+ &spear_cpufreq.transition_latency))
+ spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+
+ prop = of_find_property(np, "cpufreq_tbl", NULL);
+ if (!prop || !prop->value) {
+ pr_err("Invalid cpufreq_tbl");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cnt = prop->length / sizeof(u32);
+ val = prop->value;
+
+ freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+ if (!freq_tbl) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ freq_tbl[i].index = i;
+ freq_tbl[i].frequency = be32_to_cpup(val++);
+ }
+
+ freq_tbl[i].index = i;
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ spear_cpufreq.freq_tbl = freq_tbl;
+
+ of_node_put(np);
+
+ spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(spear_cpufreq.clk)) {
+ pr_err("Unable to get CPU clock\n");
+ ret = PTR_ERR(spear_cpufreq.clk);
+ goto out_put_mem;
+ }
+
+ ret = cpufreq_register_driver(&spear_cpufreq_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("failed register driver: %d\n", ret);
+ clk_put(spear_cpufreq.clk);
+
+out_put_mem:
+ kfree(freq_tbl);
+ return ret;
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+late_initcall(spear_cpufreq_driver_init);
+
+MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
+MODULE_DESCRIPTION("SPEAr CPUFreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index a76b689e553b..c4cc27e5c8a5 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -9,6 +9,15 @@ config CPU_IDLE
If you're using an ACPI-enabled platform, you should say Y here.
+config CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Support multiple cpuidle drivers"
+ depends on CPU_IDLE
+ default n
+ help
+ Allows the cpuidle framework to use different drivers for each CPU.
+	 This is useful on systems whose CPUs differ in their idle states
+	 and latencies. If unsure say N.
+
config CPU_IDLE_GOV_LADDER
bool
depends on CPU_IDLE
@@ -21,3 +30,13 @@ config CPU_IDLE_GOV_MENU
config ARCH_NEEDS_CPU_IDLE_COUPLED
def_bool n
+
+if CPU_IDLE
+
+config CPU_IDLE_CALXEDA
+ bool "CPU Idle Driver for Calxeda processors"
+ depends on ARCH_HIGHBANK
+ help
+ Select this to enable cpuidle on Calxeda processors.
+
+endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 38c8f69f30cf..03ee87482c71 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,3 +4,5 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
+
+obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 3265844839bf..2a297f86dbad 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
int all;
int ret;
- all = coupled->online_count || (coupled->online_count << WAITING_BITS);
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
ret = atomic_add_unless(&coupled->ready_waiting_counts,
-MAX_WAITING_CPUS, all);
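The one-character fix above turns a logical OR into a bitwise OR when packing the online CPU count into both halves of the ready/waiting counter word. A minimal illustration (a sketch only, assuming WAITING_BITS is 16 as defined in coupled.c):

	int online_count = 4;
	int all_bitwise = online_count | (online_count << 16);  /* 0x00040004 */
	int all_logical = online_count || (online_count << 16); /* 1 -- the old, wrong value */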
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
new file mode 100644
index 000000000000..e1aab38c5a8d
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * Based on arch/arm/plat-mxc/cpuidle.c:
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/smp_scu.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+
+extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
+extern void *scu_base_addr;
+
+static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
+
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+static noinline void calxeda_idle_restore(void)
+{
+ set_cr(get_cr() | CR_C);
+ set_auxcr(get_auxcr() | 0x40);
+ scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
+}
+
+static int calxeda_idle_finish(unsigned long val)
+{
+ /* Already flushed cache, but do it again as the outer cache functions
+ * dirty the cache with spinlocks */
+ flush_cache_all();
+
+ set_auxcr(get_auxcr() & ~0x40);
+ set_cr(get_cr() & ~CR_C);
+
+ scu_power_mode(scu_base_addr, SCU_PM_DORMANT);
+
+ cpu_do_idle();
+
+ /* Restore things if we didn't enter power-gating */
+ calxeda_idle_restore();
+ return 1;
+}
+
+static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ highbank_set_cpu_jump(smp_processor_id(), cpu_resume);
+ cpu_suspend(0, calxeda_idle_finish);
+ return index;
+}
+
+static void calxeda_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(calxeda_idle_cpuidle_devices);
+}
+
+static struct cpuidle_driver calxeda_idle_driver = {
+ .name = "calxeda_idle",
+ .en_core_tk_irqen = 1,
+ .states = {
+ ARM_CPUIDLE_WFI_STATE,
+ {
+ .name = "PG",
+ .desc = "Power Gate",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 30,
+ .power_usage = 50,
+ .target_residency = 200,
+ .enter = calxeda_pwrdown_idle,
+ },
+ },
+ .state_count = 2,
+};
+
+static int __init calxeda_cpuidle_init(void)
+{
+ int cpu_id;
+ int ret;
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv = &calxeda_idle_driver;
+
+ if (!of_machine_is_compatible("calxeda,highbank"))
+ return -ENODEV;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret)
+ return ret;
+
+ calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (calxeda_idle_cpuidle_devices == NULL) {
+ ret = -ENOMEM;
+ goto unregister_drv;
+ }
+
+ /* initialize state data for each cpuidle_device */
+ for_each_possible_cpu(cpu_id) {
+ dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
+ dev->cpu = cpu_id;
+ dev->state_count = drv->state_count;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpu %u, error: %d\n",
+ cpu_id, ret);
+ goto uninit;
+ }
+ }
+
+ return 0;
+
+uninit:
+ calxeda_idle_cpuidle_devices_uninit();
+unregister_drv:
+ cpuidle_unregister_driver(drv);
+ return ret;
+}
+module_init(calxeda_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f15b8514a18..e1f6860e069c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -68,25 +68,16 @@ static cpuidle_enter_t cpuidle_enter_ops;
int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
- int i, dead_state = -1;
- int power_usage = -1;
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
-
- if (s->power_usage < power_usage && s->enter_dead) {
- power_usage = s->power_usage;
- dead_state = i;
- }
- }
-
- if (dead_state != -1)
- return drv->states[dead_state].enter_dead(dev, dead_state);
+ for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ if (drv->states[i].enter_dead)
+ return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}
@@ -109,8 +100,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* This can be moved to within driver enter routine
* but that results in multiple copies of same code.
*/
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {
dev->last_residency = 0;
@@ -128,7 +118,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
int next_state, entered_state;
if (off)
@@ -141,9 +131,15 @@ int cpuidle_idle_call(void)
if (!dev || !dev->enabled)
return -EBUSY;
+ drv = cpuidle_get_cpu_driver(dev);
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
+ dev->last_residency = 0;
+ /* give the governor an opportunity to reflect on the outcome */
+ if (cpuidle_curr_governor->reflect)
+ cpuidle_curr_governor->reflect(dev, next_state);
local_irq_enable();
return 0;
}
@@ -308,15 +304,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
int cpuidle_enable_device(struct cpuidle_device *dev)
{
int ret, i;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
if (!dev)
return -EINVAL;
if (dev->enabled)
return 0;
+
+ drv = cpuidle_get_cpu_driver(dev);
+
if (!drv || !cpuidle_curr_governor)
return -EIO;
+
if (!dev->state_count)
dev->state_count = drv->state_count;
@@ -331,7 +331,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
poll_idle_init(drv);
- if ((ret = cpuidle_add_state_sysfs(dev)))
+ ret = cpuidle_add_device_sysfs(dev);
+ if (ret)
return ret;
if (cpuidle_curr_governor->enable &&
@@ -352,7 +353,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return 0;
fail_sysfs:
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
return ret;
}
@@ -368,17 +369,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device);
*/
void cpuidle_disable_device(struct cpuidle_device *dev)
{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
if (!dev || !dev->enabled)
return;
- if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+
+ if (!drv || !cpuidle_curr_governor)
return;
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
- cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
+ cpuidle_curr_governor->disable(drv, dev);
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
enabled_devices--;
}
@@ -394,17 +398,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- if (!try_module_get(cpuidle_driver->owner))
+ if (!try_module_get(drv->owner))
return -EINVAL;
- init_completion(&dev->kobj_unregister);
-
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- ret = cpuidle_add_sysfs(cpu_dev);
+ ret = cpuidle_add_sysfs(dev);
if (ret)
goto err_sysfs;
@@ -416,12 +417,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
return 0;
err_coupled:
- cpuidle_remove_sysfs(cpu_dev);
- wait_for_completion(&dev->kobj_unregister);
+ cpuidle_remove_sysfs(dev);
err_sysfs:
list_del(&dev->device_list);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
return ret;
}
@@ -460,8 +460,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
if (dev->registered == 0)
return;
@@ -470,16 +469,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(cpu_dev);
+ cpuidle_remove_sysfs(dev);
list_del(&dev->device_list);
- wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
cpuidle_coupled_unregister_device(dev);
cpuidle_resume_and_unlock();
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 76e7f696ad8c..ee97e9672ecf 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,8 +5,6 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/device.h>
-
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct list_head cpuidle_governors;
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
+
+struct device;
+
extern int cpuidle_add_interface(struct device *dev);
extern void cpuidle_remove_interface(struct device *dev);
-extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
-extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct device *dev);
-extern void cpuidle_remove_sysfs(struct device *dev);
+extern int cpuidle_add_device_sysfs(struct cpuidle_device *device);
+extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device);
+extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
+extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 87db3877fead..422c7b69ba7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,37 +14,17 @@
#include "cpuidle.h"
-static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
-int cpuidle_driver_refcount;
-
-static void set_power_states(struct cpuidle_driver *drv)
-{
- int i;
-
- /*
- * cpuidle driver should set the drv->power_specified bit
- * before registering if the driver provides
- * power_usage numbers.
- *
- * If power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
- drv->states[i].power_usage = -1 - i;
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
+static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
+
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ drv->refcnt = 0;
}
-/**
- * cpuidle_register_driver - registers a driver
- * @drv: the driver
- */
-int cpuidle_register_driver(struct cpuidle_driver *drv)
+static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
{
if (!drv || !drv->state_count)
return -EINVAL;
@@ -52,31 +32,101 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver) {
- spin_unlock(&cpuidle_driver_lock);
+ if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY;
+
+ __cpuidle_driver_init(drv);
+
+ __cpuidle_set_cpu_driver(drv, cpu);
+
+ return 0;
+}
+
+static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
+{
+ if (drv != __cpuidle_get_cpu_driver(cpu))
+ return;
+
+ if (!WARN_ON(drv->refcnt > 0))
+ __cpuidle_set_cpu_driver(NULL, cpu);
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+
+static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers);
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ per_cpu(cpuidle_drivers, cpu) = drv;
+}
+
+static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
+{
+ return per_cpu(cpuidle_drivers, cpu);
+}
+
+static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ __cpuidle_unregister_driver(drv, cpu);
+}
+
+static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int ret = 0;
+ int i, cpu;
+
+ for_each_present_cpu(cpu) {
+ ret = __cpuidle_register_driver(drv, cpu);
+ if (ret)
+ break;
}
- if (!drv->power_specified)
- set_power_states(drv);
+ if (ret)
+ for_each_present_cpu(i) {
+ if (i == cpu)
+ break;
+ __cpuidle_unregister_driver(drv, i);
+ }
- cpuidle_curr_driver = drv;
+ return ret;
+}
+
+int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
- return 0;
+ return ret;
+}
+
+void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
}
-EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
- * cpuidle_get_driver - return the current driver
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
*/
-struct cpuidle_driver *cpuidle_get_driver(void)
+int cpuidle_register_driver(struct cpuidle_driver *drv)
{
- return cpuidle_curr_driver;
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
@@ -84,20 +134,88 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver);
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (drv != cpuidle_curr_driver) {
- WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
- drv->name);
- return;
- }
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+#else
+
+static struct cpuidle_driver *cpuidle_curr_driver;
+
+static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ cpuidle_curr_driver = drv;
+}
+
+static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
+{
+ return cpuidle_curr_driver;
+}
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+ int ret, cpu;
+ cpu = get_cpu();
spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
- if (!WARN_ON(cpuidle_driver_refcount > 0))
- cpuidle_curr_driver = NULL;
+/**
+ * cpuidle_unregister_driver - unregisters a driver
+ * @drv: the driver
+ */
+void cpuidle_unregister_driver(struct cpuidle_driver *drv)
+{
+ int cpu;
+ cpu = get_cpu();
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+#endif
+
+/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ struct cpuidle_driver *drv;
+ int cpu;
+
+ cpu = get_cpu();
+ drv = __cpuidle_get_cpu_driver(cpu);
+ put_cpu();
+
+ return drv;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
+ * cpuidle_get_cpu_driver - return the driver tied with a cpu
+ */
+struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
+{
+ if (!dev)
+ return NULL;
+
+ return __cpuidle_get_cpu_driver(dev->cpu);
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
struct cpuidle_driver *cpuidle_driver_ref(void)
{
@@ -105,8 +223,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
spin_lock(&cpuidle_driver_lock);
- drv = cpuidle_curr_driver;
- cpuidle_driver_refcount++;
+ drv = cpuidle_get_driver();
+ drv->refcnt++;
spin_unlock(&cpuidle_driver_lock);
return drv;
@@ -114,10 +232,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
void cpuidle_driver_unref(void)
{
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+
spin_lock(&cpuidle_driver_lock);
- if (!WARN_ON(cpuidle_driver_refcount <= 0))
- cpuidle_driver_refcount--;
+ if (drv && !WARN_ON(drv->refcnt <= 0))
+ drv->refcnt--;
spin_unlock(&cpuidle_driver_lock);
}
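For context, here is a minimal sketch of how a platform could use the per-CPU registration API introduced above when CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is enabled. The driver names, the single dummy state, the two-CPU topology and the initcall are assumptions for illustration only, and the per-CPU cpuidle_device registration is omitted for brevity.

	#include <linux/cpuidle.h>
	#include <linux/init.h>
	#include <linux/module.h>

	/* Dummy state entry hook; a real driver would run a platform idle here. */
	static int example_idle_enter(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
	{
		return index;
	}

	static struct cpuidle_driver big_idle_driver = {
		.name = "big_idle",
		.owner = THIS_MODULE,
		.states = {
			{
				.name = "WFI",
				.desc = "shallow idle",
				.flags = CPUIDLE_FLAG_TIME_VALID,
				.exit_latency = 1,
				.target_residency = 1,
				.enter = example_idle_enter,
			},
		},
		.state_count = 1,
	};

	static struct cpuidle_driver little_idle_driver = {
		.name = "little_idle",
		.owner = THIS_MODULE,
		.states = {
			{
				.name = "WFI",
				.desc = "shallow idle",
				.flags = CPUIDLE_FLAG_TIME_VALID,
				.exit_latency = 1,
				.target_residency = 1,
				.enter = example_idle_enter,
			},
		},
		.state_count = 1,
	};

	static int __init example_idle_init(void)
	{
		int ret;

		/* Tie a different driver to each CPU of an assumed two-CPU system. */
		ret = cpuidle_register_cpu_driver(&big_idle_driver, 0);
		if (ret)
			return ret;

		ret = cpuidle_register_cpu_driver(&little_idle_driver, 1);
		if (ret)
			cpuidle_unregister_cpu_driver(&big_idle_driver, 0);

		return ret;
	}
	device_initcall(example_idle_init);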
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 5b1f2c372c1f..fe343a06b7da 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,6 +28,13 @@
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
+/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
+#define MAX_DEVIATION 60
+
+static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
+static DEFINE_PER_CPU(int, hrtimer_status);
+/* menu hrtimer mode */
+enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/*
* Concepts and ideas behind the menu governor
@@ -109,6 +116,13 @@
*
*/
+/*
+ * The C-state residency is so long that it is worthwhile to exit
+ * the shallow C-state and re-enter a deeper C-state.
+ */
+static unsigned int perfect_cstate_ms __read_mostly = 30;
+module_param(perfect_cstate_ms, uint, 0000);
+
struct menu_device {
int last_state_idx;
int needs_update;
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/* Cancel the hrtimer if it is not triggered yet */
+void menu_hrtimer_cancel(void)
+{
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
+
+	/* The timer has not expired yet */
+ if (per_cpu(hrtimer_status, cpu)) {
+ hrtimer_cancel(hrtmr);
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+ }
+}
+EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
+
+/* Call back for hrtimer is triggered */
+static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ int cpu = smp_processor_id();
+ struct menu_device *data = &per_cpu(menu_devices, cpu);
+
+	/* In the general case the expected residency is much larger than the
+	 * deepest C-state's target residency, yet the prediction logic still
+	 * predicted a small residency. The prediction history is therefore
+	 * clearly broken if this timer fires, so reset the correction factor.
+	 */
+ if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
+
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+
+ return HRTIMER_NORESTART;
+}
+
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static void detect_repeating_patterns(struct menu_device *data)
+static u32 get_typical_interval(struct menu_device *data)
{
- int i;
- uint64_t avg = 0;
- uint64_t stddev = 0; /* contains the square of the std deviation */
-
- /* first calculate average and standard deviation of the past */
- for (i = 0; i < INTERVALS; i++)
- avg += data->intervals[i];
- avg = avg / INTERVALS;
-
- /* if the avg is beyond the known next tick, it's worthless */
- if (avg > data->expected_us)
- return;
+ int i = 0, divisor = 0;
+ uint64_t max = 0, avg = 0, stddev = 0;
+ int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
+ unsigned int ret = 0;
- for (i = 0; i < INTERVALS; i++)
- stddev += (data->intervals[i] - avg) *
- (data->intervals[i] - avg);
+again:
- stddev = stddev / INTERVALS;
+ /* first calculate average and standard deviation of the past */
+ max = avg = divisor = stddev = 0;
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ avg += value;
+ divisor++;
+ if (value > max)
+ max = value;
+ }
+ }
+ do_div(avg, divisor);
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ int64_t diff = value - avg;
+ stddev += diff * diff;
+ }
+ }
+ do_div(stddev, divisor);
+ stddev = int_sqrt(stddev);
/*
- * now.. if stddev is small.. then assume we have a
- * repeating pattern and predict we keep doing this.
+ * If we have outliers to the upside in our distribution, discard
+ * those by setting the threshold to exclude these outliers, then
+ * calculate the average and standard deviation again. Once we get
+ * down to the bottom 3/4 of our samples, stop excluding samples.
+ *
+	 * This can deal with workloads that have long pauses interspersed
+	 * with sporadic activity and a bunch of short pauses.
+	 *
+	 * The typical interval is accepted when the standard deviation is
+	 * small in absolute terms, or small compared to the average interval.
*/
-
- if (avg && stddev < STDDEV_THRESH)
+ if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
+ || stddev <= 20) {
data->predicted_us = avg;
+ ret = 1;
+ return ret;
+
+ } else if ((divisor * 4) > INTERVALS * 3) {
+ /* Exclude the max interval */
+ thresh = max - 1;
+ goto again;
+ }
+
+ return ret;
}
/**
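As a worked example of the estimator above (the sample values are invented for illustration), consider a history that mixes short idle periods with one long outlier:

	unsigned int intervals[8] = { 500, 480, 510, 495, 20000, 505, 490, 500 };
	/*
	 * Pass 1: avg ~= 2935us, stddev ~= 6450us -> too noisy; since at least
	 *         3/4 of the samples are still included, the 20000us outlier
	 *         is discarded by lowering the threshold to max - 1.
	 * Pass 2: avg ~= 497us, stddev ~= 9us -> avg > 6 * stddev with 7 of 8
	 *         samples remaining, so predicted_us becomes ~497 and the
	 *         function returns 1.
	 */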
@@ -236,10 +312,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- int power_usage = -1;
int i;
int multiplier;
struct timespec t;
+ int repeat = 0, low_predicted = 0;
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) {
menu_update(drv, dev);
@@ -274,7 +352,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- detect_repeating_patterns(data);
+ repeat = get_typical_interval(data);
/*
* We want to default to C1 (hlt), not to busy polling
@@ -295,18 +373,55 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
- if (s->target_residency > data->predicted_us)
+ if (s->target_residency > data->predicted_us) {
+ low_predicted = 1;
continue;
+ }
if (s->exit_latency > latency_req)
continue;
if (s->exit_latency * multiplier > data->predicted_us)
continue;
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
+ }
+
+	/* The deepest C-state was not chosen: predicted residency is too low */
+ if (low_predicted) {
+ unsigned int timer_us = 0;
+ unsigned int perfect_us = 0;
+
+ /*
+ * Set a timer to detect whether this sleep is much
+ * longer than repeat mode predicted. If the timer
+ * triggers, the code will evaluate whether to put
+ * the CPU into a deeper C-state.
+ * The timer is cancelled on CPU wakeup.
+ */
+ timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
+
+ perfect_us = perfect_cstate_ms * 1000;
+
+ if (repeat && (4 * timer_us < data->expected_us)) {
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+ /* In repeat case, menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
+ } else if (perfect_us < data->expected_us) {
+ /*
+ * The next timer is long. This could be because
+ * we did not make a useful prediction.
+			 * In that case, it makes sense to re-enter
+			 * a deeper C-state after some time.
+ */
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+ /* In general case, menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
}
+
}
return data->last_state_idx;
@@ -399,6 +514,9 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
+ struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device));
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 5f809e337b89..428754af6236 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/capability.h>
+#include <linux/device.h>
#include "cpuidle.h"
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = {
NULL
};
+struct cpuidle_state_kobj {
+ struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
@@ -356,14 +364,14 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
}
/**
- * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes
+ * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes
* @device: the target device
*/
-int cpuidle_add_state_sysfs(struct cpuidle_device *device)
+static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
for (i = 0; i < device->state_count; i++) {
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
- ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
- "state%d", i);
+ ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
+ &device->kobj, "state%d", i);
if (ret) {
kfree(kobj);
goto error_state;
@@ -393,10 +401,10 @@ error_state:
}
/**
- * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes
+ * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
* @device: the target device
*/
-void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
+static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
int i;
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
cpuidle_free_state_kobj(device, i);
}
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+#define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj)
+#define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr)
+
+#define define_one_driver_ro(_name, show) \
+ static struct cpuidle_driver_attr attr_driver_##_name = \
+		__ATTR(_name, 0444, show, NULL)
+
+struct cpuidle_driver_kobj {
+ struct cpuidle_driver *drv;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+struct cpuidle_driver_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_driver *, char *);
+ ssize_t (*store)(struct cpuidle_driver *, const char *, size_t);
+};
+
+static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
+{
+ ssize_t ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
+}
+
+static void cpuidle_driver_sysfs_release(struct kobject *kobj)
+{
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ complete(&driver_kobj->kobj_unregister);
+}
+
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr,
+ char * buf)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->show)
+ ret = dattr->show(driver_kobj->drv, buf);
+
+ return ret;
+}
+
+static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->store)
+ ret = dattr->store(driver_kobj->drv, buf, size);
+
+ return ret;
+}
+
+define_one_driver_ro(name, show_driver_name);
+
+static const struct sysfs_ops cpuidle_driver_sysfs_ops = {
+ .show = cpuidle_driver_show,
+ .store = cpuidle_driver_store,
+};
+
+static struct attribute *cpuidle_driver_default_attrs[] = {
+ &attr_driver_name.attr,
+ NULL
+};
+
+static struct kobj_type ktype_driver_cpuidle = {
+ .sysfs_ops = &cpuidle_driver_sysfs_ops,
+ .default_attrs = cpuidle_driver_default_attrs,
+ .release = cpuidle_driver_sysfs_release,
+};
+
+/**
+ * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
+ * @device: the target device
+ */
+static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv;
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int ret;
+
+ kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL);
+ if (!kdrv)
+ return -ENOMEM;
+
+ kdrv->drv = drv;
+ init_completion(&kdrv->kobj_unregister);
+
+ ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
+ &dev->kobj, "driver");
+ if (ret) {
+ kfree(kdrv);
+ return ret;
+ }
+
+ kobject_uevent(&kdrv->kobj, KOBJ_ADD);
+ dev->kobj_driver = kdrv;
+
+ return ret;
+}
+
+/**
+ * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
+ * @device: the target device
+ */
+static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv = dev->kobj_driver;
+ kobject_put(&kdrv->kobj);
+ wait_for_completion(&kdrv->kobj_unregister);
+ kfree(kdrv);
+}
+#else
+static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ ;
+}
+#endif
+
+/**
+ * cpuidle_add_device_sysfs - adds device specific sysfs attributes
+ * @device: the target device
+ */
+int cpuidle_add_device_sysfs(struct cpuidle_device *device)
+{
+ int ret;
+
+ ret = cpuidle_add_state_sysfs(device);
+ if (ret)
+ return ret;
+
+ ret = cpuidle_add_driver_sysfs(device);
+ if (ret)
+ cpuidle_remove_state_sysfs(device);
+ return ret;
+}
+
+/**
+ * cpuidle_remove_device_sysfs - removes device specific sysfs attributes
+ * @device: the target device
+ */
+void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
+{
+ cpuidle_remove_driver_sysfs(device);
+ cpuidle_remove_state_sysfs(device);
+}
+
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
* @dev: the target device
*/
-int cpuidle_add_sysfs(struct device *cpu_dev)
+int cpuidle_add_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
- dev = per_cpu(cpuidle_devices, cpu);
+ init_completion(&dev->kobj_unregister);
+
error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev)
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
* @dev: the target device
*/
-void cpuidle_remove_sysfs(struct device *cpu_dev)
+void cpuidle_remove_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
-
- dev = per_cpu(cpuidle_devices, cpu);
kobject_put(&dev->kobj);
+ wait_for_completion(&dev->kobj_unregister);
}
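With CONFIG_CPU_IDLE_MULTIPLE_DRIVERS, each CPU's cpuidle directory gains a "driver" subdirectory alongside its state directories; a sketch of the resulting layout (the cpu0 path is assumed from the standard cpu device location):

	/sys/devices/system/cpu/cpu0/cpuidle/
		state0/ ...
		state1/ ...
		driver/name	(reports the name of the driver bound to cpu0)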
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f6644f59fd9d..87ec4d027c25 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -254,6 +254,7 @@ config CRYPTO_DEV_OMAP_AES
tristate "Support for OMAP AES hw engine"
depends on ARCH_OMAP2 || ARCH_OMAP3
select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 8061336e07e7..c9d9d5c16f94 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1036,7 +1036,7 @@ err_aes_algs:
return err;
}
-static int __devinit atmel_aes_probe(struct platform_device *pdev)
+static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
struct aes_platform_data *pdata;
@@ -1152,7 +1152,7 @@ aes_dd_err:
return err;
}
-static int __devexit atmel_aes_remove(struct platform_device *pdev)
+static int atmel_aes_remove(struct platform_device *pdev)
{
static struct atmel_aes_dev *aes_dd;
@@ -1185,7 +1185,7 @@ static int __devexit atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove = __devexit_p(atmel_aes_remove),
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index bcdf55fdc623..4918e9424d31 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -964,7 +964,7 @@ err_sha_algs:
return err;
}
-static int __devinit atmel_sha_probe(struct platform_device *pdev)
+static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
struct device *dev = &pdev->dev;
@@ -1063,7 +1063,7 @@ sha_dd_err:
return err;
}
-static int __devexit atmel_sha_remove(struct platform_device *pdev)
+static int atmel_sha_remove(struct platform_device *pdev)
{
static struct atmel_sha_dev *sha_dd;
@@ -1093,7 +1093,7 @@ static int __devexit atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove = __devexit_p(atmel_sha_remove),
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 7495f98c7221..7c73fbb17538 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1053,7 +1053,7 @@ err_tdes_algs:
return err;
}
-static int __devinit atmel_tdes_probe(struct platform_device *pdev)
+static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
struct device *dev = &pdev->dev;
@@ -1162,7 +1162,7 @@ tdes_dd_err:
return err;
}
-static int __devexit atmel_tdes_remove(struct platform_device *pdev)
+static int atmel_tdes_remove(struct platform_device *pdev)
{
static struct atmel_tdes_dev *tdes_dd;
@@ -1195,7 +1195,7 @@ static int __devexit atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove = __devexit_p(atmel_tdes_remove),
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.owner = THIS_MODULE,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 5398580b4313..a22f1a9f895f 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -586,7 +586,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
* bfin_crypto_crc_probe - Initialize module
*
*/
-static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
+static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -707,7 +707,7 @@ out_error_free_mem:
* bfin_crypto_crc_remove - Initialize module
*
*/
-static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
+static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
@@ -731,7 +731,7 @@ static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
static struct platform_driver bfin_crypto_crc_driver = {
.probe = bfin_crypto_crc_probe,
- .remove = __devexit_p(bfin_crypto_crc_remove),
+ .remove = bfin_crypto_crc_remove,
.suspend = bfin_crypto_crc_suspend,
.resume = bfin_crypto_crc_resume,
.driver = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bf20dd891705..1c56f63524f2 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -420,7 +420,7 @@ static struct platform_driver caam_driver = {
.of_match_table = caam_match,
},
.probe = caam_probe,
- .remove = __devexit_p(caam_remove),
+ .remove = caam_remove,
};
module_platform_driver(caam_driver);
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 51f196d77f21..0c9ff4971724 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -498,8 +498,7 @@ static struct crypto_alg geode_ecb_alg = {
}
};
-static void __devexit
-geode_aes_remove(struct pci_dev *dev)
+static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
crypto_unregister_alg(&geode_ecb_alg);
@@ -513,8 +512,7 @@ geode_aes_remove(struct pci_dev *dev)
}
-static int __devinit
-geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
ret = pci_enable_device(dev);
@@ -582,7 +580,7 @@ static struct pci_driver geode_aes_driver = {
.name = "Geode LX AES",
.id_table = geode_aes_tbl,
.probe = geode_aes_probe,
- .remove = __devexit_p(geode_aes_remove)
+ .remove = geode_aes_remove,
};
module_pci_driver(geode_aes_driver);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index fda32968a66b..ebf130e894b5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2561,7 +2561,7 @@ static void hifn_tasklet_callback(unsigned long data)
hifn_process_queue(dev);
}
-static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err, i;
struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
return err;
}
-static void __devexit hifn_remove(struct pci_dev *pdev)
+static void hifn_remove(struct pci_dev *pdev)
{
int i;
struct hifn_device *dev;
@@ -2740,7 +2740,7 @@ static struct pci_driver hifn_pci_driver = {
.name = "hifn795x",
.id_table = hifn_pci_tbl,
.probe = hifn_probe,
- .remove = __devexit_p(hifn_remove),
+ .remove = hifn_remove,
};
static int __init hifn_init(void)
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 24ccae453e79..ce6290e5471a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1184,7 +1184,7 @@ MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
static struct platform_driver marvell_crypto = {
.probe = mv_probe,
- .remove = __devexit_p(mv_remove),
+ .remove = mv_remove,
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index aab257403b4a..e1f0ab413c3b 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -34,7 +34,7 @@
#define DRV_MODULE_VERSION "0.2"
#define DRV_MODULE_RELDATE "July 28, 2011"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -1388,7 +1388,7 @@ static int n2_cipher_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct crypto_alg *alg;
@@ -1424,7 +1424,7 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
return err;
}
-static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct ahash_alg *ahash;
@@ -1462,7 +1462,7 @@ static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
return err;
}
-static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
struct hash_alg_common *halg;
@@ -1517,7 +1517,7 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
return err;
}
-static int __devinit n2_register_algs(void)
+static int n2_register_algs(void)
{
int i, err = 0;
@@ -1545,7 +1545,7 @@ out:
return err;
}
-static void __devexit n2_unregister_algs(void)
+static void n2_unregister_algs(void)
{
mutex_lock(&spu_lock);
if (!--algs_registered)
@@ -1822,8 +1822,8 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
return err;
}
-static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
+static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
+ struct spu_mdesc_info *ip)
{
const u64 *ino;
int ino_len;
@@ -1851,10 +1851,10 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
return 0;
}
-static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
+static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+ struct platform_device *dev,
+ struct spu_mdesc_info *ip,
+ const char *node_name)
{
const unsigned int *reg;
u64 node;
@@ -1883,7 +1883,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;
-static int __devinit n2_spu_hvapi_register(void)
+static int n2_spu_hvapi_register(void)
{
int err;
@@ -1909,7 +1909,7 @@ static void n2_spu_hvapi_unregister(void)
static int global_ref;
-static int __devinit grab_global_resources(void)
+static int grab_global_resources(void)
{
int err = 0;
@@ -1973,7 +1973,7 @@ static void release_global_resources(void)
mutex_unlock(&spu_lock);
}
-static struct n2_crypto * __devinit alloc_n2cp(void)
+static struct n2_crypto *alloc_n2cp(void)
{
struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
@@ -1993,7 +1993,7 @@ static void free_n2cp(struct n2_crypto *np)
kfree(np);
}
-static void __devinit n2_spu_driver_version(void)
+static void n2_spu_driver_version(void)
{
static int n2_spu_version_printed;
@@ -2001,7 +2001,7 @@ static void __devinit n2_spu_driver_version(void)
pr_info("%s", version);
}
-static int __devinit n2_crypto_probe(struct platform_device *dev)
+static int n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2077,7 +2077,7 @@ out_free_n2cp:
return err;
}
-static int __devexit n2_crypto_remove(struct platform_device *dev)
+static int n2_crypto_remove(struct platform_device *dev)
{
struct n2_crypto *np = dev_get_drvdata(&dev->dev);
@@ -2092,7 +2092,7 @@ static int __devexit n2_crypto_remove(struct platform_device *dev)
return 0;
}
-static struct n2_mau * __devinit alloc_ncp(void)
+static struct n2_mau *alloc_ncp(void)
{
struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
@@ -2112,7 +2112,7 @@ static void free_ncp(struct n2_mau *mp)
kfree(mp);
}
-static int __devinit n2_mau_probe(struct platform_device *dev)
+static int n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
const char *full_name;
@@ -2179,7 +2179,7 @@ out_free_ncp:
return err;
}
-static int __devexit n2_mau_remove(struct platform_device *dev)
+static int n2_mau_remove(struct platform_device *dev)
{
struct n2_mau *mp = dev_get_drvdata(&dev->dev);
@@ -2217,7 +2217,7 @@ static struct platform_driver n2_crypto_driver = {
.of_match_table = n2_crypto_match,
},
.probe = n2_crypto_probe,
- .remove = __devexit_p(n2_crypto_remove),
+ .remove = n2_crypto_remove,
};
static struct of_device_id n2_mau_match[] = {
@@ -2245,7 +2245,7 @@ static struct platform_driver n2_mau_driver = {
.of_match_table = n2_mau_match,
},
.probe = n2_mau_probe,
- .remove = __devexit_p(n2_mau_remove),
+ .remove = n2_mau_remove,
};
static int __init n2_init(void)
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 0ce625738677..6c4c000671c5 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/vio.h>
#include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -1014,26 +1013,23 @@ error_out:
* NOTIFY_BAD encoded with error number on failure, use
* notifier_to_errno() to decode this value
*/
-static int nx842_OF_notifier(struct notifier_block *np,
- unsigned long action,
- void *update)
+static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
+ void *update)
{
- struct pSeries_reconfig_prop_update *upd;
+ struct of_prop_reconfig *upd = update;
struct nx842_devdata *local_devdata;
struct device_node *node = NULL;
- upd = (struct pSeries_reconfig_prop_update *)update;
-
rcu_read_lock();
local_devdata = rcu_dereference(devdata);
if (local_devdata)
node = local_devdata->dev->of_node;
if (local_devdata &&
- action == PSERIES_UPDATE_PROPERTY &&
- !strcmp(upd->node->name, node->name)) {
+ action == OF_RECONFIG_UPDATE_PROPERTY &&
+ !strcmp(upd->dn->name, node->name)) {
rcu_read_unlock();
- nx842_OF_upd(upd->property);
+ nx842_OF_upd(upd->prop);
} else
rcu_read_unlock();
@@ -1182,7 +1178,7 @@ static int __init nx842_probe(struct vio_dev *viodev,
synchronize_rcu();
kfree(old_devdata);
- pSeries_reconfig_notifier_register(&nx842_of_nb);
+ of_reconfig_notifier_register(&nx842_of_nb);
ret = nx842_OF_upd(NULL);
if (ret && ret != -ENODEV) {
@@ -1228,7 +1224,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
spin_lock_irqsave(&devdata_mutex, flags);
old_devdata = rcu_dereference_check(devdata,
lockdep_is_held(&devdata_mutex));
- pSeries_reconfig_notifier_unregister(&nx842_of_nb);
+ of_reconfig_notifier_unregister(&nx842_of_nb);
rcu_assign_pointer(devdata, NULL);
spin_unlock_irqrestore(&devdata_mutex, flags);
synchronize_rcu();
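The nx-842 hunks swap the pSeries-specific reconfig notifier for the generic OF reconfig chain. A minimal sketch of the driver-side pattern the new API expects, mirroring the callback above (treat it as illustrative; my_of_notifier and my_of_nb are placeholder names):

	/* Sketch of a driver-side OF reconfig notifier. */
	static int my_of_notifier(struct notifier_block *nb, unsigned long action,
				  void *data)
	{
		struct of_prop_reconfig *pr = data;	/* payload for UPDATE_PROPERTY */

		if (action == OF_RECONFIG_UPDATE_PROPERTY)
			pr_info("property %s updated on node %s\n",
				pr->prop->name, pr->dn->name);
		return NOTIFY_OK;
	}

	static struct notifier_block my_of_nb = { .notifier_call = my_of_notifier };

	/* probe:  of_reconfig_notifier_register(&my_of_nb);   */
	/* remove: of_reconfig_notifier_unregister(&my_of_nb); */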
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 638110efae9b..c767f232e693 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -33,7 +33,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/hvcall.h>
#include <asm/vio.h>
@@ -635,8 +634,7 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
nx_ctx->out_sg = NULL;
}
-static int __devinit nx_probe(struct vio_dev *viodev,
- const struct vio_device_id *id)
+static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
viodev->name, viodev->resource_id);
@@ -654,7 +652,7 @@ static int __devinit nx_probe(struct vio_dev *viodev,
return nx_register_algs();
}
-static int __devexit nx_remove(struct vio_dev *viodev)
+static int nx_remove(struct vio_dev *viodev)
{
dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
viodev->unit_address);
@@ -690,7 +688,7 @@ static void __exit nx_fini(void)
vio_unregister_driver(&nx_driver.viodriver);
}
-static struct vio_device_id nx_crypto_driver_ids[] __devinitdata = {
+static struct vio_device_id nx_crypto_driver_ids[] = {
{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
{ "", "" }
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 093a8af59cbe..e66e8ee5a9af 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -29,8 +29,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <linux/omap-dma.h>
/* OMAP TRM gives bitfields as start:end, where start is the higher bit
number. For example 7:0 */
@@ -941,11 +940,6 @@ static int __init omap_aes_mod_init(void)
{
pr_info("loading %s driver\n", "omap-aes");
- if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
- pr_err("Unsupported cpu\n");
- return -ENODEV;
- }
-
return platform_driver_register(&omap_aes_driver);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a3fd6fc504b1..90d34adc2a66 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,8 +37,7 @@
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <linux/omap-dma.h>
#include <mach/irqs.h>
#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
@@ -1138,7 +1137,7 @@ static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
}
}
-static int __devinit omap_sham_probe(struct platform_device *pdev)
+static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
struct device *dev = &pdev->dev;
@@ -1251,7 +1250,7 @@ data_err:
return err;
}
-static int __devexit omap_sham_remove(struct platform_device *pdev)
+static int omap_sham_remove(struct platform_device *pdev)
{
static struct omap_sham_dev *dd;
int i;
@@ -1289,13 +1288,6 @@ static int __init omap_sham_mod_init(void)
{
pr_info("loading %s driver\n", "omap-sham");
- if (!cpu_class_is_omap2() ||
- (omap_type() != OMAP2_DEVICE_TYPE_SEC &&
- omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
- pr_err("Unsupported cpu\n");
- return -ENODEV;
- }
-
return platform_driver_register(&omap_sham_driver);
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 410a03c01ca4..2096d4685a9e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1708,7 +1708,7 @@ static bool spacc_is_compatible(struct platform_device *pdev,
return false;
}
-static int __devinit spacc_probe(struct platform_device *pdev)
+static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
struct resource *mem, *irq;
@@ -1841,7 +1841,7 @@ static int __devinit spacc_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit spacc_remove(struct platform_device *pdev)
+static int spacc_remove(struct platform_device *pdev)
{
struct spacc_alg *alg, *next;
struct spacc_engine *engine = platform_get_drvdata(pdev);
@@ -1863,11 +1863,12 @@ static int __devexit spacc_remove(struct platform_device *pdev)
static const struct platform_device_id spacc_id_table[] = {
{ "picochip,spacc-ipsec", },
{ "picochip,spacc-l2", },
+ { }
};
static struct platform_driver spacc_driver = {
.probe = spacc_probe,
- .remove = __devexit_p(spacc_remove),
+ .remove = spacc_remove,
.driver = {
.name = "picochip,spacc",
#ifdef CONFIG_PM
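The added `{ }` entry in spacc_id_table is the usual sentinel: the platform bus walks an id table until it reaches an entry with an empty name, so a table without a terminator lets the match loop read past the end of the array. The convention, as patched:

	static const struct platform_device_id spacc_id_table[] = {
		{ "picochip,spacc-ipsec", },
		{ "picochip,spacc-l2", },
		{ }	/* sentinel: zeroed entry stops the id-table walk */
	};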
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index a22714412cda..49ad8cbade69 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -30,7 +30,7 @@
#include <crypto/ctr.h>
#include <plat/cpu.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index da1112765a44..09b184adf31b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -936,8 +936,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
- link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
- + cryptlen);
+ be16_add_cpu(&link_tbl_ptr->len, cryptlen);
/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
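The talitos change is purely cosmetic: be16_add_cpu() is shorthand for the open-coded swap-add-swap it replaces. A quick sketch of the equivalence:

	__be16 len = cpu_to_be16(10);

	/* open-coded form removed above:                 */
	/*   len = cpu_to_be16(be16_to_cpu(len) + 4);     */
	/* equivalent helper form added above:            */
	be16_add_cpu(&len, 4);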
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 37185e6630cd..85ea7525fa36 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -41,8 +41,6 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
-#include <mach/clk.h>
-
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>
@@ -674,8 +672,10 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
mutex_lock(&aes_lock);
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
ctx->dd = dd;
dd->ctx = ctx;
@@ -759,8 +759,10 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
ret = clk_prepare_enable(dd->aes_clk);
- if (ret)
+ if (ret) {
+ mutex_unlock(&aes_lock);
return ret;
+ }
aes_set_key(dd);
@@ -1031,7 +1033,7 @@ out:
if (dd->buf_out)
dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
dd->buf_out, dd->dma_buf_out);
- if (IS_ERR(dd->aes_clk))
+ if (!IS_ERR(dd->aes_clk))
clk_put(dd->aes_clk);
if (aes_wq)
destroy_workqueue(aes_wq);
@@ -1045,7 +1047,7 @@ out:
return err;
}
-static int __devexit tegra_aes_remove(struct platform_device *pdev)
+static int tegra_aes_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
@@ -1072,7 +1074,7 @@ static int __devexit tegra_aes_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_aes_of_match[] __devinitdata = {
+static struct of_device_id tegra_aes_of_match[] = {
{ .compatible = "nvidia,tegra20-aes", },
{ .compatible = "nvidia,tegra30-aes", },
{ },
@@ -1080,7 +1082,7 @@ static struct of_device_id tegra_aes_of_match[] __devinitdata = {
static struct platform_driver tegra_aes_driver = {
.probe = tegra_aes_probe,
- .remove = __devexit_p(tegra_aes_remove),
+ .remove = tegra_aes_remove,
.driver = {
.name = "tegra-aes",
.owner = THIS_MODULE,
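Two small bug fixes ride along in tegra-aes: the error paths after clk_prepare_enable() now drop aes_lock before returning, and the probe error cleanup only calls clk_put() on a clock handle that was actually obtained (IS_ERR became !IS_ERR). A minimal sketch of the restored locking pattern, using the driver's own names:

	mutex_lock(&aes_lock);
	ret = clk_prepare_enable(dd->aes_clk);
	if (ret) {
		mutex_unlock(&aes_lock);	/* previously leaked on this error path */
		return ret;
	}
	/* ... work under the lock ... */
	mutex_unlock(&aes_lock);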
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index bc615cc56266..8bc5fef07e7a 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
@@ -30,8 +31,6 @@
#include <crypto/des.h>
#include <crypto/scatterwalk.h>
-#include <plat/ste_dma40.h>
-
#include <linux/platform_data/crypto-ux500.h>
#include <mach/hardware.h>
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index f6b0a6e2ea50..0f079be13305 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -30,7 +30,7 @@ if PM_DEVFREQ
comment "DEVFREQ Governors"
config DEVFREQ_GOV_SIMPLE_ONDEMAND
- bool "Simple Ondemand"
+ tristate "Simple Ondemand"
help
Chooses frequency based on the recent load on the device. Works
similar as ONDEMAND governor of CPUFREQ does. A device with
@@ -39,7 +39,7 @@ config DEVFREQ_GOV_SIMPLE_ONDEMAND
values to the governor with data field at devfreq_add_device().
config DEVFREQ_GOV_PERFORMANCE
- bool "Performance"
+ tristate "Performance"
help
Sets the frequency at the maximum available frequency.
This governor always returns UINT_MAX as frequency so that
@@ -47,7 +47,7 @@ config DEVFREQ_GOV_PERFORMANCE
at any time.
config DEVFREQ_GOV_POWERSAVE
- bool "Powersave"
+ tristate "Powersave"
help
Sets the frequency at the minimum available frequency.
This governor always returns 0 as frequency so that
@@ -55,7 +55,7 @@ config DEVFREQ_GOV_POWERSAVE
at any time.
config DEVFREQ_GOV_USERSPACE
- bool "Userspace"
+ tristate "Userspace"
help
Sets the frequency at the user specified one.
This governor returns the user configured frequency if there
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index b146d76f04cf..3b367973a802 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,21 +27,17 @@
#include <linux/hrtimer.h>
#include "governor.h"
-struct class *devfreq_class;
+static struct class *devfreq_class;
/*
- * devfreq_work periodically monitors every registered device.
- * The minimum polling interval is one jiffy. The polling interval is
- * determined by the minimum polling period among all polling devfreq
- * devices. The resolution of polling interval is one jiffy.
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
*/
-static bool polling;
static struct workqueue_struct *devfreq_wq;
-static struct delayed_work devfreq_work;
-
-/* wait removing if this is to be removed */
-static struct devfreq *wait_remove_device;
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
@@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev)
}
/**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq: the devfreq instance
+ * @freq: the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev;
+
+ for (lev = 0; lev < devfreq->profile->max_state; lev++)
+ if (freq == devfreq->profile->freq_table[lev])
+ return lev;
+
+ return -EINVAL;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq: the devfreq instance
+ * @freq: the update target frequency
+ */
+static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev, prev_lev;
+ unsigned long cur_time;
+
+ lev = devfreq_get_freq_level(devfreq, freq);
+ if (lev < 0)
+ return lev;
+
+ cur_time = jiffies;
+ devfreq->time_in_state[lev] +=
+ cur_time - devfreq->last_stat_updated;
+ if (freq != devfreq->previous_freq) {
+ prev_lev = devfreq_get_freq_level(devfreq,
+ devfreq->previous_freq);
+ devfreq->trans_table[(prev_lev *
+ devfreq->profile->max_state) + lev]++;
+ devfreq->total_trans++;
+ }
+ devfreq->last_stat_updated = cur_time;
+
+ return 0;
+}
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name: name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+ struct devfreq_governor *tmp_governor;
+
+ if (unlikely(IS_ERR_OR_NULL(name))) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+ if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+ return tmp_governor;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
* update_devfreq() - Reevaluate the device and configure frequency.
* @devfreq: the devfreq instance.
*
@@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
}
+ if (!devfreq->governor)
+ return -EINVAL;
+
/* Reevaluate the proper frequency */
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
@@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq)
if (err)
return err;
+ if (devfreq->profile->freq_table)
+ if (devfreq_update_status(devfreq, freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
+
devfreq->previous_freq = freq;
return err;
}
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work: the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+ int err;
+ struct devfreq *devfreq = container_of(work,
+ struct devfreq, work.work);
+
+ mutex_lock(&devfreq->lock);
+ err = update_devfreq(devfreq);
+ if (err)
+ dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+ INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+ if (devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is the same as devfreq_monitor_stop(), it is
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (devfreq->stop_polling) {
+ mutex_unlock(&devfreq->lock);
+ return;
+ }
+
+ devfreq->stop_polling = true;
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ goto out;
+
+ if (!delayed_work_pending(&devfreq->work) &&
+ devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ devfreq->stop_polling = false;
+
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq: the devfreq instance.
+ * @delay: new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+ unsigned int cur_delay = devfreq->profile->polling_ms;
+ unsigned int new_delay = *delay;
+
+ mutex_lock(&devfreq->lock);
+ devfreq->profile->polling_ms = new_delay;
+
+ if (devfreq->stop_polling)
+ goto out;
+
+ /* if new delay is zero, stop polling */
+ if (!new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ return;
+ }
+
+ /* if current delay is zero, start polling with new delay */
+ if (!cur_delay) {
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ goto out;
+ }
+
+ /* if current delay is greater than new delay, restart polling */
+ if (cur_delay > new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ }
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
* has been changed out of devfreq framework.
- * @nb the notifier_block (supposed to be devfreq->nb)
- * @type not used
- * @devp not used
+ * @nb: the notifier_block (supposed to be devfreq->nb)
+ * @type: not used
+ * @devp: not used
*
* Called by a notifier that uses devfreq->nb.
*/
@@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
}
/**
- * _remove_devfreq() - Remove devfreq from the device.
+ * _remove_devfreq() - Remove devfreq from the list and release its resources.
* @devfreq: the devfreq struct
* @skip: skip calling device_unregister().
- *
- * Note that the caller should lock devfreq->lock before calling
- * this. _remove_devfreq() will unlock it and free devfreq
- * internally. devfreq_list_lock should be locked by the caller
- * as well (not relased at return)
- *
- * Lock usage:
- * devfreq->lock: locked before call.
- * unlocked at return (and freed)
- * devfreq_list_lock: locked before call.
- * kept locked at return.
- * if devfreq is centrally polled.
- *
- * Freed memory:
- * devfreq
*/
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
- if (!mutex_is_locked(&devfreq->lock)) {
- WARN(true, "devfreq->lock must be locked by the caller.\n");
- return;
- }
- if (!devfreq->governor->no_central_polling &&
- !mutex_is_locked(&devfreq_list_lock)) {
- WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+ mutex_lock(&devfreq_list_lock);
+ if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+ mutex_unlock(&devfreq_list_lock);
+ dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
return;
}
+ list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
- if (devfreq->being_removed)
- return;
-
- devfreq->being_removed = true;
+ if (devfreq->governor)
+ devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
- if (devfreq->governor->exit)
- devfreq->governor->exit(devfreq);
-
if (!skip && get_device(&devfreq->dev)) {
device_unregister(&devfreq->dev);
put_device(&devfreq->dev);
}
- if (!devfreq->governor->no_central_polling)
- list_del(&devfreq->node);
-
- mutex_unlock(&devfreq->lock);
mutex_destroy(&devfreq->lock);
-
kfree(devfreq);
}
@@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip)
static void devfreq_dev_release(struct device *dev)
{
struct devfreq *devfreq = to_devfreq(dev);
- bool central_polling = !devfreq->governor->no_central_polling;
-
- /*
- * If devfreq_dev_release() was called by device_unregister() of
- * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
- * being_removed is already set. This also partially checks the case
- * where devfreq_dev_release() is called from a thread other than
- * the one called _remove_devfreq(); however, this case is
- * dealt completely with another following being_removed check.
- *
- * Because being_removed is never being
- * unset, we do not need to worry about race conditions on
- * being_removed.
- */
- if (devfreq->being_removed)
- return;
-
- if (central_polling)
- mutex_lock(&devfreq_list_lock);
-
- mutex_lock(&devfreq->lock);
- /*
- * Check being_removed flag again for the case where
- * devfreq_dev_release() was called in a thread other than the one
- * possibly called _remove_devfreq().
- */
- if (devfreq->being_removed) {
- mutex_unlock(&devfreq->lock);
- goto out;
- }
-
- /* devfreq->lock is unlocked and removed in _removed_devfreq() */
_remove_devfreq(devfreq, true);
-
-out:
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
-}
-
-/**
- * devfreq_monitor() - Periodically poll devfreq objects.
- * @work: the work struct used to run devfreq_monitor periodically.
- *
- */
-static void devfreq_monitor(struct work_struct *work)
-{
- static unsigned long last_polled_at;
- struct devfreq *devfreq, *tmp;
- int error;
- unsigned long jiffies_passed;
- unsigned long next_jiffies = ULONG_MAX, now = jiffies;
- struct device *dev;
-
- /* Initially last_polled_at = 0, polling every device at bootup */
- jiffies_passed = now - last_polled_at;
- last_polled_at = now;
- if (jiffies_passed == 0)
- jiffies_passed = 1;
-
- mutex_lock(&devfreq_list_lock);
- list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
- mutex_lock(&devfreq->lock);
- dev = devfreq->dev.parent;
-
- /* Do not remove tmp for a while */
- wait_remove_device = tmp;
-
- if (devfreq->governor->no_central_polling ||
- devfreq->next_polling == 0) {
- mutex_unlock(&devfreq->lock);
- continue;
- }
- mutex_unlock(&devfreq_list_lock);
-
- /*
- * Reduce more next_polling if devfreq_wq took an extra
- * delay. (i.e., CPU has been idled.)
- */
- if (devfreq->next_polling <= jiffies_passed) {
- error = update_devfreq(devfreq);
-
- /* Remove a devfreq with an error. */
- if (error && error != -EAGAIN) {
-
- dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
- error, devfreq->governor->name);
-
- /*
- * Unlock devfreq before locking the list
- * in order to avoid deadlock with
- * find_device_devfreq or others
- */
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- /* Check if devfreq is already removed */
- if (IS_ERR(find_device_devfreq(dev)))
- continue;
- mutex_lock(&devfreq->lock);
- /* This unlocks devfreq->lock and free it */
- _remove_devfreq(devfreq, false);
- continue;
- }
- devfreq->next_polling = devfreq->polling_jiffies;
- } else {
- devfreq->next_polling -= jiffies_passed;
- }
-
- if (devfreq->next_polling)
- next_jiffies = (next_jiffies > devfreq->next_polling) ?
- devfreq->next_polling : next_jiffies;
-
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- }
- wait_remove_device = NULL;
- mutex_unlock(&devfreq_list_lock);
-
- if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
- } else {
- polling = false;
- }
}
/**
* devfreq_add_device() - Add devfreq feature to the device
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
- * @governor: the policy to choose frequency.
+ * @governor_name: name of the policy to choose frequency.
* @data: private data for the governor. The devfreq framework does not
* touch this value.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
- const struct devfreq_governor *governor,
+ const char *governor_name,
void *data)
{
struct devfreq *devfreq;
+ struct devfreq_governor *governor;
int err = 0;
- if (!dev || !profile || !governor) {
+ if (!dev || !profile || !governor_name) {
dev_err(dev, "%s: Invalid parameters.\n", __func__);
return ERR_PTR(-EINVAL);
}
-
- if (!governor->no_central_polling) {
- mutex_lock(&devfreq_list_lock);
- devfreq = find_device_devfreq(dev);
- mutex_unlock(&devfreq_list_lock);
- if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
- err = -EINVAL;
- goto err_out;
- }
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+ if (!IS_ERR(devfreq)) {
+ dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ err = -EINVAL;
+ goto err_out;
}
devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
devfreq->profile = profile;
- devfreq->governor = governor;
+ strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->data = data;
- devfreq->next_polling = devfreq->polling_jiffies
- = msecs_to_jiffies(devfreq->profile->polling_ms);
devfreq->nb.notifier_call = devfreq_notifier_call;
+ devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->last_stat_updated = jiffies;
+
dev_set_name(&devfreq->dev, dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
put_device(&devfreq->dev);
+ mutex_unlock(&devfreq->lock);
goto err_dev;
}
- if (governor->init)
- err = governor->init(devfreq);
- if (err)
- goto err_init;
-
mutex_unlock(&devfreq->lock);
- if (governor->no_central_polling)
- goto out;
-
mutex_lock(&devfreq_list_lock);
-
list_add(&devfreq->node, &devfreq_list);
- if (devfreq_wq && devfreq->next_polling && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- devfreq->next_polling);
- }
+ governor = find_devfreq_governor(devfreq->governor_name);
+ if (!IS_ERR(governor))
+ devfreq->governor = governor;
+ if (devfreq->governor)
+ err = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
mutex_unlock(&devfreq_list_lock);
-out:
+ if (err) {
+ dev_err(dev, "%s: Unable to start governor for the device\n",
+ __func__);
+ goto err_init;
+ }
+
return devfreq;
err_init:
+ list_del(&devfreq->node);
device_unregister(&devfreq->dev);
err_dev:
- mutex_unlock(&devfreq->lock);
kfree(devfreq);
err_out:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(devfreq_add_device);
/**
* devfreq_remove_device() - Remove devfreq feature from a device.
- * @devfreq the devfreq instance to be removed
+ * @devfreq: the devfreq instance to be removed
*/
int devfreq_remove_device(struct devfreq *devfreq)
{
- bool central_polling;
+ if (!devfreq)
+ return -EINVAL;
+
+ _remove_devfreq(devfreq, false);
+ return 0;
+}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
if (!devfreq)
return -EINVAL;
- central_polling = !devfreq->governor->no_central_polling;
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor: the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (!IS_ERR(g)) {
+ pr_err("%s: governor %s already registered\n", __func__,
+ g->name);
+ err = -EINVAL;
+ goto err_out;
+ }
- if (central_polling) {
- mutex_lock(&devfreq_list_lock);
- while (wait_remove_device == devfreq) {
- mutex_unlock(&devfreq_list_lock);
- schedule();
- mutex_lock(&devfreq_list_lock);
+ list_add(&governor->node, &devfreq_governor_list);
+
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret = 0;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* The following should never occur */
+ if (devfreq->governor) {
+ dev_warn(dev,
+ "%s: Governor %s already present\n",
+ __func__, devfreq->governor->name);
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev,
+ "%s: Governor %s stop = %d\n",
+ __func__,
+ devfreq->governor->name, ret);
+ }
+ /* Fall through */
+ }
+ devfreq->governor = governor;
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s start=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
}
}
- mutex_lock(&devfreq->lock);
- _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
+err_out:
+ mutex_unlock(&devfreq_list_lock);
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
- return 0;
+/**
+ * devfreq_remove_governor() - Remove a devfreq governor.
+ * @governor: the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (IS_ERR(g)) {
+ pr_err("%s: governor %s not registered\n", __func__,
+ governor->name);
+ err = PTR_ERR(g);
+ goto err_out;
+ }
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* we should have a devfreq governor! */
+ if (!devfreq->governor) {
+ dev_warn(dev, "%s: Governor %s NOT present\n",
+ __func__, governor->name);
+ continue;
+ /* Fall through */
+ }
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s stop=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
+ devfreq->governor = NULL;
+ }
+ }
+
+ list_del(&governor->node);
+err_out:
+ mutex_unlock(&devfreq_list_lock);
+
+ return err;
}
+EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t show_governor(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (!to_devfreq(dev)->governor)
+ return -EINVAL;
+
return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
+static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int ret;
+ char str_governor[DEVFREQ_NAME_LEN + 1];
+ struct devfreq_governor *governor;
+
+ ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&devfreq_list_lock);
+ governor = find_devfreq_governor(str_governor);
+ if (IS_ERR(governor)) {
+ ret = PTR_ERR(governor);
+ goto out;
+ }
+ if (df->governor == governor)
+ goto out;
+
+ if (df->governor) {
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+ __func__, df->governor->name, ret);
+ goto out;
+ }
+ }
+ df->governor = governor;
+ strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret)
+ dev_warn(dev, "%s: Governor %s not started(%d)\n",
+ __func__, df->governor->name, ret);
+out:
+ mutex_unlock(&devfreq_list_lock);
+
+ if (!ret)
+ ret = count;
+ return ret;
+}
+static ssize_t show_available_governors(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq_governor *tmp_governor;
+ ssize_t count = 0;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%s ", tmp_governor->name);
+ mutex_unlock(&devfreq_list_lock);
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
static ssize_t show_freq(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ unsigned long freq;
+ struct devfreq *devfreq = to_devfreq(dev);
+
+ if (devfreq->profile->get_cur_freq &&
+ !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+ return sprintf(buf, "%lu\n", freq);
+
+ return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+
+static ssize_t show_target_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
@@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev,
unsigned int value;
int ret;
+ if (!df->governor)
+ return -EINVAL;
+
ret = sscanf(buf, "%u", &value);
if (ret != 1)
- goto out;
-
- mutex_lock(&df->lock);
- df->profile->polling_ms = value;
- df->next_polling = df->polling_jiffies
- = msecs_to_jiffies(value);
- mutex_unlock(&df->lock);
+ return -EINVAL;
+ df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
ret = count;
- if (df->governor->no_central_polling)
- goto out;
-
- mutex_lock(&devfreq_list_lock);
- if (df->next_polling > 0 && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- df->next_polling);
- }
- mutex_unlock(&devfreq_list_lock);
-out:
return ret;
}
-static ssize_t show_central_polling(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n",
- !to_devfreq(dev)->governor->no_central_polling);
-}
-
static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
max = df->max_freq;
@@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
min = df->min_freq;
@@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
+static ssize_t show_available_freqs(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(d);
+ struct device *dev = df->dev.parent;
+ struct opp *opp;
+ ssize_t count = 0;
+ unsigned long freq = 0;
+
+ rcu_read_lock();
+ do {
+ opp = opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%lu ", freq);
+ freq++;
+ } while (1);
+ rcu_read_unlock();
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
+static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ ssize_t len;
+ int i, j, err;
+ unsigned int max_state = devfreq->profile->max_state;
+
+ err = devfreq_update_status(devfreq, devfreq->previous_freq);
+ if (err)
+ return 0;
+
+ len = sprintf(buf, " From : To\n");
+ len += sprintf(buf + len, " :");
+ for (i = 0; i < max_state; i++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->profile->freq_table[i]);
+
+ len += sprintf(buf + len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
+ if (devfreq->profile->freq_table[i]
+ == devfreq->previous_freq) {
+ len += sprintf(buf + len, "*");
+ } else {
+ len += sprintf(buf + len, " ");
+ }
+ len += sprintf(buf + len, "%8u:",
+ devfreq->profile->freq_table[i]);
+ for (j = 0; j < max_state; j++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->trans_table[(i * max_state) + j]);
+ len += sprintf(buf + len, "%10u\n",
+ jiffies_to_msecs(devfreq->time_in_state[i]));
+ }
+
+ len += sprintf(buf + len, "Total transition : %u\n",
+ devfreq->total_trans);
+ return len;
+}
+
static struct device_attribute devfreq_attrs[] = {
- __ATTR(governor, S_IRUGO, show_governor, NULL),
+ __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
+ __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
- __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+ __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
+ __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
store_polling_interval),
__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+ __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
{ },
};
-/**
- * devfreq_start_polling() - Initialize data structure for devfreq framework and
- * start polling registered devfreq devices.
- */
-static int __init devfreq_start_polling(void)
-{
- mutex_lock(&devfreq_list_lock);
- polling = false;
- devfreq_wq = create_freezable_workqueue("devfreq_wq");
- INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
- mutex_unlock(&devfreq_list_lock);
-
- devfreq_monitor(&devfreq_work.work);
- return 0;
-}
-late_initcall(devfreq_start_polling);
-
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +962,15 @@ static int __init devfreq_init(void)
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_class);
}
+
+ devfreq_wq = create_freezable_workqueue("devfreq_wq");
+ if (IS_ERR(devfreq_wq)) {
+ class_destroy(devfreq_class);
+ pr_err("%s: couldn't create workqueue\n", __FILE__);
+ return PTR_ERR(devfreq_wq);
+ }
devfreq_class->dev_attrs = devfreq_attrs;
+
return 0;
}
subsys_initcall(devfreq_init);
@@ -630,6 +978,7 @@ subsys_initcall(devfreq_init);
static void __exit devfreq_exit(void)
{
class_destroy(devfreq_class);
+ destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
@@ -641,10 +990,15 @@ module_exit(devfreq_exit);
/**
* devfreq_recommended_opp() - Helper function to get proper OPP for the
* freq value given to target callback.
- * @dev The devfreq user device. (parent of devfreq)
- * @freq The frequency given to target function
- * @flags Flags handed from devfreq framework.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @freq: The frequency given to target function
+ * @flags: Flags handed from devfreq framework.
*
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
u32 flags)
@@ -656,14 +1010,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
opp = opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
opp = opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_floor(dev, freq);
}
@@ -674,35 +1028,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
* for any changes in the OPP availability
* changes
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*/
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_register(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_register(nh, &devfreq->nb);
+
+ return ret;
}
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
* notified for any changes in the OPP
* availability changes anymore.
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*
* At exit() callback of devfreq_dev_profile, this must be included if
* devfreq_recommended_opp is used.
*/
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
+
+ return ret;
}
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 88ddc77a9bb1..46d94e9e95b5 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@ enum busclk_level_idx {
#define EX4210_LV_NUM (LV_2 + 1)
#define EX4x12_LV_NUM (LV_4 + 1)
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate: Frequency in hertz
+ * @volt: Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+ unsigned long rate;
+ unsigned long volt;
+};
+
struct busfreq_data {
enum exynos4_busf_type type;
struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
bool disabled;
struct regulator *vdd_int;
struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct opp *curr_opp;
+ struct busfreq_opp_info curr_oppinfo;
struct exynos4_ppmu dmc[2];
struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
};
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ if (oppi->rate == exynos4210_busclk_table[index].clk)
break;
if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
return 0;
}
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ if (oppi->rate == exynos4x12_mifclk_table[index].clk)
break;
if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
return -EINVAL;
}
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
- struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi,
+ struct busfreq_opp_info *oldoppi)
{
int err = 0, tmp;
- unsigned long volt = opp_get_voltage(opp);
+ unsigned long volt = oppi->volt;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
if (err)
break;
- tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ tmp = exynos4x12_get_intspec(oppi->rate);
if (tmp < 0) {
err = tmp;
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
}
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
/* Try to recover */
if (err)
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
- unsigned long freq = opp_get_freq(opp);
- unsigned long old_freq = opp_get_freq(data->curr_opp);
+ struct opp *opp;
+ unsigned long freq;
+ unsigned long old_freq = data->curr_oppinfo.rate;
+ struct busfreq_opp_info new_oppinfo;
- if (IS_ERR(opp))
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, _freq, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ freq = new_oppinfo.rate;
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+ dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
mutex_lock(&data->lock);
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq < freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
if (old_freq != freq) {
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq > freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
out:
mutex_unlock(&data->lock);
return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
exynos4_read_ppmu(data);
busier_dmc = exynos4_get_busier_dmc(data);
- stat->current_frequency = opp_get_freq(data->curr_opp);
+ stat->current_frequency = data->curr_oppinfo.rate;
if (busier_dmc)
addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
struct opp *opp;
+ struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
+ rcu_read_lock();
opp = opp_find_freq_floor(data->dev, &maxfreq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(data->dev, "%s: unable to find a min freq\n",
+ __func__);
+ return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto unlock;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
if (err)
goto unlock;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
unlock:
mutex_unlock(&data->lock);
if (err)
@@ -980,14 +1016,14 @@ unlock:
return NOTIFY_DONE;
}
-static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+static int exynos4_busfreq_probe(struct platform_device *pdev)
{
struct busfreq_data *data;
struct opp *opp;
struct device *dev = &pdev->dev;
int err = 0;
- data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
if (data == NULL) {
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
@@ -1012,75 +1048,60 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
err = -EINVAL;
}
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
if (data->type == TYPE_BUSF_EXYNOS4x12) {
- data->vdd_mif = regulator_get(dev, "vdd_mif");
+ data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
if (IS_ERR(data->vdd_mif)) {
dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
- err = PTR_ERR(data->vdd_mif);
- regulator_put(data->vdd_int);
- goto err_regulator;
-
+ return PTR_ERR(data->vdd_mif);
}
}
+ rcu_read_lock();
opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos4_devfreq_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ exynos4_devfreq_profile.initial_freq);
+ return PTR_ERR(opp);
}
- data->curr_opp = opp;
+ data->curr_oppinfo.rate = opp_get_freq(opp);
+ data->curr_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
platform_set_drvdata(pdev, data);
busfreq_mon_reset(data);
data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
- &devfreq_simple_ondemand, NULL);
- if (IS_ERR(data->devfreq)) {
- err = PTR_ERR(data->devfreq);
- goto err_opp_add;
- }
+ "simple_ondemand", NULL);
+ if (IS_ERR(data->devfreq))
+ return PTR_ERR(data->devfreq);
devfreq_register_opp_notifier(dev, data->devfreq);
err = register_pm_notifier(&data->pm_notifier);
if (err) {
dev_err(dev, "Failed to setup pm notifier\n");
- goto err_devfreq_add;
+ devfreq_remove_device(data->devfreq);
+ return err;
}
return 0;
-err_devfreq_add:
- devfreq_remove_device(data->devfreq);
-err_opp_add:
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- regulator_put(data->vdd_int);
-err_regulator:
- kfree(data);
- return err;
}
-static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+static int exynos4_busfreq_remove(struct platform_device *pdev)
{
struct busfreq_data *data = platform_get_drvdata(pdev);
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- kfree(data);
return 0;
}
@@ -1106,7 +1127,7 @@ static const struct platform_device_id exynos4_busfreq_id[] = {
static struct platform_driver exynos4_busfreq_driver = {
.probe = exynos4_busfreq_probe,
- .remove = __devexit_p(exynos4_busfreq_remove),
+ .remove = exynos4_busfreq_remove,
.id_table = exynos4_busfreq_id,
.driver = {
.name = "exynos4-busfreq",
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index ea7f13c58ded..fad7d6321978 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,24 @@
#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
/* Caution: devfreq->lock must be locked before calling update_devfreq */
extern int update_devfreq(struct devfreq *devfreq);
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+ unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
#endif /* _GOVERNOR_H */
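With governors registered through devfreq_add_governor(), a driver now requests one by name instead of referencing the governor symbol directly, as the exynos4 hunk above does with "simple_ondemand". A minimal sketch, where my_profile is a placeholder devfreq_dev_profile the driver has already filled in:

struct devfreq *devfreq;

devfreq = devfreq_add_device(dev, &my_profile, "simple_ondemand", NULL);
if (IS_ERR(devfreq))
	return PTR_ERR(devfreq);

Looking the governor up by name decouples drivers from governor implementations, which is what allows each governor below to become a self-registering module.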
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index af75ddd4f158..c72f942f30a8 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
@@ -26,14 +27,41 @@ static int devfreq_performance_func(struct devfreq *df,
return 0;
}
-static int performance_init(struct devfreq *devfreq)
+static int devfreq_performance_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_performance = {
+static struct devfreq_governor devfreq_performance = {
.name = "performance",
- .init = performance_init,
.get_target_freq = devfreq_performance_func,
- .no_central_polling = true,
+ .event_handler = devfreq_performance_handler,
};
+
+static int __init devfreq_performance_init(void)
+{
+ return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_performance);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index fec0cdbd2477..0c6bed567e6d 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
@@ -23,14 +24,41 @@ static int devfreq_powersave_func(struct devfreq *df,
return 0;
}
-static int powersave_init(struct devfreq *devfreq)
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_powersave = {
+static struct devfreq_governor devfreq_powersave = {
.name = "powersave",
- .init = powersave_init,
.get_target_freq = devfreq_powersave_func,
- .no_central_polling = true,
+ .event_handler = devfreq_powersave_handler,
};
+
+static int __init devfreq_powersave_init(void)
+{
+ return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_powersave);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index a2e3eae79011..0720ba84ca92 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -10,8 +10,10 @@
*/
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
+#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
@@ -88,7 +90,58 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
return 0;
}
-const struct devfreq_governor devfreq_simple_ondemand = {
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq_monitor_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ devfreq_monitor_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ devfreq_interval_update(devfreq, (unsigned int *)data);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq_monitor_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ devfreq_monitor_resume(devfreq);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
.name = "simple_ondemand",
.get_target_freq = devfreq_simple_ondemand_func,
+ .event_handler = devfreq_simple_ondemand_handler,
};
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+ return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0681246fc89d..35de6e83c1fe 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -14,6 +14,7 @@
#include <linux/devfreq.h>
#include <linux/pm.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include "governor.h"
struct userspace_data {
@@ -116,10 +117,46 @@ static void userspace_exit(struct devfreq *devfreq)
devfreq->data = NULL;
}
-const struct devfreq_governor devfreq_userspace = {
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret = 0;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ ret = userspace_init(devfreq);
+ break;
+ case DEVFREQ_GOV_STOP:
+ userspace_exit(devfreq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
.name = "userspace",
.get_target_freq = devfreq_userspace_func,
- .init = userspace_init,
- .exit = userspace_exit,
- .no_central_polling = true,
+ .event_handler = devfreq_userspace_handler,
};
+
+static int __init devfreq_userspace_init(void)
+{
+ return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_userspace);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 24225f0fdcd8..64b048d7fba7 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -228,6 +228,20 @@ static void dmatest_callback(void *arg)
wake_up_all(done->wait);
}
+static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
+}
+
+static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
+ unsigned int count)
+{
+ while (count--)
+ dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
@@ -353,15 +367,35 @@ static int dmatest_func(void *data)
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev->dev, dma_srcs[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "src_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ src_off, len);
+ failed_tests++;
+ continue;
+ }
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dma_dsts[i]);
+ if (ret) {
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
+ pr_warn("%s: #%u: mapping error %d with "
+ "dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, ret,
+ dst_off, test_buf_size);
+ failed_tests++;
+ continue;
+ }
}
-
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off,
@@ -383,13 +417,8 @@ static int dmatest_func(void *data)
}
if (!tx) {
- for (i = 0; i < src_cnt; i++)
- dma_unmap_single(dev->dev, dma_srcs[i], len,
- DMA_TO_DEVICE);
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i],
- test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_src(dev->dev, dma_srcs, len, src_cnt);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
@@ -443,9 +472,7 @@ static int dmatest_func(void *data)
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- for (i = 0; i < dst_cnt; i++)
- dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
- DMA_BIDIRECTIONAL);
+ unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
error_count = 0;
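The dmatest hunks above add the dma_mapping_error() checks that the DMA-mapping API requires after every dma_map_single() call, unwinding whatever has already been mapped when one fails. A minimal sketch of that pattern for a single buffer, with my_map_one as a hypothetical helper:

static int my_map_one(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* the returned cookie must be checked before it is used */
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*out = dma;
	return 0;
}

When mapping arrays, the error path mirrors unmap_src()/unmap_dst() above: everything mapped so far is unmapped before the test iteration is abandoned.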
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index c4b0eb3cde81..3e8ba02ba292 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1462,7 +1462,7 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
-static int __devinit dw_probe(struct platform_device *pdev)
+static int dw_probe(struct platform_device *pdev)
{
struct dw_dma_platform_data *pdata;
struct resource *io;
@@ -1634,7 +1634,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit dw_remove(struct platform_device *pdev)
+static int dw_remove(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
@@ -1700,7 +1700,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static struct platform_driver dw_driver = {
- .remove = __devexit_p(dw_remove),
+ .remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 05aea3ce8506..f424298f1ac5 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -545,7 +545,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
INIT_LIST_HEAD(&dma->channels);
}
-static int __devinit edma_probe(struct platform_device *pdev)
+static int edma_probe(struct platform_device *pdev)
{
struct edma_cc *ecc;
int ret;
@@ -585,7 +585,7 @@ err_reg1:
return ret;
}
-static int __devexit edma_remove(struct platform_device *pdev)
+static int edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
@@ -598,7 +598,7 @@ static int __devexit edma_remove(struct platform_device *pdev)
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove = __devexit_p(edma_remove),
+ .remove = edma_remove,
.driver = {
.name = "edma-dma-engine",
.owner = THIS_MODULE,
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 094437b9d823..4fc2980556ad 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1221,7 +1221,7 @@ out_unwind:
/* OpenFirmware Subsystem */
/*----------------------------------------------------------------------------*/
-static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
+static int fsl_dma_chan_probe(struct fsldma_device *fdev,
struct device_node *node, u32 feature, const char *compatible)
{
struct fsldma_chan *chan;
@@ -1324,7 +1324,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan)
kfree(chan);
}
-static int __devinit fsldma_of_probe(struct platform_device *op)
+static int fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 7d9554cc4976..a7dcf78b1ff8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -29,7 +29,6 @@
#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>
-#include <mach/hardware.h>
#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
@@ -167,6 +166,12 @@ struct imxdma_channel {
int slot_2d;
};
+enum imx_dma_type {
+ IMX1_DMA,
+ IMX21_DMA,
+ IMX27_DMA,
+};
+
struct imxdma_engine {
struct device *dev;
struct device_dma_parameters dma_parms;
@@ -177,8 +182,40 @@ struct imxdma_engine {
spinlock_t lock;
struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
struct imxdma_channel channel[IMX_DMA_CHANNELS];
+ enum imx_dma_type devtype;
};
+static struct platform_device_id imx_dma_devtype[] = {
+ {
+ .name = "imx1-dma",
+ .driver_data = IMX1_DMA,
+ }, {
+ .name = "imx21-dma",
+ .driver_data = IMX21_DMA,
+ }, {
+ .name = "imx27-dma",
+ .driver_data = IMX27_DMA,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
+
+static inline int is_imx1_dma(struct imxdma_engine *imxdma)
+{
+ return imxdma->devtype == IMX1_DMA;
+}
+
+static inline int is_imx21_dma(struct imxdma_engine *imxdma)
+{
+ return imxdma->devtype == IMX21_DMA;
+}
+
+static inline int is_imx27_dma(struct imxdma_engine *imxdma)
+{
+ return imxdma->devtype == IMX27_DMA;
+}
+
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct imxdma_channel, chan);
@@ -212,7 +249,9 @@ static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
- if (cpu_is_mx27())
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+
+ if (is_imx27_dma(imxdma))
return imxdmac->hw_chaining;
else
return 0;
@@ -267,7 +306,7 @@ static void imxdma_enable_hw(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
- if ((cpu_is_mx21() || cpu_is_mx27()) &&
+ if (!is_imx1_dma(imxdma) &&
d->sg && imxdma_hw_chain(imxdmac)) {
d->sg = sg_next(d->sg);
if (d->sg) {
@@ -436,7 +475,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
struct imxdma_engine *imxdma = dev_id;
int i, disr;
- if (cpu_is_mx21() || cpu_is_mx27())
+ if (!is_imx1_dma(imxdma))
imxdma_err_handler(irq, dev_id);
disr = imx_dmav1_readl(imxdma, DMA_DISR);
@@ -645,9 +684,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break;
}
- imxdmac->hw_chaining = 1;
- if (!imxdma_hw_chain(imxdmac))
- return -EINVAL;
+ imxdmac->hw_chaining = 0;
+
imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
CCR_REN;
@@ -961,35 +999,32 @@ static void imxdma_issue_pending(struct dma_chan *chan)
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
+ struct resource *res;
int ret, i;
+ int irq, irq_err;
-
- imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
+ imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
- if (cpu_is_mx1()) {
- imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
- } else if (cpu_is_mx21()) {
- imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
- } else if (cpu_is_mx27()) {
- imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
- } else {
- kfree(imxdma);
- return 0;
- }
+ imxdma->devtype = pdev->id_entry->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!imxdma->base)
+ return -EADDRNOTAVAIL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(imxdma->dma_ipg)) {
- ret = PTR_ERR(imxdma->dma_ipg);
- goto err_clk;
- }
+ if (IS_ERR(imxdma->dma_ipg))
+ return PTR_ERR(imxdma->dma_ipg);
imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(imxdma->dma_ahb)) {
- ret = PTR_ERR(imxdma->dma_ahb);
- goto err_clk;
- }
+ if (IS_ERR(imxdma->dma_ahb))
+ return PTR_ERR(imxdma->dma_ahb);
clk_prepare_enable(imxdma->dma_ipg);
clk_prepare_enable(imxdma->dma_ahb);
@@ -997,18 +1032,25 @@ static int __init imxdma_probe(struct platform_device *pdev)
/* reset DMA module */
imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
- if (cpu_is_mx1()) {
- ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
+ if (is_imx1_dma(imxdma)) {
+ ret = devm_request_irq(&pdev->dev, irq,
+ dma_irq_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
- goto err_enable;
+ goto err;
+ }
+
+ irq_err = platform_get_irq(pdev, 1);
+ if (irq_err < 0) {
+ ret = irq_err;
+ goto err;
}
- ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
+ ret = devm_request_irq(&pdev->dev, irq_err,
+ imxdma_err_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
- free_irq(MX1_DMA_INT, NULL);
- goto err_enable;
+ goto err;
}
}
@@ -1038,14 +1080,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
for (i = 0; i < IMX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
- if (cpu_is_mx21() || cpu_is_mx27()) {
- ret = request_irq(MX2x_INT_DMACH0 + i,
+ if (!is_imx1_dma(imxdma)) {
+ ret = devm_request_irq(&pdev->dev, irq + i,
dma_irq_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register IRQ %d "
"for DMA channel %d\n",
- MX2x_INT_DMACH0 + i, i);
- goto err_init;
+ irq + i, i);
+ goto err;
}
init_timer(&imxdmac->watchdog);
imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1091,46 +1133,25 @@ static int __init imxdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&imxdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
- goto err_init;
+ goto err;
}
return 0;
-err_init:
-
- if (cpu_is_mx21() || cpu_is_mx27()) {
- while (--i >= 0)
- free_irq(MX2x_INT_DMACH0 + i, NULL);
- } else if cpu_is_mx1() {
- free_irq(MX1_DMA_INT, NULL);
- free_irq(MX1_DMA_ERR, NULL);
- }
-err_enable:
+err:
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
-err_clk:
- kfree(imxdma);
return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
- int i;
dma_async_device_unregister(&imxdma->dma_device);
- if (cpu_is_mx21() || cpu_is_mx27()) {
- for (i = 0; i < IMX_DMA_CHANNELS; i++)
- free_irq(MX2x_INT_DMACH0 + i, NULL);
- } else if cpu_is_mx1() {
- free_irq(MX1_DMA_INT, NULL);
- free_irq(MX1_DMA_ERR, NULL);
- }
-
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
- kfree(imxdma);
return 0;
}
@@ -1139,6 +1160,7 @@ static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
},
+ .id_table = imx_dma_devtype,
.remove = __exit_p(imxdma_remove),
};
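The imx-dma conversion above replaces the cpu_is_mx*() runtime checks with a platform_device_id table whose driver_data encodes the SoC variant; probe then reads the variant from the matched entry. A minimal sketch of that lookup, with my_devtype and my_probe as placeholder names, reusing the enum added in the hunk, and assuming the table is wired into the driver's .id_table as the patch does:

static const struct platform_device_id my_devtype[] = {
	{ .name = "imx1-dma",  .driver_data = IMX1_DMA },
	{ .name = "imx27-dma", .driver_data = IMX27_DMA },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, my_devtype);

static int my_probe(struct platform_device *pdev)
{
	/* the matched id_table entry carries the SoC variant */
	enum imx_dma_type devtype = pdev->id_entry->driver_data;

	dev_info(&pdev->dev, "probing DMA variant %d\n", (int)devtype);
	return 0;
}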
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c099ca0846f4..f082aa3a918c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -40,7 +40,6 @@
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
-#include <mach/hardware.h>
#include "dmaengine.h"
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 02b21d7d38e5..a0de82e21a7c 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1225,7 +1225,7 @@ static void middma_shutdown(struct pci_dev *pdev)
* Initialize the PCI device, map BARs, query driver data.
* Call setup_dma to complete controller and chan initialization
*/
-static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
+static int intel_mid_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct middma_device *device;
@@ -1308,7 +1308,7 @@ err_enable_device:
* Free up all resources and data
* Call shutdown_dma to complete controller and chan cleanup
*/
-static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
+static void intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
@@ -1432,7 +1432,7 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.name = "Intel MID DMA",
.id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe,
- .remove = __devexit_p(intel_mid_dma_remove),
+ .remove = intel_mid_dma_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &intel_mid_dma_pm,
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038e06b1..9b041858d10d 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -242,8 +242,7 @@ static struct dca_ops ioat_dca_ops = {
};
-struct dca_provider * __devinit
-ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -408,8 +407,7 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider * __devinit
-ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -604,8 +602,24 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider * __devinit
-ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+static inline int dca3_tag_map_invalid(u8 *tag_map)
+{
+ /*
+ * If the tag map is not programmed by the BIOS the default is:
+ * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
+ *
+ * This is an invalid map and will result in only 2 possible tags
+ * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
+ * this entire definition is invalid.
+ */
+ return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
+ (tag_map[1] == DCA_TAG_MAP_VALID) &&
+ (tag_map[2] == DCA_TAG_MAP_VALID) &&
+ (tag_map[3] == DCA_TAG_MAP_VALID) &&
+ (tag_map[4] == DCA_TAG_MAP_VALID));
+}
+
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -674,6 +688,12 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
}
+ if (dca3_tag_map_invalid(ioatdca->tag_map)) {
+ dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+ free_dca_provider(dca);
+ return NULL;
+ }
+
err = register_dca_provider(dca, &pdev->dev);
if (err) {
free_dca_provider(dca);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 73b2b65cb1de..1a68a8ba87e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -782,7 +782,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
*/
#define IOAT_TEST_SIZE 2000
-static void __devinit ioat_dma_test_callback(void *dma_async_param)
+static void ioat_dma_test_callback(void *dma_async_param)
{
struct completion *cmp = dma_async_param;
@@ -793,7 +793,7 @@ static void __devinit ioat_dma_test_callback(void *dma_async_param)
* ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
* @device: device to be tested
*/
-int __devinit ioat_dma_self_test(struct ioatdma_device *device)
+int ioat_dma_self_test(struct ioatdma_device *device)
{
int i;
u8 *src;
@@ -994,7 +994,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
-int __devinit ioat_probe(struct ioatdma_device *device)
+int ioat_probe(struct ioatdma_device *device)
{
int err = -ENODEV;
struct dma_device *dma = &device->common;
@@ -1049,7 +1049,7 @@ err_dma_pool:
return err;
}
-int __devinit ioat_register(struct ioatdma_device *device)
+int ioat_register(struct ioatdma_device *device)
{
int err = dma_async_device_register(&device->common);
@@ -1183,7 +1183,7 @@ void ioat_kobject_del(struct ioatdma_device *device)
}
}
-int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
+int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct dma_device *dma;
@@ -1216,7 +1216,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
return err;
}
-void __devexit ioat_dma_remove(struct ioatdma_device *device)
+void ioat_dma_remove(struct ioatdma_device *device)
{
struct dma_device *dma = &device->common;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5e8fe01ba69d..087935f1565f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -303,13 +303,12 @@ static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
pci_unmap_page(pdev, addr, len, direction);
}
-int __devinit ioat_probe(struct ioatdma_device *device);
-int __devinit ioat_register(struct ioatdma_device *device);
-int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int __devinit ioat_dma_self_test(struct ioatdma_device *device);
-void __devexit ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
- void __iomem *iobase);
+int ioat_probe(struct ioatdma_device *device);
+int ioat_register(struct ioatdma_device *device);
+int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat_dma_self_test(struct ioatdma_device *device);
+void ioat_dma_remove(struct ioatdma_device *device);
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b9d667851445..82d4e306c32e 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -862,7 +862,7 @@ struct kobj_type ioat2_ktype = {
.default_attrs = ioat2_attrs,
};
-int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
+int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct dma_device *dma;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index be2a55b95c23..e100f644e344 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -155,10 +155,10 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
-int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
+int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f7f1dc62c15c..3e9d66920eb3 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -836,7 +836,7 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
return &desc->txd;
}
-static void __devinit ioat3_dma_test_callback(void *dma_async_param)
+static void ioat3_dma_test_callback(void *dma_async_param)
{
struct completion *cmp = dma_async_param;
@@ -844,7 +844,7 @@ static void __devinit ioat3_dma_test_callback(void *dma_async_param)
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
int i, src_idx;
struct page *dest;
@@ -951,7 +951,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
}
- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
@@ -1096,7 +1096,7 @@ out:
return err;
}
-static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
+static int ioat3_dma_self_test(struct ioatdma_device *device)
{
int rc = ioat_dma_self_test(device);
@@ -1187,7 +1187,7 @@ static bool is_snb_ioat(struct pci_dev *pdev)
}
}
-int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
int dca_en = system_has_dca_enabled(pdev);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index c0573061b45d..4f686c527ab6 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -109,9 +109,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-static int __devinit ioat_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void __devexit ioat_remove(struct pci_dev *pdev);
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
@@ -125,7 +124,7 @@ static struct pci_driver ioat_pci_driver = {
.name = DRV_NAME,
.id_table = ioat_pci_tbl,
.probe = ioat_pci_probe,
- .remove = __devexit_p(ioat_remove),
+ .remove = ioat_remove,
};
static struct ioatdma_device *
@@ -141,7 +140,7 @@ alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
return d;
}
-static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
@@ -195,7 +194,7 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
return 0;
}
-static void __devexit ioat_remove(struct pci_dev *pdev)
+static void ioat_remove(struct pci_dev *pdev)
{
struct ioatdma_device *device = pci_get_drvdata(pdev);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 79e3eba29702..eacb8be99812 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -968,7 +968,7 @@ static void iop_adma_issue_pending(struct dma_chan *chan)
*/
#define IOP_ADMA_TEST_SIZE 2000
-static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
+static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
int i;
void *src, *dest;
@@ -1042,7 +1042,7 @@ out:
}
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
-static int __devinit
+static int
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
int i, src_idx;
@@ -1243,7 +1243,7 @@ out:
}
#ifdef CONFIG_RAID6_PQ
-static int __devinit
+static int
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
/* combined sources, software pq results, and extra hw pq results */
@@ -1406,7 +1406,7 @@ out:
}
#endif
-static int __devexit iop_adma_remove(struct platform_device *dev)
+static int iop_adma_remove(struct platform_device *dev)
{
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
@@ -1429,7 +1429,7 @@ static int __devexit iop_adma_remove(struct platform_device *dev)
return 0;
}
-static int __devinit iop_adma_probe(struct platform_device *pdev)
+static int iop_adma_probe(struct platform_device *pdev)
{
struct resource *res;
int ret = 0, i;
@@ -1711,7 +1711,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
- .remove = __devexit_p(iop_adma_remove),
+ .remove = iop_adma_remove,
.driver = {
.owner = THIS_MODULE,
.name = "iop-adma",
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c7573e50aa14..65855373cee6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -22,8 +22,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-
-#include <mach/ipu.h>
+#include <linux/dma/ipu-dma.h>
#include "../dmaengine.h"
#include "ipu_intern.h"
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index fa95bcc3de1f..a5ee37d5320f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -15,8 +15,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
-
-#include <mach/ipu.h>
+#include <linux/dma/ipu-dma.h>
#include "ipu_intern.h"
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 14da1f403edf..c6d98c00f05c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -712,7 +712,7 @@ static void dma_do_tasklet(unsigned long data)
}
}
-static int __devexit mmp_pdma_remove(struct platform_device *op)
+static int mmp_pdma_remove(struct platform_device *op)
{
struct mmp_pdma_device *pdev = platform_get_drvdata(op);
@@ -720,7 +720,7 @@ static int __devexit mmp_pdma_remove(struct platform_device *op)
return 0;
}
-static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
int idx, int irq)
{
struct mmp_pdma_phy *phy = &pdev->phy[idx];
@@ -764,7 +764,7 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
-static int __devinit mmp_pdma_probe(struct platform_device *op)
+static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
const struct of_device_id *of_id;
@@ -865,7 +865,7 @@ static struct platform_driver mmp_pdma_driver = {
},
.id_table = mmp_pdma_id_table,
.probe = mmp_pdma_probe,
- .remove = __devexit_p(mmp_pdma_remove),
+ .remove = mmp_pdma_remove,
};
module_platform_driver(mmp_pdma_driver);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index f3e8d71bcbc7..a9f1cd56689c 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -467,7 +467,7 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
mmp_tdma_enable_chan(tdmac);
}
-static int __devexit mmp_tdma_remove(struct platform_device *pdev)
+static int mmp_tdma_remove(struct platform_device *pdev)
{
struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
@@ -475,7 +475,7 @@ static int __devexit mmp_tdma_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
+static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
int idx, int irq, int type)
{
struct mmp_tdma_chan *tdmac;
@@ -515,7 +515,7 @@ static struct of_device_id mmp_tdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
-static int __devinit mmp_tdma_probe(struct platform_device *pdev)
+static int mmp_tdma_probe(struct platform_device *pdev)
{
enum mmp_tdma_type type;
const struct of_device_id *of_id;
@@ -609,7 +609,7 @@ static struct platform_driver mmp_tdma_driver = {
},
.id_table = mmp_tdma_id_table,
.probe = mmp_tdma_probe,
- .remove = __devexit_p(mmp_tdma_remove),
+ .remove = mmp_tdma_remove,
};
module_platform_driver(mmp_tdma_driver);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2ab0a3d0eed5..2d956732aa3d 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -641,7 +641,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
-static int __devinit mpc_dma_probe(struct platform_device *op)
+static int mpc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -799,7 +799,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
return retval;
}
-static int __devexit mpc_dma_remove(struct platform_device *op)
+static int mpc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
@@ -818,7 +818,7 @@ static struct of_device_id mpc_dma_match[] = {
static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
- .remove = __devexit_p(mpc_dma_remove),
+ .remove = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e362e2b80efb..e17fad03cb80 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
@@ -34,14 +37,14 @@
static void mv_xor_issue_pending(struct dma_chan *chan);
#define to_mv_xor_chan(chan) \
- container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev) \
- container_of(dev, struct mv_xor_device, common)
+ container_of(chan, struct mv_xor_chan, dmachan)
#define to_mv_xor_slot(tx) \
container_of(tx, struct mv_xor_desc_slot, async_tx)
+#define mv_chan_to_devp(chan) \
+ ((chan)->dmadev.dev)
+
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
- dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+ dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
op_mode = XOR_OPERATION_MODE_MEMSET;
break;
default:
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error: unsupported operation %d.\n",
- type);
+ dev_err(mv_chan_to_devp(chan),
+ "error: unsupported operation %d.\n",
+ type);
BUG();
return;
}
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
{
u32 activation;
- dev_dbg(chan->device->common.dev, " activate chan.\n");
+ dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
activation = __raw_readl(XOR_ACTIVATION(chan));
activation |= 0x1;
__raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *slot)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
__func__, __LINE__, slot);
slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *sw_desc)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
}
mv_chan->pending += sw_desc->slot_cnt;
- mv_xor_issue_pending(&mv_chan->common);
+ mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &mv_chan->device->pdev->dev;
+ struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
completed_node) {
@@ -369,7 +371,7 @@ static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
__func__, __LINE__, desc, desc->async_tx.flags);
list_del(&desc->chain_node);
/* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
- dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
mv_xor_clean_completed_slots(mv_chan);
/* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
}
if (cookie > 0)
- mv_chan->common.completed_cookie = cookie;
+ mv_chan->dmachan.completed_cookie = cookie;
}
static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
int new_hw_chain = 1;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
- struct mv_xor_platform_data *plat_data =
- mv_chan->device->pdev->dev.platform_data;
- int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+ int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
/* Allocate descriptor slots */
idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+ hw_desc = (char *) mv_chan->dma_desc_pool_virt;
slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->device->dma_desc_pool;
+ hw_desc = (char *) mv_chan->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot,
slot_node);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"allocated %d descriptor slots last_used: %p\n",
mv_chan->slots_allocated, mv_chan->last_used);
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x src %x len: %u flags: %ld\n",
__func__, dest, src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s dest: %x len: %u flags: %ld\n",
__func__, dest, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
sw_desc->unmap_len = len;
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
__func__, src_cnt, len, dest, flags);
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
mv_chan->last_used = NULL;
- dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
__func__, mv_chan->slots_allocated);
spin_unlock_bh(&mv_chan->lock);
if (in_use_descs)
- dev_err(mv_chan->device->common.dev,
+ dev_err(mv_chan_to_devp(mv_chan),
"freeing %d in use descriptors!\n", in_use_descs);
}
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
u32 val;
val = __raw_readl(XOR_CONFIG(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "config 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "config 0x%08x.\n", val);
val = __raw_readl(XOR_ACTIVATION(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "activation 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "activation 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr cause 0x%08x.\n", val);
val = __raw_readl(XOR_INTR_MASK(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr mask 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "intr mask 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error cause 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error cause 0x%08x.\n", val);
val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error addr 0x%08x.\n", val);
+ dev_err(mv_chan_to_devp(chan),
+ "error addr 0x%08x.\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
if (intr_cause & (1 << 4)) {
- dev_dbg(chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(chan),
"ignore this error\n");
return;
}
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error on chan %d. intr cause 0x%08x.\n",
- chan->idx, intr_cause);
+ dev_err(mv_chan_to_devp(chan),
+ "error on chan %d. intr cause 0x%08x.\n",
+ chan->idx, intr_cause);
mv_dump_xor_regs(chan);
BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
struct mv_xor_chan *chan = data;
u32 intr_cause = mv_chan_get_intr_cause(chan);
- dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+ dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
if (mv_is_err_intr(intr_cause))
mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
*/
#define MV_XOR_TEST_SIZE 2000
-static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
int i;
void *src, *dest;
@@ -910,7 +910,6 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
int err = 0;
- struct mv_xor_chan *mv_chan;
src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
if (!src)
@@ -926,10 +925,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
for (i = 0; i < MV_XOR_TEST_SIZE; i++)
((u8 *) src)[i] = (u8)i;
- /* Start copy, using first DMA channel */
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -950,18 +946,17 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy failed compare, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
@@ -975,8 +970,8 @@ out:
}
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
-static int __devinit
-mv_xor_xor_self_test(struct mv_xor_device *device)
+static int
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
int i, src_idx;
struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
- struct mv_xor_chan *mv_chan;
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
memset(page_address(dest), 0, PAGE_SIZE);
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor timed out, disabling\n");
+ dev_err(dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ dev_err(dma_chan->device->dev,
+ "Self-test xor failed compare, disabling."
+ " index %d, data %x, expected %x\n", i,
+ ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
@@ -1079,62 +1070,66 @@ out:
return err;
}
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
- struct mv_xor_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
- struct mv_xor_chan *mv_chan;
- struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+ struct device *dev = mv_chan->dmadev.dev;
- dma_async_device_unregister(&device->common);
+ dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(&dev->dev, plat_data->pool_size,
- device->dma_desc_pool_virt, device->dma_desc_pool);
+ dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
- list_for_each_entry_safe(chan, _chan, &device->common.channels,
- device_node) {
- mv_chan = to_mv_xor_chan(chan);
+ list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
+ device_node) {
list_del(&chan->device_node);
}
+ free_irq(mv_chan->irq, mv_chan);
+
return 0;
}
-static int __devinit mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+ struct platform_device *pdev,
+ int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
- int irq;
- struct mv_xor_device *adev;
struct mv_xor_chan *mv_chan;
struct dma_device *dma_dev;
- struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+ mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+ if (!mv_chan) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
- adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
- if (!adev)
- return -ENOMEM;
+ mv_chan->idx = idx;
+ mv_chan->irq = irq;
- dma_dev = &adev->common;
+ dma_dev = &mv_chan->dmadev;
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
- if (!adev->dma_desc_pool_virt)
- return -ENOMEM;
-
- adev->id = plat_data->hw_id;
+ mv_chan->dma_desc_pool_virt =
+ dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+ &mv_chan->dma_desc_pool, GFP_KERNEL);
+ if (!mv_chan->dma_desc_pool_virt)
+ return ERR_PTR(-ENOMEM);
/* discover transaction capabilities from the platform data */
- dma_dev->cap_mask = plat_data->cap_mask;
- adev->pdev = pdev;
- platform_set_drvdata(pdev, adev);
-
- adev->shared = platform_get_drvdata(plat_data->shared);
+ dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1143,6 +1138,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
+ dma_dev->device_control = mv_xor_control;
dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
- mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
- mv_chan->device = adev;
- mv_chan->idx = plat_data->hw_id;
- mv_chan->mmr_base = adev->shared->xor_base;
-
+ mv_chan->mmr_base = xordev->xor_base;
if (!mv_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_dma;
@@ -1174,14 +1162,8 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
/* clear errors before enabling interrupts */
mv_xor_device_clear_err_status(mv_chan);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_free_dma;
- }
- ret = devm_request_irq(&pdev->dev, irq,
- mv_xor_interrupt_handler,
- 0, dev_name(&pdev->dev), mv_chan);
+ ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+ 0, dev_name(&pdev->dev), mv_chan);
if (ret)
goto err_free_dma;
@@ -1193,26 +1175,26 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
- mv_chan->common.device = dma_dev;
- dma_cookie_init(&mv_chan->common);
+ mv_chan->dmachan.device = dma_dev;
+ dma_cookie_init(&mv_chan->dmachan);
- list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+ list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- ret = mv_xor_memcpy_self_test(adev);
+ ret = mv_xor_memcpy_self_test(mv_chan);
dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- ret = mv_xor_xor_self_test(adev);
+ ret = mv_xor_xor_self_test(mv_chan);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
- dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+ dev_info(&pdev->dev, "Marvell XOR: "
"( %s%s%s%s)\n",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1202,21 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
- goto out;
+ return mv_chan;
+err_free_irq:
+ free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
- adev->dma_desc_pool_virt, adev->dma_desc_pool);
- out:
- return ret;
+ dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+ return ERR_PTR(ret);
}
static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = msp->xor_base;
+ void __iomem *base = xordev->xor_base;
u32 win_enable = 0;
int i;
@@ -1258,99 +1241,179 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
-static struct platform_driver mv_xor_driver = {
- .probe = mv_xor_probe,
- .remove = __devexit_p(mv_xor_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_NAME,
- },
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
- struct mv_xor_shared_private *msp;
+ struct mv_xor_device *xordev;
+ struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
+ int i, ret;
- dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell XOR driver\n");
- msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
- if (!msp)
+ xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+ if (!xordev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_base)
+ xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_base)
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
- msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_high_base)
+ xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_high_base)
return -EBUSY;
- platform_set_drvdata(pdev, msp);
+ platform_set_drvdata(pdev, xordev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
- mv_xor_conf_mbus_windows(msp, dram);
+ mv_xor_conf_mbus_windows(xordev, dram);
/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
*/
- msp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ xordev->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(xordev->clk))
+ clk_prepare_enable(xordev->clk);
+
+ if (pdev->dev.of_node) {
+ struct device_node *np;
+ int i = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ dma_cap_mask_t cap_mask;
+ int irq;
+
+ dma_cap_zero(cap_mask);
+ if (of_property_read_bool(np, "dmacap,memcpy"))
+ dma_cap_set(DMA_MEMCPY, cap_mask);
+ if (of_property_read_bool(np, "dmacap,xor"))
+ dma_cap_set(DMA_XOR, cap_mask);
+ if (of_property_read_bool(np, "dmacap,memset"))
+ dma_cap_set(DMA_MEMSET, cap_mask);
+ if (of_property_read_bool(np, "dmacap,interrupt"))
+ dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ xordev->channels[i] = NULL;
+ irq_dispose_mapping(irq);
+ goto err_channel_add;
+ }
+
+ i++;
+ }
+ } else if (pdata && pdata->channels) {
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_channel_data *cd;
+ int irq;
+
+ cd = &pdata->channels[i];
+ if (!cd) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] =
+ mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(xordev->channels[i])) {
+ ret = PTR_ERR(xordev->channels[i]);
+ goto err_channel_add;
+ }
+ }
+ }
return 0;
+
+err_channel_add:
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+ if (xordev->channels[i]) {
+ mv_xor_channel_remove(xordev->channels[i]);
+ if (pdev->dev.of_node)
+ irq_dispose_mapping(xordev->channels[i]->irq);
+ }
+
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
+ }
+
+ return ret;
}
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int mv_xor_remove(struct platform_device *pdev)
{
- struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+ struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ if (xordev->channels[i])
+ mv_xor_channel_remove(xordev->channels[i]);
+ }
- if (!IS_ERR(msp->clk)) {
- clk_disable_unprepare(msp->clk);
- clk_put(msp->clk);
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
}
return 0;
}
-static struct platform_driver mv_xor_shared_driver = {
- .probe = mv_xor_shared_probe,
- .remove = mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] = {
+ { .compatible = "marvell,orion-xor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+ .probe = mv_xor_probe,
+ .remove = mv_xor_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_SHARED_NAME,
+ .owner = THIS_MODULE,
+ .name = MV_XOR_NAME,
+ .of_match_table = of_match_ptr(mv_xor_dt_ids),
},
};
static int __init mv_xor_init(void)
{
- int rc;
-
- rc = platform_driver_register(&mv_xor_shared_driver);
- if (!rc) {
- rc = platform_driver_register(&mv_xor_driver);
- if (rc)
- platform_driver_unregister(&mv_xor_shared_driver);
- }
- return rc;
+ return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
@@ -1359,7 +1422,6 @@ module_init(mv_xor_init);
static void __exit mv_xor_exit(void)
{
platform_driver_unregister(&mv_xor_driver);
- platform_driver_unregister(&mv_xor_shared_driver);
return;
}
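
With the shared/per-engine split removed, mv_xor_channel_add() registers each channel directly with the dmaengine core, so clients reach it through the generic capability-based API. A minimal sketch of that client side, assuming the standard dmaengine helpers of this kernel generation; example_get_xor_chan() is an illustrative name and the NULL filter simply accepts the first channel advertising DMA_XOR:

#include <linux/dmaengine.h>

/* Illustrative only: request any channel that mv_xor_channel_add()
 * registered with DMA_XOR capability. */
static struct dma_chan *example_get_xor_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_XOR, mask);

	/* NULL filter/param: take the first capable channel */
	return dma_request_channel(mask, NULL, NULL);
}

The channel would later be returned with dma_release_channel().
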
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index a5b422f5a8ab..c632a4761fcf 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,8 +24,10 @@
#include <linux/interrupt.h>
#define USE_TIMER
+#define MV_XOR_POOL_SIZE PAGE_SIZE
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
+#define MV_XOR_MAX_CHANNELS 2
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
@@ -51,29 +53,13 @@
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
-struct mv_xor_shared_private {
- void __iomem *xor_base;
- void __iomem *xor_high_base;
- struct clk *clk;
-};
-
-
-/**
- * struct mv_xor_device - internal representation of a XOR device
- * @pdev: Platform device
- * @id: HW XOR Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
struct mv_xor_device {
- struct platform_device *pdev;
- int id;
- dma_addr_t dma_desc_pool;
- void *dma_desc_pool_virt;
- struct dma_device common;
- struct mv_xor_shared_private *shared;
+ void __iomem *xor_base;
+ void __iomem *xor_high_base;
+ struct clk *clk;
+ struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
};
/**
@@ -96,11 +82,15 @@ struct mv_xor_chan {
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
unsigned int idx;
+ int irq;
enum dma_transaction_type current_type;
struct list_head chain;
struct list_head completed_slots;
- struct mv_xor_device *device;
- struct dma_chan common;
+ dma_addr_t dma_desc_pool;
+ void *dma_desc_pool_virt;
+ size_t pool_size;
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
struct mv_xor_desc_slot *last_used;
struct list_head all_slots;
int slots_allocated;
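
Since the embedded struct dma_chan is now called dmachan rather than common, the usual container_of() accessor follows the new field name. A sketch under that assumption (the driver's real to_mv_xor_chan() helper is updated in a hunk not shown here):

/* Recover the driver channel from the embedded dmaengine channel. */
static inline struct mv_xor_chan *to_mv_xor_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mv_xor_chan, dmachan);
}
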
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bb2d8e7029eb..5a31264f2bd1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -19,9 +19,6 @@
#include "virt-dma.h"
-#include <plat/cpu.h>
-#include <plat/dma.h>
-
struct omap_dmadev {
struct dma_device ddev;
spinlock_t lock;
@@ -438,7 +435,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
}
- if (!cpu_class_is_omap1()) {
+ if (dma_omap2plus()) {
omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 987ab5cd2617..3f2617255ef2 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -843,7 +843,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
}
#endif
-static int __devinit pch_dma_probe(struct pci_dev *pdev,
+static int pch_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pch_dma *pd;
@@ -961,7 +961,7 @@ err_free_mem:
return err;
}
-static void __devexit pch_dma_remove(struct pci_dev *pdev)
+static void pch_dma_remove(struct pci_dev *pdev)
{
struct pch_dma *pd = pci_get_drvdata(pdev);
struct pch_dma_chan *pd_chan;
@@ -1022,7 +1022,7 @@ static struct pci_driver pch_dma_driver = {
.name = DRV_NAME,
.id_table = pch_dma_id_table,
.probe = pch_dma_probe,
- .remove = __devexit_p(pch_dma_remove),
+ .remove = pch_dma_remove,
#ifdef CONFIG_PM
.suspend = pch_dma_suspend,
.resume = pch_dma_resume,
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 665668b6f2b1..80680eee0171 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2851,7 +2851,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
-static int __devinit
+static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
@@ -2988,7 +2988,7 @@ probe_err1:
return ret;
}
-static int __devexit pl330_remove(struct amba_device *adev)
+static int pl330_remove(struct amba_device *adev)
{
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index f72348d0bc41..5d3d95569a1e 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4361,7 +4361,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
/**
* ppc440spe_adma_probe - probe the asynch device
*/
-static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
+static int ppc440spe_adma_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct resource res;
@@ -4592,7 +4592,7 @@ out:
/**
* ppc440spe_adma_remove - remove the asynch device
*/
-static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev)
+static int ppc440spe_adma_remove(struct platform_device *ofdev)
{
struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
struct device_node *np = ofdev->dev.of_node;
@@ -4905,7 +4905,7 @@ out_free:
return ret;
}
-static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
+static const struct of_device_id ppc440spe_adma_of_match[] = {
{ .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", },
{},
@@ -4914,7 +4914,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct platform_driver ppc440spe_adma_driver = {
.probe = ppc440spe_adma_probe,
- .remove = __devexit_p(ppc440spe_adma_remove),
+ .remove = ppc440spe_adma_remove,
.driver = {
.name = "PPC440SP(E)-ADMA",
.owner = THIS_MODULE,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index b893159c1ecb..461a91ab70bb 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -826,7 +826,7 @@ static const struct sa11x0_dma_channel_desc chan_desc[] = {
CD(Ser4SSPRc, DDAR_RW),
};
-static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
+static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
struct device *dev)
{
unsigned i;
@@ -891,7 +891,7 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
}
}
-static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
+static int sa11x0_dma_probe(struct platform_device *pdev)
{
struct sa11x0_dma_dev *d;
struct resource *res;
@@ -967,7 +967,7 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
+static int sa11x0_dma_remove(struct platform_device *pdev)
{
struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
unsigned pch;
@@ -1072,7 +1072,7 @@ static struct platform_driver sa11x0_dma_driver = {
.pm = &sa11x0_dma_pm_ops,
},
.probe = sa11x0_dma_probe,
- .remove = __devexit_p(sa11x0_dma_remove),
+ .remove = sa11x0_dma_remove,
};
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index f41bcc5267fd..3315e4be9b85 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -483,7 +483,7 @@ static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
.priority = 1,
};
-static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
int irq, unsigned long flags)
{
const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
@@ -646,7 +646,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.get_partial = sh_dmae_get_partial,
};
-static int __devinit sh_dmae_probe(struct platform_device *pdev)
+static int sh_dmae_probe(struct platform_device *pdev)
{
struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
unsigned long irqflags = IRQF_DISABLED,
@@ -880,7 +880,7 @@ ermrdmars:
return err;
}
-static int __devexit sh_dmae_remove(struct platform_device *pdev)
+static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
@@ -926,7 +926,7 @@ static struct platform_driver sh_dmae_driver = {
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
},
- .remove = __devexit_p(sh_dmae_remove),
+ .remove = sh_dmae_remove,
.shutdown = sh_dmae_shutdown,
};
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d451caace806..94674a96c646 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -550,7 +550,7 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
-static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+static int sirfsoc_dma_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -655,7 +655,7 @@ irq_dispose:
return ret;
}
-static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+static int sirfsoc_dma_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -673,7 +673,7 @@ static struct of_device_id sirfsoc_dma_match[] = {
static struct platform_driver sirfsoc_dma_driver = {
.probe = sirfsoc_dma_probe,
- .remove = __devexit_p(sirfsoc_dma_remove),
+ .remove = sirfsoc_dma_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index ae55091c2272..23c5573e62dd 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -19,8 +19,7 @@
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
-
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include "dmaengine.h"
#include "ste_dma40_ll.h"
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index cad9e1daedff..851ad56e8409 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -6,7 +6,7 @@
*/
#include <linux/kernel.h>
-#include <plat/ste_dma40.h>
+#include <linux/platform_data/dma-ste-dma40.h>
#include "ste_dma40_ll.h"
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 528c62dd4b00..3cad856fe67f 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -266,6 +266,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
if (async_tx_test_ack(&dma_desc->txd)) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
return dma_desc;
}
}
@@ -1050,7 +1051,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1098,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
mem += len;
}
sg_req->last_sg = true;
- dma_desc->txd.flags = 0;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
/*
* Make sure that mode should not be conflicting with currently
@@ -1184,7 +1188,7 @@ static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.max_dma_count = 1024UL * 64,
};
-static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
+static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra30-apbdma",
.data = &tegra30_dma_chip_data,
@@ -1197,7 +1201,7 @@ static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif
-static int __devinit tegra_dma_probe(struct platform_device *pdev)
+static int tegra_dma_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_dma *tdma;
@@ -1360,7 +1364,7 @@ err_pm_disable:
return ret;
}
-static int __devexit tegra_dma_remove(struct platform_device *pdev)
+static int tegra_dma_remove(struct platform_device *pdev)
{
struct tegra_dma *tdma = platform_get_drvdata(pdev);
int i;
@@ -1403,7 +1407,7 @@ static int tegra_dma_runtime_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = {
+static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = tegra_dma_runtime_suspend,
.runtime_resume = tegra_dma_runtime_resume,
@@ -1418,7 +1422,7 @@ static struct platform_driver tegra_dmac_driver = {
.of_match_table = of_match_ptr(tegra_dma_of_match),
},
.probe = tegra_dma_probe,
- .remove = __devexit_p(tegra_dma_remove),
+ .remove = tegra_dma_remove,
};
module_platform_driver(tegra_dmac_driver);
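
The Tegra hunks above make interrupt and ACK behaviour follow the caller's flags: the end-of-chunk interrupt bit is set only when DMA_PREP_INTERRUPT is requested, and the descriptor is pre-acked only when DMA_CTRL_ACK is passed. A hedged client-side sketch, assuming the dmaengine_prep_dma_cyclic() wrapper of this kernel generation (example_prep_cyclic() and its arguments are illustrative):

#include <linux/dmaengine.h>

/* Illustrative only: a cyclic transfer that wants per-period callbacks
 * must now pass DMA_PREP_INTERRUPT explicitly. */
static struct dma_async_tx_descriptor *
example_prep_cyclic(struct dma_chan *chan, dma_addr_t buf,
		    size_t buf_len, size_t period_len)
{
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
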
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 4e0dff59901d..952f823901a6 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -667,7 +667,7 @@ static irqreturn_t td_irq(int irq, void *devid)
}
-static int __devinit td_probe(struct platform_device *pdev)
+static int td_probe(struct platform_device *pdev)
{
struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
struct timb_dma *td;
@@ -798,7 +798,7 @@ err_release_region:
}
-static int __devexit td_remove(struct platform_device *pdev)
+static int td_remove(struct platform_device *pdev)
{
struct timb_dma *td = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index bb82d6be793c..66719925970f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -4,10 +4,13 @@
# Licensed and distributed under the GPL
#
+config EDAC_SUPPORT
+ bool
+
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC || TILE || ARM
+ depends on X86 || PPC || TILE || ARM || EDAC_SUPPORT
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -29,8 +32,6 @@ menuconfig EDAC
if EDAC
-comment "Reporting subsystems"
-
config EDAC_LEGACY_SYSFS
bool "EDAC legacy sysfs"
default y
@@ -316,4 +317,32 @@ config EDAC_HIGHBANK_L2
Support for error detection and correction on the
Calxeda Highbank memory controller.
+config EDAC_OCTEON_PC
+ tristate "Cavium Octeon Primary Caches"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the primary caches of
+ the cnMIPS cores of Cavium Octeon family SOCs.
+
+config EDAC_OCTEON_L2C
+ tristate "Cavium Octeon Secondary Caches (L2C)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_LMC
+ tristate "Cavium Octeon DRAM Memory Controller (LMC)"
+ depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
+config EDAC_OCTEON_PCI
+ tristate "Cavium Octeon PCI Controller"
+ depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON
+ help
+ Support for error detection and correction on the
+ Cavium Octeon family of SOCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 7e5129a733f8..5608a9ba61b7 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -58,3 +58,8 @@ obj-$(CONFIG_EDAC_TILE) += tile_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
+
+obj-$(CONFIG_EDAC_OCTEON_PC) += octeon_edac-pc.o
+obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
+obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
+obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index f74a684269ff..ad8bf2aa629d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2563,8 +2563,8 @@ err_ret:
return ret;
}
-static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int amd64_probe_one_instance(struct pci_dev *pdev,
+ const struct pci_device_id *mc_type)
{
u8 nid = get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -2612,7 +2612,7 @@ err_out:
return ret;
}
-static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+static void amd64_remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -2686,7 +2686,7 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
.name = EDAC_MOD_STR,
.probe = amd64_probe_one_instance,
- .remove = __devexit_p(amd64_remove_one_instance),
+ .remove = amd64_remove_one_instance,
.id_table = amd64_pci_table,
};
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 29eeb68a200c..96e3ee3460a5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -301,8 +301,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -318,7 +318,7 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
* structure for the device then delete the mci and free the
* resources.
*/
-static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+static void amd76x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -350,7 +350,7 @@ MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
static struct pci_driver amd76x_driver = {
.name = EDAC_MOD_STR,
.probe = amd76x_init_one,
- .remove = __devexit_p(amd76x_remove_one),
+ .remove = amd76x_remove_one,
.id_table = amd76x_pci_tbl,
};
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a1bbd8edd257..c2eaf334b90b 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -124,7 +124,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
}
}
-static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+static void cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
struct dimm_info *dimm;
@@ -164,7 +164,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit cell_edac_probe(struct platform_device *pdev)
+static int cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
@@ -233,7 +233,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit cell_edac_remove(struct platform_device *pdev)
+static int cell_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (mci)
@@ -247,7 +247,7 @@ static struct platform_driver cell_edac_driver = {
.owner = THIS_MODULE,
},
.probe = cell_edac_probe,
- .remove = __devexit_p(cell_edac_remove),
+ .remove = cell_edac_remove,
};
static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index c2ef13495873..7f3c57113ba1 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -932,7 +932,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase)
return dual;
}
-static int __devinit cpc925_probe(struct platform_device *pdev)
+static int cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index a5ed6b795fd4..644fec54681f 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1390,8 +1390,7 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -1402,7 +1401,7 @@ static int __devinit e752x_init_one(struct pci_dev *pdev,
return e752x_probe1(pdev, ent->driver_data);
}
-static void __devexit e752x_remove_one(struct pci_dev *pdev)
+static void e752x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
@@ -1445,7 +1444,7 @@ MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
static struct pci_driver e752x_driver = {
.name = EDAC_MOD_STR,
.probe = e752x_init_one,
- .remove = __devexit_p(e752x_remove_one),
+ .remove = e752x_remove_one,
.id_table = e752x_pci_tbl,
};
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 9ff57f361a43..1c4056a50383 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -528,8 +528,7 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit e7xxx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -538,7 +537,7 @@ static int __devinit e7xxx_init_one(struct pci_dev *pdev,
-EIO : e7xxx_probe1(pdev, ent->driver_data);
}
-static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+static void e7xxx_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
@@ -579,7 +578,7 @@ MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
static struct pci_driver e7xxx_driver = {
.name = EDAC_MOD_STR,
.probe = e7xxx_init_one,
- .remove = __devexit_p(e7xxx_remove_one),
+ .remove = e7xxx_remove_one,
.id_table = e7xxx_pci_tbl,
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index a641f623fffd..fb219bc5cb2c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Alocate and fill the csrow/channels structs
*/
- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
goto error;
for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the dimm structs
*/
- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
goto error;
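
The three edac_mc.c hunks restore the documented kcalloc() argument order, kcalloc(n, size, flags); with the arguments reversed the allocation happens to be the same size, but the call no longer matches the signature that readers and static checkers expect. A trivial sketch of the corrected pattern (struct example and the helper name are illustrative):

#include <linux/slab.h>

struct example {
	int value;
};

/* Element count first, element size second, per kcalloc(n, size, flags). */
static struct example *example_alloc_array(unsigned int count)
{
	return kcalloc(count, sizeof(struct example), GFP_KERNEL);
}
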
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index de2df92f9c77..0ca1ca71157f 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -472,8 +472,7 @@ static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
device_remove_file(&csrow->dev,
dynamic_csrow_ce_count_attr[chan]);
}
- put_device(&mci->csrows[i]->dev);
- device_del(&mci->csrows[i]->dev);
+ device_unregister(&mci->csrows[i]->dev);
}
}
#endif
@@ -1055,11 +1054,9 @@ fail:
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- put_device(&dimm->dev);
- device_del(&dimm->dev);
+ device_unregister(&dimm->dev);
}
- put_device(&mci->dev);
- device_del(&mci->dev);
+ device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
return err;
@@ -1086,16 +1083,14 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
if (dimm->nr_pages == 0)
continue;
edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
- put_device(&dimm->dev);
- device_del(&dimm->dev);
+ device_unregister(&dimm->dev);
}
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
- put_device(&mci->dev);
- device_del(&mci->dev);
+ device_unregister(&mci->dev);
bus_unregister(&mci->bus);
kfree(mci->bus.name);
}
@@ -1159,8 +1154,6 @@ int __init edac_mc_sysfs_init(void)
void __exit edac_mc_sysfs_exit(void)
{
- put_device(mci_pdev);
- device_del(mci_pdev);
+ device_unregister(mci_pdev);
edac_put_sysfs_subsys();
- kfree(mci_pdev);
}
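
The sysfs teardown hunks replace a put_device()/device_del() pair, issued in the wrong order, with device_unregister(), which the driver core implements as device_del() followed by the final put_device(). A small illustrative sketch of that equivalence (example_teardown() is not a real function):

#include <linux/device.h>

/* What device_unregister(dev) does, in order: remove the device from
 * the hierarchy and sysfs first, then drop the reference (which may
 * free the containing structure via its release callback). */
static void example_teardown(struct device *dev)
{
	device_del(dev);
	put_device(dev);
}
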
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 7684426fafa2..e8658e451762 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
- if (edac_pci_dev->show)
+ if (edac_pci_dev->store)
return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;
}
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index e599b00c05a8..c2bd8c6a4349 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -50,7 +50,7 @@ static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit highbank_l2_err_probe(struct platform_device *pdev)
+static int highbank_l2_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci;
struct hb_l2_drvdata *drvdata;
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 7ea4cc2e8bd2..4695dd2d71fd 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -119,7 +119,7 @@ static const struct file_operations highbank_mc_debug_inject_fops = {
.llseek = generic_file_llseek,
};
-static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
{
if (mci->debugfs)
debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
@@ -127,11 +127,11 @@ static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
			    &highbank_mc_debug_inject_fops);
}
#else
-static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+static void highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
{}
#endif
-static int __devinit highbank_mc_probe(struct platform_device *pdev)
+static int highbank_mc_probe(struct platform_device *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index d3d19cc4e9a1..694efcbf19c0 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -455,8 +455,7 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i3000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -472,7 +471,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i3000_remove_one(struct pci_dev *pdev)
+static void i3000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -502,7 +501,7 @@ MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
static struct pci_driver i3000_driver = {
.name = EDAC_MOD_STR,
.probe = i3000_init_one,
- .remove = __devexit_p(i3000_remove_one),
+ .remove = i3000_remove_one,
.id_table = i3000_pci_tbl,
};
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index b6653a6fc5d5..4e8337602e78 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -419,8 +419,7 @@ fail:
return rc;
}
-static int __devinit i3200_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -436,7 +435,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i3200_remove_one(struct pci_dev *pdev)
+static void i3200_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i3200_priv *priv;
@@ -467,7 +466,7 @@ MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
static struct pci_driver i3200_driver = {
.name = EDAC_MOD_STR,
.probe = i3200_init_one,
- .remove = __devexit_p(i3200_remove_one),
+ .remove = i3200_remove_one,
.id_table = i3200_pci_tbl,
};
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 6a49dd00b81b..63b2194e8c20 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1489,8 +1489,7 @@ fail0:
* negative on error
* count (>= 0)
*/
-static int __devinit i5000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5000_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
@@ -1509,7 +1508,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
* i5000_remove_one destructor for one instance of device
*
*/
-static void __devexit i5000_remove_one(struct pci_dev *pdev)
+static void i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -1547,7 +1546,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
static struct pci_driver i5000_driver = {
.name = KBUILD_BASENAME,
.probe = i5000_init_one,
- .remove = __devexit_p(i5000_remove_one),
+ .remove = i5000_remove_one,
.id_table = i5000_pci_tbl,
};
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index c4b5e5f868e8..d6955b2cc99f 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -638,8 +638,7 @@ static struct pci_dev *pci_get_device_func(unsigned vendor,
return ret;
}
-static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
- int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
@@ -660,7 +659,7 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}
-static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
+static void i5100_init_mtr(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
@@ -732,7 +731,7 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
 * o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
-static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
+static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
@@ -762,8 +761,8 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
}
-static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
+static void i5100_init_dimm_layout(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
{
struct i5100_priv *priv = mci->pvt_info;
int i;
@@ -784,8 +783,8 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
i5100_init_dimm_csmap(mci);
}
-static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
- struct mem_ctl_info *mci)
+static void i5100_init_interleaving(struct pci_dev *pdev,
+ struct mem_ctl_info *mci)
{
u16 w;
u32 dw;
@@ -830,7 +829,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
i5100_init_mtr(mci);
}
-static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
+static void i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
struct i5100_priv *priv = mci->pvt_info;
@@ -864,8 +863,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit i5100_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
struct mem_ctl_info *mci;
@@ -1020,7 +1018,7 @@ bail:
return ret;
}
-static void __devexit i5100_remove_one(struct pci_dev *pdev)
+static void i5100_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i5100_priv *priv;
@@ -1054,7 +1052,7 @@ MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
static struct pci_driver i5100_driver = {
.name = KBUILD_BASENAME,
.probe = i5100_init_one,
- .remove = __devexit_p(i5100_remove_one),
+ .remove = i5100_remove_one,
.id_table = i5100_pci_tbl,
};
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 277246998b80..0a05bbceb08f 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1373,8 +1373,7 @@ fail0:
* negative on error
* count (>= 0)
*/
-static int __devinit i5400_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i5400_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
@@ -1393,7 +1392,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
* i5400_remove_one destructor for one instance of device
*
*/
-static void __devexit i5400_remove_one(struct pci_dev *pdev)
+static void i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -1431,7 +1430,7 @@ MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
static struct pci_driver i5400_driver = {
.name = "i5400_edac",
.probe = i5400_init_one,
- .remove = __devexit_p(i5400_remove_one),
+ .remove = i5400_remove_one,
.id_table = i5400_pci_tbl,
};
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 9d669cd43618..087c27bc5d42 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -923,7 +923,7 @@ static void i7300_put_devices(struct mem_ctl_info *mci)
* Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
* Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
*/
-static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
+static int i7300_get_devices(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct pci_dev *pdev;
@@ -1008,8 +1008,7 @@ error:
* @pdev: struct pci_dev pointer
* @id: struct pci_device_id pointer - currently unused
*/
-static int __devinit i7300_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[3];
@@ -1122,7 +1121,7 @@ fail0:
* i7300_remove_one() - Remove the driver
* @pdev: struct pci_dev pointer
*/
-static void __devexit i7300_remove_one(struct pci_dev *pdev)
+static void i7300_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
char *tmp;
@@ -1163,7 +1162,7 @@ MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
static struct pci_driver i7300_driver = {
.name = "i7300_edac",
.probe = i7300_init_one,
- .remove = __devexit_p(i7300_remove_one),
+ .remove = i7300_remove_one,
.id_table = i7300_pci_tbl,
};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index ad5f934c95d3..0ec3e95a12cd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2305,8 +2305,7 @@ fail0:
* < 0 for error code
*/
-static int __devinit i7core_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc, count = 0;
struct i7core_dev *i7core_dev;
@@ -2368,7 +2367,7 @@ fail0:
* i7core_remove destructor for one instance of device
*
*/
-static void __devexit i7core_remove(struct pci_dev *pdev)
+static void i7core_remove(struct pci_dev *pdev)
{
struct i7core_dev *i7core_dev;
@@ -2409,7 +2408,7 @@ MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
static struct pci_driver i7core_driver = {
.name = "i7core_edac",
.probe = i7core_probe,
- .remove = __devexit_p(i7core_remove),
+ .remove = i7core_remove,
.id_table = i7core_pci_tbl,
};
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 90f303db5d1d..57fdb77903ba 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -353,8 +353,8 @@ fail:
EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
/* returns count (>= 0), or negative on error */
-static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -369,7 +369,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -399,7 +399,7 @@ MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
static struct pci_driver i82443bxgx_edacmc_driver = {
.name = EDAC_MOD_STR,
.probe = i82443bxgx_edacmc_init_one,
- .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+ .remove = i82443bxgx_edacmc_remove_one,
.id_table = i82443bxgx_pci_tbl,
};
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 1faa74971513..3e3e431c8301 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -254,8 +254,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82860_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -273,7 +273,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82860_remove_one(struct pci_dev *pdev)
+static void i82860_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
static struct pci_driver i82860_driver = {
.name = EDAC_MOD_STR,
.probe = i82860_init_one,
- .remove = __devexit_p(i82860_remove_one),
+ .remove = i82860_remove_one,
.id_table = i82860_pci_tbl,
};
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 3e416b1a6b53..2f8535fc451e 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -479,8 +479,8 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82875p_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -498,7 +498,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+static void i82875p_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
@@ -541,7 +541,7 @@ MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
static struct pci_driver i82875p_driver = {
.name = EDAC_MOD_STR,
.probe = i82875p_init_one,
- .remove = __devexit_p(i82875p_remove_one),
+ .remove = i82875p_remove_one,
.id_table = i82875p_pci_tbl,
};
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a98020409fa9..0c8d4b0eaa32 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -592,8 +592,8 @@ fail0:
}
/* returns count (>= 0), or negative on error */
-static int __devinit i82975x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int i82975x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc;
@@ -610,7 +610,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+static void i82975x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct i82975x_pvt *pvt;
@@ -643,7 +643,7 @@ MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
static struct pci_driver i82975x_driver = {
.name = EDAC_MOD_STR,
.probe = i82975x_init_one,
- .remove = __devexit_p(i82975x_remove_one),
+ .remove = i82975x_remove_one,
.id_table = i82975x_pci_tbl,
};
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4fe66fa183ec..42a840d530a5 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -212,7 +212,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
+int mpc85xx_pci_err_probe(struct platform_device *op)
{
struct edac_pci_ctl_info *pci;
struct mpc85xx_pci_pdata *pdata;
@@ -504,7 +504,7 @@ static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
+static int mpc85xx_l2_err_probe(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev;
struct mpc85xx_l2_pdata *pdata;
@@ -885,7 +885,7 @@ static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+static void mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
@@ -964,7 +964,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
}
}
-static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
+static int mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 2b315c2edc3c..542fad70e360 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -100,7 +100,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
return 0;
}
-static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci;
struct mv64x60_pci_pdata *pdata;
@@ -221,7 +221,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev)
static struct platform_driver mv64x60_pci_err_driver = {
.probe = mv64x60_pci_err_probe,
- .remove = __devexit_p(mv64x60_pci_err_remove),
+ .remove = mv64x60_pci_err_remove,
.driver = {
.name = "mv64x60_pci_err",
}
@@ -271,7 +271,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev;
struct mv64x60_sram_pdata *pdata;
@@ -439,7 +439,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev;
struct resource *r;
@@ -697,7 +697,7 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
dimm->edac_mode = EDAC_SECDED;
}
-static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
struct edac_mc_layer layers[2];
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
new file mode 100644
index 000000000000..7e98084d3645
--- /dev/null
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_MOD_STR "octeon-l2c"
+
+static void octeon_l2c_poll_oct1(struct edac_device_ctl_info *l2c)
+{
+ union cvmx_l2t_err l2t_err, l2t_err_reset;
+ union cvmx_l2d_err l2d_err, l2d_err_reset;
+
+ l2t_err_reset.u64 = 0;
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 0,
+ "Tag Single bit error (corrected)");
+ l2t_err_reset.s.sec_err = 1;
+ }
+ if (l2t_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 0,
+ "Tag Double bit error (detected)");
+ l2t_err_reset.s.ded_err = 1;
+ }
+ if (l2t_err_reset.u64)
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err_reset.u64);
+
+ l2d_err_reset.u64 = 0;
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ if (l2d_err.s.sec_err) {
+ edac_device_handle_ce(l2c, 0, 1,
+ "Data Single bit error (corrected)");
+ l2d_err_reset.s.sec_err = 1;
+ }
+ if (l2d_err.s.ded_err) {
+ edac_device_handle_ue(l2c, 0, 1,
+ "Data Double bit error (detected)");
+ l2d_err_reset.s.ded_err = 1;
+ }
+ if (l2d_err_reset.u64)
+ cvmx_write_csr(CVMX_L2D_ERR, l2d_err_reset.u64);
+
+}
+
+static void _octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c, int tad)
+{
+ union cvmx_l2c_err_tdtx err_tdtx, err_tdtx_reset;
+ union cvmx_l2c_err_ttgx err_ttgx, err_ttgx_reset;
+ char buf1[64];
+ char buf2[80];
+
+ err_tdtx_reset.u64 = 0;
+ err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
+ if (err_tdtx.s.dbe || err_tdtx.s.sbe ||
+ err_tdtx.s.vdbe || err_tdtx.s.vsbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_tdtx.s.type, err_tdtx.s.syn, err_tdtx.s.wayidx);
+
+ if (err_tdtx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "L2D Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vdbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Double bit error (detected):%s", buf1);
+ err_tdtx_reset.s.vdbe = 1;
+ edac_device_handle_ue(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx.s.vsbe) {
+ snprintf(buf2, sizeof(buf2),
+ "VBF Single bit error (corrected):%s", buf1);
+ err_tdtx_reset.s.vsbe = 1;
+ edac_device_handle_ce(l2c, tad, 1, buf2);
+ }
+ if (err_tdtx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx_reset.u64);
+
+ err_ttgx_reset.u64 = 0;
+ err_ttgx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TTGX(tad));
+
+ if (err_ttgx.s.dbe || err_ttgx.s.sbe)
+ snprintf(buf1, sizeof(buf1),
+ "type:%d, syn:0x%x, way:%d",
+ err_ttgx.s.type, err_ttgx.s.syn, err_ttgx.s.wayidx);
+
+ if (err_ttgx.s.dbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Double bit error (detected):%s", buf1);
+ err_ttgx_reset.s.dbe = 1;
+ edac_device_handle_ue(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx.s.sbe) {
+ snprintf(buf2, sizeof(buf2),
+ "Tag Single bit error (corrected):%s", buf1);
+ err_ttgx_reset.s.sbe = 1;
+ edac_device_handle_ce(l2c, tad, 0, buf2);
+ }
+ if (err_ttgx_reset.u64)
+ cvmx_write_csr(CVMX_L2C_ERR_TTGX(tad), err_ttgx_reset.u64);
+}
+
+static void octeon_l2c_poll_oct2(struct edac_device_ctl_info *l2c)
+{
+ int i;
+ for (i = 0; i < l2c->nr_instances; i++)
+ _octeon_l2c_poll_oct2(l2c, i);
+}
+
+static int octeon_l2c_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c;
+
+ int num_tads = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 : 1;
+
+	/* 'Tags' are block 0, 'Data' is block 1 */
+ l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
+ NULL, 0, edac_device_alloc_index());
+ if (!l2c)
+ return -ENOMEM;
+
+ l2c->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l2c);
+ l2c->dev_name = dev_name(&pdev->dev);
+
+ l2c->mod_name = "octeon-l2c";
+ l2c->ctl_name = "octeon_l2c_err";
+
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_l2t_err l2t_err;
+ union cvmx_l2d_err l2d_err;
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.sec_intena = 0; /* We poll */
+ l2t_err.s.ded_intena = 0;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ l2d_err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
+ l2d_err.s.sec_intena = 0; /* We poll */
+ l2d_err.s.ded_intena = 0;
+		cvmx_write_csr(CVMX_L2D_ERR, l2d_err.u64);
+
+ l2c->edac_check = octeon_l2c_poll_oct1;
+ } else {
+ /* OCTEON II */
+ l2c->edac_check = octeon_l2c_poll_oct2;
+ }
+
+ if (edac_device_add_device(l2c) > 0) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err;
+ }
+
+
+ return 0;
+
+err:
+ edac_device_free_ctl_info(l2c);
+
+ return -ENXIO;
+}
+
+static int octeon_l2c_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(l2c);
+
+ return 0;
+}
+
+static struct platform_driver octeon_l2c_driver = {
+ .probe = octeon_l2c_probe,
+ .remove = octeon_l2c_remove,
+ .driver = {
+ .name = "octeon_l2c_edac",
+ }
+};
+module_platform_driver(octeon_l2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
new file mode 100644
index 000000000000..93412d6b3af1
--- /dev/null
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -0,0 +1,186 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-lmcx-defs.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define OCTEON_MAX_MC 4
+
+static void octeon_lmc_edac_poll(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_mem_cfg0 cfg0;
+ bool do_clear = false;
+ char msg[64];
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx));
+ if (cfg0.s.sec_err || cfg0.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn30xx.fdimm, fadr.cn30xx.fbunk,
+ fadr.cn30xx.fbank, fadr.cn30xx.frow, fadr.cn30xx.fcol);
+ }
+
+ if (cfg0.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (cfg0.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ cfg0.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mci->mc_idx), cfg0.u64);
+}
+
+static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
+{
+ union cvmx_lmcx_int int_reg;
+ bool do_clear = false;
+ char msg[64];
+
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ if (int_reg.s.sec_err || int_reg.s.ded_err) {
+ union cvmx_lmcx_fadr fadr;
+ fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(mci->mc_idx));
+ snprintf(msg, sizeof(msg),
+ "DIMM %d rank %d bank %d row %d col %d",
+ fadr.cn61xx.fdimm, fadr.cn61xx.fbunk,
+ fadr.cn61xx.fbank, fadr.cn61xx.frow, fadr.cn61xx.fcol);
+ }
+
+ if (int_reg.s.sec_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.sec_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+
+ if (int_reg.s.ded_err) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, msg, "");
+ int_reg.s.ded_err = -1; /* Done, re-arm */
+ do_clear = true;
+ }
+ if (do_clear)
+ cvmx_write_csr(CVMX_LMCX_INT(mci->mc_idx), int_reg.u64);
+}
+
+static int octeon_lmc_edac_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ int mc = pdev->id;
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+
+ if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+ union cvmx_lmcx_mem_cfg0 cfg0;
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
+ if (!cfg0.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "octeon-lmc-err";
+ mci->edac_check = octeon_lmc_edac_poll;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(mc));
+ cfg0.s.intr_ded_ena = 0; /* We poll */
+ cfg0.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_MEM_CFG0(mc), cfg0.u64);
+ } else {
+ /* OCTEON II */
+ union cvmx_lmcx_int_en en;
+ union cvmx_lmcx_config config;
+
+ config.u64 = cvmx_read_csr(CVMX_LMCX_CONFIG(0));
+ if (!config.s.ecc_ena) {
+ dev_info(&pdev->dev, "Disabled (ECC not enabled)\n");
+ return 0;
+ }
+
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
+ if (!mci)
+ return -ENXIO;
+
+ mci->pdev = &pdev->dev;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ mci->mod_name = "octeon-lmc";
+ mci->ctl_name = "co_lmc_err";
+ mci->edac_check = octeon_lmc_edac_poll_o2;
+
+ if (edac_mc_add_mc(mci)) {
+ dev_err(&pdev->dev, "edac_mc_add_mc() failed\n");
+ edac_mc_free(mci);
+ return -ENXIO;
+ }
+
+ en.u64 = cvmx_read_csr(CVMX_LMCX_INT_EN(mc));
+ en.s.intr_ded_ena = 0; /* We poll */
+ en.s.intr_sec_ena = 0;
+ cvmx_write_csr(CVMX_LMCX_INT_EN(mc), en.u64);
+ }
+ platform_set_drvdata(pdev, mci);
+
+ return 0;
+}
+
+static int octeon_lmc_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver octeon_lmc_edac_driver = {
+ .probe = octeon_lmc_edac_probe,
+ .remove = octeon_lmc_edac_remove,
+ .driver = {
+ .name = "octeon_lmc_edac",
+ }
+};
+module_platform_driver(octeon_lmc_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
new file mode 100644
index 000000000000..0f83c33a7d1f
--- /dev/null
+++ b/drivers/edac/octeon_edac-pc.c
@@ -0,0 +1,143 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#include <asm/octeon/cvmx.h>
+#include <asm/mipsregs.h>
+
+extern int register_co_cache_error_notifier(struct notifier_block *nb);
+extern int unregister_co_cache_error_notifier(struct notifier_block *nb);
+
+extern unsigned long long cache_err_dcache[NR_CPUS];
+
+struct co_cache_error {
+ struct notifier_block notifier;
+ struct edac_device_ctl_info *ed;
+};
+
+/**
+ * co_cache_error_event - EDAC CPU cache error callback
+ *
+ * @event: non-zero if unrecoverable.
+ */
+static int co_cache_error_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct co_cache_error *p = container_of(this, struct co_cache_error,
+ notifier);
+
+ unsigned int core = cvmx_get_core_num();
+ unsigned int cpu = smp_processor_id();
+ u64 icache_err = read_octeon_c0_icacheerr();
+ u64 dcache_err;
+
+ if (event) {
+ dcache_err = cache_err_dcache[core];
+ cache_err_dcache[core] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ if (icache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Icache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)icache_err, core, cpu,
+ read_c0_errorepc());
+ write_octeon_c0_icacheerr(0);
+ edac_device_handle_ce(p->ed, cpu, 1, "icache");
+ }
+ if (dcache_err & 1) {
+ edac_device_printk(p->ed, KERN_ERR,
+ "CacheErr (Dcache):%llx, core %d/cpu %d, cp0_errorepc == %lx\n",
+ (unsigned long long)dcache_err, core, cpu,
+ read_c0_errorepc());
+ if (event)
+ edac_device_handle_ue(p->ed, cpu, 0, "dcache");
+ else
+ edac_device_handle_ce(p->ed, cpu, 0, "dcache");
+
+ /* Clear the error indication */
+ if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+ write_octeon_c0_dcacheerr(1);
+ else
+ write_octeon_c0_dcacheerr(0);
+ }
+
+ return NOTIFY_STOP;
+}
+
+static int co_cache_error_probe(struct platform_device *pdev)
+{
+ struct co_cache_error *p = devm_kzalloc(&pdev->dev, sizeof(*p),
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->notifier.notifier_call = co_cache_error_event;
+ platform_set_drvdata(pdev, p);
+
+ p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
+ "cache", 2, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!p->ed)
+ goto err;
+
+ p->ed->dev = &pdev->dev;
+
+ p->ed->dev_name = dev_name(&pdev->dev);
+
+ p->ed->mod_name = "octeon-cpu";
+ p->ed->ctl_name = "cache";
+
+ if (edac_device_add_device(p->ed)) {
+ pr_err("%s: edac_device_add_device() failed\n", __func__);
+ goto err1;
+ }
+
+ register_co_cache_error_notifier(&p->notifier);
+
+ return 0;
+
+err1:
+ edac_device_free_ctl_info(p->ed);
+err:
+ return -ENXIO;
+}
+
+static int co_cache_error_remove(struct platform_device *pdev)
+{
+ struct co_cache_error *p = platform_get_drvdata(pdev);
+
+ unregister_co_cache_error_notifier(&p->notifier);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(p->ed);
+ return 0;
+}
+
+static struct platform_driver co_cache_error_driver = {
+ .probe = co_cache_error_probe,
+ .remove = co_cache_error_remove,
+ .driver = {
+ .name = "octeon_pc_edac",
+ }
+};
+module_platform_driver(co_cache_error_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
new file mode 100644
index 000000000000..9ca73cec74e7
--- /dev/null
+++ b/drivers/edac/octeon_edac-pci.c
@@ -0,0 +1,111 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Cavium, Inc.
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+#include <asm/octeon/octeon.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
+{
+ union cvmx_pci_cfg01 cfg01;
+
+ cfg01.u32 = octeon_npi_read32(CVMX_NPI_PCI_CFG01);
+ if (cfg01.s.dpe) { /* Detected parity error */
+ edac_pci_handle_pe(pci, pci->ctl_name);
+ cfg01.s.dpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sse) {
+ edac_pci_handle_npe(pci, "Signaled System Error");
+ cfg01.s.sse = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rma) {
+ edac_pci_handle_npe(pci, "Received Master Abort");
+ cfg01.s.rma = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.rta) {
+ edac_pci_handle_npe(pci, "Received Target Abort");
+ cfg01.s.rta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.sta) {
+ edac_pci_handle_npe(pci, "Signaled Target Abort");
+ cfg01.s.sta = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+ if (cfg01.s.mdpe) {
+ edac_pci_handle_npe(pci, "Master Data Parity Error");
+ cfg01.s.mdpe = 1; /* Reset */
+ octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32);
+ }
+}
+
+static int octeon_pci_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ int res = -ENXIO; /* returned on the error path below */
+
+ pci = edac_pci_alloc_ctl_info(0, "octeon_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pci);
+ pci->dev_name = dev_name(&pdev->dev);
+
+ pci->mod_name = "octeon-pci";
+ pci->ctl_name = "octeon_pci_err";
+ pci->edac_check = octeon_pci_poll;
+
+ if (edac_pci_add_device(pci, 0) > 0) {
+ pr_err("%s: edac_pci_add_device() failed\n", __func__);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ edac_pci_free_ctl_info(pci);
+
+ return res;
+}
+
+static int octeon_pci_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ edac_pci_del_device(&pdev->dev);
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver octeon_pci_driver = {
+ .probe = octeon_pci_probe,
+ .remove = octeon_pci_remove,
+ .driver = {
+ .name = "octeon_pci_edac",
+ }
+};
+module_platform_driver(octeon_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 2d35b78ada3c..9c971b575530 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -188,8 +188,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
return 0;
}
-static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pasemi_edac_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
@@ -266,7 +266,7 @@ fail:
return -ENODEV;
}
-static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+static void pasemi_edac_remove(struct pci_dev *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
@@ -287,7 +287,7 @@ MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
static struct pci_driver pasemi_edac_driver = {
.name = MODULE_NAME,
.probe = pasemi_edac_probe,
- .remove = __devexit_p(pasemi_edac_remove),
+ .remove = pasemi_edac_remove,
.id_table = pasemi_edac_pci_tbl,
};
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index bf0957635991..ef6b7e08f485 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -838,8 +838,7 @@ ppc4xx_edac_isr(int irq, void *dev_id)
*
* Returns a device type width enumeration.
*/
-static enum dev_type __devinit
-ppc4xx_edac_get_dtype(u32 mcopt1)
+static enum dev_type ppc4xx_edac_get_dtype(u32 mcopt1)
{
switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) {
case SDRAM_MCOPT1_WDTH_16:
@@ -862,8 +861,7 @@ ppc4xx_edac_get_dtype(u32 mcopt1)
*
* Returns a memory type enumeration.
*/
-static enum mem_type __devinit
-ppc4xx_edac_get_mtype(u32 mcopt1)
+static enum mem_type ppc4xx_edac_get_mtype(u32 mcopt1)
{
bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN);
@@ -893,8 +891,7 @@ ppc4xx_edac_get_mtype(u32 mcopt1)
* Returns 0 if OK; otherwise, -EINVAL if the memory bank size
* configuration cannot be determined.
*/
-static int __devinit
-ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
{
const struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
int status = 0;
@@ -1011,11 +1008,9 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*
* Returns 0 if OK; otherwise, < 0 on error.
*/
-static int __devinit
-ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
- struct platform_device *op,
- const dcr_host_t *dcr_host,
- u32 mcopt1)
+static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
+ struct platform_device *op,
+ const dcr_host_t *dcr_host, u32 mcopt1)
{
int status = 0;
const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
@@ -1105,8 +1100,8 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
* Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be
* mapped and assigned.
*/
-static int __devinit
-ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
+static int ppc4xx_edac_register_irq(struct platform_device *op,
+ struct mem_ctl_info *mci)
{
int status = 0;
int ded_irq, sec_irq;
@@ -1183,8 +1178,8 @@ ppc4xx_edac_register_irq(struct platform_device *op, struct mem_ctl_info *mci)
* Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on
* error.
*/
-static int __devinit
-ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
+static int ppc4xx_edac_map_dcrs(const struct device_node *np,
+ dcr_host_t *dcr_host)
{
unsigned int dcr_base, dcr_len;
@@ -1232,7 +1227,7 @@ ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host)
* Returns 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int __devinit ppc4xx_edac_probe(struct platform_device *op)
+static int ppc4xx_edac_probe(struct platform_device *op)
{
int status = 0;
u32 mcopt1, memcheck;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index f854debd5533..2fd6a5490905 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -359,8 +359,8 @@ fail:
}
/* returns count (>= 0), or negative on error */
-static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int r82600_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
edac_dbg(0, "\n");
@@ -368,7 +368,7 @@ static int __devinit r82600_init_one(struct pci_dev *pdev,
return r82600_probe1(pdev, ent->driver_data);
}
-static void __devexit r82600_remove_one(struct pci_dev *pdev)
+static void r82600_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -397,7 +397,7 @@ MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
static struct pci_driver r82600_driver = {
.name = EDAC_MOD_STR,
.probe = r82600_init_one,
- .remove = __devexit_p(r82600_remove_one),
+ .remove = r82600_remove_one,
.id_table = r82600_pci_tbl,
};
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 9ad24242ea18..57244f995614 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1692,8 +1692,7 @@ fail0:
* < 0 for error code
*/
-static int __devinit sbridge_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc;
u8 mc, num_mc = 0;
@@ -1744,7 +1743,7 @@ fail0:
* sbridge_remove destructor for one instance of device
*
*/
-static void __devexit sbridge_remove(struct pci_dev *pdev)
+static void sbridge_remove(struct pci_dev *pdev)
{
struct sbridge_dev *sbridge_dev;
@@ -1785,7 +1784,7 @@ MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
static struct pci_driver sbridge_driver = {
.name = "sbridge_edac",
.probe = sbridge_probe,
- .remove = __devexit_p(sbridge_remove),
+ .remove = sbridge_remove,
.id_table = sbridge_pci_tbl,
};
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 1e904b7b79a0..a0820536b7d9 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -82,7 +82,7 @@ static void tile_edac_check(struct mem_ctl_info *mci)
* Initialize the 'csrows' table within the mci control structure with the
* addressing of memory.
*/
-static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
+static int tile_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
@@ -120,7 +120,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
return 0;
}
-static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
+static int tile_edac_mc_probe(struct platform_device *pdev)
{
char hv_file[32];
int hv_devhdl;
@@ -186,7 +186,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
+static int tile_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
@@ -202,7 +202,7 @@ static struct platform_driver tile_edac_mc_driver = {
.owner = THIS_MODULE,
},
.probe = tile_edac_mc_probe,
- .remove = __devexit_p(tile_edac_mc_remove),
+ .remove = tile_edac_mc_remove,
};
/*
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 08a992693e62..c9db24d95caa 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -418,8 +418,7 @@ fail:
return rc;
}
-static int __devinit x38_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
@@ -435,7 +434,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev,
return rc;
}
-static void __devexit x38_remove_one(struct pci_dev *pdev)
+static void x38_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
@@ -464,7 +463,7 @@ MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
static struct pci_driver x38_driver = {
.name = EDAC_MOD_STR,
.probe = x38_init_one,
- .remove = __devexit_p(x38_remove_one),
+ .remove = x38_remove_one,
.id_table = x38_pci_tbl,
};
diff --git a/drivers/eisa/eisa.ids b/drivers/eisa/eisa.ids
index 6cbb7a514436..2d864b48a48f 100644
--- a/drivers/eisa/eisa.ids
+++ b/drivers/eisa/eisa.ids
@@ -985,8 +985,8 @@ ISABD00 "Racal-Interlan NP600A Ethernet 16bit"
ISABD02 "Racal-Interlan NI5210/8 Ethernet"
ISABD03 "Racal-Interlan NI5210/16 Ethernet"
ISABE00 "Qua Tech PXB-1608 Parallel Expansion Board"
-ISABE01 "Qua Tech ES-100 8 Channel Asyncronous"
-ISABE02 "Qua Tech QS-100M 4 Channel Asyncronous"
+ISABE01 "Qua Tech ES-100 8 Channel Asynchronous"
+ISABE02 "Qua Tech QS-100M 4 Channel Asynchronous"
ISABE03 "Qua Tech MXI-100 IEEE 488 GPIB"
ISABE04 "Qua Tech DS-201 Dual Channel RS-422"
ISABE05 "Qua Tech PXB-721 Parallel Expansion"
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index e87196f6d2d2..eda2a1aa4adb 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -91,7 +91,7 @@ static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
return IRQ_HANDLED;
}
-static int __devinit adc_jack_probe(struct platform_device *pdev)
+static int adc_jack_probe(struct platform_device *pdev)
{
struct adc_jack_data *data;
struct adc_jack_pdata *pdata = pdev->dev.platform_data;
@@ -175,7 +175,7 @@ out:
return err;
}
-static int __devexit adc_jack_remove(struct platform_device *pdev)
+static int adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
@@ -188,7 +188,7 @@ static int __devexit adc_jack_remove(struct platform_device *pdev)
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
- .remove = __devexit_p(adc_jack_remove),
+ .remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
.owner = THIS_MODULE,
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index cdab9e598297..414aed50b1bc 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -166,6 +166,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+ mutex_unlock(&info->lock);
return IRQ_NONE;
}
@@ -337,7 +338,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit arizona_extcon_probe(struct platform_device *pdev)
+static int arizona_extcon_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata;
@@ -517,7 +518,7 @@ err:
return ret;
}
-static int __devexit arizona_extcon_remove(struct platform_device *pdev)
+static int arizona_extcon_remove(struct platform_device *pdev)
{
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
@@ -544,7 +545,7 @@ static struct platform_driver arizona_extcon_driver = {
.owner = THIS_MODULE,
},
.probe = arizona_extcon_probe,
- .remove = __devexit_p(arizona_extcon_remove),
+ .remove = arizona_extcon_remove,
};
module_platform_driver(arizona_extcon_driver);
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index d398821097f3..60adc04b0561 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -472,7 +472,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
if (obj->cable_index < 0)
- return -ENODEV;
+ return obj->cable_index;
obj->user_nb = nb;
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 71d3ab7b3d8d..1b14bfcdc176 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -76,7 +76,7 @@ static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
return -EINVAL;
}
-static int __devinit gpio_extcon_probe(struct platform_device *pdev)
+static int gpio_extcon_probe(struct platform_device *pdev)
{
struct gpio_extcon_platform_data *pdata = pdev->dev.platform_data;
struct gpio_extcon_data *extcon_data;
@@ -137,7 +137,7 @@ err:
return ret;
}
-static int __devexit gpio_extcon_remove(struct platform_device *pdev)
+static int gpio_extcon_remove(struct platform_device *pdev)
{
struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
@@ -150,7 +150,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
static struct platform_driver gpio_extcon_driver = {
.probe = gpio_extcon_probe,
- .remove = __devexit_p(gpio_extcon_remove),
+ .remove = gpio_extcon_remove,
.driver = {
.name = "extcon-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index a17d0d91ada2..8c17b65eb74d 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -648,7 +648,7 @@ out:
return ret;
}
-static int __devinit max77693_muic_probe(struct platform_device *pdev)
+static int max77693_muic_probe(struct platform_device *pdev)
{
struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
@@ -657,17 +657,17 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
int ret, i;
u8 id;
- info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
info->max77693 = max77693;
- if (info->max77693->regmap_muic)
+ if (info->max77693->regmap_muic) {
dev_dbg(&pdev->dev, "allocate register map\n");
- else {
+ } else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
info->max77693->muic,
&max77693_muic_regmap_config);
@@ -675,7 +675,7 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
ret = PTR_ERR(info->max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ return ret;
}
}
platform_set_drvdata(pdev, info);
@@ -686,11 +686,13 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max77693_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
ret = request_threaded_irq(virq, NULL,
@@ -702,14 +704,13 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
" error :%d)\n",
muic_irq->irq, ret);
- for (i = i - 1; i >= 0; i--)
- free_irq(muic_irq->virq, info);
goto err_irq;
}
}
/* Initialize extcon device */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -720,7 +721,7 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize MUIC register by using platform data */
@@ -753,7 +754,7 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_extcon;
+ goto err_irq;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
@@ -765,16 +766,13 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
-err_regmap:
- kfree(info);
-err_kfree:
+ while (--i >= 0)
+ free_irq(muic_irqs[i].virq, info);
return ret;
}
-static int __devexit max77693_muic_remove(struct platform_device *pdev)
+static int max77693_muic_remove(struct platform_device *pdev)
{
struct max77693_muic_info *info = platform_get_drvdata(pdev);
int i;
@@ -783,8 +781,6 @@ static int __devexit max77693_muic_remove(struct platform_device *pdev)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
return 0;
}
@@ -795,7 +791,7 @@ static struct platform_driver max77693_muic_driver = {
.owner = THIS_MODULE,
},
.probe = max77693_muic_probe,
- .remove = __devexit_p(max77693_muic_remove),
+ .remove = max77693_muic_remove,
};
module_platform_driver(max77693_muic_driver);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 77b66b0cc8f5..93009fe6ef05 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,7 +1,7 @@
/*
* extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Donggeun Kim <dg77.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -426,18 +426,18 @@ static void max8997_muic_detect_dev(struct max8997_muic_info *info)
max8997_muic_handle_charger_type(info, chg_type);
}
-static int __devinit max8997_muic_probe(struct platform_device *pdev)
+static int max8997_muic_probe(struct platform_device *pdev)
{
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
int ret, i;
- info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
@@ -450,14 +450,16 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL,max8997_muic_irq_handler,
+ ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
0, muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
@@ -469,7 +471,8 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
}
/* External connector */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -480,7 +483,7 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize registers according to platform data */
@@ -498,17 +501,13 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
- kfree(info);
-err_kfree:
return ret;
}
-static int __devexit max8997_muic_remove(struct platform_device *pdev)
+static int max8997_muic_remove(struct platform_device *pdev)
{
struct max8997_muic_info *info = platform_get_drvdata(pdev);
int i;
@@ -519,9 +518,6 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
-
return 0;
}
@@ -531,7 +527,7 @@ static struct platform_driver max8997_muic_driver = {
.owner = THIS_MODULE,
},
.probe = max8997_muic_probe,
- .remove = __devexit_p(max8997_muic_remove),
+ .remove = max8997_muic_remove,
};
module_platform_driver(max8997_muic_driver);
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index a9a347adb353..2cc89ce745c9 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -149,10 +149,10 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
- /* Accept asyncronous transfer requests from all nodes for now */
+ /* Accept asynchronous transfer requests from all nodes for now */
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
- /* Specify asyncronous transfer retries */
+ /* Specify asynchronous transfer retries */
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES<<4) |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 08c674957af8..e7a711f53a6f 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -828,7 +828,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
- struct fw_card *card;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
@@ -840,7 +839,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
unsigned long flags;
dev = data;
- card = dev->card;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
@@ -861,8 +859,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length,
- source_node_id, -1, true);
+ fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ context->card->generation, true);
}
packet.payload_length = dev->rcv_buffer_size;
@@ -958,7 +956,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
break;
}
- skb_pull(skb, ptask->max_payload);
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ skb_pull(skb,
+ ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
+ } else {
+ skb_pull(skb, ptask->max_payload);
+ }
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
dg_size, fg_off, datagram_label);
@@ -1062,7 +1065,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
smp_rmb();
node_id = dev->card->node_id;
- p = skb_push(ptask->skb, 8);
+ p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
| RFC2734_SW_VERSION, &p[4]);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 4ebfb2273672..76b2d390f6ec 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -529,7 +529,7 @@ remove_card(struct pci_dev *dev)
#define RCV_BUFFER_SIZE (16 * 1024)
-static int __devinit
+static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
struct pcilynx *lynx;
@@ -683,7 +683,7 @@ fail_disable:
return ret;
}
-static struct pci_device_id pci_table[] __devinitdata = {
+static struct pci_device_id pci_table[] = {
{
.vendor = PCI_VENDOR_ID_TI,
.device = PCI_DEVICE_ID_TI_PCILYNX,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 834e71d2324d..6ce6e07c38c1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1281,7 +1281,7 @@ static int at_context_queue_packet(struct context *ctx,
d[0].res_count = cpu_to_le16(packet->timestamp);
/*
- * The DMA format for asyncronous link packets is different
+ * The DMA format for asynchronous link packets is different
* from the IEEE1394 layout, so shift the fields around
* accordingly.
*/
@@ -3537,7 +3537,7 @@ static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
-static int __devinit pci_probe(struct pci_dev *dev,
+static int pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index bb1b392f5cda..1162d6b3bf85 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1546,8 +1546,6 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index ea5ac2dc1233..8e77c02edb24 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -537,7 +537,7 @@ static struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
};
-static int __devinit dcdbas_probe(struct platform_device *dev)
+static int dcdbas_probe(struct platform_device *dev)
{
int i, error;
@@ -575,7 +575,7 @@ static int __devinit dcdbas_probe(struct platform_device *dev)
return 0;
}
-static int __devexit dcdbas_remove(struct platform_device *dev)
+static int dcdbas_remove(struct platform_device *dev)
{
int i;
@@ -593,7 +593,7 @@ static struct platform_driver dcdbas_driver = {
.owner = THIS_MODULE,
},
.probe = dcdbas_probe,
- .remove = __devexit_p(dcdbas_remove),
+ .remove = dcdbas_remove,
};
/**
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index b298158cb922..982f1f5f5742 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -16,6 +16,7 @@
*/
static char dmi_empty_string[] = " ";
+static u16 __initdata dmi_ver;
/*
* Catch too early calls to dmi_check_system():
*/
@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
return 0;
}
-static int __init dmi_checksum(const u8 *buf)
+static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
- for (a = 0; a < 15; a++)
+ for (a = 0; a < len; a++)
sum += buf[a];
return sum == 0;
@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
return;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
- if(d[i] != 0x00) is_ff = 0;
- if(d[i] != 0xFF) is_00 = 0;
+ if (d[i] != 0x00)
+ is_00 = 0;
+ if (d[i] != 0xFF)
+ is_ff = 0;
}
if (is_ff || is_00)
@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
if (!s)
return;
- sprintf(s, "%pUB", d);
+ /*
+ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+ * the UUID are supposed to be little-endian encoded. The specification
+ * says that this is the de facto standard.
+ */
+ if (dmi_ver >= 0x0206)
+ sprintf(s, "%pUL", d);
+ else
+ sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
@@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p)
u8 buf[15];
memcpy_fromio(buf, p, 15);
- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ if (dmi_checksum(buf, 15)) {
dmi_num = (buf[13] << 8) | buf[12];
dmi_len = (buf[7] << 8) | buf[6];
dmi_base = (buf[11] << 24) | (buf[10] << 16) |
(buf[9] << 8) | buf[8];
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if (buf[14] != 0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14] >> 4, buf[14] & 0xF);
- else
- printk(KERN_INFO "DMI present.\n");
if (dmi_walk_early(dmi_decode) == 0) {
+ if (dmi_ver)
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ else {
+ dmi_ver = (buf[14] & 0xF0) << 4 |
+ (buf[14] & 0x0F);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF);
+ }
dmi_dump_ids();
return 0;
}
}
+ dmi_ver = 0;
return 1;
}
+static int __init smbios_present(const char __iomem *p)
+{
+ u8 buf[32];
+ int offset = 0;
+
+ memcpy_fromio(buf, p, 32);
+ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+ dmi_ver = (buf[6] << 8) + buf[7];
+
+ /* Some BIOS report weird SMBIOS version, fix that up */
+ switch (dmi_ver) {
+ case 0x021F:
+ case 0x0221:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
+ dmi_ver & 0xFF, 3);
+ dmi_ver = 0x0203;
+ break;
+ case 0x0233:
+ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
+ dmi_ver = 0x0206;
+ break;
+ }
+ offset = 16;
+ }
+ return dmi_present(buf + offset);
+}
+
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
int rc;
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
if (p == NULL)
goto error;
- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+ rc = smbios_present(p);
dmi_iounmap(p, 32);
if (!rc) {
dmi_available = 1;
@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
goto error;
for (q = p; q < p + 0x10000; q += 16) {
- rc = dmi_present(q);
+ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
+ rc = smbios_present(q);
+ else if (memcmp(q, "_DMI_", 5) == 0)
+ rc = dmi_present(q);
+ else
+ continue;
if (!rc) {
dmi_available = 1;
dmi_iounmap(p, 0x10000);
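Editor's note: in the version fixups above, dmi_ver packs the SMBIOS major version in the high byte and the minor in the low byte, so a BIOS reporting 0x021F would otherwise print as "SMBIOS 2.31" (0x1F = 31) and 0x0233 as "2.51"; hence the remaps to 0x0203 and 0x0206. A stand-alone sketch of the arithmetic:

#include <stdio.h>

static void show(unsigned short ver)
{
	printf("SMBIOS %d.%d\n", ver >> 8, ver & 0xFF);
}

int main(void)
{
	show(0x021F);	/* quirky BIOS value -> "SMBIOS 2.31" */
	show(0x0203);	/* after fixup       -> "SMBIOS 2.3"  */
	show(0x0233);	/* quirky BIOS value -> "SMBIOS 2.51" */
	show(0x0206);	/* after fixup       -> "SMBIOS 2.6"  */
	return 0;
}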
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d10c9873dd9a..f5596db0cf58 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -80,6 +80,10 @@
#include <linux/slab.h>
#include <linux/pstore.h>
+#include <linux/fs.h>
+#include <linux/ramfs.h>
+#include <linux/pagemap.h>
+
#include <asm/uaccess.h>
#define EFIVARS_VERSION "0.08"
@@ -93,6 +97,12 @@ MODULE_VERSION(EFIVARS_VERSION);
#define DUMP_NAME_LEN 52
/*
+ * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
+ * not including trailing NUL
+ */
+#define GUID_LEN 36
+
+/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
* space in each part of the structure,
@@ -108,7 +118,6 @@ struct efi_variable {
__u32 Attributes;
} __attribute__((packed));
-
struct efivar_entry {
struct efivars *efivars;
struct efi_variable var;
@@ -122,6 +131,9 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+static struct efivars __efivars;
+static struct efivar_operations ops;
+
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
@@ -629,14 +641,482 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
-static struct pstore_info efi_pstore_info;
-
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+static int efivarfs_file_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -EIO;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ssize_t efivarfs_file_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars;
+ efi_status_t status;
+ void *data;
+ u32 attributes;
+ struct inode *inode = file->f_mapping->host;
+ unsigned long datasize = count - sizeof(attributes);
+ unsigned long newdatasize;
+ u64 storage_size, remaining_size, max_size;
+ ssize_t bytes = 0;
+
+ if (count < sizeof(attributes))
+ return -EINVAL;
+
+ if (copy_from_user(&attributes, userbuf, sizeof(attributes)))
+ return -EFAULT;
+
+ if (attributes & ~(EFI_VARIABLE_MASK))
+ return -EINVAL;
+
+ efivars = var->efivars;
+
+ /*
+ * Ensure that the user can't allocate arbitrarily large
+ * amounts of memory. Pick a default size of 64K if
+ * QueryVariableInfo() isn't supported by the firmware.
+ */
+ spin_lock(&efivars->lock);
+
+ if (!efivars->ops->query_variable_info)
+ status = EFI_UNSUPPORTED;
+ else {
+ const struct efivar_operations *fops = efivars->ops;
+ status = fops->query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ }
+
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ if (status != EFI_UNSUPPORTED)
+ return efi_status_to_err(status);
+
+ remaining_size = 65536;
+ }
+
+ if (datasize > remaining_size)
+ return -ENOSPC;
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
+ bytes = -EFAULT;
+ goto out;
+ }
+
+ if (validate_var(&var->var, data, datasize) == false) {
+ bytes = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The lock here protects the get_variable call, the conditional
+ * set_variable call, and removal of the variable from the efivars
+ * list (in the case of an authenticated delete).
+ */
+ spin_lock(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ attributes, datasize,
+ data);
+
+ if (status != EFI_SUCCESS) {
+ spin_unlock(&efivars->lock);
+ kfree(data);
+
+ return efi_status_to_err(status);
+ }
+
+ bytes = count;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ newdatasize = 0;
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ NULL, &newdatasize,
+ NULL);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ spin_unlock(&efivars->lock);
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, newdatasize + sizeof(attributes));
+ mutex_unlock(&inode->i_mutex);
+
+ } else if (status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(inode);
+ d_delete(file->f_dentry);
+ dput(file->f_dentry);
+
+ } else {
+ spin_unlock(&efivars->lock);
+ pr_warn("efivarfs: inconsistent EFI variable implementation? "
+ "status = %lx\n", status);
+ }
+
+out:
+ kfree(data);
+
+ return bytes;
+}
+
+static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct efivar_entry *var = file->private_data;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+ unsigned long datasize = 0;
+ u32 attributes;
+ void *data;
+ ssize_t size = 0;
+
+ spin_lock(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize, NULL);
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock(&efivars->lock);
+ status = efivars->ops->get_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ &attributes, &datasize,
+ (data + sizeof(attributes)));
+ spin_unlock(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ size = efi_status_to_err(status);
+ goto out_free;
+ }
+
+ memcpy(data, &attributes, sizeof(attributes));
+ size = simple_read_from_buffer(userbuf, count, ppos,
+ data, datasize + sizeof(attributes));
+out_free:
+ kfree(data);
+
+ return size;
+}
+
+static void efivarfs_evict_inode(struct inode *inode)
+{
+ clear_inode(inode);
+}
+
+static const struct super_operations efivarfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .evict_inode = efivarfs_evict_inode,
+ .show_options = generic_show_options,
+};
+
+static struct super_block *efivarfs_sb;
+
+static const struct inode_operations efivarfs_dir_inode_operations;
+
+static const struct file_operations efivarfs_file_operations = {
+ .open = efivarfs_file_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .llseek = no_llseek,
+};
+
+static struct inode *efivarfs_get_inode(struct super_block *sb,
+ const struct inode *dir, int mode, dev_t dev)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_fop = &efivarfs_file_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &efivarfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ break;
+ }
+ }
+ return inode;
+}
+
+static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+{
+ guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
+ guid->b[1] = hex_to_bin(str[4]) << 4 | hex_to_bin(str[5]);
+ guid->b[2] = hex_to_bin(str[2]) << 4 | hex_to_bin(str[3]);
+ guid->b[3] = hex_to_bin(str[0]) << 4 | hex_to_bin(str[1]);
+ guid->b[4] = hex_to_bin(str[11]) << 4 | hex_to_bin(str[12]);
+ guid->b[5] = hex_to_bin(str[9]) << 4 | hex_to_bin(str[10]);
+ guid->b[6] = hex_to_bin(str[16]) << 4 | hex_to_bin(str[17]);
+ guid->b[7] = hex_to_bin(str[14]) << 4 | hex_to_bin(str[15]);
+ guid->b[8] = hex_to_bin(str[19]) << 4 | hex_to_bin(str[20]);
+ guid->b[9] = hex_to_bin(str[21]) << 4 | hex_to_bin(str[22]);
+ guid->b[10] = hex_to_bin(str[24]) << 4 | hex_to_bin(str[25]);
+ guid->b[11] = hex_to_bin(str[26]) << 4 | hex_to_bin(str[27]);
+ guid->b[12] = hex_to_bin(str[28]) << 4 | hex_to_bin(str[29]);
+ guid->b[13] = hex_to_bin(str[30]) << 4 | hex_to_bin(str[31]);
+ guid->b[14] = hex_to_bin(str[32]) << 4 | hex_to_bin(str[33]);
+ guid->b[15] = hex_to_bin(str[34]) << 4 | hex_to_bin(str[35]);
+}
+
+static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ struct inode *inode;
+ struct efivars *efivars = &__efivars;
+ struct efivar_entry *var;
+ int namelen, i = 0, err = 0;
+
+ /*
+ * We need a GUID, plus at least one letter for the variable name,
+ * plus the '-' separator
+ */
+ if (dentry->d_name.len < GUID_LEN + 2)
+ return -EINVAL;
+
+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+ if (!inode)
+ return -ENOMEM;
+
+ var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+ if (!var) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* length of the variable name itself: remove GUID and separator */
+ namelen = dentry->d_name.len - GUID_LEN - 1;
+
+ efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ &var->var.VendorGuid);
+
+ for (i = 0; i < namelen; i++)
+ var->var.VariableName[i] = dentry->d_name.name[i];
+
+ var->var.VariableName[i] = '\0';
+
+ inode->i_private = var;
+ var->efivars = efivars;
+ var->kobj.kset = efivars->kset;
+
+ err = kobject_init_and_add(&var->kobj, &efivar_ktype, NULL, "%s",
+ dentry->d_name.name);
+ if (err)
+ goto out;
+
+ kobject_uevent(&var->kobj, KOBJ_ADD);
+ spin_lock(&efivars->lock);
+ list_add(&var->list, &efivars->list);
+ spin_unlock(&efivars->lock);
+ d_instantiate(dentry, inode);
+ dget(dentry);
+out:
+ if (err) {
+ kfree(var);
+ iput(inode);
+ }
+ return err;
+}
+
+static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct efivar_entry *var = dentry->d_inode->i_private;
+ struct efivars *efivars = var->efivars;
+ efi_status_t status;
+
+ spin_lock(&efivars->lock);
+
+ status = efivars->ops->set_variable(var->var.VariableName,
+ &var->var.VendorGuid,
+ 0, 0, NULL);
+
+ if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) {
+ list_del(&var->list);
+ spin_unlock(&efivars->lock);
+ efivar_unregister(var);
+ drop_nlink(dentry->d_inode);
+ dput(dentry);
+ return 0;
+ }
+
+ spin_unlock(&efivars->lock);
+ return -EINVAL;
+};
+
+static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode = NULL;
+ struct dentry *root;
+ struct efivar_entry *entry, *n;
+ struct efivars *efivars = &__efivars;
+ char *name;
+
+ efivarfs_sb = sb;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = EFIVARFS_MAGIC;
+ sb->s_op = &efivarfs_ops;
+ sb->s_time_gran = 1;
+
+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_op = &efivarfs_dir_inode_operations;
+
+ root = d_make_root(inode);
+ sb->s_root = root;
+ if (!root)
+ return -ENOMEM;
+
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+ struct dentry *dentry, *root = efivarfs_sb->s_root;
+ unsigned long size = 0;
+ int len, i;
+
+ inode = NULL;
+
+ len = utf16_strlen(entry->var.VariableName);
+
+ /* name, plus '-', plus GUID, plus NUL */
+ name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
+ if (!name)
+ goto fail;
+
+ for (i = 0; i < len; i++)
+ name[i] = entry->var.VariableName[i] & 0xFF;
+
+ name[len] = '-';
+
+ efi_guid_unparse(&entry->var.VendorGuid, name + len + 1);
+
+ name[len + GUID_LEN + 1] = '\0';
+
+ inode = efivarfs_get_inode(efivarfs_sb, root->d_inode,
+ S_IFREG | 0644, 0);
+ if (!inode)
+ goto fail_name;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_inode;
+
+ /* copied by the above to local storage in the dentry. */
+ kfree(name);
+
+ spin_lock(&efivars->lock);
+ efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ &entry->var.Attributes,
+ &size,
+ NULL);
+ spin_unlock(&efivars->lock);
+
+ mutex_lock(&inode->i_mutex);
+ inode->i_private = entry;
+ i_size_write(inode, size + 4); /* data plus leading 32-bit attributes word */
+ mutex_unlock(&inode->i_mutex);
+ d_add(dentry, inode);
+ }
+
+ return 0;
+
+fail_inode:
+ iput(inode);
+fail_name:
+ kfree(name);
+fail:
+ return -ENOMEM;
+}
+
+static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_single(fs_type, flags, data, efivarfs_fill_super);
+}
+
+static void efivarfs_kill_sb(struct super_block *sb)
+{
+ kill_litter_super(sb);
+ efivarfs_sb = NULL;
+}
+
+static struct file_system_type efivarfs_type = {
+ .name = "efivarfs",
+ .mount = efivarfs_mount,
+ .kill_sb = efivarfs_kill_sb,
+};
+
+static const struct inode_operations efivarfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = efivarfs_unlink,
+ .create = efivarfs_create,
+};
+
+static struct pstore_info efi_pstore_info;
+
#ifdef CONFIG_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
@@ -658,13 +1138,14 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec,
+ int *count, struct timespec *timespec,
char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
char name[DUMP_NAME_LEN];
int i;
+ int cnt;
unsigned int part, size;
unsigned long time;
@@ -674,21 +1155,41 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
for (i = 0; i < DUMP_NAME_LEN; i++) {
name[i] = efivars->walk_entry->var.VariableName[i];
}
- if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
+ if (sscanf(name, "dump-type%u-%u-%d-%lu",
+ type, &part, &cnt, &time) == 4) {
*id = part;
+ *count = cnt;
timespec->tv_sec = time;
timespec->tv_nsec = 0;
- get_var_data_locked(efivars, &efivars->walk_entry->var);
- size = efivars->walk_entry->var.DataSize;
- *buf = kmalloc(size, GFP_KERNEL);
- if (*buf == NULL)
- return -ENOMEM;
- memcpy(*buf, efivars->walk_entry->var.Data,
- size);
- efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
- struct efivar_entry, list);
- return size;
+ } else if (sscanf(name, "dump-type%u-%u-%lu",
+ type, &part, &time) == 3) {
+ /*
+ * Check whether an entry in the old
+ * format, which cannot hold multiple
+ * logs, remains.
+ */
+ *id = part;
+ *count = 0;
+ timespec->tv_sec = time;
+ timespec->tv_nsec = 0;
+ } else {
+ efivars->walk_entry = list_entry(
+ efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ continue;
}
+
+ get_var_data_locked(efivars, &efivars->walk_entry->var);
+ size = efivars->walk_entry->var.DataSize;
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
+ efivars->walk_entry = list_entry(
+ efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ return size;
}
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
@@ -698,26 +1199,77 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, size_t size, struct pstore_info *psi)
+ unsigned int part, int count, size_t size,
+ struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
- char stub_name[DUMP_NAME_LEN];
efi_char16_t efi_name[DUMP_NAME_LEN];
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
- struct efivar_entry *entry, *found = NULL;
int i, ret = 0;
+ u64 storage_space, remaining_space, max_variable_size;
+ efi_status_t status = EFI_NOT_FOUND;
- sprintf(stub_name, "dump-type%u-%u-", type, part);
- sprintf(name, "%s%lu", stub_name, get_seconds());
+ spin_lock(&efivars->lock);
+
+ /*
+ * Check if there is enough space to log.
+ * size: the size of the logging data
+ * DUMP_NAME_LEN * 2: the maximum size of the variable name
+ */
+ status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES,
+ &storage_space,
+ &remaining_space,
+ &max_variable_size);
+ if (status || remaining_space < size + DUMP_NAME_LEN * 2) {
+ spin_unlock(&efivars->lock);
+ *id = part;
+ return -ENOSPC;
+ }
+
+ sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count,
+ get_seconds());
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+ spin_unlock(&efivars->lock);
+
+ if (size)
+ ret = efivar_create_sysfs_entry(efivars,
+ utf16_strsize(efi_name,
+ DUMP_NAME_LEN * 2),
+ efi_name, &vendor);
+
+ *id = part;
+ return ret;
+};
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi)
+{
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ char name_old[DUMP_NAME_LEN];
+ efi_char16_t efi_name_old[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i;
+
+ sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
+ time.tv_sec);
spin_lock(&efivars->lock);
for (i = 0; i < DUMP_NAME_LEN; i++)
- efi_name[i] = stub_name[i];
+ efi_name[i] = name[i];
/*
- * Clean up any entries with the same name
+ * Clean up an entry with the same name
*/
list_for_each_entry(entry, &efivars->list, list) {
@@ -726,11 +1278,22 @@ static int efi_pstore_write(enum pstore_type_id type,
if (efi_guidcmp(entry->var.VendorGuid, vendor))
continue;
if (utf16_strncmp(entry->var.VariableName, efi_name,
- utf16_strlen(efi_name)))
- continue;
- /* Needs to be a prefix */
- if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
- continue;
+ utf16_strlen(efi_name))) {
+ /*
+ * Check whether an entry in the
+ * old format, which cannot hold
+ * multiple logs, remains.
+ */
+ sprintf(name_old, "dump-type%u-%u-%lu", type,
+ (unsigned int)id, time.tv_sec);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name_old[i] = name_old[i];
+
+ if (utf16_strncmp(entry->var.VariableName, efi_name_old,
+ utf16_strlen(efi_name_old)))
+ continue;
+ }
/* found */
found = entry;
@@ -738,37 +1301,17 @@ static int efi_pstore_write(enum pstore_type_id type,
&entry->var.VendorGuid,
PSTORE_EFI_ATTRIBUTES,
0, NULL);
+ break;
}
if (found)
list_del(&found->list);
- for (i = 0; i < DUMP_NAME_LEN; i++)
- efi_name[i] = name[i];
-
- efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
- size, psi->buf);
-
spin_unlock(&efivars->lock);
if (found)
efivar_unregister(found);
- if (size)
- ret = efivar_create_sysfs_entry(efivars,
- utf16_strsize(efi_name,
- DUMP_NAME_LEN * 2),
- efi_name, &vendor);
-
- *id = part;
- return ret;
-};
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
- struct pstore_info *psi)
-{
- efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
-
return 0;
}
#else
@@ -782,7 +1325,7 @@ static int efi_pstore_close(struct pstore_info *psi)
return 0;
}
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *timespec,
char **buf, struct pstore_info *psi)
{
@@ -791,13 +1334,14 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, size_t size, struct pstore_info *psi)
+ unsigned int part, int count, size_t size,
+ struct pstore_info *psi)
{
return 0;
}
-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
- struct pstore_info *psi)
+static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi)
{
return 0;
}
@@ -1001,11 +1545,18 @@ efivar_create_sysfs_entry(struct efivars *efivars,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid)
{
- int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
+ int i, short_name_size;
char *short_name;
struct efivar_entry *new_efivar;
- short_name = kzalloc(short_name_size + 1, GFP_KERNEL);
+ /*
+ * Length of the variable name in ASCII, plus the '-' separator,
+ * plus the GUID, plus a trailing NUL
+ */
+ short_name_size = variable_name_size / sizeof(efi_char16_t)
+ + 1 + GUID_LEN + 1;
+
+ short_name = kzalloc(short_name_size, GFP_KERNEL);
new_efivar = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
if (!short_name || !new_efivar) {
@@ -1125,6 +1676,7 @@ void unregister_efivars(struct efivars *efivars)
sysfs_remove_bin_file(&efivars->kset->kobj, efivars->del_var);
kfree(efivars->new_var);
kfree(efivars->del_var);
+ kobject_put(efivars->kobject);
kset_unregister(efivars->kset);
}
EXPORT_SYMBOL_GPL(unregister_efivars);
@@ -1156,6 +1708,14 @@ int register_efivars(struct efivars *efivars,
goto out;
}
+ efivars->kobject = kobject_create_and_add("efivars", parent_kobj);
+ if (!efivars->kobject) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ error = -ENOMEM;
+ kset_unregister(efivars->kset);
+ goto out;
+ }
+
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
@@ -1198,6 +1758,8 @@ int register_efivars(struct efivars *efivars,
pstore_register(&efivars->efi_pstore_info);
}
+ register_filesystem(&efivarfs_type);
+
out:
kfree(variable_name);
@@ -1205,9 +1767,6 @@ out:
}
EXPORT_SYMBOL_GPL(register_efivars);
-static struct efivars __efivars;
-static struct efivar_operations ops;
-
/*
* For now we register the efi subsystem with the firmware subsystem
* and the vars subsystem with the efi subsystem. In the future, it
@@ -1224,7 +1783,7 @@ efivars_init(void)
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
EFIVARS_DATE);
- if (!efi_enabled)
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
/* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1237,6 +1796,8 @@ efivars_init(void)
ops.get_variable = efi.get_variable;
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
+ ops.query_variable_info = efi.query_variable_info;
+
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
goto err_put;
@@ -1262,7 +1823,7 @@ err_put:
static void __exit
efivars_exit(void)
{
- if (efi_enabled) {
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
unregister_efivars(&__efivars);
kobject_put(efi_kobj);
}
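
As a side note on the efivars hunks above: the pstore record name gains a per-record sequence count, so a single (type, part) pair can now hold several logs, while old three-field names are still parsed (with count forced to 0) so existing variables stay readable. Below is a minimal stand-alone sketch of that parsing logic, written as ordinary userspace C; the helper name and the sample variable name are illustrative only and not part of the patch.

/* Hypothetical userspace sketch of the dump-variable name parsing. */
#include <stdio.h>

static int parse_dump_name(const char *name, unsigned int *type,
			   unsigned int *part, int *count, unsigned long *time)
{
	/* New format: dump-type<type>-<part>-<count>-<timestamp> */
	if (sscanf(name, "dump-type%u-%u-%d-%lu", type, part, count, time) == 4)
		return 0;
	/* Old format without the count field; keep it readable. */
	if (sscanf(name, "dump-type%u-%u-%lu", type, part, time) == 3) {
		*count = 0;
		return 0;
	}
	return -1;	/* not a pstore dump variable */
}

int main(void)
{
	unsigned int type, part;
	int count;
	unsigned long time;

	if (!parse_dump_name("dump-type2-1-3-1356006625", &type, &part, &count, &time))
		printf("type=%u part=%u count=%d time=%lu\n", type, part, count, time);
	return 0;
}
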
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 4da4eb9ae926..2224f1dc074b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!efi_enabled)
+ if (!efi_enabled(EFI_BOOT))
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 47150f5ded04..682de754d63f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -49,6 +49,10 @@ config OF_GPIO
def_bool y
depends on OF
+config GPIO_ACPI
+ def_bool y
+ depends on ACPI
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
@@ -86,11 +90,26 @@ config GPIO_DA9052
help
Say yes here to enable the GPIO driver for the DA9052 chip.
+config GPIO_DA9055
+ tristate "Dialog Semiconductor DA9055 GPIO"
+ depends on MFD_DA9055
+ help
+ Say yes here to enable the GPIO driver for the DA9055 chip.
+
+ The Dialog DA9055 PMIC chip has 3 GPIO pins that can be
+ controlled by this driver.
+
+ If this driver is built as a module it will be called gpio-da9055.
+
config GPIO_MAX730X
tristate
comment "Memory mapped GPIO drivers:"
+config GPIO_CLPS711X
+ def_bool y
+ depends on ARCH_CLPS711X
+
config GPIO_GENERIC_PLATFORM
tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
select GPIO_GENERIC
@@ -152,7 +171,8 @@ config GPIO_MSM_V2
config GPIO_MVEBU
def_bool y
- depends on ARCH_MVEBU
+ depends on PLAT_ORION
+ depends on OF
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -170,7 +190,7 @@ config GPIO_MXS
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
- depends on ARM_AMBA
+ depends on ARM && ARM_AMBA
select GENERIC_IRQ_CHIP
help
Say yes here to support the PrimeCell PL061 GPIO device
@@ -181,6 +201,13 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_SPEAR_SPICS
+ bool "ST SPEAr13xx SPI Chip Select as GPIO support"
+ depends on PLAT_SPEAR
+ select GENERIC_IRQ_CHIP
+ help
+ Say yes here to support the ST SPEAr SPI Chip Select as a GPIO device
+
config GPIO_STA2X11
bool "STA2x11/ConneXt GPIO support"
depends on MFD_STA2X11
@@ -189,6 +216,14 @@ config GPIO_STA2X11
Say yes here to support the STA2x11/ConneXt GPIO device.
The GPIO module has 128 GPIO pins with alternate functions.
+config GPIO_TS5500
+ tristate "TS-5500 DIO blocks and compatibles"
+ help
+ This driver supports Digital I/O exposed by pin blocks found on some
+ Technologic Systems platforms. It includes, but is not limited to, 3
+ blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
+ LCD port.
+
config GPIO_VT8500
bool "VIA/Wondermedia SoC GPIO Support"
depends on ARCH_VT8500
@@ -470,7 +505,7 @@ config GPIO_ADNP
help
This option enables support for N GPIOs found on Avionic Design
I2C GPIO expanders. The register space will be extended by powers
- of two, so the controller will need to accomodate for that. For
+ of two, so the controller will need to accommodate for that. For
example: if a controller provides 48 pins, 6 registers will be
enough to represent all pins, but the driver will assume a
register layout for 64 pins (8 registers).
@@ -649,4 +684,17 @@ config GPIO_MSIC
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
+comment "USB GPIO expanders:"
+
+config GPIO_VIPERBOARD
+ tristate "Viperboard GPIO a & b support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the GPIO signals of Nano River
+ Technologies Viperboard. There are two GPIO chips on the
+ board: gpioa and gpiob.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 9aeed6707326..c5aebd008dde 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
@@ -16,8 +17,10 @@ obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
+obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
+obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
@@ -57,6 +60,7 @@ obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
@@ -68,9 +72,11 @@ obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VT8500) += gpio-vt8500.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index f05e54258ffb..464be961f605 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -105,7 +105,7 @@ static int gen_74x164_direction_output(struct gpio_chip *gc,
return 0;
}
-static int __devinit gen_74x164_probe(struct spi_device *spi)
+static int gen_74x164_probe(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
struct gen_74x164_chip_platform_data *pdata;
@@ -181,7 +181,7 @@ exit_destroy:
return ret;
}
-static int __devexit gen_74x164_remove(struct spi_device *spi)
+static int gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
int ret;
@@ -215,7 +215,7 @@ static struct spi_driver gen_74x164_driver = {
.of_match_table = of_match_ptr(gen_74x164_dt_ids),
},
.probe = gen_74x164_probe,
- .remove = __devexit_p(gen_74x164_remove),
+ .remove = gen_74x164_remove,
};
module_spi_driver(gen_74x164_driver);
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index 050c05d91896..983ad425f0ac 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -402,7 +402,7 @@ static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
}
}
-static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
+static int ab8500_gpio_probe(struct platform_device *pdev)
{
struct ab8500_platform_data *ab8500_pdata =
dev_get_platdata(pdev->dev.parent);
@@ -474,7 +474,7 @@ out_free:
* ab8500_gpio_remove() - remove Ab8500-gpio driver
* @pdev : Platform device registered
*/
-static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
+static int ab8500_gpio_remove(struct platform_device *pdev)
{
struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
int ret;
@@ -499,7 +499,7 @@ static struct platform_driver ab8500_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_gpio_probe,
- .remove = __devexit_p(ab8500_gpio_remove),
+ .remove = ab8500_gpio_remove,
};
static int __init ab8500_gpio_init(void)
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 3df88336415e..e60567fc5073 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -516,7 +516,7 @@ static void adnp_irq_teardown(struct adnp *adnp)
irq_domain_remove(adnp->domain);
}
-static __devinit int adnp_i2c_probe(struct i2c_client *client,
+static int adnp_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device_node *np = client->dev.of_node;
@@ -563,7 +563,7 @@ teardown:
return err;
}
-static __devexit int adnp_i2c_remove(struct i2c_client *client)
+static int adnp_i2c_remove(struct i2c_client *client)
{
struct adnp *adnp = i2c_get_clientdata(client);
struct device_node *np = client->dev.of_node;
@@ -582,13 +582,13 @@ static __devexit int adnp_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id adnp_i2c_id[] __devinitconst = {
+static const struct i2c_device_id adnp_i2c_id[] = {
{ "gpio-adnp" },
{ },
};
MODULE_DEVICE_TABLE(i2c, adnp_i2c_id);
-static const struct of_device_id adnp_of_match[] __devinitconst = {
+static const struct of_device_id adnp_of_match[] = {
{ .compatible = "ad,gpio-adnp", },
{ },
};
@@ -601,7 +601,7 @@ static struct i2c_driver adnp_i2c_driver = {
.of_match_table = of_match_ptr(adnp_of_match),
},
.probe = adnp_i2c_probe,
- .remove = __devexit_p(adnp_i2c_remove),
+ .remove = adnp_i2c_remove,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 2f263cc32561..8afa95f831b1 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -87,7 +87,7 @@ static int adp5520_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
+static int adp5520_gpio_probe(struct platform_device *pdev)
{
struct adp5520_gpio_platform_data *pdata = pdev->dev.platform_data;
struct adp5520_gpio *dev;
@@ -167,7 +167,7 @@ err:
return ret;
}
-static int __devexit adp5520_gpio_remove(struct platform_device *pdev)
+static int adp5520_gpio_remove(struct platform_device *pdev)
{
struct adp5520_gpio *dev;
int ret;
@@ -190,7 +190,7 @@ static struct platform_driver adp5520_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = adp5520_gpio_probe,
- .remove = __devexit_p(adp5520_gpio_remove),
+ .remove = adp5520_gpio_remove,
};
module_platform_driver(adp5520_gpio_driver);
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index eeedad42913e..2ba56987db04 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -346,7 +346,7 @@ static void adp5588_irq_teardown(struct adp5588_gpio *dev)
}
#endif /* CONFIG_GPIO_ADP5588_IRQ */
-static int __devinit adp5588_gpio_probe(struct i2c_client *client,
+static int adp5588_gpio_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
@@ -438,7 +438,7 @@ err:
return ret;
}
-static int __devexit adp5588_gpio_remove(struct i2c_client *client)
+static int adp5588_gpio_remove(struct i2c_client *client)
{
struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
struct adp5588_gpio *dev = i2c_get_clientdata(client);
@@ -479,7 +479,7 @@ static struct i2c_driver adp5588_gpio_driver = {
.name = DRV_NAME,
},
.probe = adp5588_gpio_probe,
- .remove = __devexit_p(adp5588_gpio_remove),
+ .remove = adp5588_gpio_remove,
.id_table = adp5588_gpio_id,
};
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 8740d2eb06f8..0ea853f68db2 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -94,7 +94,7 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int __devinit arizona_gpio_probe(struct platform_device *pdev)
+static int arizona_gpio_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata = arizona->dev->platform_data;
@@ -141,7 +141,7 @@ err:
return ret;
}
-static int __devexit arizona_gpio_remove(struct platform_device *pdev)
+static int arizona_gpio_remove(struct platform_device *pdev)
{
struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
@@ -152,7 +152,7 @@ static struct platform_driver arizona_gpio_driver = {
.driver.name = "arizona-gpio",
.driver.owner = THIS_MODULE,
.probe = arizona_gpio_probe,
- .remove = __devexit_p(arizona_gpio_remove),
+ .remove = arizona_gpio_remove,
};
module_platform_driver(arizona_gpio_driver);
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
new file mode 100644
index 000000000000..ce63b75b13f5
--- /dev/null
+++ b/drivers/gpio/gpio-clps711x.c
@@ -0,0 +1,199 @@
+/*
+ * CLPS711X GPIO driver
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <mach/hardware.h>
+
+#define CLPS711X_GPIO_PORTS 5
+#define CLPS711X_GPIO_NAME "gpio-clps711x"
+
+struct clps711x_gpio {
+ struct gpio_chip chip[CLPS711X_GPIO_PORTS];
+ spinlock_t lock;
+};
+
+static void __iomem *clps711x_ports[] = {
+ CLPS711X_VIRT_BASE + PADR,
+ CLPS711X_VIRT_BASE + PBDR,
+ CLPS711X_VIRT_BASE + PCDR,
+ CLPS711X_VIRT_BASE + PDDR,
+ CLPS711X_VIRT_BASE + PEDR,
+};
+
+static void __iomem *clps711x_pdirs[] = {
+ CLPS711X_VIRT_BASE + PADDR,
+ CLPS711X_VIRT_BASE + PBDDR,
+ CLPS711X_VIRT_BASE + PCDDR,
+ CLPS711X_VIRT_BASE + PDDDR,
+ CLPS711X_VIRT_BASE + PEDDR,
+};
+
+#define clps711x_port(x) clps711x_ports[x->base / 8]
+#define clps711x_pdir(x) clps711x_pdirs[x->base / 8]
+
+static int gpio_clps711x_get(struct gpio_chip *chip, unsigned offset)
+{
+ return !!(readb(clps711x_port(chip)) & (1 << offset));
+}
+
+static void gpio_clps711x_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ int tmp;
+ unsigned long flags;
+ struct clps711x_gpio *gpio = dev_get_drvdata(chip->dev);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ tmp = readb(clps711x_port(chip)) & ~(1 << offset);
+ if (value)
+ tmp |= 1 << offset;
+ writeb(tmp, clps711x_port(chip));
+ spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static int gpio_clps711x_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ int tmp;
+ unsigned long flags;
+ struct clps711x_gpio *gpio = dev_get_drvdata(chip->dev);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ tmp = readb(clps711x_pdir(chip)) & ~(1 << offset);
+ writeb(tmp, clps711x_pdir(chip));
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int gpio_clps711x_dir_out(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ int tmp;
+ unsigned long flags;
+ struct clps711x_gpio *gpio = dev_get_drvdata(chip->dev);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ tmp = readb(clps711x_pdir(chip)) | (1 << offset);
+ writeb(tmp, clps711x_pdir(chip));
+ tmp = readb(clps711x_port(chip)) & ~(1 << offset);
+ if (value)
+ tmp |= 1 << offset;
+ writeb(tmp, clps711x_port(chip));
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int gpio_clps711x_dir_in_inv(struct gpio_chip *chip, unsigned offset)
+{
+ int tmp;
+ unsigned long flags;
+ struct clps711x_gpio *gpio = dev_get_drvdata(chip->dev);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ tmp = readb(clps711x_pdir(chip)) | (1 << offset);
+ writeb(tmp, clps711x_pdir(chip));
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static int gpio_clps711x_dir_out_inv(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ int tmp;
+ unsigned long flags;
+ struct clps711x_gpio *gpio = dev_get_drvdata(chip->dev);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ tmp = readb(clps711x_pdir(chip)) & ~(1 << offset);
+ writeb(tmp, clps711x_pdir(chip));
+ tmp = readb(clps711x_port(chip)) & ~(1 << offset);
+ if (value)
+ tmp |= 1 << offset;
+ writeb(tmp, clps711x_port(chip));
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
+static struct {
+ char *name;
+ int nr;
+ int inv_dir;
+} clps711x_gpio_ports[] __initconst = {
+ { "PORTA", 8, 0, },
+ { "PORTB", 8, 0, },
+ { "PORTC", 8, 0, },
+ { "PORTD", 8, 1, },
+ { "PORTE", 3, 0, },
+};
+
+static int __init gpio_clps711x_init(void)
+{
+ int i;
+ struct platform_device *pdev;
+ struct clps711x_gpio *gpio;
+
+ pdev = platform_device_alloc(CLPS711X_GPIO_NAME, 0);
+ if (!pdev) {
+ pr_err("Cannot create platform device: %s\n",
+ CLPS711X_GPIO_NAME);
+ return -ENOMEM;
+ }
+
+ platform_device_add(pdev);
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(struct clps711x_gpio),
+ GFP_KERNEL);
+ if (!gpio) {
+ dev_err(&pdev->dev, "GPIO allocating memory error\n");
+ platform_device_unregister(pdev);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ spin_lock_init(&gpio->lock);
+
+ for (i = 0; i < CLPS711X_GPIO_PORTS; i++) {
+ gpio->chip[i].owner = THIS_MODULE;
+ gpio->chip[i].dev = &pdev->dev;
+ gpio->chip[i].label = clps711x_gpio_ports[i].name;
+ gpio->chip[i].base = i * 8;
+ gpio->chip[i].ngpio = clps711x_gpio_ports[i].nr;
+ gpio->chip[i].get = gpio_clps711x_get;
+ gpio->chip[i].set = gpio_clps711x_set;
+ if (!clps711x_gpio_ports[i].inv_dir) {
+ gpio->chip[i].direction_input = gpio_clps711x_dir_in;
+ gpio->chip[i].direction_output = gpio_clps711x_dir_out;
+ } else {
+ gpio->chip[i].direction_input = gpio_clps711x_dir_in_inv;
+ gpio->chip[i].direction_output = gpio_clps711x_dir_out_inv;
+ }
+ WARN_ON(gpiochip_add(&gpio->chip[i]));
+ }
+
+ dev_info(&pdev->dev, "GPIO driver initialized\n");
+
+ return 0;
+}
+arch_initcall(gpio_clps711x_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X GPIO driver");
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 19eda1bbe343..c0a3aeba6f21 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -300,7 +300,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
},
};
-static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
+static int cs5535_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
int err = -EIO;
@@ -355,7 +355,7 @@ done:
return err;
}
-static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
+static int cs5535_gpio_remove(struct platform_device *pdev)
{
struct resource *r;
int err;
@@ -378,7 +378,7 @@ static struct platform_driver cs5535_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = cs5535_gpio_probe,
- .remove = __devexit_p(cs5535_gpio_remove),
+ .remove = cs5535_gpio_remove,
};
module_platform_driver(cs5535_gpio_driver);
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 24b8c2974047..29b11e9b6a78 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -185,10 +185,14 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
struct da9052_gpio *gpio = to_da9052_gpio(gc);
struct da9052 *da9052 = gpio->da9052;
- return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+ int irq;
+
+ irq = regmap_irq_get_virq(da9052->irq_data, DA9052_IRQ_GPI0 + offset);
+
+ return irq;
}
-static struct gpio_chip reference_gp __devinitdata = {
+static struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
@@ -201,7 +205,7 @@ static struct gpio_chip reference_gp __devinitdata = {
.base = -1,
};
-static int __devinit da9052_gpio_probe(struct platform_device *pdev)
+static int da9052_gpio_probe(struct platform_device *pdev)
{
struct da9052_gpio *gpio;
struct da9052_pdata *pdata;
@@ -229,7 +233,7 @@ static int __devinit da9052_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit da9052_gpio_remove(struct platform_device *pdev)
+static int da9052_gpio_remove(struct platform_device *pdev)
{
struct da9052_gpio *gpio = platform_get_drvdata(pdev);
@@ -238,7 +242,7 @@ static int __devexit da9052_gpio_remove(struct platform_device *pdev)
static struct platform_driver da9052_gpio_driver = {
.probe = da9052_gpio_probe,
- .remove = __devexit_p(da9052_gpio_remove),
+ .remove = da9052_gpio_remove,
.driver = {
.name = "da9052-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
new file mode 100644
index 000000000000..fd6dfe382f13
--- /dev/null
+++ b/drivers/gpio/gpio-da9055.c
@@ -0,0 +1,204 @@
+/*
+ * GPIO Driver for Dialog DA9055 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+#include <linux/mfd/da9055/pdata.h>
+
+#define DA9055_VDD_IO 0x0
+#define DA9055_PUSH_PULL 0x3
+#define DA9055_ACT_LOW 0x0
+#define DA9055_GPI 0x1
+#define DA9055_PORT_MASK 0x3
+#define DA9055_PORT_SHIFT(offset) (4 * (offset % 2))
+
+#define DA9055_INPUT DA9055_GPI
+#define DA9055_OUTPUT DA9055_PUSH_PULL
+#define DA9055_IRQ_GPI0 3
+
+struct da9055_gpio {
+ struct da9055 *da9055;
+ struct gpio_chip gp;
+};
+
+static inline struct da9055_gpio *to_da9055_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct da9055_gpio, gp);
+}
+
+static int da9055_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9055_gpio *gpio = to_da9055_gpio(gc);
+ int gpio_direction = 0;
+ int ret;
+
+ /* Get GPIO direction */
+ ret = da9055_reg_read(gpio->da9055, (offset >> 1) + DA9055_REG_GPIO0_1);
+ if (ret < 0)
+ return ret;
+
+ gpio_direction = ret & (DA9055_PORT_MASK) << DA9055_PORT_SHIFT(offset);
+ gpio_direction >>= DA9055_PORT_SHIFT(offset);
+ switch (gpio_direction) {
+ case DA9055_INPUT:
+ ret = da9055_reg_read(gpio->da9055, DA9055_REG_STATUS_B);
+ if (ret < 0)
+ return ret;
+ break;
+ case DA9055_OUTPUT:
+ ret = da9055_reg_read(gpio->da9055, DA9055_REG_GPIO_MODE0_2);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret & (1 << offset);
+
+}
+
+static void da9055_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct da9055_gpio *gpio = to_da9055_gpio(gc);
+
+ da9055_reg_update(gpio->da9055,
+ DA9055_REG_GPIO_MODE0_2,
+ 1 << offset,
+ value << offset);
+}
+
+static int da9055_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9055_gpio *gpio = to_da9055_gpio(gc);
+ unsigned char reg_byte;
+
+ reg_byte = (DA9055_ACT_LOW | DA9055_GPI)
+ << DA9055_PORT_SHIFT(offset);
+
+ return da9055_reg_update(gpio->da9055, (offset >> 1) +
+ DA9055_REG_GPIO0_1,
+ DA9055_PORT_MASK <<
+ DA9055_PORT_SHIFT(offset),
+ reg_byte);
+}
+
+static int da9055_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
+{
+ struct da9055_gpio *gpio = to_da9055_gpio(gc);
+ unsigned char reg_byte;
+ int ret;
+
+ reg_byte = (DA9055_VDD_IO | DA9055_PUSH_PULL)
+ << DA9055_PORT_SHIFT(offset);
+
+ ret = da9055_reg_update(gpio->da9055, (offset >> 1) +
+ DA9055_REG_GPIO0_1,
+ DA9055_PORT_MASK <<
+ DA9055_PORT_SHIFT(offset),
+ reg_byte);
+ if (ret < 0)
+ return ret;
+
+ da9055_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
+{
+ struct da9055_gpio *gpio = to_da9055_gpio(gc);
+ struct da9055 *da9055 = gpio->da9055;
+
+ return regmap_irq_get_virq(da9055->irq_data,
+ DA9055_IRQ_GPI0 + offset);
+}
+
+static struct gpio_chip reference_gp = {
+ .label = "da9055-gpio",
+ .owner = THIS_MODULE,
+ .get = da9055_gpio_get,
+ .set = da9055_gpio_set,
+ .direction_input = da9055_gpio_direction_input,
+ .direction_output = da9055_gpio_direction_output,
+ .to_irq = da9055_gpio_to_irq,
+ .can_sleep = 1,
+ .ngpio = 3,
+ .base = -1,
+};
+
+static int da9055_gpio_probe(struct platform_device *pdev)
+{
+ struct da9055_gpio *gpio;
+ struct da9055_pdata *pdata;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return -ENOMEM;
+
+ gpio->da9055 = dev_get_drvdata(pdev->dev.parent);
+ pdata = gpio->da9055->dev->platform_data;
+
+ gpio->gp = reference_gp;
+ if (pdata && pdata->gpio_base)
+ gpio->gp.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&gpio->gp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ goto err_mem;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+
+err_mem:
+ return ret;
+}
+
+static int da9055_gpio_remove(struct platform_device *pdev)
+{
+ struct da9055_gpio *gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&gpio->gp);
+}
+
+static struct platform_driver da9055_gpio_driver = {
+ .probe = da9055_gpio_probe,
+ .remove = da9055_gpio_remove,
+ .driver = {
+ .name = "da9055-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9055_gpio_init(void)
+{
+ return platform_driver_register(&da9055_gpio_driver);
+}
+subsys_initcall(da9055_gpio_init);
+
+static void __exit da9055_gpio_exit(void)
+{
+ platform_driver_unregister(&da9055_gpio_driver);
+}
+module_exit(da9055_gpio_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9055 GPIO Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-gpio");
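
For the DA9055 driver just added, each GPIOx_y register packs the configuration of two pins into 4-bit fields, which is what the DA9055_PORT_SHIFT(offset) macro encodes. A small stand-alone sketch (plain userspace C, not driver code) that prints the register index, shift and mask the driver would use for each of the three pins:

#include <stdio.h>

#define DA9055_PORT_MASK		0x3
#define DA9055_PORT_SHIFT(offset)	(4 * ((offset) % 2))

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < 3; offset++)
		printf("pin %u: register index %u, shift %u, mask 0x%x\n",
		       offset, offset >> 1, DA9055_PORT_SHIFT(offset),
		       DA9055_PORT_MASK << DA9055_PORT_SHIFT(offset));
	return 0;
}
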
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index efb4c2d0d132..bdc8302e711a 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -35,7 +35,6 @@
struct em_gio_priv {
void __iomem *base0;
void __iomem *base1;
- unsigned int irq_base;
spinlock_t sense_lock;
struct platform_device *pdev;
struct gpio_chip gpio_chip;
@@ -214,7 +213,7 @@ static int em_gio_direction_output(struct gpio_chip *chip, unsigned offset,
static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return irq_find_mapping(gpio_to_priv(chip)->irq_domain, offset);
+ return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
}
static int em_gio_irq_domain_map(struct irq_domain *h, unsigned int virq,
@@ -234,41 +233,7 @@ static struct irq_domain_ops em_gio_irq_domain_ops = {
.map = em_gio_irq_domain_map,
};
-static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
-{
- struct platform_device *pdev = p->pdev;
- struct gpio_em_config *pdata = pdev->dev.platform_data;
-
- p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
- pdata->number_of_pins, numa_node_id());
- if (p->irq_base < 0) {
- dev_err(&pdev->dev, "cannot get irq_desc\n");
- return p->irq_base;
- }
- pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
- pdata->gpio_base, pdata->number_of_pins, p->irq_base);
-
- p->irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
- pdata->number_of_pins,
- p->irq_base, 0,
- &em_gio_irq_domain_ops, p);
- if (!p->irq_domain) {
- irq_free_descs(p->irq_base, pdata->number_of_pins);
- return -ENXIO;
- }
-
- return 0;
-}
-
-static void em_gio_irq_domain_cleanup(struct em_gio_priv *p)
-{
- struct gpio_em_config *pdata = p->pdev->dev.platform_data;
-
- irq_free_descs(p->irq_base, pdata->number_of_pins);
- /* FIXME: irq domain wants to be freed! */
-}
-
-static int __devinit em_gio_probe(struct platform_device *pdev)
+static int em_gio_probe(struct platform_device *pdev)
{
struct gpio_em_config *pdata = pdev->dev.platform_data;
struct em_gio_priv *p;
@@ -334,8 +299,11 @@ static int __devinit em_gio_probe(struct platform_device *pdev)
irq_chip->irq_set_type = em_gio_irq_set_type;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE;
- ret = em_gio_irq_domain_init(p);
- if (ret) {
+ p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ pdata->number_of_pins,
+ &em_gio_irq_domain_ops, p);
+ if (!p->irq_domain) {
+ ret = -ENXIO;
dev_err(&pdev->dev, "cannot initialize irq domain\n");
goto err3;
}
@@ -364,7 +332,7 @@ err6:
err5:
free_irq(irq[0]->start, pdev);
err4:
- em_gio_irq_domain_cleanup(p);
+ irq_domain_remove(p->irq_domain);
err3:
iounmap(p->base1);
err2:
@@ -375,7 +343,7 @@ err0:
return ret;
}
-static int __devexit em_gio_remove(struct platform_device *pdev)
+static int em_gio_remove(struct platform_device *pdev)
{
struct em_gio_priv *p = platform_get_drvdata(pdev);
struct resource *irq[2];
@@ -390,7 +358,7 @@ static int __devexit em_gio_remove(struct platform_device *pdev)
free_irq(irq[1]->start, pdev);
free_irq(irq[0]->start, pdev);
- em_gio_irq_domain_cleanup(p);
+ irq_domain_remove(p->irq_domain);
iounmap(p->base1);
iounmap(p->base0);
kfree(p);
@@ -399,7 +367,7 @@ static int __devexit em_gio_remove(struct platform_device *pdev)
static struct platform_driver em_gio_device_driver = {
.probe = em_gio_probe,
- .remove = __devexit_p(em_gio_remove),
+ .remove = em_gio_remove,
.driver = {
.name = "em_gio",
}
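
The em_gio conversion above replaces a pre-allocated legacy IRQ range with a linear irq domain, letting gpio_to_irq() create virq mappings on demand via irq_create_mapping(). A minimal sketch of that pattern with made-up names (sketch_priv, sketch_to_irq), assuming the domain was created with irq_domain_add_linear() at probe time as in the hunk above:

#include <linux/kernel.h>
#include <linux/irqdomain.h>
#include <linux/gpio.h>

struct sketch_priv {
	struct irq_domain *domain;
	struct gpio_chip chip;
};

static int sketch_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct sketch_priv *p = container_of(chip, struct sketch_priv, chip);

	/* Allocates the virq for this hwirq the first time it is requested. */
	return irq_create_mapping(p->domain, offset);
}
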
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 9fe5b8fe9be8..56b98eebe1fc 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -340,7 +340,7 @@ static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
return gpiochip_add(&bgc->gc);
}
-static int __devinit ep93xx_gpio_probe(struct platform_device *pdev)
+static int ep93xx_gpio_probe(struct platform_device *pdev)
{
struct ep93xx_gpio *ep93xx_gpio;
struct resource *res;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 82e2e4fe599e..05fcc0f247ca 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -444,7 +444,7 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
return ret;
}
-static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
+static int bgpio_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *r;
@@ -507,7 +507,7 @@ static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
return gpiochip_add(&bgc->gc);
}
-static int __devexit bgpio_pdev_remove(struct platform_device *pdev)
+static int bgpio_pdev_remove(struct platform_device *pdev)
{
struct bgpio_chip *bgc = platform_get_drvdata(pdev);
@@ -527,7 +527,7 @@ static struct platform_driver bgpio_driver = {
},
.id_table = bgpio_id_table,
.probe = bgpio_pdev_probe,
- .remove = __devexit_p(bgpio_pdev_remove),
+ .remove = bgpio_pdev_remove,
};
module_platform_driver(bgpio_driver);
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index d4d617966696..6f2306db8591 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -238,7 +238,7 @@ static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
ichx_write_bit(GPIO_LVL, nr, val, 0);
}
-static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+static void ichx_gpiolib_setup(struct gpio_chip *chip)
{
chip->owner = THIS_MODULE;
chip->label = DRV_NAME;
@@ -313,7 +313,7 @@ static struct ichx_desc intel5_desc = {
.ngpio = 76,
};
-static int __devinit ichx_gpio_request_regions(struct resource *res_base,
+static int ichx_gpio_request_regions(struct resource *res_base,
const char *name, u8 use_gpio)
{
int i;
@@ -353,7 +353,7 @@ static void ichx_gpio_release_regions(struct resource *res_base, u8 use_gpio)
}
}
-static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+static int ichx_gpio_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_pm;
int err;
@@ -390,6 +390,7 @@ static int __devinit ichx_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
err = ichx_gpio_request_regions(res_base, pdev->name,
@@ -442,7 +443,7 @@ add_err:
return err;
}
-static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+static int ichx_gpio_remove(struct platform_device *pdev)
{
int err;
@@ -467,7 +468,7 @@ static struct platform_driver ichx_gpio_driver = {
.name = DRV_NAME,
},
.probe = ichx_gpio_probe,
- .remove = __devexit_p(ichx_gpio_remove),
+ .remove = ichx_gpio_remove,
};
module_platform_driver(ichx_gpio_driver);
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index f2f000dd70b3..7d0a04169a35 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -108,13 +108,13 @@ static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
spin_unlock(&mod->lock);
}
-static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
+static void ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
{
iowrite16be(reg, &mod->regs->control);
iowrite16be(val, &mod->regs->control);
}
-static void __devinit ttl_setup_device(struct ttl_module *mod)
+static void ttl_setup_device(struct ttl_module *mod)
{
/* reset the device to a known state */
iowrite16be(0x0000, &mod->regs->control);
@@ -140,7 +140,7 @@ static void __devinit ttl_setup_device(struct ttl_module *mod)
ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
}
-static int __devinit ttl_probe(struct platform_device *pdev)
+static int ttl_probe(struct platform_device *pdev)
{
struct janz_platform_data *pdata;
struct device *dev = &pdev->dev;
@@ -211,7 +211,7 @@ out_return:
return ret;
}
-static int __devexit ttl_remove(struct platform_device *pdev)
+static int ttl_remove(struct platform_device *pdev)
{
struct ttl_module *mod = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -234,7 +234,7 @@ static struct platform_driver ttl_driver = {
.owner = THIS_MODULE,
},
.probe = ttl_probe,
- .remove = __devexit_p(ttl_remove),
+ .remove = ttl_remove,
};
module_platform_driver(ttl_driver);
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 202a99207b7d..e77b2b3e94af 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -332,7 +332,7 @@ static const struct dev_pm_ops lnw_gpio_pm_ops = {
.runtime_idle = lnw_gpio_runtime_idle,
};
-static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
+static int lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void *base;
@@ -435,7 +435,7 @@ static struct pci_driver lnw_gpio_driver = {
};
-static int __devinit wp_gpio_probe(struct platform_device *pdev)
+static int wp_gpio_probe(struct platform_device *pdev)
{
struct lnw_gpio *lnw;
struct gpio_chip *gc;
@@ -484,7 +484,7 @@ err_kmalloc:
return retval;
}
-static int __devexit wp_gpio_remove(struct platform_device *pdev)
+static int wp_gpio_remove(struct platform_device *pdev)
{
struct lnw_gpio *lnw = platform_get_drvdata(pdev);
int err;
@@ -499,7 +499,7 @@ static int __devexit wp_gpio_remove(struct platform_device *pdev)
static struct platform_driver wp_gpio_driver = {
.probe = wp_gpio_probe,
- .remove = __devexit_p(wp_gpio_remove),
+ .remove = wp_gpio_remove,
.driver = {
.name = "wp_gpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 3644e0dcb3dd..36d7dee07b28 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -542,7 +542,7 @@ static int lpc32xx_of_xlate(struct gpio_chip *gc,
return gpiospec->args[1];
}
-static int __devinit lpc32xx_gpio_probe(struct platform_device *pdev)
+static int lpc32xx_gpio_probe(struct platform_device *pdev)
{
int i;
@@ -559,7 +559,7 @@ static int __devinit lpc32xx_gpio_probe(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id lpc32xx_gpio_of_match[] __devinitdata = {
+static struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index a5ca0ab1b372..4b6b9a04e326 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -31,7 +31,7 @@ static int max7300_i2c_read(struct device *dev, unsigned int reg)
return i2c_smbus_read_byte_data(client, reg);
}
-static int __devinit max7300_probe(struct i2c_client *client,
+static int max7300_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max7301 *ts;
@@ -55,7 +55,7 @@ static int __devinit max7300_probe(struct i2c_client *client,
return ret;
}
-static int __devexit max7300_remove(struct i2c_client *client)
+static int max7300_remove(struct i2c_client *client)
{
return __max730x_remove(&client->dev);
}
@@ -72,7 +72,7 @@ static struct i2c_driver max7300_driver = {
.owner = THIS_MODULE,
},
.probe = max7300_probe,
- .remove = __devexit_p(max7300_remove),
+ .remove = max7300_remove,
.id_table = max7300_id,
};
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 741acfcbe761..c6c535c1310e 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -50,7 +50,7 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
return word & 0xff;
}
-static int __devinit max7301_probe(struct spi_device *spi)
+static int max7301_probe(struct spi_device *spi)
{
struct max7301 *ts;
int ret;
@@ -75,7 +75,7 @@ static int __devinit max7301_probe(struct spi_device *spi)
return ret;
}
-static int __devexit max7301_remove(struct spi_device *spi)
+static int max7301_remove(struct spi_device *spi)
{
return __max730x_remove(&spi->dev);
}
@@ -92,7 +92,7 @@ static struct spi_driver max7301_driver = {
.owner = THIS_MODULE,
},
.probe = max7301_probe,
- .remove = __devexit_p(max7301_remove),
+ .remove = max7301_remove,
.id_table = max7301_id,
};
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 05e2dac60b3b..00092342b84c 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -160,17 +160,13 @@ static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
mutex_unlock(&ts->lock);
}
-int __devinit __max730x_probe(struct max7301 *ts)
+int __max730x_probe(struct max7301 *ts)
{
struct device *dev = ts->dev;
struct max7301_platform_data *pdata;
int i, ret;
pdata = dev->platform_data;
- if (!pdata || !pdata->base) {
- dev_err(dev, "incorrect or missing platform data\n");
- return -EINVAL;
- }
mutex_init(&ts->lock);
dev_set_drvdata(dev, ts);
@@ -178,7 +174,12 @@ int __devinit __max730x_probe(struct max7301 *ts)
/* Power up the chip and disable IRQ output */
ts->write(dev, 0x04, 0x01);
- ts->input_pullup_active = pdata->input_pullup_active;
+ if (pdata) {
+ ts->input_pullup_active = pdata->input_pullup_active;
+ ts->chip.base = pdata->base;
+ } else {
+ ts->chip.base = -1;
+ }
ts->chip.label = dev->driver->name;
ts->chip.direction_input = max7301_direction_input;
@@ -186,7 +187,6 @@ int __devinit __max730x_probe(struct max7301 *ts)
ts->chip.direction_output = max7301_direction_output;
ts->chip.set = max7301_set;
- ts->chip.base = pdata->base;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = 1;
ts->chip.dev = dev;
@@ -226,7 +226,7 @@ exit_destroy:
}
EXPORT_SYMBOL_GPL(__max730x_probe);
-int __devexit __max730x_remove(struct device *dev)
+int __max730x_remove(struct device *dev)
{
struct max7301 *ts = dev_get_drvdata(dev);
int ret;
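
The max730x change above makes platform data optional: when none is supplied, the chip base falls back to -1 so gpiolib assigns a dynamic base at gpiochip_add() time. A minimal sketch of that fallback with invented names (example_pdata, example_apply_pdata), not taken from the patch:

#include <linux/gpio.h>

struct example_pdata {
	int base;	/* board-chosen first GPIO number */
};

static void example_apply_pdata(struct gpio_chip *chip,
				const struct example_pdata *pdata)
{
	if (pdata)
		chip->base = pdata->base;	/* honour board numbering */
	else
		chip->base = -1;		/* let gpiolib pick a base */
}
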
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 9504120812a5..1e0467ce4c37 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -526,7 +526,7 @@ static void max732x_irq_teardown(struct max732x_chip *chip)
}
#endif
-static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
+static int max732x_setup_gpio(struct max732x_chip *chip,
const struct i2c_device_id *id,
unsigned gpio_start)
{
@@ -574,7 +574,7 @@ static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
return port;
}
-static int __devinit max732x_probe(struct i2c_client *client,
+static int max732x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max732x_platform_data *pdata;
@@ -651,7 +651,7 @@ out_failed:
return ret;
}
-static int __devexit max732x_remove(struct i2c_client *client)
+static int max732x_remove(struct i2c_client *client)
{
struct max732x_platform_data *pdata = client->dev.platform_data;
struct max732x_chip *chip = i2c_get_clientdata(client);
@@ -690,7 +690,7 @@ static struct i2c_driver max732x_driver = {
.owner = THIS_MODULE,
},
.probe = max732x_probe,
- .remove = __devexit_p(max732x_remove),
+ .remove = max732x_remove,
.id_table = max732x_id,
};
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 2de57ce5feb6..6a8fdc26ae6a 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -80,7 +80,7 @@ static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
mutex_unlock(&mc->lock);
}
-static int __devinit mc33880_probe(struct spi_device *spi)
+static int mc33880_probe(struct spi_device *spi)
{
struct mc33880 *mc;
struct mc33880_platform_data *pdata;
@@ -147,7 +147,7 @@ exit_destroy:
return ret;
}
-static int __devexit mc33880_remove(struct spi_device *spi)
+static int mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
int ret;
@@ -175,7 +175,7 @@ static struct spi_driver mc33880_driver = {
.owner = THIS_MODULE,
},
.probe = mc33880_probe,
- .remove = __devexit_p(mc33880_remove),
+ .remove = mc33880_remove,
};
static int __init mc33880_init(void)
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index ce1c84760076..3cea0ea79e80 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -475,7 +475,7 @@ fail:
#if IS_ENABLED(CONFIG_I2C)
-static int __devinit mcp230xx_probe(struct i2c_client *client,
+static int mcp230xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcp23s08_platform_data *pdata;
@@ -508,7 +508,7 @@ fail:
return status;
}
-static int __devexit mcp230xx_remove(struct i2c_client *client)
+static int mcp230xx_remove(struct i2c_client *client)
{
struct mcp23s08 *mcp = i2c_get_clientdata(client);
int status;
@@ -533,7 +533,7 @@ static struct i2c_driver mcp230xx_driver = {
.owner = THIS_MODULE,
},
.probe = mcp230xx_probe,
- .remove = __devexit_p(mcp230xx_remove),
+ .remove = mcp230xx_remove,
.id_table = mcp230xx_id,
};
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 6a29ee1847be..b73366523fae 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -385,7 +385,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
return ret;
}
-static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
+static void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
unsigned int irq_start, unsigned int num)
{
struct irq_chip_generic *gc;
@@ -406,7 +406,7 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+static int ioh_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
@@ -517,7 +517,7 @@ err_pci_enable:
return ret;
}
-static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
+static void ioh_gpio_remove(struct pci_dev *pdev)
{
int err;
int i;
@@ -606,7 +606,7 @@ static struct pci_driver ioh_gpio_driver = {
.name = "ml_ioh_gpio",
.id_table = ioh_gpio_pcidev_id,
.probe = ioh_gpio_probe,
- .remove = __devexit_p(ioh_gpio_remove),
+ .remove = ioh_gpio_remove,
.suspend = ioh_gpio_suspend,
.resume = ioh_gpio_resume
};
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 2c7cef367fc0..42647f26c9e0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -148,7 +148,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
+static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
@@ -308,7 +308,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
+static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index b38986285868..27ea7b9257ff 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -256,7 +256,7 @@ static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
chip->irq_eoi(data);
}
-static int __devinit platform_msic_gpio_probe(struct platform_device *pdev)
+static int platform_msic_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct intel_msic_gpio_pdata *pdata = dev->platform_data;
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 38305beb4375..55a7e7769af6 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -352,7 +352,7 @@ static struct irq_chip msm_gpio_irq_chip = {
.irq_set_wake = msm_gpio_irq_set_wake,
};
-static int __devinit msm_gpio_probe(struct platform_device *dev)
+static int msm_gpio_probe(struct platform_device *dev)
{
int i, irq, ret;
@@ -376,7 +376,7 @@ static int __devinit msm_gpio_probe(struct platform_device *dev)
return 0;
}
-static int __devexit msm_gpio_remove(struct platform_device *dev)
+static int msm_gpio_remove(struct platform_device *dev)
{
int ret = gpiochip_remove(&msm_gpio.gpio_chip);
@@ -390,7 +390,7 @@ static int __devexit msm_gpio_remove(struct platform_device *dev)
static struct platform_driver msm_gpio_driver = {
.probe = msm_gpio_probe,
- .remove = __devexit_p(msm_gpio_remove),
+ .remove = msm_gpio_remove,
.driver = {
.name = "msmgpio",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index be65c0451ad5..6819d63cb167 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -41,7 +41,6 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
/*
@@ -168,12 +167,12 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
* Functions implementing the gpio_chip methods
*/
-int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
+static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
{
return pinctrl_request_gpio(chip->base + pin);
}
-void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
+static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
{
pinctrl_free_gpio(chip->base + pin);
}
@@ -469,20 +468,7 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
-static struct platform_device_id mvebu_gpio_ids[] = {
- {
- .name = "orion-gpio",
- }, {
- .name = "mv78200-gpio",
- }, {
- .name = "armadaxp-gpio",
- }, {
- /* sentinel */
- },
-};
-MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);
-
-static struct of_device_id mvebu_gpio_of_match[] __devinitdata = {
+static struct of_device_id mvebu_gpio_of_match[] = {
{
.compatible = "marvell,orion-gpio",
.data = (void*) MVEBU_GPIO_SOC_VARIANT_ORION,
@@ -501,7 +487,7 @@ static struct of_device_id mvebu_gpio_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
-static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
+static int mvebu_gpio_probe(struct platform_device *pdev)
{
struct mvebu_gpio_chip *mvchip;
const struct of_device_id *match;
@@ -546,6 +532,7 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.label = dev_name(&pdev->dev);
mvchip->chip.dev = &pdev->dev;
mvchip->chip.request = mvebu_gpio_request;
+ mvchip->chip.free = mvebu_gpio_free;
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
@@ -554,15 +541,12 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = 0;
-#ifdef CONFIG_OF
mvchip->chip.of_node = np;
-#endif
spin_lock_init(&mvchip->lock);
mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
if (! mvchip->membase) {
dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -572,14 +556,12 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (! res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
- kfree(mvchip->chip.label);
return -ENODEV;
}
mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
if (! mvchip->percpu_membase) {
dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
}
@@ -640,7 +622,6 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -648,7 +629,6 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
mvchip->membase, handle_level_irq);
if (! gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -673,8 +653,8 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
- mvchip->domain = irq_domain_add_legacy(np, mvchip->chip.ngpio,
- mvchip->irqbase, 0,
+ mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
+ mvchip->irqbase,
&irq_domain_simple_ops,
mvchip);
if (!mvchip->domain) {
@@ -683,7 +663,6 @@ static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
kfree(gc);
- kfree(mvchip->chip.label);
return -ENODEV;
}
@@ -697,7 +676,6 @@ static struct platform_driver mvebu_gpio_driver = {
.of_match_table = mvebu_gpio_of_match,
},
.probe = mvebu_gpio_probe,
- .id_table = mvebu_gpio_ids,
};
static int __init mvebu_gpio_init(void)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 80f44bb64a87..7877335c4cc8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -356,7 +356,7 @@ static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
IRQ_NOREQUEST, 0);
}
-static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
+static void mxc_gpio_get_hw(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxc_gpio_dt_ids, &pdev->dev);
@@ -395,7 +395,7 @@ static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return irq_find_mapping(port->domain, offset);
}
-static int __devinit mxc_gpio_probe(struct platform_device *pdev)
+static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 796fb13e4815..fa2a63cad32e 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -214,7 +214,7 @@ static const struct of_device_id mxs_gpio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
-static int __devinit mxs_gpio_probe(struct platform_device *pdev)
+static int mxs_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(mxs_gpio_dt_ids, &pdev->dev);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d335af1d4d85..f1fbedb2a6f9 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1012,7 +1012,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
dev_err(bank->dev, "Could not get gpio dbck\n");
}
-static __devinit void
+static void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
unsigned int num)
{
@@ -1041,7 +1041,7 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
+static void omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
static int gpio;
@@ -1089,7 +1089,7 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
static const struct of_device_id omap_gpio_match[];
-static int __devinit omap_gpio_probe(struct platform_device *pdev)
+static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
@@ -1105,7 +1105,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
+ bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
if (!bank) {
dev_err(dev, "Memory alloc failed\n");
return -ENOMEM;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 9c693ae17956..cc102d25ee24 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/slab.h>
@@ -83,6 +84,7 @@ struct pca953x_chip {
u32 irq_trig_raise;
u32 irq_trig_fall;
int irq_base;
+ struct irq_domain *domain;
#endif
struct i2c_client *client;
@@ -333,14 +335,14 @@ static void pca953x_irq_mask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
+ chip->irq_mask &= ~(1 << d->hwirq);
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- chip->irq_mask |= 1 << (d->irq - chip->irq_base);
+ chip->irq_mask |= 1 << d->hwirq;
}
static void pca953x_irq_bus_lock(struct irq_data *d)
@@ -372,8 +374,7 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- u32 level = d->irq - chip->irq_base;
- u32 mask = 1 << level;
+ u32 mask = 1 << d->hwirq;
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -454,7 +455,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(level + chip->irq_base);
+ handle_nested_irq(irq_find_mapping(chip->domain, level));
pending &= ~(1 << level);
} while (pending);
@@ -499,6 +500,17 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (chip->irq_base < 0)
goto out_failed;
+ chip->domain = irq_domain_add_legacy(client->dev.of_node,
+ chip->gpio_chip.ngpio,
+ chip->irq_base,
+ 0,
+ &irq_domain_simple_ops,
+ NULL);
+ if (!chip->domain) {
+ ret = -ENODEV;
+ goto out_irqdesc_free;
+ }
+
for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
int irq = lvl + chip->irq_base;
@@ -521,7 +533,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
client->irq);
- goto out_failed;
+ goto out_irqdesc_free;
}
chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
@@ -529,6 +541,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
return 0;
+out_irqdesc_free:
+ irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
out_failed:
chip->irq_base = -1;
return ret;
@@ -602,7 +616,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
}
#endif
-static int __devinit device_pca953x_init(struct pca953x_chip *chip, u32 invert)
+static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
@@ -621,7 +635,7 @@ out:
return ret;
}
-static int __devinit device_pca957x_init(struct pca953x_chip *chip, u32 invert)
+static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
u32 val = 0;
@@ -652,7 +666,7 @@ out:
return ret;
}
-static int __devinit pca953x_probe(struct i2c_client *client,
+static int pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca953x_platform_data *pdata;
@@ -751,9 +765,38 @@ static int pca953x_remove(struct i2c_client *client)
return 0;
}
+static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca9534", },
+ { .compatible = "nxp,pca9535", },
+ { .compatible = "nxp,pca9536", },
+ { .compatible = "nxp,pca9537", },
+ { .compatible = "nxp,pca9538", },
+ { .compatible = "nxp,pca9539", },
+ { .compatible = "nxp,pca9554", },
+ { .compatible = "nxp,pca9555", },
+ { .compatible = "nxp,pca9556", },
+ { .compatible = "nxp,pca9557", },
+ { .compatible = "nxp,pca9574", },
+ { .compatible = "nxp,pca9575", },
+
+ { .compatible = "maxim,max7310", },
+ { .compatible = "maxim,max7312", },
+ { .compatible = "maxim,max7313", },
+ { .compatible = "maxim,max7315", },
+
+ { .compatible = "ti,pca6107", },
+ { .compatible = "ti,tca6408", },
+ { .compatible = "ti,tca6416", },
+ { .compatible = "ti,tca6424", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pca953x_dt_ids);
+
static struct i2c_driver pca953x_driver = {
.driver = {
.name = "pca953x",
+ .of_match_table = pca953x_dt_ids,
},
.probe = pca953x_probe,
.remove = pca953x_remove,
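The pca953x hunks above replace the "d->irq - chip->irq_base" arithmetic with d->hwirq and dispatch pending lines through irq_find_mapping() on the new irq domain. A minimal, hedged sketch of that pattern follows; it is not part of the patch, and the foo_* names are illustrative placeholders only.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/types.h>

struct foo_chip {
        struct irq_domain *domain;
        u32 irq_mask;
};

static void foo_irq_mask(struct irq_data *d)
{
        struct foo_chip *chip = irq_data_get_irq_chip_data(d);

        /* d->hwirq is the line number within the domain; no irq_base math */
        chip->irq_mask &= ~BIT(d->hwirq);
}

static irqreturn_t foo_irq_handler(int irq, void *devid)
{
        struct foo_chip *chip = devid;
        unsigned long pending = 0x5;    /* pretend lines 0 and 2 fired */
        int level;

        for_each_set_bit(level, &pending, BITS_PER_LONG)
                /* translate the hardware line number to a Linux virq before dispatch */
                handle_nested_irq(irq_find_mapping(chip->domain, level));

        return IRQ_HANDLED;
}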
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 16af35cd2b10..a19b7457a726 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -223,11 +223,11 @@ static void pcf857x_irq_domain_cleanup(struct pcf857x *gpio)
static int pcf857x_irq_domain_init(struct pcf857x *gpio,
struct pcf857x_platform_data *pdata,
- struct device *dev)
+ struct i2c_client *client)
{
int status;
- gpio->irq_domain = irq_domain_add_linear(dev->of_node,
+ gpio->irq_domain = irq_domain_add_linear(client->dev.of_node,
gpio->chip.ngpio,
&pcf857x_irq_domain_ops,
NULL);
@@ -235,15 +235,15 @@ static int pcf857x_irq_domain_init(struct pcf857x *gpio,
goto fail;
/* enable real irq */
- status = request_irq(pdata->irq, pcf857x_irq_demux, 0,
- dev_name(dev), gpio);
+ status = request_irq(client->irq, pcf857x_irq_demux, 0,
+ dev_name(&client->dev), gpio);
if (status)
goto fail;
/* enable gpio_to_irq() */
INIT_WORK(&gpio->work, pcf857x_irq_demux_work);
gpio->chip.to_irq = pcf857x_to_irq;
- gpio->irq = pdata->irq;
+ gpio->irq = client->irq;
return 0;
@@ -285,8 +285,8 @@ static int pcf857x_probe(struct i2c_client *client,
gpio->chip.ngpio = id->driver_data;
/* enable gpio_to_irq() if platform has settings */
- if (pdata && pdata->irq) {
- status = pcf857x_irq_domain_init(gpio, pdata, &client->dev);
+ if (pdata && client->irq) {
+ status = pcf857x_irq_domain_init(gpio, pdata, client);
if (status < 0) {
dev_err(&client->dev, "irq_domain init failed\n");
goto fail;
@@ -368,15 +368,6 @@ static int pcf857x_probe(struct i2c_client *client,
if (status < 0)
goto fail;
- /* NOTE: these chips can issue "some pin-changed" IRQs, which we
- * don't yet even try to use. Among other issues, the relevant
- * genirq state isn't available to modular drivers; and most irq
- * methods can't be called from sleeping contexts.
- */
-
- dev_info(&client->dev, "%s\n",
- client->irq ? " (irq ignored)" : "");
-
/* Let platform code set up the GPIOs and their users.
* Now is the first time anyone could use them.
*/
@@ -388,13 +379,15 @@ static int pcf857x_probe(struct i2c_client *client,
dev_warn(&client->dev, "setup --> %d\n", status);
}
+ dev_info(&client->dev, "probed\n");
+
return 0;
fail:
dev_dbg(&client->dev, "probe error %d for '%s'\n",
status, client->name);
- if (pdata && pdata->irq)
+ if (pdata && client->irq)
pcf857x_irq_domain_cleanup(gpio);
kfree(gpio);
@@ -418,7 +411,7 @@ static int pcf857x_remove(struct i2c_client *client)
}
}
- if (pdata && pdata->irq)
+ if (pdata && client->irq)
pcf857x_irq_domain_cleanup(gpio);
status = gpiochip_remove(&gpio->chip);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 4ad0c4f9171c..cdf599687cf7 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -215,6 +215,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
struct gpio_chip *gpio = &chip->gpio;
gpio->label = dev_name(chip->dev);
+ gpio->dev = chip->dev;
gpio->owner = THIS_MODULE;
gpio->direction_input = pch_gpio_direction_input;
gpio->get = pch_gpio_get;
@@ -325,7 +326,7 @@ static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
return ret;
}
-static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
+static void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
unsigned int irq_start, unsigned int num)
{
struct irq_chip_generic *gc;
@@ -345,7 +346,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+static int pch_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
s32 ret;
@@ -442,7 +443,7 @@ err_pci_enable:
return ret;
}
-static void __devexit pch_gpio_remove(struct pci_dev *pdev)
+static void pch_gpio_remove(struct pci_dev *pdev)
{
int err;
struct pch_gpio *chip = pci_get_drvdata(pdev);
@@ -531,7 +532,7 @@ static struct pci_driver pch_gpio_driver = {
.name = "pch_gpio",
.id_table = pch_gpio_pcidev_id,
.probe = pch_gpio_probe,
- .remove = __devexit_p(pch_gpio_remove),
+ .remove = pch_gpio_remove,
.suspend = pch_gpio_suspend,
.resume = pch_gpio_resume
};
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index b4b5da4fd2cc..c1720de18a4f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -48,12 +48,7 @@ struct pl061_context_save_regs {
#endif
struct pl061_gpio {
- /* Each of the two spinlocks protects a different set of hardware
- * regiters and data structurs. This decouples the code of the IRQ from
- * the GPIO code. This also makes the case of a GPIO routine call from
- * the IRQ code simpler.
- */
- spinlock_t lock; /* GPIO registers */
+ spinlock_t lock;
void __iomem *base;
int irq_base;
@@ -216,39 +211,34 @@ static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
-static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
+static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct pl061_platform_data *pdata;
+ struct device *dev = &adev->dev;
+ struct pl061_platform_data *pdata = dev->platform_data;
struct pl061_gpio *chip;
int ret, irq, i;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
- pdata = dev->dev.platform_data;
if (pdata) {
chip->gc.base = pdata->gpio_base;
chip->irq_base = pdata->irq_base;
- } else if (dev->dev.of_node) {
+ } else if (adev->dev.of_node) {
chip->gc.base = -1;
chip->irq_base = 0;
- } else {
- ret = -ENODEV;
- goto free_mem;
- }
+ } else
+ return -ENODEV;
- if (!request_mem_region(dev->res.start,
- resource_size(&dev->res), "pl061")) {
- ret = -EBUSY;
- goto free_mem;
- }
+ if (!devm_request_mem_region(dev, adev->res.start,
+ resource_size(&adev->res), "pl061"))
+ return -EBUSY;
- chip->base = ioremap(dev->res.start, resource_size(&dev->res));
- if (chip->base == NULL) {
- ret = -ENOMEM;
- goto release_region;
- }
+ chip->base = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (chip->base == NULL)
+ return -ENOMEM;
spin_lock_init(&chip->lock);
@@ -258,13 +248,13 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
chip->gc.set = pl061_set_value;
chip->gc.to_irq = pl061_to_irq;
chip->gc.ngpio = PL061_GPIO_NR;
- chip->gc.label = dev_name(&dev->dev);
- chip->gc.dev = &dev->dev;
+ chip->gc.label = dev_name(dev);
+ chip->gc.dev = dev;
chip->gc.owner = THIS_MODULE;
ret = gpiochip_add(&chip->gc);
if (ret)
- goto iounmap;
+ return ret;
/*
* irq_chip support
@@ -276,11 +266,10 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
pl061_init_gc(chip, chip->irq_base);
writeb(0, chip->base + GPIOIE); /* disable irqs */
- irq = dev->irq[0];
- if (irq < 0) {
- ret = -ENODEV;
- goto iounmap;
- }
+ irq = adev->irq[0];
+ if (irq < 0)
+ return -ENODEV;
+
irq_set_chained_handler(irq, pl061_irq_handler);
irq_set_handler_data(irq, chip);
@@ -294,18 +283,9 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
}
}
- amba_set_drvdata(dev, chip);
+ amba_set_drvdata(adev, chip);
return 0;
-
-iounmap:
- iounmap(chip->base);
-release_region:
- release_mem_region(dev->res.start, resource_size(&dev->res));
-free_mem:
- kfree(chip);
-
- return ret;
}
#ifdef CONFIG_PM
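The pl061 conversion above drops the iounmap/release_region/kfree error labels by switching to device-managed allocations. A hedged sketch of that probe pattern, assuming an AMBA device like pl061; the foo_* names are placeholders, not the driver's own.

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>

struct foo_gpio {
        void __iomem *base;
};

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct device *dev = &adev->dev;
        struct foo_gpio *chip;

        /* released automatically if probe fails or the device goes away */
        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        if (!devm_request_mem_region(dev, adev->res.start,
                                     resource_size(&adev->res), "foo"))
                return -EBUSY;

        chip->base = devm_ioremap(dev, adev->res.start,
                                  resource_size(&adev->res));
        if (!chip->base)
                return -ENOMEM;

        /* plain returns are enough; no unwind labels needed */
        return 0;
}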
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 98d52cb3fd1a..8325f580c0f1 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -250,7 +250,7 @@ static int pxa_gpio_of_xlate(struct gpio_chip *gc,
}
#endif
-static int __devinit pxa_init_gpio_chip(int gpio_end,
+static int pxa_init_gpio_chip(int gpio_end,
int (*set_wake)(unsigned int, unsigned int))
{
int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
@@ -448,7 +448,7 @@ static int pxa_gpio_nums(void)
} else if (cpu_is_pxa27x()) {
count = 120;
gpio_type = PXA27X_GPIO;
- } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
+ } else if (cpu_is_pxa93x()) {
count = 191;
gpio_type = PXA93X_GPIO;
} else if (cpu_is_pxa3xx()) {
@@ -490,7 +490,7 @@ const struct irq_domain_ops pxa_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
+static int pxa_gpio_probe_dt(struct platform_device *pdev)
{
int ret, nr_banks, nr_gpios;
struct device_node *prev, *next, *np = pdev->dev.of_node;
@@ -537,7 +537,7 @@ err:
#define pxa_gpio_probe_dt(pdev) (-1)
#endif
-static int __devinit pxa_gpio_probe(struct platform_device *pdev)
+static int pxa_gpio_probe(struct platform_device *pdev)
{
struct pxa_gpio_chip *c;
struct resource *res;
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 08428bf17718..e63d6a397e17 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -111,7 +111,7 @@ static void rc5t583_gpio_free(struct gpio_chip *gc, unsigned offset)
rc5t583_set_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
}
-static int __devinit rc5t583_gpio_probe(struct platform_device *pdev)
+static int rc5t583_gpio_probe(struct platform_device *pdev)
{
struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
@@ -146,7 +146,7 @@ static int __devinit rc5t583_gpio_probe(struct platform_device *pdev)
return gpiochip_add(&rc5t583_gpio->gpio_chip);
}
-static int __devexit rc5t583_gpio_remove(struct platform_device *pdev)
+static int rc5t583_gpio_remove(struct platform_device *pdev)
{
struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
@@ -159,7 +159,7 @@ static struct platform_driver rc5t583_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = rc5t583_gpio_probe,
- .remove = __devexit_p(rc5t583_gpio_remove),
+ .remove = rc5t583_gpio_remove,
};
static int __init rc5t583_gpio_init(void)
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index b62d443e9a59..1bf55f67f7a5 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -128,7 +128,7 @@ static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
/*
* Cache the initial value of both GPIO data registers
*/
-static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
+static int rdc321x_gpio_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
@@ -206,7 +206,7 @@ out_free:
return err;
}
-static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
+static int rdc321x_gpio_remove(struct platform_device *pdev)
{
int ret;
struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
@@ -225,7 +225,7 @@ static struct platform_driver rdc321x_gpio_driver = {
.driver.name = "rdc321x-gpio",
.driver.owner = THIS_MODULE,
.probe = rdc321x_gpio_probe,
- .remove = __devexit_p(rdc321x_gpio_remove),
+ .remove = rdc321x_gpio_remove,
};
module_platform_driver(rdc321x_gpio_driver);
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index a006f0db15af..76be7eed79de 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -32,7 +32,6 @@
#include <mach/hardware.h>
#include <mach/map.h>
-#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/cpu.h>
@@ -42,12 +41,6 @@
#include <plat/gpio-fns.h>
#include <plat/pm.h>
-#ifndef DEBUG_GPIO
-#define gpio_dbg(x...) do { } while (0)
-#else
-#define gpio_dbg(x...) printk(KERN_DEBUG x)
-#endif
-
int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
unsigned int off, samsung_gpio_pull_t pull)
{
@@ -452,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
-#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
static struct samsung_gpio_cfg exynos_gpio_cfg = {
.set_pull = exynos_gpio_setpull,
.get_pull = exynos_gpio_getpull,
@@ -596,10 +589,13 @@ static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
unsigned long con;
con = __raw_readl(base + GPIOCON_OFF);
- con &= ~(0xf << con_4bit_shift(offset));
+ if (ourchip->bitmap_gpio_int & BIT(offset))
+ con |= 0xf << con_4bit_shift(offset);
+ else
+ con &= ~(0xf << con_4bit_shift(offset));
__raw_writel(con, base + GPIOCON_OFF);
- gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
+ pr_debug("%s: %p: CON now %08lx\n", __func__, base, con);
return 0;
}
@@ -627,7 +623,7 @@ static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
__raw_writel(con, base + GPIOCON_OFF);
__raw_writel(dat, base + GPIODAT_OFF);
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+ pr_debug("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
return 0;
}
@@ -671,7 +667,7 @@ static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
con &= ~(0xf << con_4bit_shift(offset));
__raw_writel(con, regcon);
- gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
+ pr_debug("%s: %p: CON %08lx\n", __func__, base, con);
return 0;
}
@@ -706,7 +702,7 @@ static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
__raw_writel(con, regcon);
__raw_writel(dat, base + GPIODAT_OFF);
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+ pr_debug("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
return 0;
}
@@ -926,10 +922,10 @@ static void __init samsung_gpiolib_add(struct samsung_gpio_chip *chip)
#ifdef CONFIG_PM
if (chip->pm != NULL) {
if (!chip->pm->save || !chip->pm->resume)
- printk(KERN_ERR "gpio: %s has missing PM functions\n",
+ pr_err("gpio: %s has missing PM functions\n",
gc->label);
} else
- printk(KERN_ERR "gpio: %s has no PM function\n", gc->label);
+ pr_err("gpio: %s has no PM function\n", gc->label);
#endif
/* gpiochip_add() prints own failure message on error. */
@@ -1081,6 +1077,8 @@ static void __init samsung_gpiolib_add_4bit_chips(struct samsung_gpio_chip *chip
if ((base != NULL) && (chip->base == NULL))
chip->base = base + ((i) * 0x20);
+ chip->bitmap_gpio_int = 0;
+
samsung_gpiolib_add(chip);
}
}
@@ -2447,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_1[] = {
{
.chip = {
@@ -2615,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_2[] = {
{
.chip = {
@@ -2676,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_3[] = {
{
.chip = {
@@ -2712,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_4[] = {
{
.chip = {
@@ -2797,27 +2795,6 @@ static __init void exynos4_gpiolib_init(void)
int group = 0;
void __iomem *gpx_base;
-#ifdef CONFIG_PINCTRL_SAMSUNG
- /*
- * This gpio driver includes support for device tree support and
- * there are platforms using it. In order to maintain
- * compatibility with those platforms, and to allow non-dt
- * Exynos4210 platforms to use this gpiolib support, a check
- * is added to find out if there is a active pin-controller
- * driver support available. If it is available, this gpiolib
- * support is ignored and the gpiolib support available in
- * pin-controller driver is used. This is a temporary check and
- * will go away when all of the Exynos4210 platforms have
- * switched to using device tree and the pin-ctrl driver.
- */
- struct device_node *pctrl_np;
- const char *pctrl_compat = "samsung,pinctrl-exynos4210";
- pctrl_np = of_find_compatible_node(NULL, NULL, pctrl_compat);
- if (pctrl_np)
- if (of_device_is_available(pctrl_np))
- return;
-#endif
-
/* gpio part1 */
gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
if (gpio_base1 == NULL) {
@@ -3032,6 +3009,29 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
+#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
+ /*
+ * This gpio driver includes device tree support and there are
+ * platforms using it. In order to maintain compatibility with those
+ * platforms, and to allow non-dt Exynos4210 platforms to use this
+ * gpiolib support, a check is added to find out if there is an active
+ * pin-controller driver support available. If it is available, this
+ * gpiolib support is ignored and the gpiolib support available in
+ * pin-controller driver is used. This is a temporary check and will go
+ * away when all of the Exynos4210 platforms have switched to using
+ * device tree and the pin-ctrl driver.
+ */
+ struct device_node *pctrl_np;
+ static const struct of_device_id exynos_pinctrl_ids[] = {
+ { .compatible = "samsung,pinctrl-exynos4210", },
+ { .compatible = "samsung,pinctrl-exynos4x12", },
+ { .compatible = "samsung,pinctrl-exynos5440", },
+ };
+ for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
+ if (pctrl_np && of_device_is_available(pctrl_np))
+ return -ENODEV;
+#endif
+
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
if (soc_is_s3c24xx()) {
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8707d4572a06..edae963f4625 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -185,7 +185,7 @@ static struct gpio_chip sch_gpio_resume = {
.set = sch_gpio_resume_set,
};
-static int __devinit sch_gpio_probe(struct platform_device *pdev)
+static int sch_gpio_probe(struct platform_device *pdev)
{
struct resource *res;
int err, id;
@@ -271,7 +271,7 @@ err_sch_gpio_core:
return err;
}
-static int __devexit sch_gpio_remove(struct platform_device *pdev)
+static int sch_gpio_remove(struct platform_device *pdev)
{
struct resource *res;
if (gpio_ba) {
@@ -303,7 +303,7 @@ static struct platform_driver sch_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = sch_gpio_probe,
- .remove = __devexit_p(sch_gpio_remove),
+ .remove = sch_gpio_remove,
};
module_platform_driver(sch_gpio_driver);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index e25f73130b40..88f374ac7753 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -129,7 +129,7 @@ static struct irq_domain_ops irq_domain_sdv_ops = {
.xlate = sdv_xlate,
};
-static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
+static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
struct pci_dev *pdev)
{
struct irq_chip_type *ct;
@@ -186,7 +186,7 @@ out_free_desc:
return ret;
}
-static int __devinit sdv_gpio_probe(struct pci_dev *pdev,
+static int sdv_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct sdv_gpio_chip_data *sd;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
new file mode 100644
index 000000000000..5f45fc4ed5d1
--- /dev/null
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -0,0 +1,217 @@
+/*
+ * SPEAr platform SPI chipselect abstraction over gpiolib
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* maximum chipselects */
+#define NUM_OF_GPIO 4
+
+/*
+ * Provision is available on some SPEAr SoCs to control ARM PL022 spi cs
+ * through system registers. This register lies outside the spi (pl022)
+ * address space, in the system registers.
+ *
+ * It provides control for spi chip select lines so that any chipselect
+ * (out of 4 possible chipselects in pl022) can be made low to select
+ * the particular slave.
+ */
+
+/**
+ * struct spear_spics - represents spi chip select control
+ * @base: base address
+ * @perip_cfg: configuration register
+ * @sw_enable_bit: bit to enable s/w control over chipselects
+ * @cs_value_bit: bit to program high or low chipselect
+ * @cs_enable_mask: mask to select bits required to select chipselect
+ * @cs_enable_shift: bit pos of cs_enable_mask
+ * @use_count: use count of a spi controller cs lines
+ * @last_off: stores the last offset passed to set_value()
+ * @chip: gpio_chip abstraction
+ */
+struct spear_spics {
+ void __iomem *base;
+ u32 perip_cfg;
+ u32 sw_enable_bit;
+ u32 cs_value_bit;
+ u32 cs_enable_mask;
+ u32 cs_enable_shift;
+ unsigned long use_count;
+ int last_off;
+ struct gpio_chip chip;
+};
+
+/* gpio framework specific routines */
+static int spics_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ return -ENXIO;
+}
+
+static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct spear_spics *spics = container_of(chip, struct spear_spics,
+ chip);
+ u32 tmp;
+
+ /* select chip select from register */
+ tmp = readl_relaxed(spics->base + spics->perip_cfg);
+ if (spics->last_off != offset) {
+ spics->last_off = offset;
+ tmp &= ~(spics->cs_enable_mask << spics->cs_enable_shift);
+ tmp |= offset << spics->cs_enable_shift;
+ }
+
+ /* toggle chip select line */
+ tmp &= ~(0x1 << spics->cs_value_bit);
+ tmp |= value << spics->cs_value_bit;
+ writel_relaxed(tmp, spics->base + spics->perip_cfg);
+}
+
+static int spics_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return -ENXIO;
+}
+
+static int spics_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ spics_set_value(chip, offset, value);
+ return 0;
+}
+
+static int spics_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct spear_spics *spics = container_of(chip, struct spear_spics,
+ chip);
+ u32 tmp;
+
+ if (!spics->use_count++) {
+ tmp = readl_relaxed(spics->base + spics->perip_cfg);
+ tmp |= 0x1 << spics->sw_enable_bit;
+ tmp |= 0x1 << spics->cs_value_bit;
+ writel_relaxed(tmp, spics->base + spics->perip_cfg);
+ }
+
+ return 0;
+}
+
+static void spics_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct spear_spics *spics = container_of(chip, struct spear_spics,
+ chip);
+ u32 tmp;
+
+ if (!--spics->use_count) {
+ tmp = readl_relaxed(spics->base + spics->perip_cfg);
+ tmp &= ~(0x1 << spics->sw_enable_bit);
+ writel_relaxed(tmp, spics->base + spics->perip_cfg);
+ }
+}
+
+static int spics_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spear_spics *spics;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid IORESOURCE_MEM\n");
+ return -EBUSY;
+ }
+
+ spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
+ if (!spics) {
+ dev_err(&pdev->dev, "memory allocation fail\n");
+ return -ENOMEM;
+ }
+
+ spics->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!spics->base) {
+ dev_err(&pdev->dev, "request and ioremap fail\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "st-spics,peripcfg-reg",
+ &spics->perip_cfg))
+ goto err_dt_data;
+ if (of_property_read_u32(np, "st-spics,sw-enable-bit",
+ &spics->sw_enable_bit))
+ goto err_dt_data;
+ if (of_property_read_u32(np, "st-spics,cs-value-bit",
+ &spics->cs_value_bit))
+ goto err_dt_data;
+ if (of_property_read_u32(np, "st-spics,cs-enable-mask",
+ &spics->cs_enable_mask))
+ goto err_dt_data;
+ if (of_property_read_u32(np, "st-spics,cs-enable-shift",
+ &spics->cs_enable_shift))
+ goto err_dt_data;
+
+ platform_set_drvdata(pdev, spics);
+
+ spics->chip.ngpio = NUM_OF_GPIO;
+ spics->chip.base = -1;
+ spics->chip.request = spics_request;
+ spics->chip.free = spics_free;
+ spics->chip.direction_input = spics_direction_input;
+ spics->chip.direction_output = spics_direction_output;
+ spics->chip.get = spics_get_value;
+ spics->chip.set = spics_set_value;
+ spics->chip.label = dev_name(&pdev->dev);
+ spics->chip.dev = &pdev->dev;
+ spics->chip.owner = THIS_MODULE;
+ spics->last_off = -1;
+
+ ret = gpiochip_add(&spics->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpio chip\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "spear spics registered\n");
+ return 0;
+
+err_dt_data:
+ dev_err(&pdev->dev, "DT probe failed\n");
+ return -EINVAL;
+}
+
+static const struct of_device_id spics_gpio_of_match[] = {
+ { .compatible = "st,spear-spics-gpio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spics_gpio_of_match);
+
+static struct platform_driver spics_gpio_driver = {
+ .probe = spics_gpio_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "spear-spics-gpio",
+ .of_match_table = spics_gpio_of_match,
+ },
+};
+
+static int __init spics_gpio_init(void)
+{
+ return platform_driver_register(&spics_gpio_driver);
+}
+subsys_initcall(spics_gpio_init);
+
+MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>");
+MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction");
+MODULE_LICENSE("GPL");
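The new driver above exposes the PL022 chip-select lines as output-only GPIOs. A hedged consumer-side sketch using the plain gpiolib calls of this era; the GPIO number is purely hypothetical, since the chip registers with a dynamic base (chip.base = -1).

#include <linux/gpio.h>

static int foo_assert_cs(void)
{
        unsigned int cs_gpio = 1000;    /* hypothetical number handed out by gpiolib */
        int ret;

        /* claim the line and park it high (slave deselected) */
        ret = gpio_request_one(cs_gpio, GPIOF_OUT_INIT_HIGH, "spi-cs");
        if (ret)
                return ret;

        gpio_set_value(cs_gpio, 0);     /* select the slave */
        /* ... perform the SPI transfer here ... */
        gpio_set_value(cs_gpio, 1);     /* deselect */

        gpio_free(cs_gpio);
        return 0;
}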
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 6064fb376e11..558542552aae 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -320,7 +320,7 @@ static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
return ret;
}
-static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+static void gsta_alloc_irq_chip(struct gsta_gpio *chip)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
@@ -353,7 +353,7 @@ static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
}
/* The platform device used here is instantiated by the MFD device */
-static int __devinit gsta_probe(struct platform_device *dev)
+static int gsta_probe(struct platform_device *dev)
{
int i, err;
struct pci_dev *pdev;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index dce34727bbf8..770476a9da87 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -11,7 +11,9 @@
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/mfd/stmpe.h>
/*
@@ -28,6 +30,7 @@ struct stmpe_gpio {
struct stmpe *stmpe;
struct device *dev;
struct mutex irq_lock;
+ struct irq_domain *domain;
int irq_base;
unsigned norequest_mask;
@@ -103,7 +106,7 @@ static int stmpe_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
- return stmpe_gpio->irq_base + offset;
+ return irq_create_mapping(stmpe_gpio->domain, offset);
}
static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -132,7 +135,7 @@ static struct gpio_chip template_chip = {
static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - stmpe_gpio->irq_base;
+ int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -199,7 +202,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
static void stmpe_gpio_irq_mask(struct irq_data *d)
{
struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - stmpe_gpio->irq_base;
+ int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -209,7 +212,7 @@ static void stmpe_gpio_irq_mask(struct irq_data *d)
static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - stmpe_gpio->irq_base;
+ int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -251,8 +254,9 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = bank * 8 + bit;
+ int virq = irq_find_mapping(stmpe_gpio->domain, line);
- handle_nested_irq(stmpe_gpio->irq_base + line);
+ handle_nested_irq(virq);
stat &= ~(1 << bit);
}
@@ -267,43 +271,61 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
{
- int base = stmpe_gpio->irq_base;
- int irq;
+ struct stmpe_gpio *stmpe_gpio = d->host_data;
+
+ if (!stmpe_gpio)
+ return -EINVAL;
- for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) {
- irq_set_chip_data(irq, stmpe_gpio);
- irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(irq, 1);
+ irq_set_chip_data(virq, stmpe_gpio);
+ irq_set_chip_and_handler(virq, &stmpe_gpio_irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
+ set_irq_flags(virq, IRQF_VALID);
#else
- irq_set_noprobe(irq);
+ irq_set_noprobe(virq);
#endif
- }
return 0;
}
-static void stmpe_gpio_irq_remove(struct stmpe_gpio *stmpe_gpio)
+void stmpe_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
{
- int base = stmpe_gpio->irq_base;
- int irq;
-
- for (irq = base; irq < base + stmpe_gpio->chip.ngpio; irq++) {
#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
+ set_irq_flags(virq, 0);
#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
+ .unmap = stmpe_gpio_irq_unmap,
+ .map = stmpe_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+{
+ int base = stmpe_gpio->irq_base;
+
+ stmpe_gpio->domain = irq_domain_add_simple(NULL,
+ stmpe_gpio->chip.ngpio, base,
+ &stmpe_gpio_irq_simple_ops, stmpe_gpio);
+ if (!stmpe_gpio->domain) {
+ dev_err(stmpe_gpio->dev, "failed to create irqdomain\n");
+ return -ENOSYS;
}
+
+ return 0;
}
-static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
+static int stmpe_gpio_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_gpio_platform_data *pdata;
struct stmpe_gpio *stmpe_gpio;
int ret;
@@ -321,13 +343,17 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->dev = &pdev->dev;
stmpe_gpio->stmpe = stmpe;
- stmpe_gpio->norequest_mask = pdata ? pdata->norequest_mask : 0;
-
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
stmpe_gpio->chip.dev = &pdev->dev;
stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
+ if (pdata)
+ stmpe_gpio->norequest_mask = pdata->norequest_mask;
+ else if (np)
+ of_property_read_u32(np, "st,norequest-mask",
+ &stmpe_gpio->norequest_mask);
+
if (irq >= 0)
stmpe_gpio->irq_base = stmpe->irq_base + STMPE_INT_GPIO(0);
else
@@ -348,7 +374,7 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
IRQF_ONESHOT, "stmpe-gpio", stmpe_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_removeirq;
+ goto out_disable;
}
}
@@ -368,9 +394,6 @@ static int __devinit stmpe_gpio_probe(struct platform_device *pdev)
out_freeirq:
if (irq >= 0)
free_irq(irq, stmpe_gpio);
-out_removeirq:
- if (irq >= 0)
- stmpe_gpio_irq_remove(stmpe_gpio);
out_disable:
stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
out_free:
@@ -378,7 +401,7 @@ out_free:
return ret;
}
-static int __devexit stmpe_gpio_remove(struct platform_device *pdev)
+static int stmpe_gpio_remove(struct platform_device *pdev)
{
struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -398,10 +421,9 @@ static int __devexit stmpe_gpio_remove(struct platform_device *pdev)
stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
- if (irq >= 0) {
+ if (irq >= 0)
free_irq(irq, stmpe_gpio);
- stmpe_gpio_irq_remove(stmpe_gpio);
- }
+
platform_set_drvdata(pdev, NULL);
kfree(stmpe_gpio);
@@ -412,7 +434,7 @@ static struct platform_driver stmpe_gpio_driver = {
.driver.name = "stmpe-gpio",
.driver.owner = THIS_MODULE,
.probe = stmpe_gpio_probe,
- .remove = __devexit_p(stmpe_gpio_remove),
+ .remove = stmpe_gpio_remove,
};
static int __init stmpe_gpio_init(void)
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 8bead0bb6459..85841ee70b17 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -197,7 +197,7 @@ static int xway_stp_hw_init(struct xway_stp *chip)
return 0;
}
-static int __devinit xway_stp_probe(struct platform_device *pdev)
+static int xway_stp_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
const __be32 *shadow, *groups, *dsl, *phy;
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index eb3e215d2396..796b6c42fa70 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -575,7 +575,7 @@ static void sx150x_remove_irq_chip(struct sx150x_chip *chip)
}
}
-static int __devinit sx150x_probe(struct i2c_client *client,
+static int sx150x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
@@ -622,7 +622,7 @@ probe_fail_pre_gpiochip_add:
return rc;
}
-static int __devexit sx150x_remove(struct i2c_client *client)
+static int sx150x_remove(struct i2c_client *client)
{
struct sx150x_chip *chip;
int rc;
@@ -646,7 +646,7 @@ static struct i2c_driver sx150x_driver = {
.owner = THIS_MODULE
},
.probe = sx150x_probe,
- .remove = __devexit_p(sx150x_remove),
+ .remove = sx150x_remove,
.id_table = sx150x_id,
};
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 1e48317e70fb..c0595bbf3268 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -292,17 +292,15 @@ static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
{
int base = tc3589x_gpio->irq_base;
- if (base) {
- tc3589x_gpio->domain = irq_domain_add_legacy(
- NULL, tc3589x_gpio->chip.ngpio, base,
- 0, &tc3589x_irq_ops, tc3589x_gpio);
- }
- else {
- tc3589x_gpio->domain = irq_domain_add_linear(
- np, tc3589x_gpio->chip.ngpio,
- &tc3589x_irq_ops, tc3589x_gpio);
- }
-
+ /*
+ * If this results in a linear domain, irq_create_mapping() will
+ * take care of allocating IRQ descriptors at runtime. When a base
+ * is provided, the IRQ descriptors will be allocated when the
+ * domain is instantiated.
+ */
+ tc3589x_gpio->domain = irq_domain_add_simple(np,
+ tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops,
+ tc3589x_gpio);
if (!tc3589x_gpio->domain) {
dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n");
return -ENOSYS;
@@ -311,7 +309,7 @@ static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
return 0;
}
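As the comment above notes, irq_domain_add_simple() covers both cases that the removed if/else handled: with a non-zero base it behaves like irq_domain_add_legacy(), and with base == 0 it creates a linear domain whose descriptors are allocated lazily by irq_create_mapping(). A hedged sketch of that call with placeholder foo_* names:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

struct foo_priv {
        struct irq_domain *domain;
        int ngpio;
};

static int foo_irq_init(struct device_node *np, struct foo_priv *priv,
                        const struct irq_domain_ops *ops, unsigned int first_irq)
{
        /* first_irq > 0: legacy-style, descriptors bound up front;
         * first_irq == 0: linear domain, descriptors created on demand */
        priv->domain = irq_domain_add_simple(np, priv->ngpio, first_irq,
                                             ops, priv);
        if (!priv->domain)
                return -ENOSYS;

        return 0;
}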
-static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
+static int tc3589x_gpio_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
struct tc3589x_gpio_platform_data *pdata;
@@ -389,7 +387,7 @@ out_free:
return ret;
}
-static int __devexit tc3589x_gpio_remove(struct platform_device *pdev)
+static int tc3589x_gpio_remove(struct platform_device *pdev)
{
struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -419,7 +417,7 @@ static struct platform_driver tc3589x_gpio_driver = {
.driver.name = "tc3589x-gpio",
.driver.owner = THIS_MODULE,
.probe = tc3589x_gpio_probe,
- .remove = __devexit_p(tc3589x_gpio_remove),
+ .remove = tc3589x_gpio_remove,
};
static int __init tc3589x_gpio_init(void)
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index d982593d7563..63cb643d4b5a 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm.h>
#include <asm/mach/irq.h>
@@ -64,7 +65,7 @@ struct tegra_gpio_bank {
int bank;
int irq;
spinlock_t lvl_lock[4];
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
u32 cnf[4];
u32 out[4];
u32 oe[4];
@@ -109,20 +110,18 @@ static void tegra_gpio_enable(int gpio)
{
tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
}
-EXPORT_SYMBOL_GPL(tegra_gpio_enable);
static void tegra_gpio_disable(int gpio)
{
tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
}
-EXPORT_SYMBOL_GPL(tegra_gpio_disable);
-int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
{
return pinctrl_request_gpio(offset);
}
-void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
{
pinctrl_free_gpio(offset);
tegra_gpio_disable(offset);
@@ -135,6 +134,11 @@ static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
{
+ /* If gpio is in output mode then read from the out value */
+ if ((tegra_gpio_readl(GPIO_OE(offset)) >> GPIO_BIT(offset)) & 1)
+ return (tegra_gpio_readl(GPIO_OUT(offset)) >>
+ GPIO_BIT(offset)) & 0x1;
+
return (tegra_gpio_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
}
@@ -285,8 +289,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
-#ifdef CONFIG_PM
-void tegra_gpio_resume(void)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_gpio_resume(struct device *dev)
{
unsigned long flags;
int b;
@@ -308,9 +312,10 @@ void tegra_gpio_resume(void)
}
local_irq_restore(flags);
+ return 0;
}
-void tegra_gpio_suspend(void)
+static int tegra_gpio_suspend(struct device *dev)
{
unsigned long flags;
int b;
@@ -330,6 +335,7 @@ void tegra_gpio_suspend(void)
}
}
local_irq_restore(flags);
+ return 0;
}
static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
@@ -345,11 +351,15 @@ static struct irq_chip tegra_gpio_irq_chip = {
.irq_mask = tegra_gpio_irq_mask,
.irq_unmask = tegra_gpio_irq_unmask,
.irq_set_type = tegra_gpio_irq_set_type,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.irq_set_wake = tegra_gpio_wake_enable,
#endif
};
+static const struct dev_pm_ops tegra_gpio_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
+};
+
struct tegra_gpio_soc_config {
u32 bank_stride;
u32 upper_offset;
@@ -365,7 +375,7 @@ static struct tegra_gpio_soc_config tegra30_gpio_config = {
.upper_offset = 0x80,
};
-static struct of_device_id tegra_gpio_of_match[] __devinitdata = {
+static struct of_device_id tegra_gpio_of_match[] = {
{ .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
{ },
@@ -376,11 +386,10 @@ static struct of_device_id tegra_gpio_of_match[] __devinitdata = {
*/
static struct lock_class_key gpio_lock_class;
-static int __devinit tegra_gpio_probe(struct platform_device *pdev)
+static int tegra_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct tegra_gpio_soc_config *config;
- int irq_base;
struct resource *res;
struct tegra_gpio_bank *bank;
int gpio;
@@ -417,14 +426,11 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq_base = irq_alloc_descs(-1, 0, tegra_gpio_chip.ngpio, 0);
- if (irq_base < 0) {
- dev_err(&pdev->dev, "Couldn't allocate IRQ numbers\n");
- return -ENODEV;
- }
- irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
- tegra_gpio_chip.ngpio, irq_base, 0,
+ irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ tegra_gpio_chip.ngpio,
&irq_domain_simple_ops, NULL);
+ if (!irq_domain)
+ return -ENODEV;
for (i = 0; i < tegra_gpio_bank_count; i++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
@@ -464,7 +470,7 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
gpiochip_add(&tegra_gpio_chip);
for (gpio = 0; gpio < tegra_gpio_chip.ngpio; gpio++) {
- int irq = irq_find_mapping(irq_domain, gpio);
+ int irq = irq_create_mapping(irq_domain, gpio);
/* No validity check; all Tegra GPIOs are valid IRQs */
bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
@@ -493,6 +499,7 @@ static struct platform_driver tegra_gpio_driver = {
.driver = {
.name = "tegra-gpio",
.owner = THIS_MODULE,
+ .pm = &tegra_gpio_pm_ops,
.of_match_table = tegra_gpio_of_match,
},
.probe = tegra_gpio_probe,
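The Tegra hunks above stop exporting tegra_gpio_suspend()/tegra_gpio_resume() and instead wire them up through dev_pm_ops on the platform driver. A hedged sketch of that wiring with placeholder foo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_gpio_suspend(struct device *dev)
{
        /* save controller state for system sleep */
        return 0;
}

static int foo_gpio_resume(struct device *dev)
{
        /* restore controller state after system sleep */
        return 0;
}
#endif

static const struct dev_pm_ops foo_gpio_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_gpio_suspend, foo_gpio_resume)
};

static struct platform_driver foo_gpio_driver = {
        .driver = {
                .name   = "foo-gpio",
                .owner  = THIS_MODULE,
                .pm     = &foo_gpio_pm_ops,
        },
};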
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 1a3e2b9b4772..702cca9284f1 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -222,7 +222,7 @@ static struct irq_chip timbgpio_irqchip = {
.irq_set_type = timbgpio_irq_type,
};
-static int __devinit timbgpio_probe(struct platform_device *pdev)
+static int timbgpio_probe(struct platform_device *pdev)
{
int err, i;
struct gpio_chip *gc;
@@ -316,7 +316,7 @@ err_mem:
return err;
}
-static int __devexit timbgpio_remove(struct platform_device *pdev)
+static int timbgpio_remove(struct platform_device *pdev)
{
int err;
struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 2526b3bb0fae..29e8e750bd49 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -80,7 +80,15 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
val, mask);
}
-static int __devinit tps6586x_gpio_probe(struct platform_device *pdev)
+static int tps6586x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+ return tps6586x_irq_get_virq(tps6586x_gpio->parent,
+ TPS6586X_INT_PLDO_0 + offset);
+}
+
+static int tps6586x_gpio_probe(struct platform_device *pdev)
{
struct tps6586x_platform_data *pdata;
struct tps6586x_gpio *tps6586x_gpio;
@@ -106,6 +114,7 @@ static int __devinit tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
+ tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
#ifdef CONFIG_OF_GPIO
tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
@@ -126,7 +135,7 @@ static int __devinit tps6586x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit tps6586x_gpio_remove(struct platform_device *pdev)
+static int tps6586x_gpio_remove(struct platform_device *pdev)
{
struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
@@ -137,7 +146,7 @@ static struct platform_driver tps6586x_gpio_driver = {
.driver.name = "tps6586x-gpio",
.driver.owner = THIS_MODULE,
.probe = tps6586x_gpio_probe,
- .remove = __devexit_p(tps6586x_gpio_remove),
+ .remove = tps6586x_gpio_remove,
};
static int __init tps6586x_gpio_init(void)
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 11f29c82253c..5083825a0348 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -113,7 +113,7 @@ static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
}
#endif
-static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+static int tps65910_gpio_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
@@ -188,7 +188,7 @@ skip_init:
return ret;
}
-static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+static int tps65910_gpio_remove(struct platform_device *pdev)
{
struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
@@ -199,7 +199,7 @@ static struct platform_driver tps65910_gpio_driver = {
.driver.name = "tps65910-gpio",
.driver.owner = THIS_MODULE,
.probe = tps65910_gpio_probe,
- .remove = __devexit_p(tps65910_gpio_remove),
+ .remove = tps65910_gpio_remove,
};
static int __init tps65910_gpio_init(void)
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 99106d1e2e55..30a5844a7dca 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -84,7 +84,7 @@ static struct gpio_chip template_chip = {
.base = -1,
};
-static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
+static int tps65912_gpio_probe(struct platform_device *pdev)
{
struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
struct tps65912_board *pdata = tps65912->dev->platform_data;
@@ -113,7 +113,7 @@ static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
+static int tps65912_gpio_remove(struct platform_device *pdev)
{
struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
@@ -126,7 +126,7 @@ static struct platform_driver tps65912_gpio_driver = {
.owner = THIS_MODULE,
},
.probe = tps65912_gpio_probe,
- .remove = __devexit_p(tps65912_gpio_remove),
+ .remove = tps65912_gpio_remove,
};
static int __init tps65912_gpio_init(void)
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
new file mode 100644
index 000000000000..cc53cab8df2a
--- /dev/null
+++ b/drivers/gpio/gpio-ts5500.c
@@ -0,0 +1,466 @@
+/*
+ * Digital I/O driver for Technologic Systems TS-5500
+ *
+ * Copyright (c) 2012 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * Technologic Systems platforms have pin blocks, exposing several Digital
+ * Input/Output lines (DIO). This driver aims to support single pin blocks.
+ * In that sense, the support is not limited to the TS-5500 blocks.
+ * Actually, the following platforms have DIO support:
+ *
+ * TS-5500:
+ * Documentation: http://wiki.embeddedarm.com/wiki/TS-5500
+ * Blocks: DIO1, DIO2 and LCD port.
+ *
+ * TS-5600:
+ * Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
+ * Blocks: LCD port (identical to TS-5500 LCD).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/gpio-ts5500.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* List of supported Technologic Systems platforms DIO blocks */
+enum ts5500_blocks { TS5500_DIO1, TS5500_DIO2, TS5500_LCD, TS5600_LCD };
+
+struct ts5500_priv {
+ const struct ts5500_dio *pinout;
+ struct gpio_chip gpio_chip;
+ spinlock_t lock;
+ bool strap;
+ u8 hwirq;
+};
+
+/*
+ * Hex 7D is used to control several blocks (e.g. DIO2 and LCD port).
+ * This flag ensures that the region has been requested by this driver.
+ */
+static bool hex7d_reserved;
+
+/*
+ * This structure is used to describe capabilities of DIO lines,
+ * such as available directions and connected interrupt (if any).
+ */
+struct ts5500_dio {
+ const u8 value_addr;
+ const u8 value_mask;
+ const u8 control_addr;
+ const u8 control_mask;
+ const bool no_input;
+ const bool no_output;
+ const u8 irq;
+};
+
+#define TS5500_DIO_IN_OUT(vaddr, vbit, caddr, cbit) \
+ { \
+ .value_addr = vaddr, \
+ .value_mask = BIT(vbit), \
+ .control_addr = caddr, \
+ .control_mask = BIT(cbit), \
+ }
+
+#define TS5500_DIO_IN(addr, bit) \
+ { \
+ .value_addr = addr, \
+ .value_mask = BIT(bit), \
+ .no_output = true, \
+ }
+
+#define TS5500_DIO_IN_IRQ(addr, bit, _irq) \
+ { \
+ .value_addr = addr, \
+ .value_mask = BIT(bit), \
+ .no_output = true, \
+ .irq = _irq, \
+ }
+
+#define TS5500_DIO_OUT(addr, bit) \
+ { \
+ .value_addr = addr, \
+ .value_mask = BIT(bit), \
+ .no_input = true, \
+ }
+
+/*
+ * Input/Output DIO lines are programmed in groups of 4. Their values are
+ * available through 4 consecutive bits in a value port, whereas the direction
+ * of these 4 lines is driven by only 1 bit in a control port.
+ */
+#define TS5500_DIO_GROUP(vaddr, vbitfrom, caddr, cbit) \
+ TS5500_DIO_IN_OUT(vaddr, vbitfrom + 0, caddr, cbit), \
+ TS5500_DIO_IN_OUT(vaddr, vbitfrom + 1, caddr, cbit), \
+ TS5500_DIO_IN_OUT(vaddr, vbitfrom + 2, caddr, cbit), \
+ TS5500_DIO_IN_OUT(vaddr, vbitfrom + 3, caddr, cbit)
+
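To make the grouping described above concrete, here is a hedged illustration (not part of the patch) of what the first group used in the tables below, TS5500_DIO_GROUP(0x7b, 0, 0x7a, 0), expands to:

/* four value bits of port 0x7b, all steered by control bit 0 of port 0x7a */
static const struct ts5500_dio example_group[] = {
        TS5500_DIO_IN_OUT(0x7b, 0, 0x7a, 0),
        TS5500_DIO_IN_OUT(0x7b, 1, 0x7a, 0),
        TS5500_DIO_IN_OUT(0x7b, 2, 0x7a, 0),
        TS5500_DIO_IN_OUT(0x7b, 3, 0x7a, 0),
};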
+/*
+ * TS-5500 DIO1 block
+ *
+ * value control dir hw
+ * addr bit addr bit in out irq name pin offset
+ *
+ * 0x7b 0 0x7a 0 x x DIO1_0 1 0
+ * 0x7b 1 0x7a 0 x x DIO1_1 3 1
+ * 0x7b 2 0x7a 0 x x DIO1_2 5 2
+ * 0x7b 3 0x7a 0 x x DIO1_3 7 3
+ * 0x7b 4 0x7a 1 x x DIO1_4 9 4
+ * 0x7b 5 0x7a 1 x x DIO1_5 11 5
+ * 0x7b 6 0x7a 1 x x DIO1_6 13 6
+ * 0x7b 7 0x7a 1 x x DIO1_7 15 7
+ * 0x7c 0 0x7a 5 x x DIO1_8 4 8
+ * 0x7c 1 0x7a 5 x x DIO1_9 6 9
+ * 0x7c 2 0x7a 5 x x DIO1_10 8 10
+ * 0x7c 3 0x7a 5 x x DIO1_11 10 11
+ * 0x7c 4 x DIO1_12 12 12
+ * 0x7c 5 x 7 DIO1_13 14 13
+ */
+static const struct ts5500_dio ts5500_dio1[] = {
+ TS5500_DIO_GROUP(0x7b, 0, 0x7a, 0),
+ TS5500_DIO_GROUP(0x7b, 4, 0x7a, 1),
+ TS5500_DIO_GROUP(0x7c, 0, 0x7a, 5),
+ TS5500_DIO_IN(0x7c, 4),
+ TS5500_DIO_IN_IRQ(0x7c, 5, 7),
+};
+
+/*
+ * TS-5500 DIO2 block
+ *
+ * value control dir hw
+ * addr bit addr bit in out irq name pin offset
+ *
+ * 0x7e 0 0x7d 0 x x DIO2_0 1 0
+ * 0x7e 1 0x7d 0 x x DIO2_1 3 1
+ * 0x7e 2 0x7d 0 x x DIO2_2 5 2
+ * 0x7e 3 0x7d 0 x x DIO2_3 7 3
+ * 0x7e 4 0x7d 1 x x DIO2_4 9 4
+ * 0x7e 5 0x7d 1 x x DIO2_5 11 5
+ * 0x7e 6 0x7d 1 x x DIO2_6 13 6
+ * 0x7e 7 0x7d 1 x x DIO2_7 15 7
+ * 0x7f 0 0x7d 5 x x DIO2_8 4 8
+ * 0x7f 1 0x7d 5 x x DIO2_9 6 9
+ * 0x7f 2 0x7d 5 x x DIO2_10 8 10
+ * 0x7f 3 0x7d 5 x x DIO2_11 10 11
+ * 0x7f 4 x 6 DIO2_13 14 12
+ */
+static const struct ts5500_dio ts5500_dio2[] = {
+ TS5500_DIO_GROUP(0x7e, 0, 0x7d, 0),
+ TS5500_DIO_GROUP(0x7e, 4, 0x7d, 1),
+ TS5500_DIO_GROUP(0x7f, 0, 0x7d, 5),
+ TS5500_DIO_IN_IRQ(0x7f, 4, 6),
+};
+
+/*
+ * TS-5500 LCD port used as DIO block
+ * TS-5600 LCD port is identical
+ *
+ * value control dir hw
+ * addr bit addr bit in out irq name pin offset
+ *
+ * 0x72 0 0x7d 2 x x LCD_0 8 0
+ * 0x72 1 0x7d 2 x x LCD_1 7 1
+ * 0x72 2 0x7d 2 x x LCD_2 10 2
+ * 0x72 3 0x7d 2 x x LCD_3 9 3
+ * 0x72 4 0x7d 3 x x LCD_4 12 4
+ * 0x72 5 0x7d 3 x x LCD_5 11 5
+ * 0x72 6 0x7d 3 x x LCD_6 14 6
+ * 0x72 7 0x7d 3 x x LCD_7 13 7
+ * 0x73 0 x LCD_EN 5 8
+ * 0x73 6 x LCD_WR 6 9
+ * 0x73 7 x 1 LCD_RS 3 10
+ */
+static const struct ts5500_dio ts5500_lcd[] = {
+ TS5500_DIO_GROUP(0x72, 0, 0x7d, 2),
+ TS5500_DIO_GROUP(0x72, 4, 0x7d, 3),
+ TS5500_DIO_OUT(0x73, 0),
+ TS5500_DIO_IN(0x73, 6),
+ TS5500_DIO_IN_IRQ(0x73, 7, 1),
+};
+
+static inline struct ts5500_priv *ts5500_gc_to_priv(struct gpio_chip *chip)
+{
+ return container_of(chip, struct ts5500_priv, gpio_chip);
+}
+
+static inline void ts5500_set_mask(u8 mask, u8 addr)
+{
+ u8 val = inb(addr);
+ val |= mask;
+ outb(val, addr);
+}
+
+static inline void ts5500_clear_mask(u8 mask, u8 addr)
+{
+ u8 val = inb(addr);
+ val &= ~mask;
+ outb(val, addr);
+}
+
+static int ts5500_gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ts5500_priv *priv = ts5500_gc_to_priv(chip);
+ const struct ts5500_dio line = priv->pinout[offset];
+ unsigned long flags;
+
+ if (line.no_input)
+ return -ENXIO;
+
+ if (line.no_output)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ts5500_clear_mask(line.control_mask, line.control_addr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ts5500_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ts5500_priv *priv = ts5500_gc_to_priv(chip);
+ const struct ts5500_dio line = priv->pinout[offset];
+
+ return !!(inb(line.value_addr) & line.value_mask);
+}
+
+static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct ts5500_priv *priv = ts5500_gc_to_priv(chip);
+ const struct ts5500_dio line = priv->pinout[offset];
+ unsigned long flags;
+
+ if (line.no_output)
+ return -ENXIO;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!line.no_input)
+ ts5500_set_mask(line.control_mask, line.control_addr);
+
+ if (val)
+ ts5500_set_mask(line.value_mask, line.value_addr);
+ else
+ ts5500_clear_mask(line.value_mask, line.value_addr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct ts5500_priv *priv = ts5500_gc_to_priv(chip);
+ const struct ts5500_dio line = priv->pinout[offset];
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (val)
+ ts5500_set_mask(line.value_mask, line.value_addr);
+ else
+ ts5500_clear_mask(line.value_mask, line.value_addr);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct ts5500_priv *priv = ts5500_gc_to_priv(chip);
+ const struct ts5500_dio *block = priv->pinout;
+ const struct ts5500_dio line = block[offset];
+
+ /* Only one pin is connected to an interrupt */
+ if (line.irq)
+ return line.irq;
+
+ /* As this pin is input-only, we may strap it to another in/out pin */
+ if (priv->strap)
+ return priv->hwirq;
+
+ return -ENXIO;
+}
+
+static int ts5500_enable_irq(struct ts5500_priv *priv)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->hwirq == 7)
+ ts5500_set_mask(BIT(7), 0x7a); /* DIO1_13 on IRQ7 */
+ else if (priv->hwirq == 6)
+ ts5500_set_mask(BIT(7), 0x7d); /* DIO2_13 on IRQ6 */
+ else if (priv->hwirq == 1)
+ ts5500_set_mask(BIT(6), 0x7d); /* LCD_RS on IRQ1 */
+ else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static void ts5500_disable_irq(struct ts5500_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->hwirq == 7)
+ ts5500_clear_mask(BIT(7), 0x7a); /* DIO1_13 on IRQ7 */
+ else if (priv->hwirq == 6)
+ ts5500_clear_mask(BIT(7), 0x7d); /* DIO2_13 on IRQ6 */
+ else if (priv->hwirq == 1)
+ ts5500_clear_mask(BIT(6), 0x7d); /* LCD_RS on IRQ1 */
+ else
+ dev_err(priv->gpio_chip.dev, "invalid hwirq %d\n", priv->hwirq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int ts5500_dio_probe(struct platform_device *pdev)
+{
+ enum ts5500_blocks block = platform_get_device_id(pdev)->driver_data;
+ struct ts5500_dio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ const char *name = dev_name(dev);
+ struct ts5500_priv *priv;
+ struct resource *res;
+ unsigned long flags;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "missing IRQ resource\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct ts5500_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+ priv->hwirq = res->start;
+ spin_lock_init(&priv->lock);
+
+ priv->gpio_chip.owner = THIS_MODULE;
+ priv->gpio_chip.label = name;
+ priv->gpio_chip.dev = dev;
+ priv->gpio_chip.direction_input = ts5500_gpio_input;
+ priv->gpio_chip.direction_output = ts5500_gpio_output;
+ priv->gpio_chip.get = ts5500_gpio_get;
+ priv->gpio_chip.set = ts5500_gpio_set;
+ priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
+ priv->gpio_chip.base = -1;
+ if (pdata) {
+ priv->gpio_chip.base = pdata->base;
+ priv->strap = pdata->strap;
+ }
+
+ switch (block) {
+ case TS5500_DIO1:
+ priv->pinout = ts5500_dio1;
+ priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_dio1);
+
+ if (!devm_request_region(dev, 0x7a, 3, name)) {
+ dev_err(dev, "failed to request %s ports\n", name);
+ return -EBUSY;
+ }
+ break;
+ case TS5500_DIO2:
+ priv->pinout = ts5500_dio2;
+ priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_dio2);
+
+ if (!devm_request_region(dev, 0x7e, 2, name)) {
+ dev_err(dev, "failed to request %s ports\n", name);
+ return -EBUSY;
+ }
+
+ if (hex7d_reserved)
+ break;
+
+ if (!devm_request_region(dev, 0x7d, 1, name)) {
+ dev_err(dev, "failed to request %s 7D\n", name);
+ return -EBUSY;
+ }
+
+ hex7d_reserved = true;
+ break;
+ case TS5500_LCD:
+ case TS5600_LCD:
+ priv->pinout = ts5500_lcd;
+ priv->gpio_chip.ngpio = ARRAY_SIZE(ts5500_lcd);
+
+ if (!devm_request_region(dev, 0x72, 2, name)) {
+ dev_err(dev, "failed to request %s ports\n", name);
+ return -EBUSY;
+ }
+
+ if (!hex7d_reserved) {
+ if (!devm_request_region(dev, 0x7d, 1, name)) {
+ dev_err(dev, "failed to request %s 7D\n", name);
+ return -EBUSY;
+ }
+
+ hex7d_reserved = true;
+ }
+
+ /* Ensure usage of LCD port as DIO */
+ spin_lock_irqsave(&priv->lock, flags);
+ ts5500_clear_mask(BIT(4), 0x7d);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ }
+
+ ret = gpiochip_add(&priv->gpio_chip);
+ if (ret) {
+ dev_err(dev, "failed to register the gpio chip\n");
+ return ret;
+ }
+
+ ret = ts5500_enable_irq(priv);
+ if (ret) {
+ dev_err(dev, "invalid interrupt %d\n", priv->hwirq);
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ if (gpiochip_remove(&priv->gpio_chip))
+ dev_err(dev, "failed to remove gpio chip\n");
+ return ret;
+}
+
+static int ts5500_dio_remove(struct platform_device *pdev)
+{
+ struct ts5500_priv *priv = platform_get_drvdata(pdev);
+
+ ts5500_disable_irq(priv);
+ return gpiochip_remove(&priv->gpio_chip);
+}
+
+static struct platform_device_id ts5500_dio_ids[] = {
+ { "ts5500-dio1", TS5500_DIO1 },
+ { "ts5500-dio2", TS5500_DIO2 },
+ { "ts5500-dio-lcd", TS5500_LCD },
+ { "ts5600-dio-lcd", TS5600_LCD },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ts5500_dio_ids);
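+
+/*
+ * Registration example (a sketch, not part of this driver; the names below
+ * are illustrative). Board code registers one platform device per block,
+ * supplying the IRQ resource that ts5500_dio_probe() requires and optional
+ * platform data with the 'base' and 'strap' fields consumed above:
+ *
+ *	static struct resource ts5500_dio1_resource = DEFINE_RES_IRQ(7);
+ *
+ *	static struct ts5500_dio_platform_data ts5500_dio1_pdata = {
+ *		.base  = -1,
+ *		.strap = true,
+ *	};
+ *
+ *	platform_device_register_resndata(NULL, "ts5500-dio1", -1,
+ *					  &ts5500_dio1_resource, 1,
+ *					  &ts5500_dio1_pdata,
+ *					  sizeof(ts5500_dio1_pdata));
+ *
+ * A base of -1 requests a dynamic GPIO base; strap reports that the
+ * input-only line is strapped to the block's IRQ line.
+ */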
+
+static struct platform_driver ts5500_dio_driver = {
+ .driver = {
+ .name = "ts5500-dio",
+ .owner = THIS_MODULE,
+ },
+ .probe = ts5500_dio_probe,
+ .remove = ts5500_dio_remove,
+ .id_table = ts5500_dio_ids,
+};
+
+module_platform_driver(ts5500_dio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems TS-5500 Digital I/O driver");
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index c5f8ca233e1f..9572aa137e6f 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -88,11 +88,15 @@ static inline int gpio_twl4030_write(u8 address, u8 data)
/*----------------------------------------------------------------------*/
/*
- * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB}))
+ * LED register offsets from TWL_MODULE_LED base
* PWMs A and B are dedicated to LEDs A and B, respectively.
*/
-#define TWL4030_LED_LEDEN 0x0
+#define TWL4030_LED_LEDEN_REG 0x00
+#define TWL4030_PWMAON_REG 0x01
+#define TWL4030_PWMAOFF_REG 0x02
+#define TWL4030_PWMBON_REG 0x03
+#define TWL4030_PWMBOFF_REG 0x04
/* LEDEN bits */
#define LEDEN_LEDAON BIT(0)
@@ -104,9 +108,6 @@ static inline int gpio_twl4030_write(u8 address, u8 data)
#define LEDEN_PWM_LENGTHA BIT(6)
#define LEDEN_PWM_LENGTHB BIT(7)
-#define TWL4030_PWMx_PWMxON 0x0
-#define TWL4030_PWMx_PWMxOFF 0x1
-
#define PWMxON_LENGTH BIT(7)
/*----------------------------------------------------------------------*/
@@ -145,7 +146,7 @@ static void twl4030_led_set_value(int led, int value)
else
cached_leden |= mask;
status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
- TWL4030_LED_LEDEN);
+ TWL4030_LED_LEDEN_REG);
mutex_unlock(&gpio_lock);
}
@@ -216,33 +217,33 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
if (offset >= TWL4030_GPIO_MAX) {
u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
| LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
- u8 module = TWL4030_MODULE_PWMA;
+ u8 reg = TWL4030_PWMAON_REG;
offset -= TWL4030_GPIO_MAX;
if (offset) {
ledclr_mask <<= 1;
- module = TWL4030_MODULE_PWMB;
+ reg = TWL4030_PWMBON_REG;
}
/* initialize PWM to always-drive */
- status = twl_i2c_write_u8(module, 0x7f,
- TWL4030_PWMx_PWMxOFF);
+ /* Configure PWM OFF register first */
+ status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg + 1);
if (status < 0)
goto done;
- status = twl_i2c_write_u8(module, 0x7f,
- TWL4030_PWMx_PWMxON);
+
+ /* Followed by PWM ON register */
+ status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg);
if (status < 0)
goto done;
/* init LED to not-driven (high) */
- module = TWL4030_MODULE_LED;
- status = twl_i2c_read_u8(module, &cached_leden,
- TWL4030_LED_LEDEN);
+ status = twl_i2c_read_u8(TWL4030_MODULE_LED, &cached_leden,
+ TWL4030_LED_LEDEN_REG);
if (status < 0)
goto done;
cached_leden &= ~ledclr_mask;
- status = twl_i2c_write_u8(module, cached_leden,
- TWL4030_LED_LEDEN);
+ status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN_REG);
if (status < 0)
goto done;
@@ -352,15 +353,15 @@ static struct gpio_chip twl_gpiochip = {
/*----------------------------------------------------------------------*/
-static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
+static int gpio_twl4030_pulls(u32 ups, u32 downs)
{
- u8 message[6];
+ u8 message[5];
unsigned i, gpio_bit;
/* For most pins, a pulldown was enabled by default.
* We should have data that's specific to this board.
*/
- for (gpio_bit = 1, i = 1; i < 6; i++) {
+ for (gpio_bit = 1, i = 0; i < 5; i++) {
u8 bit_mask;
unsigned j;
@@ -377,18 +378,18 @@ static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
REG_GPIOPUPDCTR1, 5);
}
-static int __devinit gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
+static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
- u8 message[4];
+ u8 message[3];
/* 30 msec of debouncing is always used for MMC card detect,
* and is optional for everything else.
*/
- message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
+ message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
debounce >>= 8;
- message[2] = (debounce & 0xff);
+ message[1] = (debounce & 0xff);
debounce >>= 8;
- message[3] = (debounce & 0x03);
+ message[2] = (debounce & 0x03);
return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
@@ -419,7 +420,7 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
return omap_twl_info;
}
-static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
+static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
struct device_node *node = pdev->dev.of_node;
@@ -505,7 +506,7 @@ out:
return ret;
}
-/* Cannot use __devexit as gpio_twl4030_probe() calls us */
+/* Cannot be __exit: gpio_twl4030_probe() calls us */
static int gpio_twl4030_remove(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index dd58e8b25043..0be82c6dd796 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -82,7 +82,7 @@ static struct gpio_chip twl6040gpo_chip = {
/*----------------------------------------------------------------------*/
-static int __devinit gpo_twl6040_probe(struct platform_device *pdev)
+static int gpo_twl6040_probe(struct platform_device *pdev)
{
struct twl6040_gpo_data *pdata = pdev->dev.platform_data;
struct device *twl6040_core_dev = pdev->dev.parent;
@@ -113,7 +113,7 @@ static int __devinit gpo_twl6040_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit gpo_twl6040_remove(struct platform_device *pdev)
+static int gpo_twl6040_remove(struct platform_device *pdev)
{
return gpiochip_remove(&twl6040gpo_chip);
}
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
new file mode 100644
index 000000000000..59d72391de26
--- /dev/null
+++ b/drivers/gpio/gpio-viperboard.c
@@ -0,0 +1,517 @@
+/*
+ * Nano River Technologies viperboard GPIO lib driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_GPIOA_CLK_1MHZ 0
+#define VPRBRD_GPIOA_CLK_100KHZ 1
+#define VPRBRD_GPIOA_CLK_10KHZ 2
+#define VPRBRD_GPIOA_CLK_1KHZ 3
+#define VPRBRD_GPIOA_CLK_100HZ 4
+#define VPRBRD_GPIOA_CLK_10HZ 5
+
+#define VPRBRD_GPIOA_FREQ_DEFAULT 1000
+
+#define VPRBRD_GPIOA_CMD_CONT 0x00
+#define VPRBRD_GPIOA_CMD_PULSE 0x01
+#define VPRBRD_GPIOA_CMD_PWM 0x02
+#define VPRBRD_GPIOA_CMD_SETOUT 0x03
+#define VPRBRD_GPIOA_CMD_SETIN 0x04
+#define VPRBRD_GPIOA_CMD_SETINT 0x05
+#define VPRBRD_GPIOA_CMD_GETIN 0x06
+
+#define VPRBRD_GPIOB_CMD_SETDIR 0x00
+#define VPRBRD_GPIOB_CMD_SETVAL 0x01
+
+struct vprbrd_gpioa_msg {
+ u8 cmd;
+ u8 clk;
+ u8 offset;
+ u8 t1;
+ u8 t2;
+ u8 invert;
+ u8 pwmlevel;
+ u8 outval;
+ u8 risefall;
+ u8 answer;
+ u8 __fill;
+} __packed;
+
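+/* For the GPIO B bank the device expects 16-bit port-wide bitmasks: val and
+ * mask below are built with cpu_to_be16() by the senders and replies are
+ * decoded with be16_to_cpu(), i.e. they travel big-endian on the wire. */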
+struct vprbrd_gpiob_msg {
+ u8 cmd;
+ u16 val;
+ u16 mask;
+} __packed;
+
+struct vprbrd_gpio {
+ struct gpio_chip gpioa; /* gpio a related things */
+ u32 gpioa_out;
+ u32 gpioa_val;
+ struct gpio_chip gpiob; /* gpio b related things */
+ u32 gpiob_out;
+ u32 gpiob_val;
+ struct vprbrd *vb;
+};
+
+/* gpioa sampling clock module parameter */
+static unsigned char gpioa_clk;
+static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT;
+module_param(gpioa_freq, uint, 0);
+MODULE_PARM_DESC(gpioa_freq,
+	"gpio-a sampling freq in Hz (default is 1000 Hz); valid values: 10, 100, 1000, 10000, 100000, 1000000");
+
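+/*
+ * Example (a sketch): selecting a 100 kHz GPIO A sampling clock at module
+ * load time. Any value not in the list above falls back to 1 kHz with a
+ * warning, see vprbrd_gpio_init() below.
+ *
+ *	modprobe gpio-viperboard gpioa_freq=100000
+ */
+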
+/* ----- begin of gpio a chip -------------------------------------------- */
+
+static int vprbrd_gpioa_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret, answer, error = 0;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpioa_out & (1 << offset))
+ return gpio->gpioa_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ answer = gamsg->answer & 0x01;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ if (error)
+ return error;
+
+ return answer;
+}
+
+static void vprbrd_gpioa_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ if (gpio->gpioa_out & (1 << offset)) {
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN;
+ gamsg->clk = gpioa_clk;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out |= (1 << offset);
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+/* ----- end of gpio a chip ---------------------------------------------- */
+
+/* ----- begin of gpio b chip -------------------------------------------- */
+
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
+ unsigned dir)
+{
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+ int ret;
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR;
+ gbmsg->val = cpu_to_be16(dir << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpiob_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ u16 val;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpiob_out & (1 << offset))
+ return gpio->gpiob_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ val = gbmsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return ret;
+
+ /* cache the read values */
+ gpio->gpiob_val = be16_to_cpu(val);
+
+ return (gpio->gpiob_val >> offset) & 0x1;
+}
+
+static void vprbrd_gpiob_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ if (gpio->gpiob_out & (1 << offset)) {
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 0);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to input\n");
+
+ return ret;
+}
+
+static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out |= (1 << offset);
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 1);
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to output\n");
+
+ mutex_unlock(&vb->lock);
+
+ vprbrd_gpiob_set(chip, offset, value);
+
+ return ret;
+}
+
+/* ----- end of gpio b chip ---------------------------------------------- */
+
+static int vprbrd_gpio_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_gpio *vb_gpio;
+ int ret;
+
+ vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL);
+ if (vb_gpio == NULL)
+ return -ENOMEM;
+
+ vb_gpio->vb = vb;
+ /* registering gpio a */
+ vb_gpio->gpioa.label = "viperboard gpio a";
+ vb_gpio->gpioa.dev = &pdev->dev;
+ vb_gpio->gpioa.owner = THIS_MODULE;
+ vb_gpio->gpioa.base = -1;
+ vb_gpio->gpioa.ngpio = 16;
+ vb_gpio->gpioa.can_sleep = 1;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
+ vb_gpio->gpioa.get = vprbrd_gpioa_get;
+ vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
+ vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpioa);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpioa.dev, "could not add gpio a");
+ goto err_gpioa;
+ }
+
+ /* registering gpio b */
+ vb_gpio->gpiob.label = "viperboard gpio b";
+ vb_gpio->gpiob.dev = &pdev->dev;
+ vb_gpio->gpiob.owner = THIS_MODULE;
+ vb_gpio->gpiob.base = -1;
+ vb_gpio->gpiob.ngpio = 16;
+ vb_gpio->gpiob.can_sleep = 1;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
+ vb_gpio->gpiob.get = vprbrd_gpiob_get;
+ vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
+ vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpiob);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpiob.dev, "could not add gpio b");
+ goto err_gpiob;
+ }
+
+ platform_set_drvdata(pdev, vb_gpio);
+
+ return ret;
+
+err_gpiob:
+	/* don't clobber the gpiochip_add() error with the removal status */
+	if (gpiochip_remove(&vb_gpio->gpioa))
+		dev_err(vb_gpio->gpioa.dev, "could not remove gpio a");
+
+err_gpioa:
+	return ret;
+}
+
+static int vprbrd_gpio_remove(struct platform_device *pdev)
+{
+ struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&vb_gpio->gpiob);
+ if (ret == 0)
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_gpio_driver = {
+ .driver.name = "viperboard-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_gpio_probe,
+ .remove = vprbrd_gpio_remove,
+};
+
+static int __init vprbrd_gpio_init(void)
+{
+ switch (gpioa_freq) {
+ case 1000000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ;
+ break;
+ case 100000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ;
+ break;
+ case 10000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ;
+ break;
+ case 1000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ break;
+ case 100:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100HZ;
+ break;
+ case 10:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10HZ;
+ break;
+ default:
+ pr_warn("invalid gpioa_freq (%d)\n", gpioa_freq);
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_gpio_driver);
+}
+subsys_initcall(vprbrd_gpio_init);
+
+static void __exit vprbrd_gpio_exit(void)
+{
+ platform_driver_unregister(&vprbrd_gpio_driver);
+}
+module_exit(vprbrd_gpio_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-gpio");
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 82d5c20ad3cb..9902732a382d 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -490,7 +490,7 @@ static struct gpio_chip vr41xx_gpio_chip = {
.to_irq = vr41xx_gpio_to_irq,
};
-static int __devinit giu_probe(struct platform_device *pdev)
+static int giu_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int trigger, i, pin;
@@ -552,7 +552,7 @@ static int __devinit giu_probe(struct platform_device *pdev)
return cascade_irq(irq, giu_get_irq);
}
-static int __devexit giu_remove(struct platform_device *pdev)
+static int giu_remove(struct platform_device *pdev)
{
if (giu_base) {
iounmap(giu_base);
@@ -564,7 +564,7 @@ static int __devexit giu_remove(struct platform_device *pdev)
static struct platform_driver giu_device_driver = {
.probe = giu_probe,
- .remove = __devexit_p(giu_remove),
+ .remove = giu_remove,
.driver = {
.name = "GIU",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-vt8500.c b/drivers/gpio/gpio-vt8500.c
index bcd8e4aa7c7d..b53320a16fc8 100644
--- a/drivers/gpio/gpio-vt8500.c
+++ b/drivers/gpio/gpio-vt8500.c
@@ -96,6 +96,7 @@ static struct vt8500_gpio_data wm8505_data = {
VT8500_BANK(0x5C, 0x84, 0xAC, 0xD4, 12),
VT8500_BANK(0x60, 0x88, 0xB0, 0xD8, 16),
VT8500_BANK(0x64, 0x8C, 0xB4, 0xDC, 22),
+ VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
},
};
@@ -115,6 +116,7 @@ static struct vt8500_gpio_data wm8650_data = {
VT8500_BANK(0x58, 0x98, 0xD8, 0x18, 32),
VT8500_BANK(0x5C, 0x9C, 0xDC, 0x1C, 32),
VT8500_BANK(0x7C, 0xBC, 0xFC, 0x3C, 32),
+ VT8500_BANK(0x500, 0x504, 0x508, 0x50C, 6),
},
};
@@ -269,7 +271,7 @@ static struct of_device_id vt8500_gpio_dt_ids[] = {
{ /* Sentinel */ },
};
-static int __devinit vt8500_gpio_probe(struct platform_device *pdev)
+static int vt8500_gpio_probe(struct platform_device *pdev)
{
void __iomem *gpio_base;
struct device_node *np;
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 76ebfe5ff702..2b7252cb2427 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -219,7 +219,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
}
/* This platform device is ordinarily registered by the vx855 mfd driver */
-static __devinit int vx855gpio_probe(struct platform_device *pdev)
+static int vx855gpio_probe(struct platform_device *pdev)
{
struct resource *res_gpi;
struct resource *res_gpo;
@@ -284,7 +284,7 @@ out_release:
return ret;
}
-static int __devexit vx855gpio_remove(struct platform_device *pdev)
+static int vx855gpio_remove(struct platform_device *pdev)
{
struct vx855_gpio *vg = platform_get_drvdata(pdev);
struct resource *res;
@@ -312,7 +312,7 @@ static struct platform_driver vx855gpio_driver = {
.owner = THIS_MODULE,
},
.probe = vx855gpio_probe,
- .remove = __devexit_p(vx855gpio_remove),
+ .remove = vx855gpio_remove,
};
module_platform_driver(vx855gpio_driver);
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index b6eda35089d5..2a743e10ecb6 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -243,7 +243,7 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
+static int wm831x_gpio_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
@@ -275,7 +275,7 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit wm831x_gpio_remove(struct platform_device *pdev)
+static int wm831x_gpio_remove(struct platform_device *pdev)
{
struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
@@ -286,7 +286,7 @@ static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
.driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
- .remove = __devexit_p(wm831x_gpio_remove),
+ .remove = wm831x_gpio_remove,
};
static int __init wm831x_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index fb4293889392..0b598cf3df9d 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -109,7 +109,7 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int __devinit wm8350_gpio_probe(struct platform_device *pdev)
+static int wm8350_gpio_probe(struct platform_device *pdev)
{
struct wm8350 *wm8350 = dev_get_drvdata(pdev->dev.parent);
struct wm8350_platform_data *pdata = wm8350->dev->platform_data;
@@ -141,7 +141,7 @@ static int __devinit wm8350_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit wm8350_gpio_remove(struct platform_device *pdev)
+static int wm8350_gpio_remove(struct platform_device *pdev)
{
struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
@@ -152,7 +152,7 @@ static struct platform_driver wm8350_gpio_driver = {
.driver.name = "wm8350-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8350_gpio_probe,
- .remove = __devexit_p(wm8350_gpio_remove),
+ .remove = wm8350_gpio_remove,
};
static int __init wm8350_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 1c764e779d80..ae409fd94af7 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -245,7 +245,7 @@ static struct gpio_chip template_chip = {
.can_sleep = 1,
};
-static int __devinit wm8994_gpio_probe(struct platform_device *pdev)
+static int wm8994_gpio_probe(struct platform_device *pdev)
{
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
@@ -281,7 +281,7 @@ err:
return ret;
}
-static int __devexit wm8994_gpio_remove(struct platform_device *pdev)
+static int wm8994_gpio_remove(struct platform_device *pdev)
{
struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
@@ -292,7 +292,7 @@ static struct platform_driver wm8994_gpio_driver = {
.driver.name = "wm8994-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8994_gpio_probe,
- .remove = __devexit_p(wm8994_gpio_remove),
+ .remove = wm8994_gpio_remove,
};
static int __init wm8994_gpio_init(void)
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 79b0fe8a7253..9ae7aa8ca48a 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -159,7 +159,7 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
* driver data structure. It returns 0, if the driver is bound to the GPIO
* device, or a negative value if there is an error.
*/
-static int __devinit xgpio_of_probe(struct device_node *np)
+static int xgpio_of_probe(struct device_node *np)
{
struct xgpio_instance *chip;
int status = 0;
@@ -209,7 +209,7 @@ static int __devinit xgpio_of_probe(struct device_node *np)
return 0;
}
-static struct of_device_id xgpio_of_match[] __devinitdata = {
+static struct of_device_id xgpio_of_match[] = {
{ .compatible = "xlnx,xps-gpio-1.00.a", },
{ /* end of list */ },
};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
new file mode 100644
index 000000000000..cbad6e908d30
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -0,0 +1,54 @@
+/*
+ * ACPI helpers for GPIO API
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/acpi_gpio.h>
+#include <linux/acpi.h>
+
+static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+{
+ if (!gc->dev)
+ return false;
+
+ return ACPI_HANDLE(gc->dev) == data;
+}
+
+/**
+ * acpi_get_gpio() - Translate ACPI GPIO pin to GPIO number usable with GPIO API
+ * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
+ * @pin: ACPI GPIO pin number (0-based, controller-relative)
+ *
+ * Returns GPIO number to use with Linux generic GPIO API, or errno error value
+ */
+
+int acpi_get_gpio(char *path, int pin)
+{
+ struct gpio_chip *chip;
+ acpi_handle handle;
+ acpi_status status;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ chip = gpiochip_find(handle, acpi_gpiochip_find);
+ if (!chip)
+ return -ENODEV;
+
+ if (!gpio_is_valid(chip->base + pin))
+ return -EINVAL;
+
+ return chip->base + pin;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gpio);
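+
+/*
+ * Usage example (a sketch; the controller path "\\_SB.PCI0.GPI0" and pin 16
+ * are board-specific assumptions, not part of this API):
+ *
+ *	int gpio = acpi_get_gpio("\\_SB.PCI0.GPI0", 16);
+ *
+ *	if (gpio < 0)
+ *		return gpio;
+ *	return devm_gpio_request(dev, gpio, "sample-acpi-pin");
+ */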
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 58b9838801c0..199fca15f270 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -191,6 +191,32 @@ err:
return ret;
}
+/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
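+/* Returns 1 if the line reads back as an input, 0 for an output, or a
+ * negative errno when the chip has no get_direction() callback or the
+ * callback itself fails; FLAG_IS_OUT is updated accordingly on success. */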
+static int gpio_get_direction(unsigned gpio)
+{
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ chip = gpio_to_chip(gpio);
+ gpio -= chip->base;
+
+ if (!chip->get_direction)
+ return status;
+
+ status = chip->get_direction(chip, gpio);
+ if (status > 0) {
+ /* GPIOF_DIR_IN, or other positive */
+ status = 1;
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ }
+ if (status == 0) {
+ /* GPIOF_DIR_OUT */
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
+ return status;
+}
+
#ifdef CONFIG_GPIO_SYSFS
/* lock protects against unexport_gpio() being called while
@@ -223,6 +249,7 @@ static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct gpio_desc *desc = dev_get_drvdata(dev);
+ unsigned gpio = desc - gpio_desc;
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -230,6 +257,7 @@ static ssize_t gpio_direction_show(struct device *dev,
-	if (!test_bit(FLAG_EXPORT, &desc->flags))
+	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
 		status = -EIO;
-	else
+	} else {
+		gpio_get_direction(gpio);
 		status = sprintf(buf, "%s\n",
 			test_bit(FLAG_IS_OUT, &desc->flags)
 				? "out" : "in");
+	}
@@ -704,8 +732,9 @@ int gpio_export(unsigned gpio, bool direction_may_change)
{
unsigned long flags;
struct gpio_desc *desc;
- int status = -EINVAL;
+ int status;
const char *ioname = NULL;
+ struct device *dev;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -713,59 +742,66 @@ int gpio_export(unsigned gpio, bool direction_may_change)
return -ENOENT;
}
- if (!gpio_is_valid(gpio))
- goto done;
+ if (!gpio_is_valid(gpio)) {
+ pr_debug("%s: gpio %d is not valid\n", __func__, gpio);
+ return -EINVAL;
+ }
mutex_lock(&sysfs_lock);
spin_lock_irqsave(&gpio_lock, flags);
desc = &gpio_desc[gpio];
- if (test_bit(FLAG_REQUESTED, &desc->flags)
- && !test_bit(FLAG_EXPORT, &desc->flags)) {
- status = 0;
- if (!desc->chip->direction_input
- || !desc->chip->direction_output)
- direction_may_change = false;
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags)) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ pr_debug("%s: gpio %d unavailable (requested=%d, exported=%d)\n",
+ __func__, gpio,
+ test_bit(FLAG_REQUESTED, &desc->flags),
+ test_bit(FLAG_EXPORT, &desc->flags));
+ status = -EPERM;
+ goto fail_unlock;
}
+
+ if (!desc->chip->direction_input || !desc->chip->direction_output)
+ direction_may_change = false;
spin_unlock_irqrestore(&gpio_lock, flags);
if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
ioname = desc->chip->names[gpio - desc->chip->base];
- if (status == 0) {
- struct device *dev;
-
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u", gpio);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpio_attr_group);
-
- if (!status && direction_may_change)
- status = device_create_file(dev,
- &dev_attr_direction);
-
- if (!status && gpio_to_irq(gpio) >= 0
- && (direction_may_change
- || !test_bit(FLAG_IS_OUT,
- &desc->flags)))
- status = device_create_file(dev,
- &dev_attr_edge);
-
- if (status != 0)
- device_unregister(dev);
- } else
- status = PTR_ERR(dev);
- if (status == 0)
- set_bit(FLAG_EXPORT, &desc->flags);
+ dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
+ desc, ioname ? ioname : "gpio%u", gpio);
+ if (IS_ERR(dev)) {
+ status = PTR_ERR(dev);
+ goto fail_unlock;
}
- mutex_unlock(&sysfs_lock);
-
-done:
+ status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
if (status)
- pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+ goto fail_unregister_device;
+ if (direction_may_change) {
+ status = device_create_file(dev, &dev_attr_direction);
+ if (status)
+ goto fail_unregister_device;
+ }
+
+ if (gpio_to_irq(gpio) >= 0 && (direction_may_change ||
+ !test_bit(FLAG_IS_OUT, &desc->flags))) {
+ status = device_create_file(dev, &dev_attr_edge);
+ if (status)
+ goto fail_unregister_device;
+ }
+
+ set_bit(FLAG_EXPORT, &desc->flags);
+ mutex_unlock(&sysfs_lock);
+ return 0;
+
+fail_unregister_device:
+ device_unregister(dev);
+fail_unlock:
+ mutex_unlock(&sysfs_lock);
+ pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
return status;
}
EXPORT_SYMBOL_GPL(gpio_export);
@@ -1075,6 +1111,7 @@ int gpiochip_add(struct gpio_chip *chip)
* inputs (often with pullups enabled) so power
* usage is minimized. Linux code should set the
* gpio direction first thing; but until it does,
+ * and in case chip->get_direction is not set,
* we may expose the wrong direction in sysfs.
*/
gpio_desc[id].flags = !chip->direction_input
@@ -1304,9 +1341,15 @@ int gpio_request(unsigned gpio, const char *label)
desc_set_label(desc, NULL);
module_put(chip->owner);
clear_bit(FLAG_REQUESTED, &desc->flags);
+ goto done;
}
}
-
+ if (chip->get_direction) {
+ /* chip->get_direction may sleep */
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ gpio_get_direction(gpio);
+ spin_lock_irqsave(&gpio_lock, flags);
+ }
done:
if (status)
pr_debug("gpio_request: gpio-%d (%s) status %d\n",
@@ -1842,6 +1885,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
continue;
+ gpio_get_direction(gpio);
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
gpio, gdesc->label,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 31123b6a0be5..2d2c2f8d6dc6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist);
-static int __devinit
-ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..8ecb601152ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
+
+ return 0;
}
-static int __devinit
-cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- cirrus_kick_out_firmware_fb(pdev);
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
if (connector->funcs->reset)
connector->funcs->reset(connector);
+ }
}
EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
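+
+/*
+ * Example (a sketch; foo_init_connectors() is an illustrative driver hook):
+ * call this once all connectors have been registered, at the end of the
+ * driver's output setup, so userspace sees the panel connectors first:
+ *
+ *	foo_init_connectors(dev);
+ *	drm_helper_move_panel_connectors_to_head(dev);
+ */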
+
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
}
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* if this is HPD or polled don't check it -
- TV out for instance */
- if (!connector->polled)
+ /* Ignore forced connectors. */
+ if (connector->force)
continue;
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
if (!dev->mode_config.poll_enabled)
return;
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
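/*
 * Illustrative sketch, not part of this patch: a driver's hotplug interrupt
 * work function deferring to drm_helper_hpd_irq_event(), which re-detects the
 * HPD capable connectors and fires the hotplug event if anything changed.
 * struct foo_device and foo_hotplug_work_func() are hypothetical.
 */
struct foo_device {
	struct drm_device *drm_dev;
	struct work_struct hotplug_work;
};

static void foo_hotplug_work_func(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      hotplug_work);

	drm_helper_hpd_irq_event(foo->drm_dev);
}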
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
-
}
static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+
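/*
 * Illustrative sketch, not part of this patch: registering an i2c-over-AUX
 * adapter. foo_aux_ch() stands in for the driver routine that issues the
 * actual AUX transaction on the hardware; all foo_* names are hypothetical.
 */
static int foo_aux_ch(struct i2c_adapter *adapter, int mode,
		      uint8_t write_byte, uint8_t *read_byte)
{
	/* Hypothetical: run one AUX channel transaction on the hardware. */
	return 0;
}

static struct i2c_algo_dp_aux_data foo_aux_data = {
	.running = false,
	.address = 0,
	.aux_ch  = foo_aux_ch,
};

static int foo_register_dp_i2c(struct drm_device *dev,
			       struct i2c_adapter *adapter)
{
	adapter->algo_data = &foo_aux_data;
	adapter->dev.parent = dev->dev;

	return i2c_dp_aux_add_bus(adapter);
}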
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
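/*
 * Illustrative sketch, not part of this patch: how the clock recovery phase of
 * link training might use these helpers. foo_dpcd_read() and
 * foo_update_train_set() stand in for driver specific DPCD access and drive
 * setting updates; struct foo_dp and the retry count are hypothetical.
 */
static bool foo_link_train_clock_recovery(struct foo_dp *dp,
					  u8 dpcd[DP_RECEIVER_CAP_SIZE],
					  int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int tries;

	for (tries = 0; tries < 5; tries++) {
		/* Give the sink time before the status registers are valid. */
		drm_dp_link_train_clock_recovery_delay(dpcd);

		if (foo_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
				  DP_LINK_STATUS_SIZE) < 0)
			return false;

		if (drm_dp_clock_recovery_ok(link_status, lane_count))
			return true;

		/* Apply the sink's requested voltage/pre-emphasis and retry. */
		foo_update_train_set(dp, link_status);
	}

	return false;
}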
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
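/*
 * Illustrative sketch, not part of this patch: the two conversion helpers map
 * between kHz link rates (162000/270000/540000) and the DPCD bandwidth codes.
 * A driver might use this when programming DP_LINK_BW_SET; foo_dpcd_write()
 * and struct foo_dp are hypothetical.
 */
static void foo_write_link_bw(struct foo_dp *dp, int link_rate_khz)
{
	u8 bw_code = drm_dp_link_rate_to_bw_code(link_rate_khz);

	foo_dpcd_write(dp, DP_LINK_BW_SET, &bw_code, 1);
}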
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- int i;
- u32 *raw_edid = (u32 *)in_edid;
+ if (memchr_inv(in_edid, 0, length))
+ return false;
- for (i = 0; i < length / 4; i++)
- if (*(raw_edid + i) != 0)
- return false;
return true;
}
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+/*
+ * Looks for a CEA mode matching given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
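/*
 * Illustrative sketch, not part of this patch: an HDMI driver could use the
 * VIC lookup when filling the AVI infoframe for the mode it is about to set.
 * struct foo_avi_infoframe and its video_code member are hypothetical.
 */
static void foo_set_avi_vic(struct foo_avi_infoframe *frame,
			    const struct drm_display_mode *mode)
{
	/* 0 means "not a CEA-861 mode", which is also a valid infoframe value. */
	frame->video_code = drm_mode_cea_vic(mode);
}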
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
default:
- case DRM_FORCE_ON: s = "ON"; break;
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- printk(KERN_ERR "panic occurred, switching back to text console\n");
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
+ pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp) {
+ if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict) {
+ if (strict)
enable = connector->status == connector_status_connected;
- } else {
+ else
enable = connector->status != connector_status_disconnected;
- }
+
return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
- }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
+out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- struct timeval stime, raw_time;
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- do_gettimeofday(&stime);
+ stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- do_gettimeofday(&raw_time);
+ etime = ktime_get();
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return gettimeofday timestamp as best estimate.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
*/
- do_gettimeofday(tvblank);
+ *tvblank = get_drm_timestamp();
return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
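/*
 * Illustrative sketch, not part of this patch: completing a page flip from a
 * driver's flip-done handler with the new helper. The event lock must be held
 * around the call, as documented above; foo_finish_page_flip() is hypothetical.
 */
static void foo_finish_page_flip(struct drm_device *dev, int crtc,
				 struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (event)
		drm_send_vblank_event(dev, crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}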
+
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, false);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
EXPORT_SYMBOL(drm_mm_insert_node);
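/*
 * Illustrative sketch, not part of this patch: allocating a block from a
 * drm_mm with the new color-aware entry point. The node must be cleared
 * before insertion and released with drm_mm_remove_node() when no longer
 * needed; foo_alloc_range() and the 4096 byte alignment are hypothetical.
 */
static int foo_alloc_range(struct drm_mm *mm, struct drm_mm_node *node,
			   unsigned long size)
{
	memset(node, 0, sizeof(*node));

	/* size bytes, 4096 byte alignment, default color 0. */
	return drm_mm_insert_node_generic(mm, node, size, 4096, 0);
}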
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -213,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
if (adj_start < start)
adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
@@ -275,22 +285,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, false);
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
start, end);
-
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
@@ -489,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
@@ -516,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
@@ -535,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
- unsigned long adj_start;
- unsigned long adj_end;
+ unsigned long adj_start, adj_end;
mm->scanned_blocks++;
@@ -553,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
- hole_start = drm_mm_hole_node_start(prev_node);
- hole_end = drm_mm_hole_node_end(prev_node);
-
- adj_start = hole_start;
- adj_end = hole_end;
-
- if (mm->color_adjust)
- mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
@@ -569,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
adj_end = mm->scan_end;
}
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
- mm->scan_hit_size = hole_end;
-
+ mm->scan_hit_end = hole_end;
return 1;
}
@@ -609,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
- INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
- /* Only need to check for containement because start&size for the
- * complete resulting free block (not just the desired part) is
- * stored. */
- if (node->start >= mm->scan_hit_start &&
- node->start + node->size
- <= mm->scan_hit_start + mm->scan_hit_size) {
- return 1;
- }
-
- return 0;
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (file_priv->minor->master)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
+ kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..046bcda36abe 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+ Choose this option if you want to use IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -18,7 +24,7 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on DRM_EXYNOS && !FB_S3C
+ depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ help
+ Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..4e9b5ba8edff 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,15 +56,16 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
- .remove = __devexit_p(s5p_ddc_remove),
+ .remove = s5p_ddc_remove,
.command = NULL,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..57affae9568b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -29,93 +15,103 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+ * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+ * region will be allocated else physically contiguous
+ * as possible.
+ */
+ if (!(flags & EXYNOS_BO_NONCONTIG))
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+ * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+ * else cachable mapping.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
+
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
+
+ nr_pages = buf->size >> PAGE_SHIFT;
+
+ if (!is_drm_iommu_supported(dev)) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
+
+ buf->pages = kzalloc(sizeof(struct page) * nr_pages,
+ GFP_KERNEL);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ return -ENOMEM;
+ }
+
+ buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ kfree(buf->pages);
+ return -ENOMEM;
+ }
+
+ start_addr = buf->dma_addr;
+ while (i < nr_pages) {
+ buf->pages[i] = phys_to_page(start_addr);
+ start_addr += PAGE_SIZE;
+ i++;
+ }
+ } else {
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL,
+ &buf->dma_attrs);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
+ }
}
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
ret = -ENOMEM;
- goto err2;
- }
-
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
+
+ if (!is_drm_iommu_supported(dev))
+ kfree(buf->pages);
return ret;
}
@@ -125,23 +121,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +135,14 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
+ if (!is_drm_iommu_supported(dev)) {
+ dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ kfree(buf->pages);
+ } else
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
buf->dma_addr = (dma_addr_t)NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..a6412f19673c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_BUF_H_
@@ -34,12 +20,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 0f68a2872673..4c5b6859c9ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -32,7 +18,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
-#define MAX_EDID 256
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
@@ -110,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
to_exynos_connector(connector);
struct exynos_drm_manager *manager = exynos_connector->manager;
struct exynos_drm_display_ops *display_ops = manager->display_ops;
- unsigned int count;
+ struct edid *edid = NULL;
+ unsigned int count = 0;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -128,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* because lcd panel has only one mode.
*/
if (display_ops->get_edid) {
- int ret;
- void *edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid) {
- DRM_ERROR("failed to allocate edid\n");
- return 0;
+ edid = display_ops->get_edid(manager->dev, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ ret = PTR_ERR(edid);
+ edid = NULL;
+ DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+ goto out;
}
- ret = display_ops->get_edid(manager->dev, connector,
- edid, MAX_EDID);
- if (ret < 0) {
- DRM_ERROR("failed to get edid data.\n");
- kfree(edid);
- edid = NULL;
- return 0;
+ count = drm_add_edid_modes(connector, edid);
+ if (count < 0) {
+ DRM_ERROR("Add edid modes failed %d\n", count);
+ goto out;
}
drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
} else {
struct exynos_drm_panel_info *panel;
struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -175,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
count = 1;
}
+out:
+ kfree(edid);
return count;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 22f6cc442c3d..547c6b590357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_CONNECTOR_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 94026ad76a77..4667c9f67acd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..e8894bc9e6d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -236,16 +222,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
@@ -402,3 +393,33 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
+
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+ /* if event's pipe isn't same as crtc then ignore it. */
+ if (crtc != e->pipe)
+ continue;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_vblank_put(dev, crtc);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6bae8d8c250e..3e197e6ae7d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_CRTC_H_
@@ -32,5 +18,6 @@
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..ba0a3aa78547 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -30,70 +16,106 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+ bool is_mapped;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir && exynos_attach->is_mapped)
+ return &exynos_attach->sgt;
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sgt = &exynos_attach->sgt;
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
- goto err_unlock;
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+ }
+
+ exynos_attach->is_mapped = true;
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +126,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +188,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -185,7 +206,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
- exynos_gem_obj->base.size, 0600);
+ exynos_gem_obj->base.size, flags);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
@@ -196,7 +217,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -210,7 +230,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
/* is it from our device? */
if (obj->dev == drm_dev) {
+ /*
+ * Importing dmabuf exported from out own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
drm_gem_object_reference(obj);
+ dma_buf_put(dma_buf);
return obj;
}
}
@@ -233,38 +258,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+ * this case could be CONTIG or NONCONTIG type but for now
+ * sets NONCONTIG.
+ * TODO. we have to find a way that exporter can notify
+ * the type of its own buffer to importer.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +291,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
index 662a8f98ccdb..49acfafb4fdb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_DMABUF_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..3da5c2d214d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -40,6 +26,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +37,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for eynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +57,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+ * create mapping to manage iommu table and set a pointer to iommu
+ * mapping structure to iommu_mapping of private data.
+ * also this iommu_mapping can be used to check if iommu is supported
+ * or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +83,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +92,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +129,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +147,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +236,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +294,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -295,7 +311,7 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = __devexit_p(exynos_drm_platform_remove),
+ .remove = exynos_drm_platform_remove,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-drm",
@@ -324,6 +340,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +358,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
@@ -375,13 +451,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..4606fac7241a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_DRV_H_
@@ -74,8 +60,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +67,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +93,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +124,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -167,8 +148,8 @@ struct exynos_drm_overlay {
struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
- int (*get_edid)(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(struct device *dev,
+ struct drm_connector *connector);
void *(*get_panel)(struct device *dev);
int (*check_timing)(struct device *dev, void *timing);
int (*power_on)(struct device *dev, int mode);
@@ -186,6 +167,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +183,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +215,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ * with iommu, device address space starts from this address
+ * otherwise default one.
+ * @da_space_size: size of device address space.
+ * if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +251,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +318,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..c63721f64aec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -234,6 +220,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
+}
+
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -505,14 +517,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops && overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..89e2fb0770af 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_ENCODER_H_
@@ -46,5 +32,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..294c0513f587 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -6,34 +6,23 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +39,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+ * without iommu support, not support physically non-continuous memory
+ * for framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +72,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /* make sure that overlay data are updated before relesing fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +146,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +217,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +228,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +295,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 96262e54f76d..517471b37566 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -5,24 +5,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_FB_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..71f867340a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -34,6 +20,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -46,8 +33,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +96,26 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+ /* map pages with kernel virtual space. */
+ if (!buffer->kvaddr) {
+ if (is_drm_iommu_supported(dev)) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ } else {
+ phys_addr_t dma_addr = buffer->dma_addr;
+ if (dma_addr)
+ buffer->kvaddr = phys_to_virt(dma_addr);
+ else
+ buffer->kvaddr = (void __iomem *)NULL;
+ }
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
/* buffer count to framebuffer always is 1 at booting time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
@@ -87,8 +124,12 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
- offset);
+ if (is_drm_iommu_supported(dev))
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
+ else
+ fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
+
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index ccfce8a1a451..e16d7f0ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -6,24 +6,10 @@
* Joonyoung Shim <jy0922.shim@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_FBDEV_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..67a83e69544b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1955 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size with, height.
+ * 4. added check_prepare api for right register.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv);
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: unused scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @ddata: driver data, including the parent clock name.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: set while the clocks are gated for suspend.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* stop dma operation */
+ cfg = fimc_read(EXYNOS_CISTATUS);
+ if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ }
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
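+
+/*
+ * Illustrative example (numbers not from the user manual): cropping a
+ * 1280x720 region at offset (100, 50) out of a 1920x1080 source gives
+ * h1 = 100, h2 = 1920 - 1280 - 100 = 540, v1 = 50 and
+ * v2 = 1080 - 720 - 50 = 310, i.e. the left/right and top/bottom margins
+ * programmed into CIWDOFST and CIWDOFST2.
+ */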
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
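+
+/*
+ * Illustrative example (values chosen for explanation, not taken from the
+ * user manual): scaling src = 1920 down to dst = 720 satisfies
+ * src >= dst * 2 but not src >= dst * 4, so the pre-scaler gets ratio = 2
+ * and shift = 1; the remaining 960 -> 720 step is left to the main scaler.
+ * Ratios of 64:1 or more cannot be expressed and are rejected.
+ */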
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w) ? true : false;
+ sc->up_v = (dst_h >= src_h) ? true : false;
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
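+
+/*
+ * Continuing the illustrative 1920 -> 720 example from above: pre_hratio = 2
+ * and hfactor = 1 give pre_dst_width = 960, and
+ * sc->hratio = (1920 << 14) / (720 << 1) = 21845, i.e. about 1.33 in the
+ * 14-bit fractional fixed-point format that fimc_set_scaler() programs into
+ * the hardware.
+ */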
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
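+
+/*
+ * The main ratio is split across two registers: CISCCTRL takes the upper
+ * bits (hratio >> 6, vratio >> 6) and CIEXTEN appears to take the low-order
+ * extension bits via the *_RATIO_EXT() macros.  With hratio = 21845 (0x5555)
+ * from the example above, CISCCTRL would hold 0x155 and CIEXTEN the
+ * remaining 0x15.
+ */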
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
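+
+/*
+ * Example: CIFCNTSEQ = 0x0000000b (buffers 0, 1 and 3 enabled) makes the
+ * loop above count three set bits and return 3.  A popcount helper such as
+ * hweight32() from <linux/bitops.h> would be an equivalent way to express
+ * this, assuming it is acceptable in this context.
+ */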
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* If set to true, the screen contents can be saved as JPEG */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture*/
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ return PTR_ERR(ctx->sclk_fimc_clk);
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = devm_clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->fimc_clk);
+ }
+
+ ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->wb_clk);
+ }
+
+ ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(ctx->wb_b_clk);
+ }
+
+ parent_clk = devm_clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return PTR_ERR(parent_clk);
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ clk_disable(ctx->sclk_fimc_clk);
+ return -EINVAL;
+ }
+
+ devm_clk_put(dev, parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ return -ENXIO;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+
+ return ret;
+}
+
+static int fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4410_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4410_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..127a424c5fdf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..9537761931ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
/*
* FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
+ bool resume;
};
struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
u32 vidcon1;
bool suspended;
struct mutex lock;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
struct exynos_drm_panel_info *panel;
};
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+ { .compatible = "samsung,exynos4-fimd",
+ .data = &exynos4_fimd_driver_data },
+ { .compatible = "samsung,exynos5-fimd",
+ .data = &exynos5_fimd_driver_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id =
+ of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+ if (of_id)
+ return (struct fimd_driver_data *)of_id->data;
+#endif
+
return (struct fimd_driver_data *)
platform_get_device_id(pdev)->driver_data;
}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
+ VIDTCON2_HOZVAL(timing->xres - 1) |
+ VIDTCON2_LINEVAL_E(timing->yres - 1) |
+ VIDTCON2_HOZVAL_E(timing->xres - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
}
}
+static void fimd_wait_for_vblank(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (ctx->suspended)
+ return;
+
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for FIMD to signal VSYNC interrupt or return after
+ * timeout which is set to 50ms (refresh rate of 20).
+ */
+ if (!wait_event_timeout(ctx->wait_vsync_queue,
+ !atomic_read(&ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
+ .wait_for_vblank = fimd_wait_for_vblank,
};
static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
struct fimd_win_data *win_data;
int win = zpos;
unsigned long val, alpha, size;
+ unsigned int last_x;
+ unsigned int last_y;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
/* buffer size */
val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+ VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+ VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+ VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
/* OSD position */
val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+ VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+ VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+ VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
writel(val, ctx->regs + VIDOSD_A(win));
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
+ last_x = win_data->offset_x + win_data->ovl_width;
+ if (last_x)
+ last_x--;
+ last_y = win_data->offset_y + win_data->ovl_height;
+ if (last_y)
+ last_y--;
+
+ val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+ VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
+ win_data->offset_x, win_data->offset_y, last_x, last_y);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data = &ctx->win_data[win];
+ if (ctx->suspended) {
+ /* do not resume this window */
+ win_data->resume = false;
+ return;
+ }
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
win_data->enabled = false;
}
-static void fimd_wait_for_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- int ret;
-
- ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
- VIDCON1_VSTATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
.mode_set = fimd_win_mode_set,
.commit = fimd_win_commit,
.disable = fimd_win_disable,
- .wait_for_vblank = fimd_wait_for_vblank,
};
static struct exynos_drm_manager fimd_manager = {
@@ -617,52 +663,6 @@ static struct exynos_drm_manager fimd_manager = {
.display_ops = &fimd_display_ops,
};
-static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
-
- is_checked = true;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
- }
-
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -682,8 +682,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
goto out;
drm_handle_vblank(drm_dev, manager->pipe);
- fimd_finish_pageflip(drm_dev, manager->pipe);
+ exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
out:
return IRQ_HANDLED;
}
@@ -709,6 +714,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
+ /* attach this sub driver to iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_attach_device(drm_dev, dev);
+
return 0;
}
@@ -716,7 +725,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* detach this sub driver from iommu mapping if supported. */
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, dev);
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +816,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
return 0;
}
+static void fimd_window_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ fimd_win_disable(dev, i);
+ }
+ fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct fimd_win_data *win_data;
+ int i;
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
static int fimd_activate(struct fimd_context *ctx, bool enable)
{
+ struct device *dev = ctx->subdrv.dev;
if (enable) {
int ret;
- struct device *dev = ctx->subdrv.dev;
ret = fimd_clock(ctx, true);
if (ret < 0)
@@ -820,7 +858,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
/* if vblank was in the enabled state, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags))
fimd_enable_vblank(dev);
+
+ fimd_window_resume(dev);
} else {
+ fimd_window_suspend(dev);
+
fimd_clock(ctx, false);
ctx->suspended = true;
}
@@ -828,7 +870,7 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
return 0;
}
-static int __devinit fimd_probe(struct platform_device *pdev)
+static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
@@ -857,18 +899,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = clk_get(dev, "fimd");
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
+ return PTR_ERR(ctx->bus_clk);
}
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
+ return PTR_ERR(ctx->lcd_clk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +916,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_clk;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return -ENXIO;
}
ctx->irq = res->start;
@@ -892,13 +931,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_clk;
+ return ret;
}
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
ctx->panel = panel;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
subdrv = &ctx->subdrv;
@@ -926,20 +967,9 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- return ret;
}
-static int __devexit fimd_remove(struct platform_device *pdev)
+static int fimd_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
@@ -960,9 +990,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
return 0;
}
@@ -991,7 +1018,7 @@ static int fimd_resume(struct device *dev)
* of pm runtime would still be 1, so in this case the fimd driver
* should be turned on directly rather than through the pm runtime interface.
*/
- if (pm_runtime_suspended(dev)) {
+ if (!pm_runtime_suspended(dev)) {
int ret;
ret = fimd_activate(ctx, true);
@@ -1050,11 +1077,12 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = __devexit_p(fimd_remove),
+ .remove = fimd_remove,
.id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
+ .of_match_table = of_match_ptr(fimd_driver_dt_match),
},
};
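A minimal sketch of how the reworked vblank wait pairs with the wake-up added to fimd_irq_handler() above: the IRQ handler clears wait_vsync_event and wakes wait_vsync_queue, so the waiter (re-added elsewhere in this file, outside the hunks shown here) can sleep on that condition instead of polling VIDCON1. The function body and the ~50 ms timeout below are assumptions for illustration, not text from the patch.

/* Sketch only: a waiter paired with the wake-up in fimd_irq_handler(). */
static void fimd_wait_for_vblank_sketch(struct fimd_context *ctx)
{
	if (ctx->suspended)
		return;

	atomic_set(&ctx->wait_vsync_event, 1);

	/*
	 * fimd_irq_handler() sets wait_vsync_event back to 0 and wakes
	 * wait_vsync_queue on the next vblank; time out after ~50 ms so a
	 * stalled pipeline cannot block the caller forever.
	 */
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				msecs_to_jiffies(50)))
		DRM_DEBUG_KMS("vblank wait timed out.\n");
}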
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..9a4c08e7453c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+#define MAX_BUF_ADDR_NR 6
+
+/* maximum buffer pool size of userptr is 64MB by default */
+#define MAX_POOL (64 * 1024 * 1024)
+
+enum {
+ BUF_TYPE_GEM = 1,
+ BUF_TYPE_USERPTR,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- u32 data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ unsigned long data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
struct list_head list;
- unsigned int handle;
+ dma_addr_t dma_addr;
+ unsigned long userptr;
+ unsigned long size;
+ struct page **pages;
+ unsigned int npages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ atomic_t refcount;
+ bool in_pool;
+ bool out_of_list;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int gem_nr;
+ unsigned int map_nr;
+ unsigned long handles[MAX_BUF_ADDR_NR];
+ unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
+ struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -143,23 +168,33 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
+ struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
+
+ unsigned long current_pool;
+ unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL);
+ init_dma_attrs(&g2d->cmdlist_dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL,
+ &g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+ unsigned long obj,
+ bool force)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr =
+ (struct g2d_cmdlist_userptr *)obj;
+
+ if (!obj)
+ return;
+
+ if (force)
+ goto out;
+
+ atomic_dec(&g2d_userptr->refcount);
+
+ if (atomic_read(&g2d_userptr->refcount) > 0)
+ return;
+
+ if (g2d_userptr->in_pool)
+ return;
+
+out:
+ exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+ if (!g2d_userptr->out_of_list)
+ list_del_init(&g2d_userptr->list);
+
+ sg_free_table(g2d_userptr->sgt);
+ kfree(g2d_userptr->sgt);
+ g2d_userptr->sgt = NULL;
+
+ kfree(g2d_userptr->pages);
+ g2d_userptr->pages = NULL;
+ kfree(g2d_userptr);
+ g2d_userptr = NULL;
+}
+
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+ unsigned long userptr,
+ unsigned long size,
+ struct drm_file *filp,
+ unsigned long *obj)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr;
+ struct g2d_data *g2d;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ unsigned int npages, offset;
+ int ret;
+
+ if (!size) {
+ DRM_ERROR("invalid userptr size.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ g2d = dev_get_drvdata(g2d_priv->dev);
+
+ /* check if userptr already exists in userptr_list. */
+ list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ if (g2d_userptr->userptr == userptr) {
+ /*
+ * also check the size, because the same address could be
+ * registered with a different size.
+ */
+ if (g2d_userptr->size == size) {
+ atomic_inc(&g2d_userptr->refcount);
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+ }
+
+ /*
+ * at this moment the g2d dma may still be accessing this
+ * g2d_userptr memory region, so remove the g2d_userptr object
+ * from userptr_list so it is not referred to again, and also
+ * exclude it from the userptr pool so that it is released
+ * only after the dma access completes.
+ */
+ g2d_userptr->out_of_list = true;
+ g2d_userptr->in_pool = false;
+ list_del_init(&g2d_userptr->list);
+
+ break;
+ }
+ }
+
+ g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+ if (!g2d_userptr) {
+ DRM_ERROR("failed to allocate g2d_userptr.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ atomic_set(&g2d_userptr->refcount, 1);
+
+ start = userptr & PAGE_MASK;
+ offset = userptr & ~PAGE_MASK;
+ end = PAGE_ALIGN(userptr + size);
+ npages = (end - start) >> PAGE_SHIFT;
+ g2d_userptr->npages = npages;
+
+ pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ kfree(g2d_userptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vma = find_vma(current->mm, userptr);
+ if (!vma) {
+ DRM_ERROR("failed to get vm region.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ if (vma->vm_end < userptr + size) {
+ DRM_ERROR("vma is too small.\n");
+ ret = -EFAULT;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->vma = exynos_gem_get_vma(vma);
+ if (!g2d_userptr->vma) {
+ DRM_ERROR("failed to copy vma.\n");
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ g2d_userptr->size = size;
+
+ ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+ npages, pages, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to get user pages from userptr.\n");
+ goto err_put_vma;
+ }
+
+ g2d_userptr->pages = pages;
+
+ sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+ if (!sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_userptr;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+ size, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgt from pages.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->sgt = sgt;
+
+ ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL);
+ if (ret < 0) {
+ DRM_ERROR("failed to map sgt with dma region.\n");
+ goto err_free_sgt;
+ }
+
+ g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+ g2d_userptr->userptr = userptr;
+
+ list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+ if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+ g2d->current_pool += npages << PAGE_SHIFT;
+ g2d_userptr->in_pool = true;
+ }
+
+ *obj = (unsigned long)g2d_userptr;
+
+ return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+
+err_free_userptr:
+ exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+ g2d_userptr->npages,
+ g2d_userptr->vma);
+
+err_put_vma:
+ exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+ kfree(pages);
+ kfree(g2d_userptr);
+ pages = NULL;
+ g2d_userptr = NULL;
+
+ return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+ struct g2d_data *g2d,
+ struct drm_file *filp)
+{
+ struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+ list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ if (g2d_userptr->in_pool)
+ g2d_userptr_put_dma_addr(drm_dev,
+ (unsigned long)g2d_userptr,
+ true);
+
+ g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_device *drm_dev,
+ struct drm_file *file)
+{
struct g2d_cmdlist *cmdlist = node->cmdlist;
- dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->gem_nr; i++) {
- struct g2d_gem_node *gem_node;
-
- gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
- if (!gem_node) {
- dev_err(g2d_priv->dev, "failed to allocate gem node\n");
- return -ENOMEM;
- }
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle;
+ dma_addr_t *addr;
offset = cmdlist->last - (i * 2 + 1);
- gem_node->handle = cmdlist->data[offset];
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
- file);
- if (IS_ERR(addr)) {
- node->gem_nr = i;
- kfree(gem_node);
- return PTR_ERR(addr);
+ handle = cmdlist->data[offset];
+
+ if (node->obj_type[i] == BUF_TYPE_GEM) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+ } else {
+ struct drm_exynos_g2d_userptr g2d_userptr;
+
+ if (copy_from_user(&g2d_userptr, (void __user *)handle,
+ sizeof(struct drm_exynos_g2d_userptr))) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
+
+ addr = g2d_userptr_get_dma_addr(drm_dev,
+ g2d_userptr.userptr,
+ g2d_userptr.size,
+ file,
+ &handle);
+ if (IS_ERR(addr)) {
+ node->map_nr = i;
+ return -EFAULT;
+ }
}
cmdlist->data[offset] = *addr;
- list_add_tail(&gem_node->list, &g2d_priv->gem_list);
- g2d_priv->gem_nr++;
+ node->handles[i] = handle;
}
return 0;
}
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
- struct drm_file *file,
- unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node,
+ struct drm_file *filp)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_gem_node *node, *n;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ int i;
- list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
- if (!nr)
- break;
+ for (i = 0; i < node->map_nr; i++) {
+ unsigned long handle = node->handles[i];
- exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
- list_del_init(&node->list);
- kfree(node);
- nr--;
+ if (node->obj_type[i] == BUF_TYPE_GEM)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+ filp);
+ else
+ g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+ false);
+
+ node->handles[i] = 0;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * the commands in run_cmdlist have completed, so unmap the gem
+ * objects in each command node to drop their references.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+ struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
+
+ if (for_addr) {
+ /* check userptr buffer type. */
+ reg_offset = (cmdlist->data[index] &
+ ~0x7fffffff) >> 31;
+ if (reg_offset) {
+ node->obj_type[i] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ }
+ }
+
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
+
+ if (node->obj_type[i] != BUF_TYPE_USERPTR)
+ node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->gem_nr = req->cmd_gem_nr;
- if (req->cmd_gem_nr) {
- struct drm_exynos_g2d_cmd *cmd_gem;
+ node->map_nr = req->cmd_buf_nr;
+ if (req->cmd_buf_nr) {
+ struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_gem,
- sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ (void __user *)cmd_buf,
+ sizeof(*cmd_buf) * req->cmd_buf_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_gem_nr * 2;
+ cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
if (ret < 0)
goto err_unmap;
}
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+ g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
+ runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct g2d_data *g2d;
+ int ret;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ if (!is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ }
+
+ return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ if (!is_drm_iommu_supported(drm_dev))
+ return;
+
+ drm_iommu_detach_device(drm_dev, dev);
+}
+
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->gem_list);
+ INIT_LIST_HEAD(&g2d_priv->userptr_list);
return 0;
}
@@ -734,16 +1071,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects of commands that have not completed.
+ *
+ * P.S. if the current process was terminated forcibly, some
+ * commands may still remain in inuse_cmdlist, so unmap them
+ * here.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
+ }
mutex_unlock(&g2d->cmdlist_mutex);
- g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+ /* release all g2d_userptr in pool. */
+ g2d_userptr_free_all(drm_dev, g2d, file);
kfree(file_priv->g2d_priv);
}
-static int __devinit g2d_probe(struct platform_device *pdev)
+static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_destroy_workqueue;
-
- g2d->gate_clk = clk_get(dev, "fimg2d");
+ g2d->gate_clk = devm_clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destroy_workqueue;
}
pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ g2d->max_pool = MAX_POOL;
+
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
- clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -844,7 +1188,7 @@ err_destroy_slab:
return ret;
}
-static int __devexit g2d_remove(struct platform_device *pdev)
+static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
@@ -899,7 +1242,7 @@ static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = __devexit_p(g2d_remove),
+ .remove = g2d_remove,
.driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
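The userptr support added to exynos_drm_g2d.c above keys off bit 31 of the register-offset word in each cmdlist (register, value) pair: g2d_check_reg_offset() marks such entries BUF_TYPE_USERPTR, and g2d_map_cmdlist_gem() then copy_from_user()s a {userptr, size} descriptor from the value word. A hedged userspace-side sketch of building one such pair follows; G2D_DST_BASE_ADDR, the drm_exynos_g2d_cmd field names, and the numeric value of G2D_BUF_USERPTR are assumptions here, only the bit-31 tagging and the descriptor fields come from the code above.

/*
 * Sketch only (userspace side): emit one destination-base entry that the
 * kernel will treat as a userptr buffer rather than a GEM handle.
 */
static void emit_userptr_dst(struct drm_exynos_g2d_cmd *cmd,
			     struct drm_exynos_g2d_userptr *desc,
			     void *buf, size_t len)
{
	desc->userptr = (unsigned long)buf;	/* e.g. a malloc'd region */
	desc->size = len;

	/* bit 31 of the register word marks the entry as a userptr buffer */
	cmd->offset = G2D_DST_BASE_ADDR | G2D_BUF_USERPTR;
	/* the value word carries a pointer to the descriptor, which the
	 * kernel copies in with copy_from_user() */
	cmd->data = (unsigned long)desc;
}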
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..473180776528 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#include <drm/drmP.h>
@@ -83,157 +69,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
+ /* TODO */
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
-
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
- return -EINTR;
-
- pfn = page_to_pfn(buf->pages[page_offset++]);
- } else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
- return vm_insert_mixed(vma, f_vaddr, pfn);
-}
-
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
+ if (!buf->sgt)
+ return -EINTR;
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- /* add some codes for UNCACHED type here. TODO */
+ return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +139,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +148,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +227,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -412,14 +263,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -427,25 +278,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -453,14 +296,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
drm_gem_object_unreference_unlocked(obj);
/*
@@ -489,22 +324,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+ struct file *filp)
+{
+ struct drm_file *file_priv;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ /* find current process's drm_file from filelist. */
+ list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+ if (file_priv->filp == filp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return file_priv;
+ }
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ WARN_ON(1);
+
+ return ERR_PTR(-EFAULT);
+}
+
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+ struct drm_file *file_priv;
+ unsigned long vm_size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = obj;
+ vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+ /* restore it to driver's fops. */
+ filp->f_op = fops_get(drm_dev->driver->fops);
+
+ file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+ if (IS_ERR(file_priv))
+ return PTR_ERR(file_priv);
+
+ /* restore it to drm_file. */
+ filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,40 +386,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
+ ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
- vma->vm_flags |= VM_MIXEDMAP;
+ /*
+ * take a reference to this mapping of the object; the reference
+ * is dropped by the corresponding vm_close call.
+ */
+ drm_gem_object_reference(obj);
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
+ mutex_lock(&drm_dev->struct_mutex);
+ drm_vm_open_locked(drm_dev, vma);
+ mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
@@ -578,16 +431,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj->filp->f_op = &exynos_drm_gem_fops;
- obj->filp->private_data = obj;
+ /*
+ * Set the specific mmaper's fops; it will be restored by
+ * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+ * This is used to call the specific mapper temporarily.
+ */
+ file_priv->filp->f_op = &exynos_drm_gem_fops;
- addr = vm_mmap(obj->filp, 0, args->size,
+ /*
+ * Set the gem object to private_data so that the specific mmaper
+ * can get it; it will be restored by exynos_drm_gem_mmap_buffer
+ * to the drm_file.
+ */
+ file_priv->filp->private_data = obj;
+
+ addr = vm_mmap(file_priv->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr))
+ if (IS_ERR((void *)addr)) {
+ file_priv->filp->private_data = file_priv;
return PTR_ERR((void *)addr);
+ }
args->mapped = addr;
@@ -622,6 +488,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *vma_copy;
+
+ vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+ if (!vma_copy)
+ return NULL;
+
+ if (vma->vm_ops && vma->vm_ops->open)
+ vma->vm_ops->open(vma);
+
+ if (vma->vm_file)
+ get_file(vma->vm_file);
+
+ memcpy(vma_copy, vma, sizeof(*vma));
+
+ vma_copy->vm_mm = NULL;
+ vma_copy->vm_next = NULL;
+ vma_copy->vm_prev = NULL;
+
+ return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+ if (!vma)
+ return;
+
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma)
+{
+ int get_npages;
+
+ /* the memory region mmaped with VM_PFNMAP. */
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ pages[i] = pfn_to_page(pfn);
+ }
+
+ if (i != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ get_npages = get_user_pages(current, current->mm, start,
+ npages, 1, 1, pages, NULL);
+ get_npages = max(get_npages, 0);
+ if (get_npages != npages) {
+ DRM_ERROR("failed to get user_pages.\n");
+ while (get_npages)
+ put_page(pages[--get_npages]);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma)
+{
+ if (!vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(pages[i]);
+
+ /*
+ * undo the reference we took when populating
+ * the table.
+ */
+ put_page(pages[i]);
+ }
+ }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ int nents;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with dma.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return nents;
+ }
+
+ mutex_unlock(&drm_dev->struct_mutex);
+ return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +742,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..35ebac47dc2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_GEM_H_
@@ -35,21 +21,27 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ * VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
- struct sg_table *sgt;
+ struct dma_attrs dma_attrs;
+ unsigned int write;
struct page **pages;
- unsigned long page_size;
+ struct sg_table *sgt;
unsigned long size;
+ bool pfnmap;
};
/*
@@ -65,6 +57,7 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
+ * @vma: a pointer to vm_area.
* @flags: indicate memory type of allocated buffer and cache attribute.
*
* P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +67,7 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
+ struct vm_area_struct *vma;
unsigned int flags;
};
@@ -104,9 +98,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/*
* put dma address from gem handle and this function could be used for
@@ -115,7 +109,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +122,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* map a user space region allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -163,4 +161,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attributes can be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+ unsigned int npages,
+ struct page **pages,
+ struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+ unsigned int npages,
+ struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
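For the userptr helpers declared just above, the intended call order (condensed from the g2d path earlier in this diff) is: copy the vma, pin the user pages, build an sg_table, then map it for DMA; teardown mirrors it in reverse. The sketch below strips the error handling and locking that the real g2d code carries, so treat it as a reading aid under those assumptions rather than a drop-in helper.

/* Condensed sketch of the helper call order; error handling and locking
 * are omitted on purpose. */
static int map_userptr_sketch(struct drm_device *drm_dev,
			      unsigned long userptr, unsigned long size,
			      struct page **pages, struct sg_table *sgt)
{
	unsigned long start = userptr & PAGE_MASK;
	unsigned int npages = (PAGE_ALIGN(userptr + size) - start) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	/* copy the region description so it outlives the caller's mmap state */
	vma = exynos_gem_get_vma(find_vma(current->mm, userptr));

	/* pin the backing pages (follow_pfn for VM_PFNMAP, else get_user_pages) */
	exynos_gem_get_pages_from_userptr(start, npages, pages, vma);

	/* describe the pages as a scatter-gather table and map it for DMA */
	sg_alloc_table_from_pages(sgt, pages, npages, userptr & ~PAGE_MASK,
				  size, GFP_KERNEL);
	exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);

	/* teardown: exynos_gem_unmap_sgt_from_dma(), put pages, put vma */
	return 0;
}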
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..8140753ec9c8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check the platform_device_id use case.
+ * 3. check src/dst size (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 4
+#define GSC_MAX_DST 16
+#define GSC_RESET_TIMEOUT 50
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 16
+#define GSC_WIDTH_ITU_709 1280
+#define GSC_SC_UP_MAX_RATIO 65536
+#define GSC_SC_DOWN_RATIO_7_8 74898
+#define GSC_SC_DOWN_RATIO_6_8 87381
+#define GSC_SC_DOWN_RATIO_5_8 104857
+#define GSC_SC_DOWN_RATIO_4_8 131072
+#define GSC_SC_DOWN_RATIO_3_8 174762
+#define GSC_SC_DOWN_RATIO_2_8 262144
+#define GSC_REFRESH_MIN 12
+#define GSC_REFRESH_MAX 60
+#define GSC_CROP_MAX 8192
+#define GSC_CROP_MIN 32
+#define GSC_SCALE_MAX 4224
+#define GSC_SCALE_MIN 32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR 16
+#define GSC_COEF_H_8T 8
+#define GSC_COEF_V_4T 4
+#define GSC_COEF_DEPTH 3
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see the user manual, section 49.2 (features).
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: qos operations.
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ int id;
+ int irq;
+ bool rotation;
+ bool suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 0, 0, 128, 0, 0, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 },
+ { -2, 7, -21, 99, 57, -16, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 7, -2 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 3, -8, 14, 111, 13, -8, 3, 0 },
+ { 2, -6, 7, 112, 21, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 },
+ { 1, -1, -7, 103, 44, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 },
+ { 0, 3, -15, 85, 69, -17, 4, -1 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 },
+ { -1, 4, -17, 69, 85, -15, 3, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 },
+ { -1, 4, -15, 44, 103, -7, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 },
+ { -1, 4, -12, 28, 110, 1, -4, 2 },
+ { -1, 3, -10, 21, 112, 7, -6, 2 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 2, -11, 25, 96, 25, -11, 2, 0 },
+ { 2, -10, 19, 96, 31, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 },
+ { 2, -8, 10, 92, 43, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 },
+ { 0, 1, -12, 43, 92, 10, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 },
+ { 0, 2, -12, 31, 96, 19, -10, 2 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { -1, -8, 33, 80, 33, -8, -1, 0 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 },
+ { 0, -8, 20, 78, 46, -6, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 },
+ { 1, -7, 10, 71, 58, -1, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 },
+ { 1, -5, -1, 58, 71, 10, -7, 1 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 },
+ { 1, -3, -6, 46, 78, 20, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { -3, 0, 35, 64, 35, 0, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 },
+ { -2, -3, 24, 61, 46, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 },
+ { 0, -4, 6, 46, 61, 24, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { -1, 8, 33, 48, 33, 8, -1, 0 },
+ { -1, 7, 31, 49, 35, 9, -1, -1 },
+ { -1, 6, 30, 49, 36, 10, -1, -1 },
+ { -1, 5, 28, 48, 38, 12, -1, -1 },
+ { -1, 4, 26, 48, 39, 13, 0, -1 },
+ { -1, 3, 24, 47, 41, 15, 0, -1 },
+ { -1, 2, 23, 47, 42, 16, 0, -1 },
+ { -1, 2, 21, 45, 43, 18, 1, -1 },
+ { -1, 1, 19, 45, 45, 19, 1, -1 },
+ { -1, 1, 18, 43, 45, 21, 2, -1 },
+ { -1, 0, 16, 42, 47, 23, 2, -1 },
+ { -1, 0, 15, 41, 47, 24, 3, -1 },
+ { -1, 0, 13, 39, 48, 26, 4, -1 },
+ { -1, -1, 12, 38, 48, 28, 5, -1 },
+ { -1, -1, 10, 36, 49, 30, 6, -1 },
+ { -1, -1, 9, 35, 49, 31, 7, -1 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 2, 13, 30, 38, 30, 13, 2, 0 },
+ { 2, 12, 29, 38, 30, 14, 3, 0 },
+ { 2, 11, 28, 38, 31, 15, 3, 0 },
+ { 2, 10, 26, 38, 32, 16, 4, 0 },
+ { 1, 10, 26, 37, 33, 17, 4, 0 },
+ { 1, 9, 24, 37, 34, 18, 5, 0 },
+ { 1, 8, 24, 37, 34, 19, 5, 0 },
+ { 1, 7, 22, 36, 35, 20, 6, 1 },
+ { 1, 6, 21, 36, 36, 21, 6, 1 },
+ { 1, 6, 20, 35, 36, 22, 7, 1 },
+ { 0, 5, 19, 34, 37, 24, 8, 1 },
+ { 0, 5, 18, 34, 37, 24, 9, 1 },
+ { 0, 4, 17, 33, 37, 26, 10, 1 },
+ { 0, 4, 16, 32, 38, 26, 10, 2 },
+ { 0, 3, 15, 31, 38, 28, 11, 2 },
+ { 0, 3, 14, 30, 38, 29, 12, 2 }
+ }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+ { /* Ratio <= 65536 (~8:8) */
+ { 0, 128, 0, 0 },
+ { -4, 127, 5, 0 },
+ { -6, 124, 11, -1 },
+ { -8, 118, 19, -1 },
+ { -8, 111, 27, -2 },
+ { -8, 102, 37, -3 },
+ { -8, 92, 48, -4 },
+ { -7, 81, 59, -5 },
+ { -6, 70, 70, -6 },
+ { -5, 59, 81, -7 },
+ { -4, 48, 92, -8 },
+ { -3, 37, 102, -8 },
+ { -2, 27, 111, -8 },
+ { -1, 19, 118, -8 },
+ { -1, 11, 124, -6 },
+ { 0, 5, 127, -4 }
+ }, { /* 65536 < Ratio <= 74898 (~8:7) */
+ { 8, 112, 8, 0 },
+ { 4, 111, 14, -1 },
+ { 1, 109, 20, -2 },
+ { -2, 105, 27, -2 },
+ { -3, 100, 34, -3 },
+ { -5, 93, 43, -3 },
+ { -5, 86, 51, -4 },
+ { -5, 77, 60, -4 },
+ { -5, 69, 69, -5 },
+ { -4, 60, 77, -5 },
+ { -4, 51, 86, -5 },
+ { -3, 43, 93, -5 },
+ { -3, 34, 100, -3 },
+ { -2, 27, 105, -2 },
+ { -2, 20, 109, 1 },
+ { -1, 14, 111, 4 }
+ }, { /* 74898 < Ratio <= 87381 (~8:6) */
+ { 16, 96, 16, 0 },
+ { 12, 97, 21, -2 },
+ { 8, 96, 26, -2 },
+ { 5, 93, 32, -2 },
+ { 2, 89, 39, -2 },
+ { 0, 84, 46, -2 },
+ { -1, 79, 53, -3 },
+ { -2, 73, 59, -2 },
+ { -2, 66, 66, -2 },
+ { -2, 59, 73, -2 },
+ { -3, 53, 79, -1 },
+ { -2, 46, 84, 0 },
+ { -2, 39, 89, 2 },
+ { -2, 32, 93, 5 },
+ { -2, 26, 96, 8 },
+ { -2, 21, 97, 12 }
+ }, { /* 87381 < Ratio <= 104857 (~8:5) */
+ { 22, 84, 22, 0 },
+ { 18, 85, 26, -1 },
+ { 14, 84, 31, -1 },
+ { 11, 82, 36, -1 },
+ { 8, 79, 42, -1 },
+ { 6, 76, 47, -1 },
+ { 4, 72, 52, 0 },
+ { 2, 68, 58, 0 },
+ { 1, 63, 63, 1 },
+ { 0, 58, 68, 2 },
+ { 0, 52, 72, 4 },
+ { -1, 47, 76, 6 },
+ { -1, 42, 79, 8 },
+ { -1, 36, 82, 11 },
+ { -1, 31, 84, 14 },
+ { -1, 26, 85, 18 }
+ }, { /* 104857 < Ratio <= 131072 (~8:4) */
+ { 26, 76, 26, 0 },
+ { 22, 76, 30, 0 },
+ { 19, 75, 34, 0 },
+ { 16, 73, 38, 1 },
+ { 13, 71, 43, 1 },
+ { 10, 69, 47, 2 },
+ { 8, 66, 51, 3 },
+ { 6, 63, 55, 4 },
+ { 5, 59, 59, 5 },
+ { 4, 55, 63, 6 },
+ { 3, 51, 66, 8 },
+ { 2, 47, 69, 10 },
+ { 1, 43, 71, 13 },
+ { 1, 38, 73, 16 },
+ { 0, 34, 75, 19 },
+ { 0, 30, 76, 22 }
+ }, { /* 131072 < Ratio <= 174762 (~8:3) */
+ { 29, 70, 29, 0 },
+ { 26, 68, 32, 2 },
+ { 23, 67, 36, 2 },
+ { 20, 66, 39, 3 },
+ { 17, 65, 43, 3 },
+ { 15, 63, 46, 4 },
+ { 12, 61, 50, 5 },
+ { 10, 58, 53, 7 },
+ { 8, 56, 56, 8 },
+ { 7, 53, 58, 10 },
+ { 5, 50, 61, 12 },
+ { 4, 46, 63, 15 },
+ { 3, 43, 65, 17 },
+ { 3, 39, 66, 20 },
+ { 2, 36, 67, 23 },
+ { 2, 32, 68, 26 }
+ }, { /* 174762 < Ratio <= 262144 (~8:2) */
+ { 32, 64, 32, 0 },
+ { 28, 63, 34, 3 },
+ { 25, 62, 37, 4 },
+ { 22, 62, 40, 4 },
+ { 19, 61, 43, 5 },
+ { 17, 59, 46, 6 },
+ { 15, 58, 48, 7 },
+ { 13, 55, 51, 9 },
+ { 11, 53, 53, 11 },
+ { 9, 51, 55, 13 },
+ { 7, 48, 58, 15 },
+ { 6, 46, 59, 17 },
+ { 5, 43, 61, 19 },
+ { 4, 40, 62, 22 },
+ { 4, 37, 62, 25 },
+ { 3, 34, 63, 28 }
+ }
+};
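+
+/*
+ * Note: the ratio values in the band comments above are the GSC main
+ * scaling ratios in 16.16 fixed point, so 65536 corresponds to 1:1
+ * (no scaling) and larger values to stronger down-scaling. Each band
+ * holds 16 phase entries of 8 horizontal / 4 vertical tap coefficients
+ * that are programmed into the hardware by gsc_set_h_coef() and
+ * gsc_set_v_coef() below.
+ */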
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
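+/*
+ * Configure the GSC interrupt register: @enable drives the global
+ * interrupt enable bit, while @overflow and @done unmask the overflow
+ * and frame-done sources respectively (a set mask bit disables the
+ * corresponding source).
+ */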
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable)
+ cfg |= GSC_IRQ_ENABLE;
+ else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ else
+ cfg |= GSC_IRQ_OR_MASK;
+
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ else
+ cfg |= GSC_IRQ_FRMDONE_MASK;
+
+ gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+ GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_IN_RGB_HD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_IN_RGB_SD_WIDE;
+ else
+ cfg |= GSC_IN_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_SRC) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+ .set_fmt = gsc_src_set_fmt,
+ .set_transf = gsc_src_set_transf,
+ .set_size = gsc_src_set_size,
+ .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+ GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+ GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+ GSC_OUT_GLOBAL_ALPHA_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_OUT_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_OUT_XRGB8888;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_OUT_YUV422_1P |
+ GSC_OUT_YUV422_1P_OEDER_LSB_C |
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_OUT_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+ GSC_OUT_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ ctx->rotation = cfg &
+ (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ *swap = ctx->rotation;
+
+ return 0;
+}
+
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 8) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 4)
+ *ratio = 4;
+ else if (src >= dst * 2)
+ *ratio = 2;
+ else
+ *ratio = 1;
+
+ return 0;
+}
+
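+/*
+ * The prescaler shift factor is effectively log2(hratio * vratio) for
+ * the pre-scaling ratios returned by gsc_get_ratio_shift() (1, 2 or 4),
+ * e.g. 4 x 4 -> 4, 4 x 2 -> 3, 2 x 2 -> 2, 2 x 1 -> 1, 1 x 1 -> 0.
+ */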
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+ if (hratio == 4 && vratio == 4)
+ *shfactor = 4;
+ else if ((hratio == 4 && vratio == 2) ||
+ (hratio == 2 && vratio == 4))
+ *shfactor = 3;
+ else if ((hratio == 4 && vratio == 1) ||
+ (hratio == 1 && vratio == 4) ||
+ (hratio == 2 && vratio == 2))
+ *shfactor = 2;
+ else if (hratio == 1 && vratio == 1)
+ *shfactor = 0;
+ else
+ *shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+ u32 src_w, src_h, dst_w, dst_h;
+ int ret = 0;
+
+ src_w = src->w;
+ src_h = src->h;
+
+ if (ctx->rotation) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+ __func__, sc->pre_hratio, sc->pre_vratio);
+
+ sc->main_hratio = (src_w << 16) / dst_w;
+ sc->main_vratio = (src_h << 16) / dst_h;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+ &sc->pre_shfactor);
+
+ DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+ sc->pre_shfactor);
+
+ cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+ GSC_PRESC_H_RATIO(sc->pre_hratio) |
+ GSC_PRESC_V_RATIO(sc->pre_vratio));
+ gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+ return ret;
+}
+
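+/*
+ * Select the horizontal coefficient band from the 16.16 main ratio
+ * (ratios up to GSC_SC_UP_MAX_RATIO use band 0, progressively stronger
+ * down-scaling selects the 7/8 ... 3/8 bands) and program the 8-tap
+ * coefficients for every phase into the GSC_HCOEF registers.
+ */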
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_H_8T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(h_coef_8t[sc_ratio][i][j],
+ GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+ int i, j, k, sc_ratio;
+
+ if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+ sc_ratio = 0;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+ sc_ratio = 1;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+ sc_ratio = 2;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+ sc_ratio = 3;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+ sc_ratio = 4;
+ else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+ sc_ratio = 5;
+ else
+ sc_ratio = 6;
+
+ for (i = 0; i < GSC_COEF_PHASE; i++)
+ for (j = 0; j < GSC_COEF_V_4T; j++)
+ for (k = 0; k < GSC_COEF_DEPTH; k++)
+ gsc_write(v_coef_4t[sc_ratio][i][j],
+ GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+ __func__, sc->main_hratio, sc->main_vratio);
+
+ gsc_set_h_coef(ctx, sc->main_hratio);
+ cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+ gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+ gsc_set_v_coef(ctx, sc->main_vratio);
+ cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+ gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct gsc_scaler *sc = &ctx->sc;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ }
+
+ /* pixel offset */
+ cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+ GSC_DSTIMG_OFFSET_Y(pos->y));
+ gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+ /* scaled size */
+ cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_SCALED_SIZE);
+
+ DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+ __func__, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_DSTIMG_SIZE);
+ cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+ GSC_DSTIMG_WIDTH_MASK);
+ cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+ GSC_DSTIMG_HEIGHT(sz->vsize));
+ gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+ DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+ __func__, pos->w, sc->range);
+
+ if (pos->w >= GSC_WIDTH_ITU_709)
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_HD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_HD_NARROW;
+ else
+ if (sc->range)
+ cfg |= GSC_OUT_RGB_SD_WIDE;
+ else
+ cfg |= GSC_OUT_RGB_SD_NARROW;
+
+ gsc_write(cfg, GSC_OUT_CON);
+
+ return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+ u32 cfg, i, buf_num = GSC_REG_SZ;
+ u32 mask = 0x00000001;
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ for (i = 0; i < GSC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num--;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= ~mask;
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+ gsc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+ gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > GSC_MAX_DST) {
+ dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+ return -EINVAL;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ case IPP_BUF_DEQUEUE:
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+ gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+ .set_fmt = gsc_dst_set_fmt,
+ .set_transf = gsc_dst_set_transf,
+ .set_size = gsc_dst_set_size,
+ .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->gsc_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->gsc_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
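+/*
+ * Starting from the hardware's current index, find the first source
+ * buffer slot whose mask bit is still clear (i.e. still enqueued),
+ * mark it dequeued and return its buffer id to the irq handler.
+ */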
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_SRC;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_SRC; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_SRC) {
+ DRM_ERROR("failed to get in buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+ u32 cfg, curr_index, i;
+ u32 buf_id = GSC_MAX_DST;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+ for (i = curr_index; i < GSC_MAX_DST; i++) {
+ if (!((cfg >> i) & 0x1)) {
+ buf_id = i;
+ break;
+ }
+ }
+
+ if (buf_id == GSC_MAX_DST) {
+ DRM_ERROR("failed to get out buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+ if (ret < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+ curr_index, buf_id);
+
+ return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+ struct gsc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ u32 status;
+ int buf_id[EXYNOS_DRM_OPS_MAX];
+
+ DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+ status = gsc_read(GSC_IRQ);
+ if (status & GSC_IRQ_STATUS_OR_IRQ) {
+ dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+ return IRQ_NONE;
+ }
+
+ if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+ dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
+ ctx->id, status);
+
+ buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+ return IRQ_HANDLED;
+
+ buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+ if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+ buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+ buf_id[EXYNOS_DRM_OPS_SRC];
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = GSC_REFRESH_MIN;
+ prop_list->refresh_max = GSC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = GSC_CROP_MAX;
+ prop_list->crop_max.vsize = GSC_CROP_MAX;
+ prop_list->crop_min.hsize = GSC_CROP_MIN;
+ prop_list->crop_min.vsize = GSC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = GSC_SCALE_MAX;
+ prop_list->scale_max.vsize = GSC_SCALE_MAX;
+ prop_list->scale_min.hsize = GSC_SCALE_MIN;
+ prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!gsc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct gsc_scaler *sc = &ctx->sc;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ ret = gsc_sw_reset(ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to reset hardware.\n");
+ return ret;
+ }
+
+ /* scaler setting */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+ sc->range = true;
+
+ return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ u32 cfg;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+
+ gsc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* enable one shot */
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+ GSC_ENABLE_CLK_GATE_MODE_MASK);
+ cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+ gsc_write(cfg, GSC_ENABLE);
+
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_WB:
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+ /* src local path */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst dma memory */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ case IPP_CMD_OUTPUT:
+ /* src dma memory */
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+ cfg |= GSC_IN_PATH_MEMORY;
+ gsc_write(cfg, GSC_IN_CON);
+
+ /* dst local path */
+ cfg = gsc_read(GSC_OUT_CON);
+ cfg |= GSC_OUT_PATH_MEMORY;
+ gsc_write(cfg, GSC_OUT_CON);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ ret = gsc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set prescaler.\n");
+ return ret;
+ }
+
+ gsc_set_scaler(ctx, &ctx->sc);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg |= GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+
+ return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* bypass */
+ break;
+ case IPP_CMD_WB:
+ gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ gsc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_ENABLE);
+ cfg &= ~GSC_ENABLE_ON;
+ gsc_write(cfg, GSC_ENABLE);
+}
+
+static int gsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clock control */
+ ctx->gsc_clk = devm_clk_get(dev, "gscl");
+ if (IS_ERR(ctx->gsc_clk)) {
+ dev_err(dev, "failed to get gsc clock.\n");
+ return PTR_ERR(ctx->gsc_clk);
+ }
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ return -ENXIO;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ return -ENOENT;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+ IRQF_ONESHOT, "drm_gsc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ return ret;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+ ippdrv->check_property = gsc_ippdrv_check_property;
+ ippdrv->reset = gsc_ippdrv_reset;
+ ippdrv->start = gsc_ippdrv_start;
+ ippdrv->stop = gsc_ippdrv_stop;
+ ret = gsc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm gsc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+ return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return gsc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+ .probe = gsc_probe,
+ .remove = gsc_remove,
+ .driver = {
+ .name = "exynos-drm-gsc",
+ .owner = THIS_MODULE,
+ .pm = &gsc_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..29ec1c5efcf2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..28644539b305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
/* Common hdmi subdrv needs to access the hdmi and mixer though context.
* These should be initialied by the repective drivers */
static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
bool enabled[MIXER_WIN_NR];
};
+int exynos_platform_device_hdmi_register(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ return -EEXIST;
+
+ exynos_drm_hdmi_pdev = platform_device_register_simple(
+ "exynos-drm-hdmi", -1, NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+ return PTR_ERR(exynos_drm_hdmi_pdev);
+
+ return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+ if (exynos_drm_hdmi_pdev)
+ platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
{
if (ctx)
@@ -86,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
return false;
}
-static int drm_hdmi_get_edid(struct device *dev,
- struct drm_connector *connector, u8 *edid, int len)
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->get_edid)
- return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
- len);
+ return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
- return 0;
+ return NULL;
}
static int drm_hdmi_check_timing(struct device *dev, void *timing)
@@ -157,6 +178,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (mixer_ops && mixer_ops->wait_for_vblank)
+ mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -238,6 +269,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
+ .wait_for_vblank = drm_hdmi_wait_for_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +323,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
ctx->enabled[win] = false;
}
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
- struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (mixer_ops && mixer_ops->wait_for_vblank)
- mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
- .wait_for_vblank = drm_mixer_wait_for_vblank,
};
static struct exynos_drm_manager hdmi_manager = {
@@ -346,10 +367,24 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
return 0;
}
-static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct drm_hdmi_context *ctx;
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ if (mixer_ops->iommu_on)
+ mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
+static int exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_subdrv *subdrv;
@@ -368,6 +403,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
@@ -376,7 +412,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+static int exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -389,7 +425,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
struct platform_driver exynos_drm_common_hdmi_driver = {
.probe = exynos_drm_hdmi_probe,
- .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .remove = exynos_drm_hdmi_remove,
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..d80516fc9ed7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -3,24 +3,10 @@
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Authoer: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_HDMI_H_
@@ -44,8 +30,8 @@ struct exynos_drm_hdmi_context {
struct exynos_hdmi_ops {
/* display */
bool (*is_connected)(void *ctx);
- int (*get_edid)(void *ctx, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(void *ctx,
+ struct drm_connector *connector);
int (*check_timing)(void *ctx, void *timing);
int (*power_on)(void *ctx, int mode);
@@ -62,12 +48,13 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
+ int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*wait_for_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
/* overlay */
- void (*wait_for_vblank)(void *ctx);
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
void (*win_commit)(void *ctx, int zpos);
void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..3799d5c2b5df
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,136 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach it to iommu
+ * mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+ * Set dma_ops to drm_device just one time.
+ *
+ * The DMA mapping API needs a device object, and it is used to
+ * allocate physical memory and map it through the IOMMU table.
+ * Once the IOMMU attach succeeds, the sub driver gets the IOMMU
+ * dma_ops, and all sub drivers share the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach it from iommu
+ * mapping
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..53b7deea8ab7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,71 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *dev_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..1a556354e92f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2050 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image
+ * scaler/rotator and input/output DMA operations using FIMC, GSC,
+ * Rotator, and so on. IPP is an integration driver for hardware
+ * blocks with the same attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. removed send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. need to check subdrv_open about multi-open.
+ * 7. need to implement power and sysmmu ctrl in power_on.
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+ struct drm_file *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex ipp_lock;
+ struct mutex prop_lock;
+ struct idr ipp_idr;
+ struct idr prop_idr;
+ struct workqueue_struct *event_workq;
+ struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ mutex_lock(&exynos_drm_ippdrv_lock);
+ list_del(&ippdrv->drv_list);
+ mutex_unlock(&exynos_drm_ippdrv_lock);
+
+ return 0;
+}
+
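+/*
+ * Allocate an id for @obj in @id_idr under @lock using the two-step
+ * idr_pre_get()/idr_get_new_above() API, retrying when the allocation
+ * returns -EAGAIN. Ids start from 1 so that 0 can mean "not set".
+ */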
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+ u32 *idp)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("failed to get idr.\n");
+ return -ENOMEM;
+ }
+
+ /* do the allocation under our mutexlock */
+ mutex_lock(lock);
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ mutex_unlock(lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+ void *obj;
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+ mutex_lock(lock);
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (!obj) {
+ DRM_ERROR("failed to find object.\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_unlock(lock);
+
+ return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+ enum drm_exynos_ipp_cmd cmd)
+{
+ /*
+ * Check the dedicated flag, and for WB/OUTPUT operations also
+ * check the power-on state.
+ */
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return true;
+
+ return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 ipp_id = property->ipp_id;
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+ if (ipp_id) {
+ /* find ipp driver using idr */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ ipp_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to find ipp%d driver.\n", ipp_id);
+ return ippdrv;
+ }
+
+ /*
+ * WB and OUTPUT operations do not support multi-operation,
+ * so the driver is marked dedicated at set-property ioctl time.
+ * When the ipp driver finishes its operations, the dedicated
+ * flag is cleared.
+ */
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_ERROR("the chosen device is already in use.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+ * This is necessary to find the correct device among the ipp
+ * drivers. They have different capabilities, so the requested
+ * property must be checked.
+ */
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_ERROR("property not supported.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ippdrv;
+ } else {
+ /*
+ * In this case the user application did not set ipp_id,
+ * so the ipp subsystem searches the whole driver list for a
+ * suitable driver.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (ipp_check_dedicated(ippdrv, property->cmd)) {
+ DRM_DEBUG_KMS("%s:used device.\n", __func__);
+ continue;
+ }
+
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property)) {
+ DRM_DEBUG_KMS("%s:not support property.\n",
+ __func__);
+ continue;
+ }
+
+ return ippdrv;
+ }
+
+ DRM_ERROR("no ipp driver supports the requested operation.\n");
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+ * Search for the ipp driver by the prop_id handle. The ipp
+ * subsystem sometimes needs to find the driver by prop_id,
+ * e.g. for PAUSE state, queue buf, and command control.
+ */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+ count++, (int)ippdrv);
+
+ if (!list_empty(&ippdrv->cmd_list)) {
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+ if (c_node->property.prop_id == prop_id)
+ return ippdrv;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_prop_list *prop_list = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!prop_list) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+ if (!prop_list->ipp_id) {
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ count++;
+ /*
+ * Return the ippdrv count to the user application.
+ * In the first step the application gets the ippdrv count,
+ * and in the second step it queries each ippdrv's capability
+ * using its ipp_id.
+ */
+ prop_list->count = count;
+ } else {
+ /*
+ * Get the ippdrv capability by ipp_id.
+ * Some devices do not support the wb or output interface,
+ * so the user application uses this ioctl to detect the
+ * correct ipp driver.
+ */
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+ prop_list->ipp_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to find ipp%d driver.\n",
+ prop_list->ipp_id);
+ return -EINVAL;
+ }
+
+ prop_list = ippdrv->prop_list;
+ }
+
+ return 0;
+}
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+ int idx)
+{
+ struct drm_exynos_ipp_config *config = &property->config[idx];
+ struct drm_exynos_pos *pos = &config->pos;
+ struct drm_exynos_sz *sz = &config->sz;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+ __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+ DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ u32 prop_id = property->prop_id;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the command node in the ippdrv command list using
+ * prop_id, and update that node with the new property
+ * information.
+ */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ if ((c_node->property.prop_id == prop_id) &&
+ (c_node->state == IPP_STATE_STOP)) {
+ DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->cmd, (int)ippdrv);
+
+ c_node->property = *property;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("failed to search property.\n");
+
+ return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+ if (!cmd_work) {
+ DRM_ERROR("failed to alloc cmd_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+ return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+ struct drm_exynos_ipp_event_work *event_work;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+ if (!event_work) {
+ DRM_ERROR("failed to alloc event_work.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+ return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_property *property = data;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Log the various property values set by the user
+ * application.
+ */
+ for_each_ipp_ops(i)
+ ipp_print_property(property, i);
+
+ /*
+ * The set property ioctl normally generates a new prop_id,
+ * but here a prop_id was already assigned by a previous set
+ * property call (e.g. PAUSE state). In that case look up the
+ * current prop_id and reuse it instead of allocating a new one.
+ */
+ if (property->prop_id) {
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+ return ipp_find_and_set_property(property);
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+ DRM_ERROR("failed to allocate map node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+ &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+ __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+ /* store property information and ippdrv in the command node */
+ c_node->priv = priv;
+ c_node->property = *property;
+ c_node->state = IPP_STATE_IDLE;
+
+ c_node->start_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->start_work)) {
+ DRM_ERROR("failed to create start work.\n");
+ ret = PTR_ERR(c_node->start_work);
+ goto err_clear;
+ }
+
+ c_node->stop_work = ipp_create_cmd_work();
+ if (IS_ERR_OR_NULL(c_node->stop_work)) {
+ DRM_ERROR("failed to create stop work.\n");
+ ret = PTR_ERR(c_node->stop_work);
+ goto err_free_start;
+ }
+
+ c_node->event_work = ipp_create_event_work();
+ if (IS_ERR_OR_NULL(c_node->event_work)) {
+ DRM_ERROR("failed to create event work.\n");
+ ret = PTR_ERR(c_node->event_work);
+ goto err_free_stop;
+ }
+
+ mutex_init(&c_node->cmd_lock);
+ mutex_init(&c_node->mem_lock);
+ mutex_init(&c_node->event_lock);
+
+ init_completion(&c_node->start_complete);
+ init_completion(&c_node->stop_complete);
+
+ for_each_ipp_ops(i)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ INIT_LIST_HEAD(&c_node->event_list);
+ list_splice_init(&priv->event_list, &c_node->event_list);
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ /* make dedicated state without m2m */
+ if (!ipp_is_m2m_cmd(property->cmd))
+ ippdrv->dedicated = true;
+
+ return 0;
+
+err_free_stop:
+ kfree(c_node->stop_work);
+err_free_start:
+ kfree(c_node->start_work);
+err_clear:
+ kfree(c_node);
+ return ret;
+}
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* delete list */
+ list_del(&c_node->list);
+
+ /* destroy mutex */
+ mutex_destroy(&c_node->cmd_lock);
+ mutex_destroy(&c_node->mem_lock);
+ mutex_destroy(&c_node->event_lock);
+
+ /* free command node */
+ kfree(c_node->start_work);
+ kfree(c_node->stop_work);
+ kfree(c_node->event_work);
+ kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+ i ? "dst" : "src");
+ continue;
+ }
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+ i ? "dst" : "src", count[i], (int)m_node);
+ count[i]++;
+ }
+ }
+
+ DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+ min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+ max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+ /*
+ * M2M operations need paired src/dst memory addresses,
+ * so use the minimum of the src and dst buffer counts.
+ * Other operations do not use paired memory, so use the
+ * maximum count.
+ */
+ if (ipp_is_m2m_cmd(property->cmd))
+ ret = min(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+ else
+ ret = max(count[EXYNOS_DRM_OPS_SRC],
+ count[EXYNOS_DRM_OPS_DST]);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return ret;
+}
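+
+/*
+ * Worked example of the count logic above (illustrative only): an M2M
+ * command with 2 enqueued src buffers and 3 enqueued dst buffers
+ * returns min(2, 3) = 2, i.e. two complete src/dst pairs are ready.
+ * A WB or OUTPUT command with the same queue depths returns
+ * max(2, 3) = 3, since only one side of the queue is used.
+ */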
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct list_head *head;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+ /* source/destination memory list */
+ head = &c_node->mem_list[qbuf->ops_id];
+
+ /* find memory node from memory list */
+ list_for_each_entry(m_node, head, list) {
+ DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+ __func__, count++, (int)m_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == qbuf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid queue node.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[m_node->ops_id];
+ if (!ops) {
+ DRM_ERROR("not support ops.\n");
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+ m_node->buf_id, IPP_BUF_ENQUEUE);
+ if (ret) {
+ DRM_ERROR("failed to set addr.\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_get_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_lock(&c_node->mem_lock);
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+ DRM_ERROR("failed to allocate queue node.\n");
+ goto err_unlock;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* operations, buffer id */
+ m_node->ops_id = qbuf->ops_id;
+ m_node->prop_id = qbuf->prop_id;
+ m_node->buf_id = qbuf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ qbuf->prop_id, m_node->buf_id);
+
+ for_each_ipp_planar(i) {
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, qbuf->handle[i]);
+
+ /* get dma address by handle */
+ if (qbuf->handle[i]) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ qbuf->handle[i], file);
+ if (IS_ERR(addr)) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ buf_info.handles[i] = qbuf->handle[i];
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+ __func__, i, buf_info.base[i],
+ (int)buf_info.handles[i]);
+ }
+ }
+
+ m_node->filp = file;
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+ mutex_unlock(&c_node->mem_lock);
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+err_unlock:
+ mutex_unlock(&c_node->mem_lock);
+ return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+ if (!m_node) {
+ DRM_ERROR("invalid dequeue node.\n");
+ return -EFAULT;
+ }
+
+ if (list_empty(&m_node->list)) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&c_node->mem_lock);
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+ /* put gem buffer */
+ for_each_ipp_planar(i) {
+ unsigned long handle = m_node->buf_info.handles[i];
+ if (handle)
+ exynos_drm_gem_put_dma_addr(drm_dev, handle,
+ m_node->filp);
+ }
+
+ /* delete list in queue */
+ list_del(&m_node->list);
+ kfree(m_node);
+
+ mutex_unlock(&c_node->mem_lock);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ qbuf->ops_id, qbuf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = qbuf->user_data;
+ e->event.prop_id = qbuf->prop_id;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+ list_add_tail(&e->base.link, &c_node->event_list);
+
+ return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_send_event *e, *te;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+ return;
+ }
+
+ list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+ DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+ __func__, count++, (int)e);
+
+ /*
+ * qbuf == NULL means delete all events: stop operations
+ * want to flush the whole event list. Otherwise delete
+ * only the event with the matching buf_id.
+ */
+ if (!qbuf) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ }
+
+ /* compare buffer id */
+ if (qbuf && (qbuf->buf_id ==
+ e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+ /* delete list */
+ list_del(&e->base.link);
+ kfree(e);
+ return;
+ }
+ }
+}
+
+static void ipp_handle_cmd_work(struct device *dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_work *cmd_work,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ cmd_work->ippdrv = ippdrv;
+ cmd_work->c_node = c_node;
+ queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_mem_node *m_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_property *property;
+ struct exynos_drm_ipp_ops *ops;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+ if (IS_ERR_OR_NULL(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EFAULT;
+ }
+
+ ops = ippdrv->ops[qbuf->ops_id];
+ if (!ops) {
+ DRM_ERROR("failed to get ops.\n");
+ return -EFAULT;
+ }
+
+ property = &c_node->property;
+
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /*
+ * If the destination buffer is set and the clock is enabled,
+ * then M2M operations need to be started from queue_buf.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+ cmd_work->ctrl = IPP_CTRL_PLAY;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ } else {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+ struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_queue_buf *qbuf)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+ /* delete list */
+ list_for_each_entry_safe(m_node, tm_node,
+ &c_node->mem_list[qbuf->ops_id], list) {
+ if (m_node->buf_id == qbuf->buf_id &&
+ m_node->ops_id == qbuf->ops_id)
+ ipp_put_mem_node(drm_dev, c_node, m_node);
+ }
+ }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_queue_buf *qbuf = data;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!qbuf) {
+ DRM_ERROR("invalid buf parameter.\n");
+ return -EINVAL;
+ }
+
+ if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+ DRM_ERROR("invalid ops parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+ __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+ qbuf->buf_id, qbuf->buf_type);
+
+ /* find command node */
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ qbuf->prop_id);
+ if (IS_ERR(c_node)) {
+ DRM_ERROR("failed to get command node.\n");
+ return -EFAULT;
+ }
+
+ /* buffer control */
+ switch (qbuf->buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* get memory node */
+ m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+ if (IS_ERR(m_node)) {
+ DRM_ERROR("failed to get m_node.\n");
+ return PTR_ERR(m_node);
+ }
+
+ /*
+ * First get an event for the destination buffer, then,
+ * in the M2M case, run with the destination buffer if
+ * needed.
+ */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+ /* get event for destination buffer */
+ ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to get event.\n");
+ goto err_clean_node;
+ }
+
+ /*
+ * In the M2M case run play control for the streaming
+ * feature; in other cases just set the address and wait.
+ */
+ ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+ if (ret) {
+ DRM_ERROR("failed to run command.\n");
+ goto err_clean_node;
+ }
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ mutex_lock(&c_node->cmd_lock);
+
+ /* put event for destination buffer */
+ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+ ipp_put_event(c_node, qbuf);
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+ mutex_unlock(&c_node->cmd_lock);
+ break;
+ default:
+ DRM_ERROR("invalid buffer control.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_clean_node:
+ DRM_ERROR("clean memory nodes.\n");
+
+ ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+ return ret;
+}
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+ enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (ctrl != IPP_CTRL_PLAY) {
+ if (pm_runtime_suspended(dev)) {
+ DRM_ERROR("pm:runtime_suspended.\n");
+ goto err_status;
+ }
+ }
+
+ switch (ctrl) {
+ case IPP_CTRL_PLAY:
+ if (state != IPP_STATE_IDLE)
+ goto err_status;
+ break;
+ case IPP_CTRL_STOP:
+ if (state == IPP_STATE_STOP)
+ goto err_status;
+ break;
+ case IPP_CTRL_PAUSE:
+ if (state != IPP_STATE_START)
+ goto err_status;
+ break;
+ case IPP_CTRL_RESUME:
+ if (state != IPP_STATE_STOP)
+ goto err_status;
+ break;
+ default:
+ DRM_ERROR("invalid state.\n");
+ goto err_status;
+ break;
+ }
+
+ return true;
+
+err_status:
+ DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+ return false;
+}
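+
+/*
+ * Summary of the checks above (reference only):
+ *
+ *   ctrl               allowed current state
+ *   IPP_CTRL_PLAY      IPP_STATE_IDLE
+ *   IPP_CTRL_STOP      anything but IPP_STATE_STOP
+ *   IPP_CTRL_PAUSE     IPP_STATE_START
+ *   IPP_CTRL_RESUME    IPP_STATE_STOP
+ *
+ * Every ctrl except IPP_CTRL_PLAY additionally requires the device
+ * not to be runtime-suspended.
+ */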
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+ struct drm_exynos_ipp_cmd_work *cmd_work;
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!cmd_ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+ if (IS_ERR(ippdrv)) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return PTR_ERR(ippdrv);
+ }
+
+ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+ cmd_ctrl->prop_id);
+ if (IS_ERR(c_node)) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+ c_node->state)) {
+ DRM_ERROR("invalid state.\n");
+ return -EINVAL;
+ }
+
+ switch (cmd_ctrl->ctrl) {
+ case IPP_CTRL_PLAY:
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+ c_node->state = IPP_STATE_START;
+
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ case IPP_CTRL_STOP:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(300))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ break;
+ case IPP_CTRL_PAUSE:
+ cmd_work = c_node->stop_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+ if (!wait_for_completion_timeout(&c_node->stop_complete,
+ msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout stop:prop_id[%d]\n",
+ c_node->property.prop_id);
+ }
+
+ c_node->state = IPP_STATE_STOP;
+ break;
+ case IPP_CTRL_RESUME:
+ c_node->state = IPP_STATE_START;
+ cmd_work = c_node->start_work;
+ cmd_work->ctrl = cmd_ctrl->ctrl;
+ ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+ break;
+ default:
+ DRM_ERROR("could not support this state currently.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+ cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+ return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ bool swap = false;
+ int ret, i;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for_each_ipp_ops(i) {
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ ret = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip, &swap);
+ if (ret) {
+ DRM_ERROR("not support tranf.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* store command info in ippdrv */
+ ippdrv->c_node = c_node;
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set current property in ippdrv */
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->c_node = NULL;
+ return ret;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ list_for_each_entry(m_node, head, list) {
+ ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node)
+{
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct list_head *head;
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* put event */
+ ipp_put_event(c_node, NULL);
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+ __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+ break;
+ }
+
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret) {
+ DRM_ERROR("failed to put m_node.\n");
+ goto err_clear;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ goto err_clear;
+ }
+
+err_clear:
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+ struct drm_exynos_ipp_cmd_work *cmd_work =
+ (struct drm_exynos_ipp_cmd_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ ippdrv = cmd_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("invalid ippdrv list.\n");
+ return;
+ }
+
+ c_node = cmd_work->c_node;
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ mutex_lock(&c_node->cmd_lock);
+
+ property = &c_node->property;
+
+ switch (cmd_work->ctrl) {
+ case IPP_CTRL_PLAY:
+ case IPP_CTRL_RESUME:
+ ret = ipp_start_property(ippdrv, c_node);
+ if (ret) {
+ DRM_ERROR("failed to start property:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+
+ /*
+ * The M2M case waits for completion of the transfer,
+ * because M2M performs a single unit operation over
+ * multiple queued buffers and must wait until the data
+ * transfer has finished.
+ */
+ if (ipp_is_m2m_cmd(property->cmd)) {
+ if (!wait_for_completion_timeout
+ (&c_node->start_complete, msecs_to_jiffies(200))) {
+ DRM_ERROR("timeout event:prop_id[%d]\n",
+ c_node->property.prop_id);
+ goto err_unlock;
+ }
+ }
+ break;
+ case IPP_CTRL_STOP:
+ case IPP_CTRL_PAUSE:
+ ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+ c_node);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_unlock;
+ }
+
+ complete(&c_node->stop_complete);
+ break;
+ default:
+ DRM_ERROR("unknown control type\n");
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+ mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = &c_node->property;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_queue_buf qbuf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ for_each_ipp_ops(i)
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", buf_id[i]);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&c_node->event_list)) {
+ DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+ return 0;
+ }
+
+ if (!ipp_check_mem_list(c_node)) {
+ DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+ return 0;
+ }
+
+ /* check command */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for_each_ipp_ops(i) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[i] = m_node->buf_id;
+ DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+ i ? "dst" : "src", tbuf_id[i]);
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&qbuf, 0x0, sizeof(qbuf));
+ qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+ qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &qbuf);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty memory node.\n");
+ return -ENOMEM;
+ }
+
+ tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+ if (ret)
+ DRM_ERROR("failed to put m_node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return -EINVAL;
+ }
+
+ if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+ DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+ tbuf_id[1], buf_id[1], property->prop_id);
+
+ /*
+ * The command node keeps an event list for destination
+ * buffers. When a destination buffer is enqueued to the
+ * memory list, an event is created and linked to the tail
+ * of the event list, so the first event corresponds to the
+ * first enqueued buffer.
+ */
+ e = list_first_entry(&c_node->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ if (!e) {
+ DRM_ERROR("empty event.\n");
+ return -EINVAL;
+ }
+
+ do_gettimeofday(&now);
+ DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+ , __func__, now.tv_sec, now.tv_usec);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for_each_ipp_ops(i)
+ e->event.buf_id[i] = tbuf_id[i];
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+ property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+ return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+ struct drm_exynos_ipp_event_work *event_work =
+ (struct drm_exynos_ipp_event_work *)work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int ret;
+
+ if (!event_work) {
+ DRM_ERROR("failed to get event_work.\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+ event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+ ippdrv = event_work->ippdrv;
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return;
+ }
+
+ c_node = ippdrv->c_node;
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ return;
+ }
+
+ /*
+ * IPP synchronizes the command thread and the event thread.
+ * If the IPP is closed abruptly from user land, we still
+ * have to wake up the command thread, so signal the
+ * completion instead of carrying out the operation.
+ */
+ if (c_node->state != IPP_STATE_START) {
+ DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+ __func__, c_node->state, c_node->property.prop_id);
+ goto err_completion;
+ }
+
+ mutex_lock(&c_node->event_lock);
+
+ ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+ if (ret) {
+ DRM_ERROR("failed to send event.\n");
+ goto err_completion;
+ }
+
+err_completion:
+ if (ipp_is_m2m_cmd(c_node->property.cmd))
+ complete(&c_node->start_complete);
+
+ mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret, count = 0;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ ippdrv->drm_dev = drm_dev;
+
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+ &ippdrv->ipp_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_idr;
+ }
+
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+ count++, (int)ippdrv, ippdrv->ipp_id);
+
+ if (ippdrv->ipp_id == 0) {
+ DRM_ERROR("failed to get ipp_id[%d]\n",
+ ippdrv->ipp_id);
+ goto err_idr;
+ }
+
+ /* store parent device for node */
+ ippdrv->parent_dev = dev;
+
+ /* store event work queue and handler */
+ ippdrv->event_workq = ctx->event_workq;
+ ippdrv->sched_event = ipp_sched_event;
+ INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+ if (is_drm_iommu_supported(drm_dev)) {
+ ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+ if (ret) {
+ DRM_ERROR("failed to activate iommu\n");
+ goto err_iommu;
+ }
+ }
+ }
+
+ return 0;
+
+err_iommu:
+ /* get ipp driver entry */
+ list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+ return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (is_drm_iommu_supported(drm_dev))
+ drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+ ippdrv->drm_dev = NULL;
+ exynos_drm_ippdrv_unregister(ippdrv);
+ }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("failed to allocate priv.\n");
+ return -ENOMEM;
+ }
+ priv->dev = dev;
+ file_priv->ipp_priv = priv;
+
+ INIT_LIST_HEAD(&priv->event_list);
+
+ DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+ return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = NULL;
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+ if (list_empty(&exynos_drm_ippdrv_list)) {
+ DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+ goto err_clear;
+ }
+
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+ if (list_empty(&ippdrv->cmd_list))
+ continue;
+
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+ __func__, count++, (int)ippdrv);
+
+ if (c_node->priv == priv) {
+ /*
+ * Userland went into an abnormal state (the process was
+ * killed) and closed the file without calling the stop
+ * cmd ctrl, so perform the stop operation here.
+ */
+ if (c_node->state == IPP_STATE_START) {
+ ipp_stop_property(drm_dev, ippdrv,
+ c_node);
+ c_node->state = IPP_STATE_STOP;
+ }
+
+ ippdrv->dedicated = false;
+ ipp_clean_cmd_node(c_node);
+ if (list_empty(&ippdrv->cmd_list))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+ }
+ }
+
+err_clear:
+ kfree(priv);
+ return;
+}
+
+static int ipp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipp_context *ctx;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ mutex_init(&ctx->ipp_lock);
+ mutex_init(&ctx->prop_lock);
+
+ idr_init(&ctx->ipp_idr);
+ idr_init(&ctx->prop_idr);
+
+ /*
+ * Create a single-threaded workqueue for ipp events.
+ * IPP drivers queue event_work on this thread, and the
+ * event thread then sends the event to the user process.
+ */
+ ctx->event_workq = create_singlethread_workqueue("ipp_event");
+ if (!ctx->event_workq) {
+ dev_err(dev, "failed to create event workqueue\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Create a single-threaded workqueue for ipp commands.
+ * A user process creates a command node with the set
+ * property ioctl and queues start_work on this thread;
+ * the command thread then starts the property.
+ */
+ ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+ if (!ctx->cmd_workq) {
+ dev_err(dev, "failed to create cmd workqueue\n");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ /* set sub driver information */
+ subdrv = &ctx->subdrv;
+ subdrv->dev = dev;
+ subdrv->probe = ipp_subdrv_probe;
+ subdrv->remove = ipp_subdrv_remove;
+ subdrv->open = ipp_subdrv_open;
+ subdrv->close = ipp_subdrv_close;
+
+ platform_set_drvdata(pdev, ctx);
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ DRM_ERROR("failed to register drm ipp device.\n");
+ goto err_cmd_workq;
+ }
+
+ dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+ return 0;
+
+err_cmd_workq:
+ destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+ destroy_workqueue(ctx->event_workq);
+ return ret;
+}
+
+static int ipp_remove(struct platform_device *pdev)
+{
+ struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* unregister sub driver */
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+ /* remove,destroy ipp idr */
+ idr_remove_all(&ctx->ipp_idr);
+ idr_remove_all(&ctx->prop_idr);
+ idr_destroy(&ctx->ipp_idr);
+ idr_destroy(&ctx->prop_idr);
+
+ mutex_destroy(&ctx->ipp_lock);
+ mutex_destroy(&ctx->prop_lock);
+
+ /* destroy command, event work queue */
+ destroy_workqueue(ctx->cmd_workq);
+ destroy_workqueue(ctx->event_workq);
+
+ return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+ return ipp_power_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+ struct ipp_context *ctx = get_ipp_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+ SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+ .probe = ipp_probe,
+ .remove = ipp_remove,
+ .driver = {
+ .name = "exynos-drm-ipp",
+ .owner = THIS_MODULE,
+ .pm = &ipp_pm_ops,
+ },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..4cadbea7dbde
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Jinyoung Jeon <jy0.jeon@samsung.com>
+ * Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos) \
+ for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
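+
+/*
+ * Minimal usage sketch for the iteration helpers above (illustrative
+ * only); they simply walk the src/dst operation slots and the Y/Cb/Cr
+ * planes:
+ *
+ *   int i;
+ *
+ *   for_each_ipp_ops(i)
+ *           INIT_LIST_HEAD(&c_node->mem_list[i]);
+ *   for_each_ipp_planar(i)
+ *           buf_info.base[i] = 0;
+ */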
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+ IPP_STATE_IDLE,
+ IPP_STATE_START,
+ IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ enum drm_exynos_ipp_ctrl ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source,destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct exynos_drm_ipp_private *priv;
+ struct list_head list;
+ struct list_head event_list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct mutex cmd_lock;
+ struct mutex mem_lock;
+ struct mutex event_lock;
+ struct completion start_complete;
+ struct completion stop_complete;
+ struct drm_exynos_ipp_property property;
+ struct drm_exynos_ipp_cmd_work *start_work;
+ struct drm_exynos_ipp_cmd_work *stop_work;
+ struct drm_exynos_ipp_event_work *event_work;
+ enum drm_exynos_ipp_state state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr gem handle for each plane.
+ * @base: Y, Cb, Cr dma address for each plane.
+ */
+struct drm_exynos_ipp_buf_info {
+ unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+ __u32 enable;
+ __u32 refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+ struct work_struct work;
+ struct exynos_drm_ippdrv *ippdrv;
+ u32 buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source,destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type);
+};
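+
+/*
+ * Illustrative sketch of how an ipp driver wires these callbacks; the
+ * handler names below follow the rotator driver added in this series,
+ * and a real driver supplies its own implementations:
+ *
+ *   static struct exynos_drm_ipp_ops rot_src_ops = {
+ *           .set_fmt        = rotator_src_set_fmt,
+ *           .set_size       = rotator_src_set_size,
+ *           .set_addr       = rotator_src_set_addr,
+ *   };
+ */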
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @c_node: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head drv_list;
+ struct device *parent_dev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ u32 ipp_id;
+ bool dedicated;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct workqueue_struct *event_workq;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct list_head cmd_list;
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*sched_event)(struct work_struct *work);
+};
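+
+/*
+ * Minimal registration sketch (illustrative; error handling and most
+ * driver-specific fields are omitted, and the handler names are
+ * placeholders for the driver's own callbacks). A device driver fills
+ * in an exynos_drm_ippdrv in its probe path and registers it:
+ *
+ *   ippdrv->dev = dev;
+ *   ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ *   ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ *   ippdrv->check_property = rotator_ippdrv_check_property;
+ *   ippdrv->start = rotator_ippdrv_start;
+ *   ippdrv->stop = rotator_ippdrv_stop;
+ *
+ *   ret = exynos_drm_ippdrv_register(ippdrv);
+ */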
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
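+
+/*
+ * Typical call sequence from user space (illustrative; the actual
+ * ioctl wrappers are defined in the exynos_drm uapi header):
+ *
+ *   1. get_property - query the ippdrv count / capability by ipp_id.
+ *   2. set_property - create a command node; returns prop_id.
+ *   3. queue_buf    - IPP_BUF_ENQUEUE the src and dst buffers.
+ *   4. cmd_ctrl     - IPP_CTRL_PLAY to start the operation.
+ *   5. wait for the DRM_EXYNOS_IPP_EVENT on the drm fd.
+ *   6. queue_buf    - IPP_BUF_DEQUEUE the processed buffers.
+ *   7. cmd_ctrl     - IPP_CTRL_STOP when finished.
+ */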
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb2102..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
* CRTC ----------------
* ^ start ^ end
*
- * There are six cases from a to b.
+ * There are six cases from a to f.
*
* <----- SCREEN ----->
* 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
}
overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
+ DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->dma_addr[i]);
}
actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_x < 0) {
if (actual_w)
src_x -= crtc_x;
- else
- src_x += crtc_w;
crtc_x = 0;
}
if (crtc_y < 0) {
if (actual_h)
src_y -= crtc_y;
- else
- src_y += crtc_h;
crtc_y = 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..f976e29def6e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * Rotator supports image crop/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. need to add supported list in prop_list.
+ */
+
+#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct rot_context, ippdrv);
+#define rot_read(offset) readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
+
+enum rot_irq_status {
+ ROT_IRQ_STATUS_COMPLETE = 8,
+ ROT_IRQ_STATUS_ILLEGAL = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+ u32 min_w;
+ u32 min_h;
+ u32 max_w;
+ u32 max_h;
+ u32 align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+ struct rot_limit ycbcr420_2p;
+ struct rot_limit rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ * @ippdrv: ipp driver structure embedded in this context.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct clk *clock;
+ struct rot_limit_table *limit_tbl;
+ int irq;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
+ bool suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+ u32 val = rot_read(ROT_CONFIG);
+
+ if (enable == true)
+ val |= ROT_CONFIG_IRQ;
+ else
+ val &= ~ROT_CONFIG_IRQ;
+
+ rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_CONTROL);
+
+ val &= ROT_CONTROL_FMT_MASK;
+
+ return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+ u32 val = rot_read(ROT_STATUS);
+
+ val = ROT_STATUS_IRQ(val);
+
+ if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+ return ROT_IRQ_STATUS_COMPLETE;
+
+ return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+ struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+ enum rot_irq_status irq_status;
+ u32 val;
+
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+
+ /* clear status */
+ val = rot_read(ROT_STATUS);
+ val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+ rot_write(val, ROT_STATUS);
+
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+ queue_work(ippdrv->event_workq,
+ (struct work_struct *)event_work);
+ } else
+ DRM_ERROR("the SFR is set illegally\n");
+
+ return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
+{
+ struct rot_limit_table *limit_tbl = rot->limit_tbl;
+ struct rot_limit *limit;
+ u32 mask, val;
+
+ /* Get size limit */
+ if (fmt == ROT_CONTROL_FMT_RGB888)
+ limit = &limit_tbl->rgb888;
+ else
+ limit = &limit_tbl->ycbcr420_2p;
+
+ /* Get mask for rounding to nearest aligned val */
+ mask = ~((1 << limit->align) - 1);
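+	/*
+	 * e.g. align = 3 (YCbCr420 2-plane) gives mask = ~0x7 (8-pixel
+	 * granularity); align = 2 (RGB888) gives mask = ~0x3 (4 pixels).
+	 */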
+
+ /* Set aligned width */
+ val = ROT_ALIGN(*hsize, limit->align, mask);
+ if (val < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (val > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = val;
+
+ /* Set aligned height */
+ val = ROT_ALIGN(*vsize, limit->align, mask);
+ if (val < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (val > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = val;
+}
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FMT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ val |= ROT_CONTROL_FMT_YCBCR420_2P;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ val |= ROT_CONTROL_FMT_RGB888;
+ break;
+ default:
+ DRM_ERROR("invalid image format\n");
+ return -EINVAL;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+ (fmt == ROT_CONTROL_FMT_RGB888))
+ return true;
+
+ return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
+ u32 val;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_SRC_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_SRC_CROP_POS);
+ val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+ rot_write(val, ROT_SRC_CROP_SIZE);
+
+ return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+
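+			/*
+			 * NV12 keeps its interleaved Cb/Cr plane directly
+			 * after the luma plane, so derive the address from
+			 * the aligned luma buffer size.
+			 */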
+ val = rot_read(ROT_SRC_BUF_SIZE);
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ /* Set transform configuration */
+ val = rot_read(ROT_CONTROL);
+ val &= ~ROT_CONTROL_FLIP_MASK;
+
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ val |= ROT_CONTROL_FLIP_VERTICAL;
+ break;
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
+ break;
+ default:
+ /* Flip None */
+ break;
+ }
+
+ val &= ~ROT_CONTROL_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ val |= ROT_CONTROL_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ val |= ROT_CONTROL_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ val |= ROT_CONTROL_ROT_270;
+ break;
+ default:
+ /* Rotation 0 Degree */
+ break;
+ }
+
+ rot_write(val, ROT_CONTROL);
+
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ *swap = true;
+ else
+ *swap = false;
+
+ return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val, fmt, hsize, vsize;
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
+
+ /* Set buffer size configuration */
+ val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+ rot_write(val, ROT_DST_BUF_SIZE);
+
+ /* Set crop image position configuration */
+ val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+ rot_write(val, ROT_DST_CROP_POS);
+
+ return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 val, fmt, hsize, vsize;
+ int i;
+
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ /* Set address configuration */
+ for_each_ipp_planar(i)
+ addr[i] = buf_info->base[i];
+
+ /* Get format */
+ fmt = rotator_reg_get_fmt(rot);
+ if (!rotator_check_reg_fmt(fmt)) {
+ DRM_ERROR("%s:invalid format.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ !addr[EXYNOS_DRM_PLANAR_CB]) {
+ /* Get buf size */
+ val = rot_read(ROT_DST_BUF_SIZE);
+
+ hsize = ROT_GET_BUF_SIZE_W(val);
+ vsize = ROT_GET_BUF_SIZE_H(val);
+
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
+
+ for_each_ipp_planar(i)
+ rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+ break;
+ case IPP_BUF_DEQUEUE:
+ for_each_ipp_planar(i)
+ rot_write(0x0, ROT_DST_BUF_ADDR(i));
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 0;
+ prop_list->crop = 0;
+ prop_list->scale = 0;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:not support format\n", __func__);
+ return false;
+ }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_BOTH:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_fmt(dst_config->fmt)) {
+ DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
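+		/* fall through */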
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+ return -EINVAL;
+ }
+
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!rotator_check_drm_flip(dst_config->flip)) {
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+ return -EINVAL;
+ }
+
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("%s:not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 val;
+
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
+
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ val = rot_read(ROT_CONTROL);
+ val |= ROT_CONTROL_START;
+
+ rot_write(val, ROT_CONTROL);
+
+ return 0;
+}
+
+static int rotator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot;
+ struct exynos_drm_ippdrv *ippdrv;
+ int ret;
+
+ rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+ if (!rot) {
+ dev_err(dev, "failed to allocate rot\n");
+ return -ENOMEM;
+ }
+
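+	/* driver_data holds &rot_limit_tbl set up in rotator_driver_ids below */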
+ rot->limit_tbl = (struct rot_limit_table *)
+ platform_get_device_id(pdev)->driver_data;
+
+ rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+ if (!rot->regs) {
+ dev_err(dev, "failed to map register\n");
+ return -ENXIO;
+ }
+
+ rot->irq = platform_get_irq(pdev, 0);
+ if (rot->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return rot->irq;
+ }
+
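+	/*
+	 * No hard-IRQ handler is passed, so rotator_irq_handler runs as a
+	 * threaded handler; IRQF_ONESHOT keeps the line masked until it ends.
+	 */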
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+ IRQF_ONESHOT, "drm_rotator", rot);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ rot->clock = devm_clk_get(dev, "rotator");
+ if (IS_ERR_OR_NULL(rot->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(rot->clock);
+ goto err_clk_get;
+ }
+
+ pm_runtime_enable(dev);
+
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
+ ret = rotator_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_ippdrv_register;
+ }
+
+ DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+ platform_set_drvdata(pdev, rot);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm rotator device\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(dev, "The exynos rotator is probed successfully\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_clk_get:
+ free_irq(rot->irq, rot);
+ return ret;
+}
+
+static int rotator_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+
+ pm_runtime_disable(dev);
+
+ free_irq(rot->irq, rot);
+
+ return 0;
+}
+
+static struct rot_limit_table rot_limit_tbl = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+static struct platform_device_id rotator_driver_ids[] = {
+ {
+ .name = "exynos-rot",
+ .driver_data = (unsigned long)&rot_limit_tbl,
+ },
+ {},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (enable) {
+ clk_enable(rot->clock);
+ rot->suspended = false;
+ } else {
+ clk_disable(rot->clock);
+ rot->suspended = true;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!pm_runtime_suspended(dev))
+		return rotator_clk_ctrl(rot, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+ NULL)
+};
+
+struct platform_driver rotator_driver = {
+ .probe = rotator_probe,
+ .remove = rotator_remove,
+ .id_table = rotator_driver_ids,
+ .driver = {
+ .name = "exynos-rot",
+ .owner = THIS_MODULE,
+ .pm = &rotator_pm_ops,
+ },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..71a0b4c0c1e8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * YoungJun Cho <yj44.cho@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_ROTATOR_H_
+#define _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..13ccbd4bcfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
unsigned int fb_height;
unsigned int bpp;
dma_addr_t dma_addr;
- void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
bool enabled;
@@ -99,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
return ctx->connected ? true : false;
}
-static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *vidi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct vidi_context *ctx = get_vidi_context(dev);
+ struct edid *edid;
+ int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -112,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
*/
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("raw_edid is null.\n");
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
}
- memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
- * EDID_LENGTH, len));
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kzalloc(edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEBUG_KMS("failed to allocate edid\n");
+ return ERR_PTR(-ENOMEM);
+ }
- return 0;
+ memcpy(edid, ctx->raw_edid, edid_len);
+ return edid;
}
static void *vidi_get_panel(struct device *dev)
@@ -294,7 +300,6 @@ static void vidi_win_mode_set(struct device *dev,
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -309,9 +314,7 @@ static void vidi_win_mode_set(struct device *dev,
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
+ DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
@@ -376,52 +379,6 @@ static struct exynos_drm_manager vidi_manager = {
.display_ops = &vidi_display_ops,
};
-static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
-
- is_checked = true;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
- }
-
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
static void vidi_fake_vblank_handler(struct work_struct *work)
{
struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -446,7 +403,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
mutex_unlock(&ctx->lock);
- vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
+ exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
}
static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -564,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
- struct edid *raw_edid;
int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -601,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
}
if (vidi->connection) {
- if (!vidi->edid) {
- DRM_DEBUG_KMS("edid data is null.\n");
+ struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid;
+ if (!drm_edid_is_valid(raw_edid)) {
+ DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
- raw_edid = (struct edid *)(uint32_t)vidi->edid;
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
if (!ctx->raw_edid) {
@@ -631,7 +587,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-static int __devinit vidi_probe(struct platform_device *pdev)
+static int vidi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vidi_context *ctx;
@@ -667,7 +623,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit vidi_remove(struct platform_device *pdev)
+static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -705,7 +661,7 @@ static const struct dev_pm_ops vidi_pm_ops = {
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = __devexit_p(vidi_remove),
+ .remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
index a4babe4e65d7..1e5fdaa36ccc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -3,24 +3,10 @@
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_DRM_VIDI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..fbab3c468603 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,7 +34,6 @@
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
-#include <plat/gpio-cfg.h>
#include <drm/exynos_drm.h>
@@ -50,6 +49,29 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION 0x02
+#define HDMI_AVI_LENGTH 0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0A
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+ /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
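+	/* the values below work out to VSI = 0x81, AVI = 0x82 and AUI = 0x84 */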
+ /* InfoFrame packet type */
+ HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+ /* Vendor-Specific InfoFrame */
+ HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+ /* Auxiliary Video information InfoFrame */
+ HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+ /* Audio information InfoFrame */
+ HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -74,8 +96,8 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
- int external_irq;
- int internal_irq;
+ void *parent_ctx;
+ int irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -84,7 +106,6 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
- void *parent_ctx;
int hpd_gpio;
@@ -182,6 +203,7 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +375,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+ &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+ &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -479,6 +506,7 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
+ int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -934,16 +962,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
+struct hdmi_infoframe {
+ enum HDMI_PACKET_TYPE type;
+ u8 ver;
+ u8 len;
+};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1300,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+ u32 start, u8 len, u32 hdr_sum)
+{
+ int i;
+
+	/*
+	 * hdr_sum: header0 + header1 + header2
+	 * start: start address of packet byte1
+	 * len: packet bytes - 1
+	 */
+ for (i = 0; i < len; ++i)
+ hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+ /* return 2's complement of 8 bit hdr_sum */
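+	/*
+	 * e.g. a byte sum of 0x137 yields 0x100 - 0x37 = 0xc9, so that the
+	 * transmitted bytes plus the checksum add up to zero modulo 256.
+	 */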
+ return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+ struct hdmi_infoframe *infoframe)
+{
+ u32 hdr_sum;
+ u8 chksum;
+ u32 aspect_ratio;
+ u32 mod;
+ u32 vic;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+ if (hdata->dvi_mode) {
+ hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+ HDMI_VSI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+ HDMI_AVI_CON_DO_NOT_TRANSMIT);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+ return;
+ }
+
+ switch (infoframe->type) {
+ case HDMI_PACKET_TYPE_AVI:
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+		/* Output format zero hardcoded, RGB YCbCr selection */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+ AVI_ACTIVE_FORMAT_VALID |
+ AVI_UNDERSCANNED_DISPLAY_VALID);
+
+ aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+ AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+ if (hdata->type == HDMI_TYPE13)
+ vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+ else
+ vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+ chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+ break;
+ case HDMI_PACKET_TYPE_AUI:
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+ hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+ infoframe->len, hdr_sum);
+ DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+ break;
+ default:
+ break;
+ }
+}
+
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1274,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
return hdata->hpd;
}
-static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
{
struct edid *raw_edid;
struct hdmi_context *hdata = ctx;
@@ -1283,21 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (!hdata->ddc_port)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
- if (raw_edid) {
- hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
- memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
- * EDID_LENGTH, len));
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- raw_edid->width_cm, raw_edid->height_cm);
- } else {
- return -ENODEV;
- }
+ if (!raw_edid)
+ return ERR_PTR(-ENODEV);
- return 0;
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
+
+ return raw_edid;
}
static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1534,14 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
/* resetting HDMI core */
hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- /* disable HPD interrupts */
+ struct hdmi_infoframe infoframe;
+
+ /* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1688,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
+ infoframe.type = HDMI_PACKET_TYPE_AVI;
+ infoframe.ver = HDMI_AVI_VERSION;
+ infoframe.len = HDMI_AVI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
+ infoframe.type = HDMI_PACKET_TYPE_AUI;
+ infoframe.ver = HDMI_AUI_VERSION;
+ infoframe.len = HDMI_AUI_LENGTH;
+ hdmi_reg_infoframe(hdata, &infoframe);
+
/* enable AVI packet every vsync, fixes purple line problem */
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -1651,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1818,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1870,9 +1991,27 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
/* reset hdmiphy */
hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
+}
+
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+ HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->type == HDMI_TYPE14)
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+ HDMI_PHY_POWER_OFF_EN);
}
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
@@ -1902,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
return;
}
- mdelay(10);
+ usleep_range(10000, 12000);
/* operation mode */
operation[0] = 0x1f;
@@ -1978,9 +2117,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
+ struct drm_mode_object base;
+ struct list_head head;
+
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
+
+ /* preserve display mode header while copying. */
+ head = adjusted_mode->head;
+ base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
+ adjusted_mode->head = head;
+ adjusted_mode->base = base;
break;
}
}
@@ -2015,6 +2163,13 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_conf_apply(hdata);
}
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
mutex_unlock(&hdata->hdmi_mutex);
- pm_runtime_get_sync(hdata->dev);
-
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
clk_enable(res->sclk_hdmi);
+
+ hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
* its reset state seems to meet the condition.
*/
hdmiphy_conf_reset(hdata);
+ hdmiphy_poweroff(hdata);
clk_disable(res->sclk_hdmi);
clk_disable(res->hdmi);
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
- hdmi_poweron(hdata);
+ if (pm_runtime_suspended(hdata->dev))
+ pm_runtime_get_sync(hdata->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- hdmi_poweroff(hdata);
+ if (!pm_runtime_suspended(hdata->dev))
+ pm_runtime_put_sync(hdata->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2109,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
.dpms = hdmi_dpms,
};
-static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2124,32 +2280,7 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
-{
- struct exynos_drm_hdmi_context *ctx = arg;
- struct hdmi_context *hdata = ctx->ctx;
- u32 intc_flag;
-
- intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
- /* clearing flags for HPD plug/unplug */
- if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_UNPLUG);
- }
- if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_PLUG);
- }
-
- if (ctx->drm_dev)
- drm_helper_hpd_irq_event(ctx->drm_dev);
-
- return IRQ_HANDLED;
-}
-
-static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
@@ -2166,27 +2297,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
+ res->hdmi = devm_clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = clk_get(dev, "hdmiphy");
+ res->hdmiphy = devm_clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2194,7 +2325,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2335,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2217,28 +2348,6 @@ fail:
return -ENODEV;
}
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR_OR_NULL(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR_OR_NULL(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR_OR_NULL(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
-
- return 0;
-}
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2415,7 @@ static struct platform_device_id hdmi_driver_types[] = {
}
};
+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -2314,8 +2424,9 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+#endif
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -2366,6 +2477,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(hdmi_match_types),
pdev->dev.of_node);
+ if (match == NULL)
+ return -ENODEV;
hdata->type = (enum hdmi_type)match->data;
} else {
hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2491,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- goto err_data;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
+ return -ENOENT;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
- ret = -ENXIO;
- goto err_resource;
+ return -ENXIO;
}
- ret = gpio_request(hdata->hpd_gpio, "HPD");
+ ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- goto err_resource;
+ return ret;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- ret = -ENOENT;
- goto err_gpio;
+ return -ENOENT;
}
hdata->ddc_port = hdmi_ddc;
@@ -2421,39 +2530,24 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
- if (hdata->external_irq < 0) {
- DRM_ERROR("failed to get GPIO external irq\n");
- ret = hdata->external_irq;
- goto err_hdmiphy;
- }
-
- hdata->internal_irq = platform_get_irq(pdev, 0);
- if (hdata->internal_irq < 0) {
- DRM_ERROR("failed to get platform internal irq\n");
- ret = hdata->internal_irq;
+ hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+ if (hdata->irq < 0) {
+ DRM_ERROR("failed to get GPIO irq\n");
+ ret = hdata->irq;
goto err_hdmiphy;
}
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- ret = request_threaded_irq(hdata->external_irq, NULL,
- hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ ret = request_threaded_irq(hdata->irq, NULL,
+ hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi_external", drm_hdmi_ctx);
+ "hdmi", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("failed to register hdmi external interrupt\n");
+ DRM_ERROR("failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
- ret = request_threaded_irq(hdata->internal_irq, NULL,
- hdmi_internal_irq_thread, IRQF_ONESHOT,
- "hdmi_internal", drm_hdmi_ctx);
- if (ret) {
- DRM_ERROR("failed to register hdmi internal interrupt\n");
- goto err_free_irq;
- }
-
/* Attach HDMI Driver to common hdmi. */
exynos_hdmi_drv_attach(drm_hdmi_ctx);
@@ -2464,21 +2558,14 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_gpio:
- gpio_free(hdata->hpd_gpio);
-err_resource:
- hdmi_resources_cleanup(hdata);
-err_data:
return ret;
}
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -2488,12 +2575,8 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
- free_irq(hdata->internal_irq, hdata);
- free_irq(hdata->external_irq, hdata);
+ free_irq(hdata->irq, hdata);
- gpio_free(hdata->hpd_gpio);
-
- hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
@@ -2509,13 +2592,19 @@ static int hdmi_suspend(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
- disable_irq(hdata->internal_irq);
- disable_irq(hdata->external_irq);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ disable_irq(hdata->irq);
hdata->hpd = false;
if (ctx->drm_dev)
drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
hdmi_poweroff(hdata);
return 0;
@@ -2526,22 +2615,60 @@ static int hdmi_resume(struct device *dev)
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
- enable_irq(hdata->external_irq);
- enable_irq(hdata->internal_irq);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+ enable_irq(hdata->irq);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ hdmi_poweron(hdata);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_poweron(hdata);
+
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+static const struct dev_pm_ops hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+ SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = __devexit_p(hdmi_remove),
+ .remove = hdmi_remove,
.id_table = hdmi_driver_types,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = hdmi_match_types,
+ .of_match_table = of_match_ptr(hdmi_match_types),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
index 1c3b6d8f1fe7..0ddf3957de15 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -5,24 +5,10 @@
* Inki Dae <inki.dae@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*/
#ifndef _EXYNOS_HDMI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..ea49d132ecf6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -53,16 +54,17 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = hdmiphy_match_types,
+ .of_match_table = of_match_ptr(hdmiphy_match_types),
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
- .remove = __devexit_p(hdmiphy_remove),
+ .remove = hdmiphy_remove,
.command = NULL,
};
EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..c414584bfbae 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -35,15 +35,15 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
dma_addr_t dma_addr;
- void __iomem *vaddr;
dma_addr_t chroma_dma_addr;
- void __iomem *chroma_vaddr;
uint32_t pixel_format;
unsigned int bpp;
unsigned int crtc_x;
@@ -59,6 +59,8 @@ struct hdmi_win_data {
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
+ bool enabled;
+ bool resume;
};
struct mixer_resources {
@@ -80,6 +82,7 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
+ struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -90,6 +93,9 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
+ void *parent_ctx;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -594,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
/* waiting until VP_SRESET_PROCESSING is 0 */
if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
break;
- mdelay(10);
+ usleep_range(10000, 12000);
}
WARN(tries == 0, "failed to reset Video Processor\n");
}
@@ -665,58 +671,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static void mixer_poweron(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *mdata = ctx;
+ struct drm_device *drm_dev;
- mutex_lock(&ctx->mixer_mutex);
- if (ctx->powered) {
- mutex_unlock(&ctx->mixer_mutex);
- return;
- }
- ctx->powered = true;
- mutex_unlock(&ctx->mixer_mutex);
+ drm_hdmi_ctx = mdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
- pm_runtime_get_sync(ctx->dev);
+ if (is_drm_iommu_supported(drm_dev)) {
+ if (enable)
+ return drm_iommu_attach_device(drm_dev, mdata->dev);
- clk_enable(res->mixer);
- if (ctx->vp_enabled) {
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
+ drm_iommu_detach_device(drm_dev, mdata->dev);
}
-
- mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
- mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
- mutex_unlock(&ctx->mixer_mutex);
-
- ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
- clk_disable(res->mixer);
- if (ctx->vp_enabled) {
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
- }
-
- pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
+ return 0;
}
static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +716,6 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
-static void mixer_dpms(void *ctx, int mode)
-{
- struct mixer_context *mixer_ctx = ctx;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- mixer_poweron(mixer_ctx);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- mixer_poweroff(mixer_ctx);
- break;
- default:
- DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
- break;
- }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
- struct mixer_context *mixer_ctx = ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
- int ret;
-
- ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
- MXR_INT_STATUS_VSYNC), 50);
- if (ret < 0)
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -811,9 +748,7 @@ static void mixer_win_mode_set(void *ctx,
win_data = &mixer_ctx->win_data[win];
win_data->dma_addr = overlay->dma_addr[0];
- win_data->vaddr = overlay->vaddr[0];
win_data->chroma_dma_addr = overlay->dma_addr[1];
- win_data->chroma_vaddr = overlay->vaddr[1];
win_data->pixel_format = overlay->pixel_format;
win_data->bpp = overlay->bpp;
@@ -841,10 +776,19 @@ static void mixer_win_commit(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
if (win > 1 && mixer_ctx->vp_enabled)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
+
+ mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +799,14 @@ static void mixer_win_disable(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ mixer_ctx->win_data[win].resume = false;
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -862,59 +814,149 @@ static void mixer_win_disable(void *ctx, int win)
mixer_vsync_set_update(mixer_ctx, true);
spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ mixer_ctx->win_data[win].enabled = false;
}
-static struct exynos_mixer_ops mixer_ops = {
- /* manager */
- .enable_vblank = mixer_enable_vblank,
- .disable_vblank = mixer_disable_vblank,
- .dpms = mixer_dpms,
+static void mixer_wait_for_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
- /* overlay */
- .wait_for_vblank = mixer_wait_for_vblank,
- .win_mode_set = mixer_win_mode_set,
- .win_commit = mixer_win_commit,
- .win_disable = mixer_win_disable,
-};
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
+ atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+ /*
+ * wait for MIXER to signal VSYNC interrupt or return after
+	 * wait for MIXER to signal the VSYNC interrupt, or return after a
+	 * 50 ms timeout (i.e. a 20 Hz refresh rate).
+ if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+ !atomic_read(&mixer_ctx->wait_vsync_event),
+ DRM_HZ/20))
+ DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
-/* for pageflip event */
-static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+static void mixer_window_suspend(struct mixer_context *ctx)
{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->resume = win_data->enabled;
+ mixer_win_disable(ctx, i);
+ }
+ mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+ struct hdmi_win_data *win_data;
+ int i;
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ win_data = &ctx->win_data[i];
+ win_data->enabled = win_data->resume;
+ win_data->resume = false;
+ }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ clk_enable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ }
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+
+ mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- spin_lock_irqsave(&drm_dev->event_lock, flags);
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
+ mixer_window_suspend(ctx);
- is_checked = true;
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ clk_disable(res->mixer);
+ if (ctx->vp_enabled) {
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
}
- if (is_checked)
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+out:
+ mutex_unlock(&ctx->mixer_mutex);
}
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_get_sync(mixer_ctx->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!pm_runtime_suspended(mixer_ctx->dev))
+ pm_runtime_put_sync(mixer_ctx->dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_mixer_ops mixer_ops = {
+ /* manager */
+ .iommu_on = mixer_iommu_on,
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .wait_for_vblank = mixer_wait_for_vblank,
+ .dpms = mixer_dpms,
+
+ /* overlay */
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
@@ -943,7 +985,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
}
drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
- mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
+ ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ DRM_WAKEUP(&ctx->wait_vsync_queue);
+ }
}
out:
@@ -960,8 +1009,8 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
@@ -971,85 +1020,69 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = clk_get(dev, "mixer");
+ mixer_res->mixer = devm_clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail;
+ return ret;
}
mixer_res->irq = res->start;
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
- clk_put(mixer_res->sclk_hdmi);
- if (!IS_ERR_OR_NULL(mixer_res->mixer))
- clk_put(mixer_res->mixer);
- return ret;
}
-static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- int ret;
- mixer_res->vp = clk_get(dev, "vp");
+ mixer_res->vp = devm_clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
- mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (mixer_res->sclk_hdmi)
@@ -1058,28 +1091,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
+ return -ENXIO;
}
return 0;
-
-fail:
- if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
- clk_put(mixer_res->sclk_dac);
- if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
- clk_put(mixer_res->sclk_mixer);
- if (!IS_ERR_OR_NULL(mixer_res->vp))
- clk_put(mixer_res->vp);
- return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1113,7 +1135,7 @@ static struct of_device_id mixer_match_types[] = {
}
};
-static int __devinit mixer_probe(struct platform_device *pdev)
+static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -1149,9 +1171,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
+ ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
+ DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+ atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1227,66 @@ static int mixer_suspend(struct device *dev)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+ return 0;
+ }
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+
+static int mixer_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!pm_runtime_suspended(dev)) {
+ DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+ return 0;
+ }
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
mixer_poweroff(ctx);
return 0;
}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_poweron(ctx);
+
+ return 0;
+}
#endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+static const struct dev_pm_ops mixer_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+ SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
struct platform_driver mixer_driver = {
.driver = {
@@ -1218,6 +1296,6 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = __devexit_p(mixer_remove),
+ .remove = mixer_remove,
.id_table = mixer_driver_types,
};
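
The reworked mixer_wait_for_vblank() above drops the MXR_INT_STATUS polling loop in favour of an atomic flag (wait_vsync_event) that mixer_irq_handler() clears before waking wait_vsync_queue. The stand-alone user-space sketch below only models that handshake with C11 atomics and a condition variable; the 50ms timeout and the names are taken from the diff, while the 16ms "frame" delay and the thread setup are assumptions made purely for illustration — this is not the driver code.

/* Minimal user-space model of the flag + waitqueue handshake used by
 * mixer_wait_for_vblank() / mixer_irq_handler() above.
 * Build with: cc -pthread vsync_model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_int wait_vsync_event;		/* models ctx->wait_vsync_event */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_vsync_queue = PTHREAD_COND_INITIALIZER;

static void *vsync_irq(void *arg)		/* models the MXR_INT_STATUS_VSYNC path */
{
	usleep(16000);				/* pretend one frame passes (~60Hz) */
	if (atomic_load(&wait_vsync_event)) {
		atomic_store(&wait_vsync_event, 0);
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&wait_vsync_queue);	/* DRM_WAKEUP() */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void wait_for_vblank(void)		/* models mixer_wait_for_vblank() */
{
	struct timespec deadline;

	atomic_store(&wait_vsync_event, 1);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 50 * 1000000L;	/* 50ms, as in DRM_HZ/20 */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (atomic_load(&wait_vsync_event)) {
		if (pthread_cond_timedwait(&wait_vsync_queue, &lock,
					   &deadline) != 0) {
			printf("vblank wait timed out.\n");
			break;
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, vsync_irq, NULL);
	wait_for_vblank();
	pthread_join(irq, NULL);
	return 0;
}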
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21 (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22 (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31 (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default Input PingPong Memory */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x) (((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 16)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* EXYNOS_REGS_FIMC_H */
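
The EXYNOS_CIOYSA()/EXYNOS_CIOCBSA()/EXYNOS_CIOCRSA() helpers above fold the four legacy ping-pong slots (0x18-0x44) and the FIMC v5.1 slots starting at 0x200 into one zero-based buffer index. A minimal stand-alone sketch, repeating only the Y-plane constants from the header, shows the resulting mapping:

/* Stand-alone check of the output-DMA ping-pong address macros above.
 * Only the constants needed for the Y plane are repeated here.
 */
#include <stdio.h>

#define EXYNOS_CIOYSA1		(0x18)
#define EXYNOS_CIOYSA5		(0x200)
#define DEF_PP			4	/* number of legacy ping-pong slots */
#define EXYNOS_CIOYSA(__x)		\
	(((__x) < DEF_PP) ?		\
	 (EXYNOS_CIOYSA1 + (__x) * 4) :	\
	 (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))

int main(void)
{
	int i;

	/* buffers 0..3 land in 0x18..0x24, buffer 4 jumps to 0x200, ... */
	for (i = 0; i < 6; i++)
		printf("buf %d -> CIOYSA @ 0x%03x\n", i, EXYNOS_CIOYSA(i));
	return 0;
}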
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x) ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
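
The per-plane input base-address registers above (GSC_IN_BASE_ADDR_Y/CB/CR) are laid out as small arrays indexed by ping-pong buffer number. A minimal stand-alone sketch, restating just those three macros from the header, prints the offsets a given buffer index resolves to:

/* Stand-alone illustration of the G-Scaler per-buffer address macros above. */
#include <stdio.h>

#define GSC_IN_BASE_ADDR_Y(n)	(0x50 + (n) * 0x4)
#define GSC_IN_BASE_ADDR_CB(n)	(0x80 + (n) * 0x4)
#define GSC_IN_BASE_ADDR_CR(n)	(0xB0 + (n) * 0x4)

int main(void)
{
	int buf;

	for (buf = 0; buf < 4; buf++)
		printf("buf %d: Y @ 0x%03x, CB @ 0x%03x, CR @ 0x%03x\n",
		       buf,
		       GSC_IN_BASE_ADDR_Y(buf),
		       GSC_IN_BASE_ADDR_CB(buf),
		       GSC_IN_BASE_ADDR_CR(buf));
	return 0;
}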
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN (1 << 0)
+
/* Video related registers */
#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
+
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
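
The HDMI_AVI_BYTE()/HDMI_AUI_BYTE() hunk above changes the macros from 0-based to 1-based indexing, presumably so callers can pass the infoframe data-byte number directly. A stand-alone sketch of that arithmetic follows; HDMI_CORE_BASE() is reduced to a plain offset here purely for illustration:

/* Illustrates the 0-based -> 1-based change to HDMI_AVI_BYTE() above. */
#include <stdio.h>

#define HDMI_CORE_BASE(x)	(x)		/* simplification for the sketch */
#define HDMI_AVI_BYTE_OLD(n)	HDMI_CORE_BASE(0x0720 + 4 * (n))
#define HDMI_AVI_BYTE_NEW(n)	HDMI_CORE_BASE(0x0720 + 4 * (n-1))

int main(void)
{
	/* AVI data byte 1: the old macro wanted index 0, the new one takes 1 */
	printf("old: AVI byte 1 -> 0x%04x (index 0)\n", HDMI_AVI_BYTE_OLD(0));
	printf("new: AVI byte 1 -> 0x%04x (index 1)\n", HDMI_AVI_BYTE_NEW(1));
	return 0;
}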
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG 0x00
+#define ROT_CONFIG_IRQ (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL 0x10
+#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
+#define ROT_CONTROL_FMT_RGB888 (6 << 8)
+#define ROT_CONTROL_FMT_MASK (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
+#define ROT_CONTROL_FLIP_MASK (3 << 6)
+#define ROT_CONTROL_ROT_90 (1 << 4)
+#define ROT_CONTROL_ROT_180 (2 << 4)
+#define ROT_CONTROL_ROT_270 (3 << 4)
+#define ROT_CONTROL_ROT_MASK (3 << 4)
+#define ROT_CONTROL_START (1 << 0)
+
+/* Status */
+#define ROT_STATUS 0x20
+#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
+#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE 1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE 0x3c
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS 0x40
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE 0x44
+#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask) ((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
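
ROT_SET_BUF_SIZE_H()/W() above pack height and width into the upper and lower half-words of one buffer-size register, and ROT_ALIGN() rounds a value to the nearest aligned multiple selected by its mask. A minimal stand-alone sketch, repeating only those macros from the header:

/* Stand-alone illustration of the rotator size-packing and alignment
 * helpers defined above.
 */
#include <stdio.h>

#define ROT_SET_BUF_SIZE_H(x)		((x) << 16)
#define ROT_SET_BUF_SIZE_W(x)		((x) << 0)
#define ROT_GET_BUF_SIZE_H(x)		((x) >> 16)
#define ROT_GET_BUF_SIZE_W(x)		((x) & 0xffff)

/* Round to nearest aligned value */
#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask))

int main(void)
{
	unsigned int reg;

	/* pack a 1280x720 buffer size, then read it back */
	reg = ROT_SET_BUF_SIZE_H(720) | ROT_SET_BUF_SIZE_W(1280);
	printf("reg = 0x%08x -> %ux%u\n", reg,
	       ROT_GET_BUF_SIZE_W(reg), ROT_GET_BUF_SIZE_H(reg));

	/* round 1277 to the nearest multiple of 16 (align = 4, mask = ~15) */
	printf("ROT_ALIGN(1277) = %u\n", ROT_ALIGN(1277, 4, ~15u));
	return 0;
}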
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
/* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..51044cc55cf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property, &curValue))
return -1;
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curValue))
return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
if (curValue == value)
return 0;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector, property, &val))
+ if (drm_object_property_get_value(&connector->base, property, &val))
goto set_prop_error;
if (val == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property, value))
goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_connector_property_set_value(connector, property,
+ if (drm_object_property_set_value(&connector->base, property,
value))
goto set_prop_error;
else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
- if (!dev || ((pipe != 0) && (pipe != 2))) {
+ if (pipe != 0 && pipe != 2) {
DRM_ERROR("Invalid parameter\n");
return;
}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*attach properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a5362..30adbbe23024 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
extern void oaktrail_hdmi_save(struct drm_device *dev);
extern void oaktrail_hdmi_restore(struct drm_device *dev);
extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72f..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
+ if (pipe == 1) {
+ oaktrail_crtc_hdmi_dpms(crtc, mode);
+ return;
+ }
+
if (!gma_power_begin(dev, true))
return;
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
+ if (pipe == 1)
+ return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
if (!gma_power_begin(dev, true))
return 0;
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
- drm_connector_property_get_value(connector,
+ drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a5..08747fd7105c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
- .hdmi_mask = (1 << 0),
+ .hdmi_mask = (1 << 1),
.lvds_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..f036f1fc161e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
+static void wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
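+ /* Rescale htotal as if the pixel clock were 200 MHz (mode->clock is in kHz);
+ * the example values above are consistent with this interpretation.
+ */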
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+ udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+ /* bail out if the SCU doesn't clear the busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+/*
+ * You don't want to know, you really really don't want to know....
+ *
+ * This is magic. However it's safe magic because of the way the platform
+ * works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void __iomem *base;
+ unsigned long scu_ipc_mmio = 0xff11c000UL;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map scu mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
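+ /* pipe is hard-coded to 1 here, so the selections below always pick the B-pipe registers */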
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* Reset controller */
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Set the DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ /* wait for pipe off */
+ udelay(150);
+
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+
+ /* wait for dpll off */
+ udelay(150);
+
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+ }
+
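+ /* Watermark/arbitration setup below runs on every DPMS transition, not just power-on */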
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+
+ /* LNC Chicken Bits - Squawk! */
+ REG_WRITE(0x70400, 0x4000);
+
+ return;
+}
+
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
struct i2c_adapter *i2c_adap;
struct edid *edid;
- struct drm_display_mode *mode, *t;
- int i = 0, ret = 0;
+ int ret = 0;
+ /*
+ * FIXME: We need to figure this lot out. In theory we can
+ * read the EDID somehow but I've yet to find working reference
+ * code.
+ */
i2c_adap = i2c_get_adapter(3);
if (i2c_adap == NULL) {
DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
-
- /*
- * prune modes that require frame buffer bigger than stolen mem
- */
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
- i++;
- drm_mode_remove(connector, mode);
- }
- }
- return ret - i;
+ return ret;
}
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_sysfs_connector_add(connector);
+ dev_info(dev->dev, "HDMI initialised.\n");
return;
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
dev_priv->hdmi_priv = hdmi_dev;
oaktrail_hdmi_audio_disable(dev);
+
+ dev_info(dev->dev, "HDMI hardware present.\n");
+
return;
free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
- drm_connector_property_get_value(
- connector,
+ drm_object_property_get_value(
+ &connector->base,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_connector_property_get_value(connector,
+ if (drm_object_property_get_value(&connector->base,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
- if (drm_connector_property_set_value(connector,
+ if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 4a07ab596174..771ff66711af 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -700,7 +700,7 @@ static struct i2c_driver tc35876x_bridge_i2c_driver = {
},
.id_table = tc35876x_bridge_id,
.probe = tc35876x_bridge_probe,
- .remove = __devexit_p(tc35876x_bridge_remove),
+ .remove = tc35876x_bridge_remove,
};
/* LCD panel I2C */
@@ -741,7 +741,7 @@ static struct i2c_driver cmi_lcd_i2c_driver = {
},
.id_table = cmi_lcd_i2c_id,
.probe = cmi_lcd_i2c_probe,
- .remove = __devexit_p(cmi_lcd_i2c_remove),
+ .remove = cmi_lcd_i2c_remove,
};
/* HACK to create I2C device while it's not created by platform code */
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
- drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
- drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_connector_attach_property(connector, conf->tv_mode_property,
+ drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
- drm_connector_attach_property(connector, conf->tv_brightness_property,
+ drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
- drm_connector_attach_property(connector, conf->tv_contrast_property,
+ drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
- drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_connector_attach_property(connector, priv->scale_property,
+ drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7f..9d4a2c2adf0e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -317,7 +318,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
- if (!work->pending) {
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
@@ -328,7 +329,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Stall check enabled, ");
else
seq_printf(m, "Stall check waiting for page flip ioctl, ");
- seq_printf(m, "%d prepares\n", work->pending);
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -655,10 +657,12 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- seq_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- seq_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
+ seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -687,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "Kernel: " UTS_RELEASE);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -1068,7 +1075,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1;
+ u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,6 +1104,9 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1158,12 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1273,7 +1289,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1282,19 +1298,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
- I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_READ_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode read of freq table timed out\n");
- continue;
- }
- ia_freq = I915_READ(GEN6_PCODE_DATA);
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(dev_priv,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1398,15 +1409,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx) {
seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
+ describe_obj(m, dev_priv->ips.pwrctx);
seq_printf(m, "\n");
}
- if (dev_priv->renderctx) {
+ if (dev_priv->ips.renderctx) {
seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
+ describe_obj(m, dev_priv->ips.renderctx);
seq_printf(m, "\n");
}
@@ -1711,13 +1722,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1752,7 +1763,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1762,7 +1773,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
@@ -1787,13 +1798,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1826,7 +1837,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1836,7 +1847,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 61ae104dca8c..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
}
/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
-
- memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
- 0, PAGE_SIZE);
-
- i915_write_hws_pga(dev);
-
- DRM_DEBUG_DRIVER("Enabled hardware status page\n");
- return 0;
-}
-
-/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
- struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(ring);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->counter;
+ return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1014,6 +986,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1068,7 +1046,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1088,6 +1066,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+ ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1305,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
+ INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1491,19 +1472,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- ret = -EIO;
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
goto put_bridge;
- }
-
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto put_gmch;
- }
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1561,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
- /* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret)
- goto out_gem_unload;
- }
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1621,6 +1584,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1643,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- intel_gmch_remove();
+ i915_gem_gtt_fini(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1721,6 +1686,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
+ cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b4..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect [default], 1=lid open, "
- "-1=lid closed)");
+ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+ "-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- int id;
+ unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ console_resume_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
- intel_modeset_setup_hw_state(dev);
- drm_mode_config_reset(dev);
+ intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
}
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
- console_lock();
- intel_fbdev_set_suspend(dev, 0);
- console_unlock();
+ /*
+ * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ if (console_trylock()) {
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ } else {
+ schedule_work(&dev_priv->console_resume_work);
+ }
+
+ return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ intel_gt_reset(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ __i915_drm_thaw(dev);
+
return error;
}
int i915_resume(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- ret = i915_drm_thaw(dev);
+ intel_gt_reset(dev);
+
+ /*
+ * Platforms with opregion should have sane BIOS, older ones (gen3 and
+ * earlier) need this since the BIOS might clear all our scratch PTEs.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ !dev_priv->opregion.header) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ ret = __i915_drm_thaw(dev);
if (ret)
return ret;
@@ -827,13 +877,12 @@ int i915_reset(struct drm_device *dev)
return 0;
}
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (intel_info->is_haswell || intel_info->is_valleyview)
+ if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -1140,12 +1189,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
+ switch (reg) {
+ case _3D_CHICKEN3:
+ case IVB_CHICKEN3:
+ case GEN7_COMMON_SLICE_CHICKEN1:
+ case GEN7_L3CNTLREG1:
+ case GEN7_L3_CHICKEN_MODE_REGISTER:
+ case GEN7_ROW_CHICKEN2:
+ case GEN7_L3SQCREG4:
+ case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+ case GEN7_HALF_SLICE_CHICKEN1:
+ case GEN6_MBCTL:
+ case GEN6_UCGCTL2:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+ * chip from rc6 before touching it for real. MI_MODE is masked, hence
+ * harmless to write 0 into. */
+ I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1254,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+ I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f4168..12ab3bdea54d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
/* Interface history:
*
* 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -182,15 +188,19 @@ struct drm_i915_error_state {
u32 pgtbl_er;
u32 ier;
u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +261,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -263,7 +274,6 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -338,6 +348,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -374,6 +385,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +399,18 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
- bool force_bit;
+ u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- const struct intel_device_info *info;
-
- int relative_constants_mode;
-
- void __iomem *regs;
-
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
-
- struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of the gmbus and gpio block.
- */
- uint32_t gpio_mmio_base;
-
- struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- uint32_t counter;
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-
- struct resource mch_res;
-
- atomic_t irq_received;
-
- /* protects the irq masks */
- spinlock_t irq_lock;
-
- /* DPIO indirect register protection */
- spinlock_t dpio_lock;
-
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
-
- u32 hotplug_supported_mask;
- struct work_struct hotplug_work;
-
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- struct intel_opregion opregion;
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- struct notifier_block lid_notifier;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
- struct workqueue_struct *wq;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- /* PCH chipset type */
- enum intel_pch pch_type;
-
- unsigned long quirks;
-
- /* Register state */
- bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -676,10 +556,206 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The below variables and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ struct delayed_work delayed_resume_work;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ struct pci_dev *bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ struct resource mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct work_struct hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct error_work;
+ struct completion error_completion;
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
- const struct intel_gtt *gtt;
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -706,9 +782,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- u32 *l3_remap_info;
-
struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
/**
* List of objects currently involved in rendering.
@@ -785,19 +860,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
- } dri1;
-
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +873,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -820,46 +883,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
+ struct intel_l3_parity l3_parity;
+
/* gen6+ rps state */
- struct {
- struct work_struct work;
- u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
-
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- } rps;
+ struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
- struct {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
- } ips;
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -871,14 +905,27 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console lock may be contended at resume, but we don't
+ * want resume to block on it.
+ */
+ struct work_struct console_resume_work;
+
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
+
+ bool fdi_rx_polarity_reversed;
+
+ struct i915_suspend_saved_registers regfile;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1057,6 +1104,7 @@ struct drm_i915_gem_object {
*/
atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1120,9 +1168,17 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1148,6 +1204,9 @@ struct drm_i915_file_private {
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1168,6 +1227,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1316,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1324,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1368,8 +1436,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno);
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1387,7 +1454,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1566,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+}
+
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1670,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1704,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b285da4449b..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return ret;
}
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
- if (!obj->map_and_fenceable) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- goto unlock;
- }
- if (!obj->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
- if (ret)
- goto unlock;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
- }
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret)
+ goto unlock;
- if (!obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto unlock;
-
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ goto unpin;
obj->fault_mappable = true;
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+ i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1528,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
if (obj->base.map_list.map)
return 0;
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
/* Badly fragmented mmap space? The only way we can recover
* space is by destroying unwanted objects. We can't randomly release
@@ -1542,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
i915_gem_shrink_all(dev_priv);
- return drm_gem_create_mmap_offset(&obj->base);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1707,10 +1702,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ list_del(&obj->gtt_list);
+
ops->put_pages(obj);
obj->pages = NULL;
- list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1718,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1726,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@@ -1737,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list,
mm_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@@ -1749,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return count;
}
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ return __i915_gem_shrink(dev_priv, target, true);
+}
+
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
@@ -1868,11 +1874,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1933,26 +1939,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
+ /* The hardware uses various monotonic 32-bit counters; if we
+ * detect that they will wrap around, we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
- return seqno;
+ return 0;
}
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
- return ring->outstanding_lazy_request;
+ *seqno = dev_priv->next_seqno++;
+ return 0;
}
int
@@ -1963,7 +1997,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
- u32 seqno;
int was_empty;
int ret;
@@ -1982,7 +2015,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
- seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1991,15 +2023,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring, &seqno);
+ ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
- trace_i915_gem_request_add(ring, seqno);
-
- request->seqno = seqno;
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
@@ -2017,23 +2047,24 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
+ trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work, HZ);
+ &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
if (out_seqno)
- *out_seqno = seqno;
+ *out_seqno = request->seqno;
return 0;
}
@@ -2131,7 +2162,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
@@ -2140,10 +2170,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2218,7 +2244,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
return;
}
@@ -2236,7 +2263,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+ round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -2386,7 +2414,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno);
if (!ret)
- from->sync_seqno[idx] = seqno;
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2469,14 +2501,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
- if (list_empty(&ring->active_list))
- return 0;
-
- return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2513,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
@@ -2879,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
+ struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2923,74 +2947,63 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ i915_gem_object_pin_pages(obj);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
search_free:
if (map_and_fenceable)
- free_space =
- drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
+ 0, dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- false);
-
- if (free_space != NULL) {
- if (map_and_fenceable)
- obj->gtt_space =
- drm_mm_get_block_range_generic(free_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
- else
- obj->gtt_space =
- drm_mm_get_block_generic(free_space,
- size, alignment, obj->cache_level,
- false);
- }
- if (obj->gtt_space == NULL) {
+ ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level);
+ if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
- if (ret)
- return ret;
+ if (ret == 0)
+ goto search_free;
- goto search_free;
+ i915_gem_object_unpin_pages(obj);
+ kfree(node);
+ return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev,
- obj->gtt_space,
- obj->cache_level))) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return -EINVAL;
}
-
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
return ret;
}
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
-
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_offset = obj->gtt_space->start;
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
fenceable =
- obj->gtt_space->size == fence_size &&
- (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
+ i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3059,7 +3072,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3454,11 +3467,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3511,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
- obj->user_pin_count++;
- obj->pin_filp = file;
- if (obj->user_pin_count == 1) {
+ if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@@ -3832,7 +3851,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
- if (!dev_priv->mm.l3_remap_info)
+ if (!dev_priv->l3_parity.remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3860,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+ if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->mm.l3_remap_info[i/4])
+ if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3895,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
-
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
- }
-}
-
static bool
intel_enable_blt(struct drm_device *dev)
{
@@ -3960,7 +3917,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (!intel_enable_gtt())
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4252,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4382,7 +4339,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return 0;
}
@@ -4407,6 +4364,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4417,14 +4387,25 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
+ bool unlock = true;
int cnt;
- if (!mutex_trylock(&dev->struct_mutex))
- return 0;
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
+ nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}
@@ -4432,10 +4413,11 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d7..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
- ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
- u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from_obj, ring, seqno);
+ i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..abeaafef6d7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
obj = dma_buf->priv;
/* is it from our device? */
if (obj->base.dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
drm_gem_object_reference(&obj->base);
+ dma_buf_put(dma_buf);
return &obj->base;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f6..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (unlikely(target_offset == 0)) {
- DRM_DEBUG("No GTT space found for object %d\n",
- reloc->target_handle);
- return ret;
- }
-
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
@@ -548,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
+ u64 invalid_offset = (u64)-1;
+ int j;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -558,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
}
+ /* As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Just in case the target
+ * object is evicted and then rebound into its old
+ * presumed_offset before the next execbuffer - if that
+ * happened we would make the mistake of assuming that the
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+ if (copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+ }
+
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
@@ -672,7 +684,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -722,8 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -735,10 +746,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring, seqno);
+ i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
}
@@ -798,8 +809,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 seqno;
u32 mask;
+ u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +822,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -983,26 +1004,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but let's be paranoid and do it
+ * unconditionally for now. */
+ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
- seqno = i915_gem_next_request_seqno(ring);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
- if (seqno < ring->sync_seqno[i]) {
- /* The GPU can not handle its semaphore value wrapping,
- * so every billion or so execbuffers, we need to stall
- * the GPU in order to reset the counters.
- */
- ret = i915_gpu_idle(dev);
- if (ret)
- goto err;
- i915_gem_retire_requests(dev);
-
- BUG_ON(ring->sync_seqno[i]);
- }
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1028,8 +1040,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- trace_i915_gem_ring_dispatch(ring, seqno);
-
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1040,17 +1050,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len);
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
} else {
- ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
if (ret)
goto err;
}
- i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
#include "i915_trace.h"
#include "intel_drv.h"
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+/* gen6+ has bits 11-4 for physical addr bits 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_LLC_MLC:
+ /* Haswell doesn't set L3 this way */
+ if (IS_HASWELL(dev))
+ pte |= GEN6_PTE_CACHE_LLC;
+ else
+ pte |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ if (IS_HASWELL(dev))
+ pte |= HSW_PTE_UNCACHED;
+ else
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+
+ return pte;
+}
+
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- uint32_t *pt_vaddr;
- uint32_t scratch_pte;
+ gtt_pte_t *pt_vaddr;
+ gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
- scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+ scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+ I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
+ ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
- uint32_t pte_flags)
+ enum i915_cache_level cache_level)
{
- uint32_t *pt_vaddr, pte;
+ gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[j] = pte | pte_flags;
+ pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+ cache_level);
/* grab the next page */
if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- uint32_t pte_flags = GEN6_PTE_VALID;
-
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte_flags |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- if (IS_HASWELL(obj->base.dev))
- pte_flags |= HSW_PTE_UNCACHED;
- else
- pte_flags |= GEN6_PTE_UNCACHED;
- break;
- default:
- BUG();
- }
-
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
- enum i915_cache_level cache_level)
+void i915_gem_init_ppgtt(struct drm_device *dev)
{
- switch (cache_level) {
- case I915_CACHE_LLC_MLC:
- if (INTEL_INFO(dev)->gen >= 6)
- return AGP_USER_CACHED_MEMORY_LLC_MLC;
- /* Older chipsets do not have this extra level of CPU
- * cacheing, so fallthrough and request the PTE simply
- * as cached.
- */
- case I915_CACHE_LLC:
- return AGP_USER_CACHED_MEMORY;
- default:
- case I915_CACHE_NONE:
- return AGP_USER_MEMORY;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ int i;
+
+ if (INTEL_INFO(dev)->gen < 6) {
+ intel_gtt_clear_range(first_entry, num_entries);
+ return;
+ }
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+ readl(gtt_base);
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sg_table *st = obj->pages;
+ struct scatterlist *sg = st->sgl;
+ const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+ const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+ gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+ int unused, i = 0;
+ unsigned int len, m = 0;
+ dma_addr_t addr;
+
+ for_each_sg(st->sgl, sg, st->nents, unused) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+ i++;
+ }
+ }
+
+ BUG_ON(i > max_entries);
+ BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that even though
+ * registers and PTEs are within the same BAR that they are potentially
+ * of NUMA access patterns. Therefore, even with the way we assume
+ * hardware should work, we must keep this posting read for paranoia.
+ */
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+ if (INTEL_INFO(dev)->gen < 6) {
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ flags);
+ } else {
+ gen6_ggtt_bind_object(obj, cache_level);
+ }
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ i915_ggtt_clear_range(obj->base.dev,
+ obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+ dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dma_addr))
+ return -EINVAL;
+#else
+ dma_addr = page_to_phys(page);
+#endif
+ dev_priv->mm.gtt->scratch_page = page;
+ dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+ return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(dev_priv->mm.gtt->scratch_page);
+ __free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+ static const int stolen_decoder[] = {
+ 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+ snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+ return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ phys_addr_t gtt_bus_addr;
+ u16 snb_gmch_ctl;
+ int ret;
+
+ /* On modern platforms we need not worry ourselves with the legacy
+ * hostbridge query stuff. Skip it entirely.
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ return -EIO;
+ }
+
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ intel_gmch_remove();
+ return -ENODEV;
+ }
+ return 0;
+ }
+
+ dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+ if (!dev_priv->mm.gtt)
+ return -ENOMEM;
+
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+#ifdef CONFIG_INTEL_IOMMU
+ dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+ /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+ gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+ dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+ /* i9xx_setup */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ dev_priv->mm.gtt->gtt_total_entries =
+ gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+ if (INTEL_INFO(dev)->gen < 7)
+ dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ else
+ dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+ dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+ /* 64/512MB is the current min/max we actually know of, but this is just a
+ * coarse sanity check.
+ */
+ if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+ dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+ DRM_ERROR("Unknown GMADR entries (%d)\n",
+ dev_priv->mm.gtt->gtt_mappable_entries);
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ ret = setup_scratch_page(dev);
+ if (ret) {
+ DRM_ERROR("Scratch setup failed\n");
+ goto err_out;
+ }
+
+ dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+ dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+ if (!dev_priv->mm.gtt->gtt) {
+ DRM_ERROR("Failed to map the gtt page table\n");
+ teardown_scratch_page(dev);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+ DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+ DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+ DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+ return 0;
+
+err_out:
+ kfree(dev_priv->mm.gtt);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ iounmap(dev_priv->mm.gtt->gtt);
+ teardown_scratch_page(dev);
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gmch_remove();
+ kfree(dev_priv->mm.gtt);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b8..fe843389c7b4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
- vbl = I915_READ(VBLANK(pipe));
+ vbl = I915_READ(VBLANK(cpu_transcoder));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
- jiffies +
- msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay);
}
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- parity_error_work);
+ l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
}
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]);
}
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
- if (HAS_PCH_CPT(dev))
- hotplug_mask = SDE_HOTPLUG_MASK_CPT;
- else
- hotplug_mask = SDE_HOTPLUG_MASK;
-
ret = IRQ_HANDLED;
if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
- if (pch_iir & hotplug_mask)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
@@ -1120,6 +1132,8 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1143,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1237,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
else
error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1464,7 +1489,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || work->pending || !work->enable_stall_check) {
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
@@ -1751,7 +1778,7 @@ void i915_hangcheck_elapsed(unsigned long data)
repeat:
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
/* drm_dma.h hooks
@@ -1956,6 +1983,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2023,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- dev_priv->gt_irq_mask = ~0;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT);
+
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2038,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2045,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
-#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2129,7 +2146,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2324,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2562,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2708,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
- INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c5..59afb7eb6db6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -40,6 +41,14 @@
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define IVB_GMCH_GMS_SHIFT 4
+#define IVB_GMCH_GMS_MASK 0xf
+
/* PCI config space */
@@ -105,23 +114,6 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
#define DPIO_FASTCLK_DISABLE 0x8100
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
+
/*
* Fence registers
*/
@@ -509,11 +512,14 @@
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define DERRMR 0x44050
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
@@ -521,14 +527,17 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
@@ -547,6 +556,8 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
@@ -661,6 +672,7 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +682,8 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1573,14 @@
#define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -2641,6 +2655,7 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2726,7 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3013,19 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3046,8 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3057,8 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3104,8 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3143,6 +3171,7 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3206,8 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3228,8 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@@ -3210,8 +3243,10 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -3246,12 +3281,6 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3330,22 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3454,13 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3476,21 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
/* PCH */
/* south display engine interrupt: IBX */
@@ -3686,7 +3722,7 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3795,18 +3831,26 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
+
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3860,7 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3922,7 @@
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
@@ -3901,16 +3947,21 @@
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
-#define _FDI_RXA_MISC 0xf0010
-#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4003,6 +4054,11 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4106,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4164,8 @@
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4278,10 @@
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
@@ -4251,6 +4313,15 @@
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4451,39 @@
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
-#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
- PIPE_DDI_FUNC_CTL_B)
-#define PIPE_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+ TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
-#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
-#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_MASK (7<<20)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_PVSYNC (1<<17)
-#define PIPE_DDI_PHSYNC (1<<16)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
@@ -4420,12 +4497,16 @@
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
@@ -4444,6 +4525,7 @@
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4542,10 @@
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4563,12 @@
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4490,8 +4578,8 @@
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SCC (1<<28)
-#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4500,7 +4588,7 @@
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4605,36 @@
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
-
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+ _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9f..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
+ array = dev_priv->regfile.save_palette_a;
else
- array = dev_priv->save_palette_b;
+ array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
- dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
+ dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
+ dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->saveGR[0x10] =
+ dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
+ dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
+ dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
+ dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
+ dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
+ dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
+ dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
+ dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
+ dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->saveCURABASE = I915_READ(_CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(_FPA0);
- dev_priv->saveFPA1 = I915_READ(_FPA1);
- dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(_FPB0);
- dev_priv->saveFPB1 = I915_READ(_FPB1);
- dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
return;
}
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
case 7:
case 6:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
}
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
- I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
- I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
- I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
- I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
- I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
- I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
- I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
- I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
- I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
- I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
return;
}
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->saveDSPARB = I915_READ(DSPARB);
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't save them in KMS mode */
+ /* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveADPA = I915_READ(PCH_ADPA);
- } else {
- dev_priv->saveADPA = I915_READ(ADPA);
- }
-
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else {
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->saveDP_B = I915_READ(DP_B);
- dev_priv->saveDP_C = I915_READ(DP_C);
- dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: save TV & SDVO state */
-
- /* Only save FBC state on the platform that supports FBC */
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+ }
+
+ /* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
- dev_priv->saveVGA0 = I915_READ(VGA0);
- dev_priv->saveVGA1 = I915_READ(VGA1);
- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
- dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->saveADPA);
-
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->saveLVDS);
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
- dev_priv->saveMCHBAR_RENDER_STANDBY);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
- }
-
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->saveDP_B);
- I915_WRITE(DP_C, dev_priv->saveDP_C);
- I915_WRITE(DP_D, dev_priv->saveDP_D);
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
}
- /* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
} else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
else
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->saveVGA0);
- I915_WRITE(VGA1, dev_priv->saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
i915_save_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveDEIER = I915_READ(DEIER);
- dev_priv->saveDEIMR = I915_READ(DEIMR);
- dev_priv->saveGTIER = I915_READ(GTIER);
- dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
}
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
mutex_unlock(&dev->struct_mutex);
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
mutex_lock(&dev->struct_mutex);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->saveDEIER);
- I915_WRITE(DEIMR, dev_priv->saveDEIMR);
- I915_WRITE(GTIER, dev_priv->saveGTIER);
- I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->saveIER);
- I915_WRITE(IMR, dev_priv->saveIMR);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117a..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return -EPERM;
if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- if (!dev_priv->mm.l3_remap_info) {
+ if (!dev_priv->l3_parity.remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->mm.l3_remap_info = temp;
+ dev_priv->l3_parity.remap_info = temp;
- memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+ memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_delay = val;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->rps.hw_lock);
return count;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
+ __field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
+ __entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+ __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 56846ed5ee55..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
- if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6345878ae1e7..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
- adpa = ADPA_HOTPLUG_BITS;
+ if (HAS_PCH_SPLIT(dev))
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
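+ /* add the EDID modes, then free our copy of the EDID */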
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
+ }
+
}
/*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_HASWELL(dev) || IS_I830(dev))
+ if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- crt->base.get_hw_state = intel_crt_get_hw_state;
+ if (IS_HASWELL(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
- if (HAS_PCH_SPLIT(dev)) {
- u32 adpa;
-
- adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
-
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
- crt->force_hotplug_required = 1;
- }
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+ /*
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity reversal bit or not, instead of relying on the BIOS.
+ */
+ if (HAS_PCH_LPT(dev))
+ dev_priv->fdi_rx_polarity_reversed =
+ !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0e..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+}
+
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_800MV_3_5DB_HSW
};
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
-
- /* Configure CPU PLL, wait for warmup */
- I915_WRITE(SPLL_CTL,
- SPLL_PLL_ENABLE |
- SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SCC);
+ u32 temp, i, rx_ctl_val;
- /* Use SPLL to drive the output when in FDI mode */
- I915_WRITE(PORT_CLK_SEL(PORT_E),
- PORT_CLK_SEL_SPLL);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(PORT_E));
-
- udelay(20);
-
- /* Start the training iterating through available voltages and emphasis */
- for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 19);
+ if (dev_priv->fdi_rx_polarity_reversed)
+ rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
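+ /* the receiver PLL enabled above needs time to settle before switching to PCDclk */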
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
- temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
- temp |
- DDI_BUF_CTL_ENABLE |
- DDI_PORT_WIDTH_X2 |
- hsw_ddi_buf_ctl_values[i]);
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
udelay(600);
- /* We need to program FDI_RX_MISC with the default TP1 to TP2
- * values before enabling the receiver, and configure the delay
- * for the FDI timing generator to 90h. Luckily, all the other
- * bits are supposed to be zeroed, so we can write those values
- * directly.
- */
- I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
- FDI_RX_FDI_DELAY_90);
-
- /* Enable CPU FDI Receiver with auto-training */
- reg = FDI_RX_CTL(pipe);
- I915_WRITE(reg,
- I915_READ(reg) |
- FDI_LINK_TRAIN_AUTO |
- FDI_RX_ENABLE |
- FDI_LINK_TRAIN_PATTERN_1_CPT |
- FDI_RX_ENHANCE_FRAME_ENABLE |
- FDI_PORT_WIDTH_2X_LPT |
- FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
-
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
- temp = I915_READ(DDI_FUNC_CTL(pipe));
- temp &= ~PIPE_DDI_PORT_MASK;
- temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
- PIPE_DDI_MODE_SELECT_FDI |
- PIPE_DDI_FUNC_ENABLE |
- PIPE_DDI_PORT_WIDTH_X2;
- I915_WRITE(DDI_FUNC_CTL(pipe),
- temp);
- break;
- } else {
- DRM_ERROR("Error training BUF_CTL %d\n", i);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
- /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) &
- ~DP_TP_CTL_ENABLE);
- I915_WRITE(FDI_RX_CTL(pipe),
- I915_READ(FDI_RX_CTL(pipe)) &
- ~FDI_RX_PLL_ENABLE);
- continue;
+ return;
}
- }
- DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
- /* For now, we don't do any proper output detection and assume that we
- * handle HDMI only */
-
- switch(port){
- case PORT_A:
- /* We don't handle eDP and DP yet */
- DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
- break;
- /* Assume that the ports B, C and D are working in HDMI mode for now */
- case PORT_B:
- case PORT_C:
- case PORT_D:
- intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
- break;
- default:
- DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
- port);
- break;
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
}
+
+ DRM_ERROR("FDI link training failed!\n");
}
/* WRPLL clock dividers */
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{298000, 2, 21, 19},
};
-void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int port = intel_hdmi->ddi_port;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
- int p, n2, r2;
- u32 temp, i;
+ int type = intel_encoder->type;
- /* On Haswell, we need to enable the clocks and prepare DDI function to
- * work in HDMI mode for this pipe.
- */
- DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+ intel_dp->DP |= DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ intel_dp->DP |= DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ intel_dp->DP |= DDI_PORT_WIDTH_X4;
+ WARN(1, "Unexpected DP lane count %d\n",
+ intel_dp->lane_count);
+ break;
+ }
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs a new logic
+ * and a new set of registers, so we leave it for future
+ * patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1)
+ WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+ intel_crtc->pipe);
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+ WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+ WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+ WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+ u32 i;
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
- if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+ if (clock <= wrpll_tmds_clock_table[i].clock)
break;
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
- p = wrpll_tmds_clock_table[i].p;
- n2 = wrpll_tmds_clock_table[i].n2;
- r2 = wrpll_tmds_clock_table[i].r2;
+ *p = wrpll_tmds_clock_table[i].p;
+ *n2 = wrpll_tmds_clock_table[i].n2;
+ *r2 = wrpll_tmds_clock_table[i].r2;
- if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
- DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
- wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+ if (wrpll_tmds_clock_table[i].clock != clock)
+ DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, clock);
- DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- crtc->mode.clock, p, n2, r2);
+ DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ clock, *p, *n2, *r2);
+}
- /* Enable LCPLL if disabled */
- temp = I915_READ(LCPLL_CTL);
- if (temp & LCPLL_PLL_DISABLE)
- I915_WRITE(LCPLL_CTL,
- temp & ~LCPLL_PLL_DISABLE);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t reg, val;
- /* Configure WR PLL 1, program the correct divider values for
- * the desired frequency and wait for warmup */
- I915_WRITE(WRPLL_CTL1,
- WRPLL_PLL_ENABLE |
- WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p));
+ /* TODO: reuse PLLs when possible (compare values) */
- udelay(20);
+ intel_ddi_put_crtc_pll(crtc);
- /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
- * this port for connection.
- */
- I915_WRITE(PORT_CLK_SEL(port),
- PORT_CLK_SEL_WRPLL1);
- I915_WRITE(PIPE_CLK_SEL(pipe),
- PIPE_CLK_SEL_PORT(port));
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ int p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+ WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+ "WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ reg = SPLL_CTL;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ }
+
+ WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+ "SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ WARN(1, "Invalid DDI encoder type %d\n", type);
+ return false;
+ }
+
+ I915_WRITE(reg, val);
udelay(20);
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic and a new set
- * of registers, so we leave it for future patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ return true;
+}
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ temp |= TRANS_MSA_8_BPC;
+ WARN(1, "%d bpp unsupported by DDI function\n",
+ intel_crtc->bpp);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
+}
- /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (intel_crtc->bpp) {
case 18:
- temp |= PIPE_DDI_BPC_6;
+ temp |= TRANS_DDI_BPC_6;
break;
case 24:
- temp |= PIPE_DDI_BPC_8;
+ temp |= TRANS_DDI_BPC_8;
break;
case 30:
- temp |= PIPE_DDI_BPC_10;
+ temp |= TRANS_DDI_BPC_10;
break;
case 36:
- temp |= PIPE_DDI_BPC_12;
+ temp |= TRANS_DDI_BPC_12;
break;
default:
- WARN(1, "%d bpp unsupported by pipe DDI function\n",
+ WARN(1, "%d bpp unsupported by transcoder DDI function\n",
intel_crtc->bpp);
}
- if (intel_hdmi->has_hdmi_sink)
- temp |= PIPE_DDI_MODE_SELECT_HDMI;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ switch (intel_dp->lane_count) {
+ case 1:
+ temp |= TRANS_DDI_PORT_WIDTH_X1;
+ break;
+ case 2:
+ temp |= TRANS_DDI_PORT_WIDTH_X2;
+ break;
+ case 4:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ break;
+ default:
+ temp |= TRANS_DDI_PORT_WIDTH_X4;
+ WARN(1, "Unsupported lane count %d\n",
+ intel_dp->lane_count);
+ }
+
+ } else {
+ WARN(1, "Invalid encoder type %d for pipe %d\n",
+ intel_encoder->type, pipe);
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
else
- temp |= PIPE_DDI_MODE_SELECT_DVI;
+ cpu_transcoder = pipe;
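+ /* transcoders A..C share the numbering of pipes A..C, so the assignment is valid */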
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- temp |= PIPE_DDI_PVSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- temp |= PIPE_DDI_PHSYNC;
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
- I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ default:
+ return false;
+ }
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
u32 tmp;
int i;
- tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+ tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
- for_each_pipe(i) {
- tmp = I915_READ(DDI_FUNC_CTL(i));
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if ((tmp & PIPE_DDI_PORT_MASK)
- == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
- *pipe = i;
- return true;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
}
}
- DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
return true;
}
-void intel_enable_ddi(struct intel_encoder *encoder)
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ ret = I915_READ(PORT_CLK_SEL(port));
+
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+ pipe_name(pipe), port_name(port), ret);
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
- struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
- temp = I915_READ(DDI_BUF_CTL(port));
- temp |= DDI_BUF_CTL_ENABLE;
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
- * and swing/emphasis values are ignored so nothing special needs
- * to be done besides enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
}
-void intel_disable_ddi(struct intel_encoder *encoder)
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = encoder->base.dev;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- int port = intel_hdmi->ddi_port;
- u32 temp;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ /* In HDMI/DVI mode, the port width and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450;
+ else if (IS_ULT(dev_priv->dev))
+ return 338;
+ else
+ return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ bool wait;
+ uint32_t val;
+
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int type = intel_encoder->type;
+
+ WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+ else
+ return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_fixup = intel_ddi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!dp_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ if (port != PORT_A) {
+ hdmi_connector = kzalloc(sizeof(struct intel_connector),
+ GFP_KERNEL);
+ if (!hdmi_connector) {
+ kfree(dp_connector);
+ kfree(intel_dig_port);
+ return;
+ }
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+ if (hdmi_connector)
+ intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+ else
+ intel_dig_port->hdmi.sdvox_reg = 0;
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
- temp = I915_READ(DDI_BUF_CTL(port));
- temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
- I915_WRITE(DDI_BUF_CTL(port), temp);
+ if (hdmi_connector)
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ intel_dp_init_connector(intel_dig_port, dp_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b426d44a2b05..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .vco = { .min = 4000000, .max = 5994000},
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
};
static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 162000, .max = 270000 },
- .vco = { .min = 5994000, .max = 4000000 },
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 60, .max = 300 }, /* guess */
+ .m = { .min = 22, .max = 450 },
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- HAS_eDP)
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->cpu_transcoder;
+}
+
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (INTEL_INFO(dev)->gen >= 4) {
- int reg = PIPECONF(pipe);
+ int reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
- reg = DDI_FUNC_CTL(pipe);
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
- cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
- return;
- } else {
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
- }
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
- return;
- }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* SBI access */
static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
+ u32 tmp;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_DATA,
- value);
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRWR);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
}
static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRRD);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
}
/**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
pll->on = false;
}
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- int reg;
- u32 val, pipeconf_val;
+ struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
- DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
- return;
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
}
+
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- int reg;
- u32 val;
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+ if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(_TRANSACONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(_TRANSACONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
}
/**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum transcoder pch_transcoder;
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev))
+ pch_transcoder = TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
+
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pipe);
- assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int reg;
u32 val;
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
- reg = PIPECONF(pipe);
+ reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
}
/**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
{
int tile_rows, tiles;
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
+
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->bits_per_pixel) {
- case 8:
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
- case 16:
- if (fb->depth != 16)
- return -EINVAL;
-
- dspcntr |= DISPPLANE_16BPP;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
break;
- case 24:
- case 32:
- if (fb->depth == 24)
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- else if (fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- return -EINVAL;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- gen4_compute_dspaddr_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
POSTING_READ(reg);
return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
- if (!dev->primary->master)
- return 0;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return 0;
-
- if (intel_crtc->pipe) {
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- } else {
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- }
+ intel_crtc_update_sarea_pos(crtc, x, y);
return 0;
}
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
+ /* When everything is off disable fdi C so that we could enable fdi B
+ * with all lanes. XXX: This misses the case where a pipe is not using
+ * any pch resources and so doesn't need any fdi lanes. */
+ if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- if (HAS_PCH_IBX(dev)) {
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
- }
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- if (HAS_PCH_CPT(dev))
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
break;
}
}
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
}
}
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
- /* Write the TU size bits so error detection works */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
-}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_EN));
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
* must be driven by its own crtc; no sharing is possible.
*/
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
- /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
- * CPU handles all others */
- if (IS_HASWELL(dev)) {
- /* It is still unclear how this will work on PPT, so throw up a warning */
- WARN_ON(!HAS_PCH_LPT(dev));
-
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
- DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
- return true;
- } else {
- DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
- intel_encoder->type);
- return false;
- }
- }
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
- intel_sbi_read(dev_priv, SBI_SSCCTL6) |
- SBI_SSCCTL_DISABLE);
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
phaseinc);
/* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
- intel_sbi_write(dev_priv,
- SBI_SSCDIVINTPHASE6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
/* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv,
- SBI_SSCAUXDIV6,
- temp);
-
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
/* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv,
- SBI_SSCCTL6,
- temp);
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
/* Wait for initialization time */
udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_transcoder_disabled(dev_priv, pipe);
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(intel_crtc);
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_pch_pll(intel_crtc);
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
- lpt_program_iclkip(crtc);
- } else if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- if (!IS_HASWELL(dev))
- intel_fdi_normal_train(crtc);
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
- DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
- temp |= TRANS_DP_PORT_SEL_B;
- break;
+ BUG();
}
I915_WRITE(reg, temp);
}
- intel_enable_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+ assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+ int dslreg = PIPEDSL(pipe);
u32 temp;
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
- /* Without this, mode sets may fail silently on FDI */
- I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
- udelay(250);
- I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = intel_crtc_driving_pch(crtc);
+ is_pch_port = ironlake_crtc_driving_pch(crtc);
if (is_pch_port) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ bool is_pch_port;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ /* Enable panel fitting for eDP */
+ if (dev_priv->pch_pf_size &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_pipe_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ lpt_pch_enable(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_crtc_update_cursor(crtc, true);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happend, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_disable(crtc);
- intel_disable_transcoder(dev_priv, pipe);
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ bool is_pch_port;
+
+ if (!intel_crtc->active)
+ return;
+
+ is_pch_port = haswell_crtc_driving_pch(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+ intel_crtc_update_cursor(crtc, false);
+
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ /* Disable PF */
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (is_pch_port) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
}
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+ * start using it. */
+ intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+ intel_ddi_put_crtc_pll(crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
- int refclk, int num_connectors)
+ int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_hdmi;
+ bool is_sdvo;
+ u32 temp;
- is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+ dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+ dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+ dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
bestn = clock->n;
bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bestp1 = clock->p1;
bestp2 = clock->p2;
- /* Enable DPIO clock input */
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
-
+ /*
+ * In Valleyview, the PLL and lane counter registers are programmed
+ * through the DPIO interface
+ */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
- pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
- (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+ (5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
- intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
- if (is_hdmi) {
- u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
-
- I915_WRITE(DPLL_MD(pipe), temp);
- POSTING_READ(DPLL_MD(pipe));
}
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
- intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+ /* Now program lane control registers */
+ if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
+ || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ {
+ temp = 0x1000C4;
+ if(pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+ }
+ if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
+ {
+ temp = 0x1000C4;
+ if(pipe == 1)
+ temp |= (1 << 21);
+ intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ }
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
u32 dpll;
bool is_sdvo;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll;
+ i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
dpll = DPLL_VGA_MODE_DIS;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t vsyncshift;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented in the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf, vsyncshift;
+ u32 dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
- i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
- &reduced_clock : NULL);
-
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ i8xx_update_pll(crtc, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else if (IS_VALLEYVIEW(dev))
- vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
- refclk, num_connectors);
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_ENABLE |
+ I965_PIPECONF_ACTIVE;
+ }
+ }
+
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2;
- } else {
+ else
pipeconf |= PIPECONF_PROGRESSIVE;
- vsyncshift = 0;
- }
- if (!IS_GEN3(dev))
- I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
}
}
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (!has_vga)
+ return;
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+ 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+ tmp &= ~(0x3 << 6);
+ tmp |= (1 << 6) | (1 << 0);
+ intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+ }
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val |= PIPE_12BPC;
break;
default:
- val |= PIPE_8BPC;
- break;
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
}
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
POSTING_READ(PIPECONF(pipe));
}
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(cpu_transcoder));
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK_HSW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true;
}
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *fb)
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ if (intel_crtc->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 4;
+
+ return false;
+ }
+
+ if (dev_priv->num_pipe == 2)
+ return true;
+
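+ /* Pipe C can only get an FDI link when pipe B uses at most two lanes
+ * and the B/C bifurcation bit is set; with the bit clear, pipe B alone
+ * may drive up to four lanes.
+ */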
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+
+ if (intel_crtc->fdi_lanes > 2)
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ case PIPE_C:
+ if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+ if (intel_crtc->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+ intel_crtc->pipe, intel_crtc->fdi_lanes);
+ /* Clamp lanes to avoid programming the hw with bogus values. */
+ intel_crtc->fdi_lanes = 2;
+
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ return true;
+ default:
+ BUG();
+ }
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
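+ *
+ * For example (illustrative numbers): a 148500 kHz mode at 24 bpp on a
+ * 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200, and
+ * 3742200 / (270000 * 8) + 1 = 2 lanes.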
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
+
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false, is_sdvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *encoder, *edp_encoder = NULL;
- int ret;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ struct intel_encoder *intel_encoder, *edp_encoder = NULL;
struct fdi_m_n m_n = {0};
- u32 temp;
- int target_clock, pixel_multiplier, lane, link_bw, factor;
- unsigned int pipe_bpp;
- bool dither;
- bool is_cpu_edp = false, is_pch_edp = false;
+ int target_clock, pixel_multiplier, lane, link_bw;
+ bool is_dp = false, is_cpu_edp = false;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- if (encoder->needs_tv_clock)
- is_tv = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
- if (intel_encoder_is_pch_edp(&encoder->base))
- is_pch_edp = true;
- else
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
is_cpu_edp = true;
- edp_encoder = encoder;
+ edp_encoder = intel_encoder;
break;
}
-
- num_connectors++;
}
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
-
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
target_clock = adjusted_mode->clock;
- /* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
- adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
-
- if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
- pipe_bpp != 36) {
- WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- pipe_bpp = 24;
- }
- intel_crtc->bpp = pipe_bpp;
-
- if (!lane) {
- /*
- * Account for spread spectrum to avoid
- * oversubscribing the link. Max center spread
- * is 2.5%; use 5% for safety's sake.
- */
- u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
- lane = bps / (link_bw * 8) + 1;
- }
+ if (!lane)
+ lane = ironlake_get_lanes_required(target_clock, link_bw,
+ intel_crtc->bpp);
intel_crtc->fdi_lanes = lane;
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, u32 fp)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, pixel_multiplier, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false, is_tv = false;
+ bool is_dp = false, is_cpu_edp = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m < factor * clock.n)
+ if (clock->m < factor * clock->n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (clock.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -4939,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
+ return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither, fdi_config_ok;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+ "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
- * pre-Haswell/LPT generation */
- if (HAS_PCH_LPT(dev)) {
- DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
- pipe);
- } else if (!is_cpu_edp) {
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!is_cpu_edp) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- I915_WRITE(VSYNCSHIFT(pipe),
- adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal/2);
- } else {
- I915_WRITE(VSYNCSHIFT(pipe), 0);
- }
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
- I915_WRITE(HTOTAL(pipe),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(pipe),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(pipe),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ /* Note, this also computes intel_crtc->fdi_lanes which is used below in
+ * ironlake_check_fdi_lanes. */
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
- I915_WRITE(VTOTAL(pipe),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(pipe),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(pipe),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
- /* pipesrc controls the size that is scaled from, which should
- * always be the user's requested size.
- */
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+ fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+ return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+ struct intel_encoder *encoder;
+ u32 temp;
+ int ret;
+ bool dither;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ is_dp = true;
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ is_cpu_edp = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
+ /* We are not sure yet this won't happen. */
+ WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+ INTEL_PCH_TYPE(dev));
+
+ WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+ num_connectors, pipe_name(pipe));
+
+ WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+ (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+ WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+ if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+ return -EINVAL;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock,
+ &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+ drm_mode_debug_printmodeline(mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+
+ dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+ fp);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its
+ * own on pre-Haswell/LPT generation */
+ if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
+ return -EINVAL;
+ }
+ } else
+ intel_put_pch_pll(intel_crtc);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are
+ * enabled. This is an exception to the general rule that
+ * mode_set doesn't turn things on.
+ */
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether
+ * we're going to set the DPLLs for dual-channel mode or
+ * not.
+ */
+ if (clock.p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP |
+ LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode
+ * (LVDS_A3_POWER_UP) appropriately here, but we need to
+ * look more thoroughly into how panels behave in the
+ * two modes.
+ */
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(PCH_LVDS, temp);
+ }
+ }
+
+ if (is_dp && !is_cpu_edp) {
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ } else {
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery
+ * setting. */
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+ }
+ }
+
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+ }
+
+ if (intel_crtc->pch_pll) {
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+ }
+ }
+ }
+
+ intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+ if (!is_dp || is_cpu_edp)
+ ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (is_cpu_edp)
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+ haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- return ret;
+ if (ret != 0)
+ return ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+
+ return 0;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- goto fail;
+ return false;
}
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- goto fail;
+ return false;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
-
return true;
-fail:
- connector->encoder = NULL;
- encoder->crtc = NULL;
- return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct drm_display_mode *mode;
- int htot = I915_READ(HTOTAL(pipe));
- int hsync = I915_READ(HSYNC(pipe));
- int vtot = I915_READ(VTOTAL(pipe));
- int vsync = I915_READ(VSYNC(pipe));
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
+ struct drm_device *dev = work->crtc->dev;
- mutex_lock(&work->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(work->dev);
- mutex_unlock(&work->dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
kfree(work);
}
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
- struct drm_pending_vblank_event *e;
- struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || !work->pending) {
+
+ /* Ensure we don't miss a work->pending update ... */
+ smp_rmb();
+
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
- intel_crtc->unpin_work = NULL;
-
- if (work->event) {
- e = work->event;
- e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+ /* and that the unpin work is consistent wrt ->pending. */
+ smp_rmb();
- e->event.tv_sec = tvbl.tv_sec;
- e->event.tv_usec = tvbl.tv_usec;
+ intel_crtc->unpin_work = NULL;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
+ if (work->event)
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
drm_vblank_put(dev, intel_crtc->pipe);
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
-
wake_up(&dev_priv->pending_flip_queue);
- schedule_work(&work->work);
+
+ queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work) {
- if ((++intel_crtc->unpin_work->pending) > 1)
- DRM_ERROR("Prepared flip multiple times\n");
- } else {
- DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
- }
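+ /* Only bump flips that have been marked active; inc_not_zero leaves a
+ * work item whose flip has not yet reached the ring (pending == 0)
+ * untouched, covering the spurious completions described above.
+ */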
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ smp_wmb();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ smp_wmb();
+}
+
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
+
+ intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
- work->dev = crtc->dev;
+ work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
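+ /* Throttle: if two unpin works are already outstanding on this crtc,
+ * let the workqueue drain before queueing another flip.
+ */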
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
dev->mode_config.dpms_property;
connector->dpms = DRM_MODE_DPMS_ON;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
x, y, fb);
if (!ret)
goto done;
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
- if (encoder->crtc != &intel_crtc->base)
- continue;
-
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.id, drm_get_encoder_name(encoder),
- mode->base.id, mode->name);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->mode_set(encoder, mode, adjusted_mode);
- }
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7259,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
-
- /* Disable all disconnected encoders. */
- if (connector->base.status == connector_status_disconnected)
- connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
@@ -7420,6 +8301,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (IS_HASWELL(dev))
+ intel_ddi_pll_init(dev);
+}
+
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8346,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
+ intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
@@ -7551,17 +8439,9 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (HAS_PCH_SPLIT(dev)) {
- dpd_is_edp = intel_dpd_is_edp(dev);
-
- if (has_edp_a(dev))
- intel_dp_init(dev, DP_A, PORT_A);
-
- if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_D, PORT_D);
- }
-
- intel_crt_init(dev);
+ if (!(IS_HASWELL(dev) &&
+ (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ intel_crt_init(dev);
if (IS_HASWELL(dev)) {
int found;
@@ -7584,6 +8464,10 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8487,15 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
- if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
+
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8508,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
- /* Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -7676,8 +8561,9 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_encoder_clones(encoder);
}
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7712,33 +8598,74 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
- if (obj->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
+ }
- if (mode_cmd->pitches[0] & 63)
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
+ /* FIXME <= Gen4 stride limits are a bit unclear */
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be less than 32768\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
+ return -EINVAL;
+ }
+
+ /* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- /* RGB formats are common across chipsets */
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+ return -EINVAL;
+ }
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format %u\n",
- mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7776,7 +8703,13 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8760,8 @@ static void intel_init_display(struct drm_device *dev)
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
@@ -8058,6 +8993,7 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
/* Just disable it once at startup */
@@ -8127,7 +9063,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->pipe);
+ reg = PIPECONF(crtc->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8244,9 +9180,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
+static void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
@@ -8255,10 +9209,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
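+ /* On Haswell the eDP transcoder can be driven by any pipe; read back
+ * which pipe (if any) the BIOS routed to it so the per-pipe readout
+ * below uses the right transcoder registers.
+ */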
+ if (IS_HASWELL(dev)) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ pipe = PIPE_C;
+ break;
+ }
+
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ crtc->cpu_transcoder = TRANSCODER_EDP;
+
+ DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+ pipe_name(pipe));
+ }
+ }
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- tmp = I915_READ(PIPECONF(pipe));
+ tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
@@ -8271,6 +9250,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
+ if (IS_HASWELL(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
@@ -8317,9 +9299,21 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
intel_sanitize_crtc(crtc);
}
- intel_modeset_update_staged_output_state(dev);
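+ /* If asked to restore, force a modeset of the recorded configuration
+ * on every pipe and make sure the VGA plane stays disabled; otherwise
+ * just resync the staged software state.
+ */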
+ if (force_restore) {
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_set_mode(&crtc->base, &crtc->base.mode,
+ crtc->base.x, crtc->base.y, crtc->base.fb);
+ }
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9322,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, false);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -8447,6 +9441,7 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
+ enum transcoder cpu_transcoder;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9449,8 @@ intel_display_capture_error_state(struct drm_device *dev)
return NULL;
for_each_pipe(i) {
+ cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9465,14 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(i));
- error->pipe[i].hblank = I915_READ(HBLANK(i));
- error->pipe[i].hsync = I915_READ(HSYNC(i));
- error->pipe[i].vtotal = I915_READ(VTOTAL(i));
- error->pipe[i].vblank = I915_READ(VBLANK(i));
- error->pipe[i].vsync = I915_READ(VSYNC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef1600..fb3715b4b09d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/**
@@ -49,7 +47,9 @@
*/
static bool is_edp(struct intel_dp *intel_dp)
{
- return intel_dp->base.type == INTEL_OUTPUT_EDP;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
/**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
- return container_of(encoder, struct intel_dp, base.base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dp, base);
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
/**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
return is_pch_edp(intel_dp);
}
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
*lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
- *link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
- *link_bw = 270000;
+ *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
- if (intel_dp->panel_fixed_mode)
- return intel_dp->panel_fixed_mode->clock;
+ if (intel_connector->panel.fixed_mode)
+ return intel_connector->panel.fixed_mode->clock;
else
return mode->clock;
}
static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
- int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
- return max_lane_count;
-}
-
-static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int try, precharge;
+ if (IS_HASWELL(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ ch_ctl = DPA_AUX_CH_CTL;
+ ch_data = DPA_AUX_CH_DATA1;
+ break;
+ case PORT_B:
+ ch_ctl = PCH_DPB_AUX_CH_CTL;
+ ch_data = PCH_DPB_AUX_CH_DATA1;
+ break;
+ case PORT_C:
+ ch_ctl = PCH_DPC_AUX_CH_CTL;
+ ch_data = PCH_DPC_AUX_CH_DATA1;
+ break;
+ case PORT_D:
+ ch_ctl = PCH_DPD_AUX_CH_CTL;
+ ch_data = PCH_DPD_AUX_CH_DATA1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_HASWELL(dev))
+ aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+ else if (IS_VALLEYVIEW(dev))
+ aux_clock_divider = 100;
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+ aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else
aux_clock_divider = intel_hrawclk(dev) / 2;
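A quick sanity check of the divider arithmetic, assuming the PCH raw clock reads back as 125 MHz (the value the old hard-coded path relied on):

	/* hedged example: assumes intel_pch_rawclk() returns 125 (MHz) */
	int rawclk  = 125;                       /* "IRL input clock fixed at 125Mhz" in the old code */
	int divider = DIV_ROUND_UP(rawclk, 2);   /* 63, same as the old constant -> ~2 MHz AUX clock */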
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
-static bool
+bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
- intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
/*
* Find the lane count in the intel_encoder private
*/
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+ } else if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
+ TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
+
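intel_dp_init_link_config() fills the 9-byte buffer that is later flushed to the sink in a single AUX write (see intel_dp_start_link_train() further down). Assuming the standard DPCD offsets from drm_dp_helper.h, the bytes land at DPCD 0x100..0x108:

	/* sketch of how the buffer is consumed; the write itself appears later in this patch */
	intel_dp_init_link_config(intel_dp);
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,       /* DPCD 0x100 */
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);    /* 9 bytes: 0x100..0x108 */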
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
+
+ intel_dp_init_link_config(intel_dp);
/* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
return control;
}
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
ironlake_wait_panel_off(intel_dp);
}
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+
+ intel_panel_enable_backlight(dev, pipe);
}
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (!is_edp(intel_dp))
return;
+ intel_panel_disable_backlight(dev);
+
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
}
/* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
return true;
}
}
- }
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
return true;
}
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
- int lane)
-{
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = adjust_request[lane>>1];
-
- return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = {
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
uint8_t v = 0;
uint8_t p = 0;
int lane;
- uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
}
}
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int s = (lane & 1) * 4;
- uint8_t l = link_status[lane>>1];
-
- return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
{
- int lane;
- uint8_t lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
- DP_LANE_CHANNEL_EQ_DONE|\
- DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
- uint8_t lane_align;
- uint8_t lane_status;
- int lane;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
- lane_align = intel_dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(link_status, lane);
- if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
- return false;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
}
- return true;
}
static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
int ret;
+ uint32_t temp;
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ if (IS_HASWELL(dev)) {
+ temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+ DP_TP_STATUS_IDLE_DONE), 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) &&
+ (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
/* Enable corresponding port and start training pattern 1 */
-static void
+void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
+ if (IS_HASWELL(dev))
+ intel_ddi_prepare_link_retrain(encoder);
+
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
-
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(
+ intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+ signal_levels);
+ /* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
- /* Set training pattern 1 */
- udelay(100);
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-static void
+void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_HASWELL(dev)) {
+ signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+ DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+ } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_LINK_SCRAMBLING_DISABLE))
break;
- udelay(400);
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp, link_status)) {
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ /*
+ * DDI code has a strict mode set sequence and we should try to respect
+ * it, otherwise we might hang the machine in many different ways. So we
+ * really should be disabling the port only on a complete crtc_disable
+ * sequence. This function is only called in two cases in the DDI
+ * code:
+ * - Link training failed while doing crtc_enable, and in this case we
+ * really should respect the mode set sequence and wait for a
+ * crtc_disable.
+ * - Someone turned the monitor off and intel_dp_check_link_status
+ * called us. We don't need to disable the whole port in this case, so
+ * when someone turns the monitor on again,
+ * intel_ddi_prepare_link_retrain will take care of redoing the link
+ * train.
+ */
+ if (IS_HASWELL(dev))
+ return;
+
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
/*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 4. Check link status on receipt of hot-plug interrupt
*/
-static void
+void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->base.connectors_active)
+ if (!intel_encoder->connectors_active)
return;
- if (WARN_ON(!intel_dp->base.base.crtc))
+ if (WARN_ON(!intel_encoder->base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp, link_status)) {
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_dp->base.base));
+ drm_get_encoder_name(&intel_encoder->base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
- status = intel_panel_detect(intel_dp->base.base.dev);
+ status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct edid *edid;
- int size;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- if (!intel_dp->edid)
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
return NULL;
- size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
- memcpy(edid, intel_dp->edid, size);
+ memcpy(edid, intel_connector->edid, size);
return edid;
}
- edid = drm_get_edid(connector, adapter);
- return edid;
+ return drm_get_edid(connector, adapter);
}
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int ret;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp)) {
- drm_mode_connector_update_edid_property(connector,
- intel_dp->edid);
- ret = drm_add_edid_modes(connector, intel_dp->edid);
- drm_edid_to_eld(connector,
- intel_dp->edid);
- return intel_dp->edid_mode_count;
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
}
- ret = intel_ddc_get_modes(connector, adapter);
- return ret;
+ return intel_ddc_get_modes(connector, adapter);
}
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
+ char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
- intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
- intel_dp->dpcd[6], intel_dp->dpcd[7]);
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
if (status != connector_status_connected)
return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
- if (ret) {
- if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
- struct drm_display_mode *newmode;
- list_for_each_entry(newmode, &connector->probed_modes,
- head) {
- if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, newmode);
- break;
- }
- }
- }
+ if (ret)
return ret;
- }
- /* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (is_edp(intel_dp)) {
- /* initialize panel mode from VBT if available for eDP */
- if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
- intel_dp->panel_fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_dp->panel_fixed_mode) {
- intel_dp->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- }
- }
- if (intel_dp->panel_fixed_mode) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = val;
+
+ goto done;
+ }
+
return -EINVAL;
done:
- if (intel_dp->base.base.crtc) {
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ if (intel_encoder->base.crtc) {
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
- if (is_edp(intel_dp))
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ if (is_edp(intel_dp)) {
intel_panel_destroy_backlight(dev);
+ intel_panel_fini(&intel_connector->panel);
+ }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
- kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
- kfree(intel_dp);
+ kfree(intel_dig_port);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
intel_dp_check_link_status(intel_dp);
}
@@ -2435,13 +2525,14 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_dp->base.type == INTEL_OUTPUT_EDP)
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2471,78 +2562,204 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(dev_priv);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
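A worked example of the unit juggling above: both the registers and the VBT store delays in 100 µs steps, and get_delay() rounds back up to milliseconds, so the eDP 1.3 T1+T3 limit comes out as:

	/* assumes no register/VBT value is set, so the spec limit wins */
	int t1_t3       = 210 * 10;                  /* spec.t1_t3: 210 ms in 100 us units */
	int power_up_ms = DIV_ROUND_UP(t1_t3, 10);   /* 210 -> intel_dp->panel_power_up_delay */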
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div;
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ /* Compute the divisor for the pp clock, simply match the Bspec
+ * formula. */
+ pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+ << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (is_cpu_edp(intel_dp))
+ pp_on |= PANEL_POWER_PORT_DP_A;
+ else
+ pp_on |= PANEL_POWER_PORT_DP_D;
+ }
+
+ I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+ I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+ I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(PCH_PP_ON_DELAYS),
+ I915_READ(PCH_PP_OFF_DELAYS),
+ I915_READ(PCH_PP_DIVISOR));
}
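Plugging typical numbers into the divisor formula above (hedged: assumes intel_pch_rawclk() reports 125, i.e. a 125 MHz raw clock, and t11_t12 at the spec limit used earlier):

	u32 ref_div   = (100 * 125) / 2 - 1;                   /* 6249, goes into PP_REFERENCE_DIVIDER_SHIFT */
	u32 cycle_cnt = DIV_ROUND_UP((510 + 100) * 10, 1000);  /* 7 -- the field counts 100 ms steps */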
void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_dp *intel_dp;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
+ enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
- intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
- if (!intel_dp)
- return;
-
- intel_dp->output_reg = output_reg;
- intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_dp);
- return;
- }
- intel_encoder = &intel_dp->base;
-
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+ /*
+ * FIXME : We need to initialize built-in panels before external panels.
+ * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
+ */
+ if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
+ /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+ * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+ * rewrite it.
+ */
type = DRM_MODE_CONNECTOR_DisplayPort;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- intel_encoder->cloneable = false;
-
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- ironlake_panel_vdd_work);
-
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+ ironlake_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->disable = intel_disable_dp;
- intel_encoder->post_disable = intel_post_disable_dp;
- intel_encoder->get_hw_state = intel_dp_get_hw_state;
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
/* Set up the DDC bus. */
switch (port) {
@@ -2566,66 +2783,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
break;
}
- /* Cache some DPCD data in the eDP case */
- if (is_edp(intel_dp)) {
- struct edp_power_seq cur, vbt;
- u32 pp_on, pp_off, pp_div;
-
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
-
- if (!pp_on || !pp_off || !pp_div) {
- DRM_INFO("bad panel power sequencing delays, disabling panel\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
- return;
- }
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
-
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
-
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- }
+ if (is_edp(intel_dp))
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_i2c_init(intel_dp, intel_connector, name);
+ /* Cache DPCD and EDID for edp. */
if (is_edp(intel_dp)) {
bool ret;
+ struct drm_display_mode *scan;
struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2806,51 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- intel_dp_encoder_destroy(&intel_dp->base.base);
- intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_encoder->base);
+ intel_dp_destroy(connector);
return;
}
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(connector,
- edid);
- intel_dp->edid_mode_count =
- drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- intel_dp->edid = edid;
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
}
+
+ /* fallback to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
ironlake_edp_panel_vdd_off(intel_dp, false);
}
- intel_encoder->hot_plug = intel_dp_hot_plug;
-
if (is_edp(intel_dp)) {
- dev_priv->int_edp_connector = connector;
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
}
intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2864,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ intel_dp_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f43..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
int crtc_mask;
};
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -179,12 +185,19 @@ struct intel_connector {
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
+ enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ atomic_t unpin_work_count;
+
/* Display surface base address adjustement for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
+ uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
+ bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
- struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
- int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
-#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
- struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
- enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
- struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- struct edid *edid; /* cached EDID for eDP */
- int edid_mode_count;
+ struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
};
static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
- struct drm_device *dev;
+ struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
- int pending;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
bool enable_stall_check;
};
@@ -395,6 +415,8 @@ struct intel_fbc_work {
int interval;
};
+int intel_pch_rawclk(struct drm_device *dev);
+
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
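These helpers replace the old embedded-encoder navigation; after this patch the typical call pattern in intel_dp.c and intel_hdmi.c looks like the following sketch:

	/* before: dev = intel_dp->base.base.dev;  after: go through the digital port */
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	enum port port = dig_port->port;   /* the port now lives on the digital port, not on intel_dp */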
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch);
+
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
#endif /* __INTEL_DRV_H__ */
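Aside on the inline helpers added above: enc_to_intel_dp(), enc_to_dig_port(), dp_to_dig_port() and hdmi_to_dig_port() are all the same container_of() idiom, walking from a pointer to an embedded member back to the struct intel_digital_port that contains it. The standalone sketch below only illustrates that idiom; the struct names and values are simplified stand-ins, not the driver's real types.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the driver structs; illustrative only. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dp { unsigned int output_reg; };
struct hdmi { unsigned int sdvox_reg; };

struct digital_port {
	int port;
	struct dp dp;
	struct hdmi hdmi;
};

/* Same shape as dp_to_dig_port(): subtract the member's offset. */
static struct digital_port *dp_to_port(struct dp *dp)
{
	return container_of(dp, struct digital_port, dp);
}

int main(void)
{
	struct digital_port port = { .port = 2, .dp.output_reg = 0x64100 };

	printf("recovered port %d from its dp member\n",
	       dp_to_port(&port.dp)->port);
	return 0;
}

The same offsetof subtraction works for the hdmi member, which is what lets a single intel_digital_port back both an HDMI and a DP connector.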
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee8..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_hdmi, base.base);
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_hdmi, base);
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
intel_set_infoframe(encoder, &avi_if);
}
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -786,6 +794,9 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_hdmi->base.base.crtc) {
- struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+ if (intel_dig_port->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
- .mode_fixup = intel_hdmi_mode_fixup,
- .mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
-};
-
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
- struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
-
- intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
- if (!intel_hdmi)
- return;
-
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_hdmi);
- return;
- }
-
- intel_encoder = &intel_hdmi->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
+ enum port port = intel_dig_port->port;
- connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- intel_encoder->type = INTEL_OUTPUT_HDMI;
-
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
-
- intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
BUG();
}
- intel_hdmi->sdvox_reg = sdvox_reg;
-
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev)) {
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs_hsw);
- } else {
- intel_encoder->enable = intel_enable_hdmi;
- intel_encoder->disable = intel_disable_hdmi;
- intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_hdmi_helper_funcs);
- }
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+ if (IS_HASWELL(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port);
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971c..3ef5af15b812 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- bus->force_bit = true;
+ bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
- bus->force_bit = true;
+ bus->force_bit = 1;
intel_gpio_setup(bus, port);
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- bus->force_bit = force_bit;
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
}
void intel_teardown_gmbus(struct drm_device *dev)
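The force_bit change above turns the flag into a counter: intel_gmbus_force_bit() now increments or decrements it, so independent callers that force GPIO bit-banging and later release it do not clobber each other, and bit-banging stays in effect while the count is non-zero. A minimal sketch of that counting behaviour, with a stand-in struct rather than the real intel_gmbus state:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the real intel_gmbus state; illustrative only. */
struct gmbus_sketch {
	int force_bit;	/* 0 = use GMBUS, > 0 = fall back to GPIO bit-banging */
};

static void force_bit_set(struct gmbus_sketch *bus, bool force)
{
	bus->force_bit += force ? 1 : -1;
	printf("force bit now %d\n", bus->force_bit);
}

int main(void)
{
	struct gmbus_sketch bus = { .force_bit = 0 };

	force_bit_set(&bus, true);	/* caller A forces bit-banging */
	force_bit_set(&bus, true);	/* caller B forces it as well */
	force_bit_set(&bus, false);	/* A releases: still forced */
	force_bit_set(&bus, false);	/* B releases: back to GMBUS */
	return 0;
}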
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474b..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_connector {
+ struct intel_connector base;
- struct edid *edid;
+ struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
- int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
- struct drm_display_mode *fixed_mode;
+ struct intel_lvds_connector *attached_connector;
};
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds, base.base);
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_lvds, base);
+ return container_of(connector, struct intel_lvds_connector, base.base);
}
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_dirty) {
+ if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
+ lvds_encoder->pfit_control,
+ lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+ lvds_encoder->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (intel_lvds->pfit_control) {
+ if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_dirty = true;
+ lvds_encoder->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
- struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- if (intel_encoder_check_is_cloned(&intel_lvds->base))
+ if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
/*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+ intel_pch_panel_fitting(dev,
+ intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- switch (intel_lvds->fitting_mode) {
+ switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- if (pfit_control != intel_lvds->pfit_control ||
- pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
- intel_lvds->pfit_dirty = true;
+ if (pfit_control != lvds_encoder->pfit_control ||
+ pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+ lvds_encoder->pfit_control = pfit_control;
+ lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid)
- return drm_add_edid_modes(connector, intel_lvds->edid);
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
- mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, lid_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector = dev_priv->int_lvds_connector;
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of the LVDS connector after receiving
* the LID notification event.
*/
- if (connector)
- connector->status = connector->funcs->detect(connector,
- false);
+ connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
dev_priv->modeset_on_lid = 0;
mutex_lock(&dev->mode_config.mutex);
- intel_modeset_check_state(dev);
+ intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+ if (lvds_connector->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ kfree(lvds_connector->base.edid);
- intel_panel_destroy_backlight(dev);
+ intel_panel_destroy_backlight(connector->dev);
+ intel_panel_fini(&lvds_connector->base.panel);
- if (dev_priv->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
- struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+ struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (intel_lvds->fitting_mode == value) {
+ if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ intel_connector->panel.fitting_mode = value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -763,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "ZOTAC ZBOXSD-ID12/ID13",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
@@ -912,12 +917,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
+ struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@@ -945,23 +953,25 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
- if (!intel_lvds) {
+ lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
return false;
- }
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_lvds);
+ lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ if (!lvds_connector) {
+ kfree(lvds_encoder);
return false;
}
+ lvds_encoder->attached_connector = lvds_connector;
+
if (!HAS_PCH_SPLIT(dev)) {
- intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+ lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
- intel_encoder = &intel_lvds->base;
+ intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1003,10 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- /*
- * the initial panel fitting mode will be FULL_SCREEN.
- */
-
- drm_connector_attach_property(&intel_connector->base,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1015,20 +1021,21 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- intel_lvds->edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- pin));
- if (intel_lvds->edid) {
- if (drm_add_edid_modes(connector,
- intel_lvds->edid)) {
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ edid);
} else {
- kfree(intel_lvds->edid);
- intel_lvds->edid = NULL;
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
}
+ } else {
+ edid = ERR_PTR(-ENOENT);
}
- if (!intel_lvds->edid) {
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
* handed to valid_mode for checking
@@ -1041,22 +1048,26 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, scan);
- intel_find_lvds_downclock(dev,
- intel_lvds->fixed_mode,
- connector);
- goto out;
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- intel_lvds->fixed_mode =
- drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1076,16 +1087,17 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (intel_lvds->fixed_mode) {
- intel_lvds->fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!intel_lvds->fixed_mode)
+ if (!fixed_mode)
goto failed;
out:
@@ -1100,16 +1112,15 @@ out:
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
- dev_priv->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- dev_priv->lid_notifier.notifier_call = NULL;
+ lvds_connector->lid_notifier.notifier_call = NULL;
}
- /* keep the LVDS connector */
- dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- intel_panel_setup_backlight(dev);
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
return true;
@@ -1117,7 +1128,9 @@ failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_lvds);
- kfree(intel_connector);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ kfree(lvds_encoder);
+ kfree(lvds_connector);
return false;
}
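One detail of the LVDS rework above worth spelling out: the cached EDID is now stored as an ERR_PTR, so a single pointer field distinguishes "probed, nothing found" (ERR_PTR(-ENOENT)), "probed but unusable" (ERR_PTR(-EINVAL)) and a valid cached blob, and IS_ERR_OR_NULL() covers every do-not-use case at once. The sketch below re-creates simplified versions of those helpers purely for illustration; it is not the kernel implementation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified re-implementations for illustration only. */
static inline void *ERR_PTR(intptr_t error) { return (void *)error; }
static inline intptr_t PTR_ERR(const void *ptr) { return (intptr_t)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *edid = ERR_PTR(-ENOENT);	/* probe ran, nothing on the bus */

	if (IS_ERR_OR_NULL(edid))
		printf("no usable EDID cached (err %ld)\n",
		       (long)PTR_ERR(edid));
	return 0;
}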
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66eb..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
- kfree(edid);
return ret;
}
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
+ int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
- return intel_connector_update_modes(connector, edid);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
}
static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_connector_attach_property(connector, prop, 0);
+ drm_object_attach_property(&connector->base, prop, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d8..7741c22c934c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
+ DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2aacd329545..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->saveBLC_PWM_CTL2 == 0) {
- dev_priv->saveBLC_PWM_CTL2 = val;
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
- I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL2;
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
} else {
val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->saveBLC_PWM_CTL == 0) {
- dev_priv->saveBLC_PWM_CTL = val;
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
- I915_WRITE(BLC_PWM_CTL,
- dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_CTL2,
- dev_priv->saveBLC_PWM_CTL2);
- val = dev_priv->saveBLC_PWM_CTL;
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
- max = i915_read_blc_pwm_ctl(dev_priv);
+ max = i915_read_blc_pwm_ctl(dev);
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
-#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
- if (i915_panel_ignore_lid)
- return i915_panel_ignore_lid > 0 ?
- connector_status_connected :
- connector_status_disconnected;
- /* opregion lid state on HP 2540p is wrong at boot up,
- * appears to be either the BIOS or Linux ACPI fault */
-#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
- if (dev_priv->opregion.lid_state)
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
-#endif
+ }
- return connector_status_unknown;
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
}
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
.get_brightness = intel_panel_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
- struct drm_connector *connector;
intel_panel_init_backlight(dev);
- if (dev_priv->int_lvds_connector)
- connector = dev_priv->int_lvds_connector;
- else if (dev_priv->int_edp_connector)
- connector = dev_priv->int_edp_connector;
- else
- return -ENODEV;
-
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
backlight_device_unregister(dev_priv->backlight);
}
#else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
{
- intel_panel_init_backlight(dev);
+ intel_panel_init_backlight(connector->dev);
return 0;
}
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
return;
}
#endif
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
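intel_panel_init() and intel_panel_fini() added above make struct intel_panel the owner of the duplicated fixed_mode: the LVDS failure path frees the mode only when panel init was never reached, and intel_lvds_destroy() releases it through intel_panel_fini(). A minimal sketch of that ownership hand-off, using stand-in types rather than the driver structs:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for drm_display_mode and intel_panel; illustrative only. */
struct mode { int clock; };
struct panel { struct mode *fixed_mode; };

static int panel_init(struct panel *panel, struct mode *fixed_mode)
{
	panel->fixed_mode = fixed_mode;	/* the panel now owns the mode */
	return 0;
}

static void panel_fini(struct panel *panel)
{
	free(panel->fixed_mode);	/* single place that releases it */
	panel->fixed_mode = NULL;
}

int main(void)
{
	struct panel panel = { 0 };
	struct mode *mode = calloc(1, sizeof(*mode));

	if (!mode)
		return 1;
	panel_init(&panel, mode);
	panel_fini(&panel);		/* no leak, no double free */
	printf("panel torn down\n");
	return 0;
}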
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 442968f8b201..3280cffe50f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
* i915.i915_enable_fbc parameter
*/
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled &&
- !to_intel_crtc(tmp_crtc)->primary_disabled &&
- tmp_crtc->fb) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1293,7 @@ static void valleyview_update_wm(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
vlv_update_drain_latency(dev);
@@ -1302,17 +1310,23 @@ static void valleyview_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1325,10 +1339,11 @@ static void valleyview_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_device *dev)
@@ -1351,17 +1366,18 @@ static void g4x_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1374,11 +1390,11 @@ static void g4x_update_wm(struct drm_device *dev)
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
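The DSPFW2/DSPFW3 updates above fix a classic read-modify-write mistake: the old code OR-ed the new cursor self-refresh watermark into the register without clearing the previous field, so stale bits could survive and corrupt the value. A small sketch of the corrected pattern; the mask and shift here are made up for illustration and are not the real DSPFW3 encoding:

#include <stdint.h>
#include <stdio.h>

/* Made-up field layout, for illustration only. */
#define CURSOR_SR_MASK	(0x3fu << 24)
#define CURSOR_SR_SHIFT	24

static uint32_t set_cursor_sr(uint32_t reg, uint32_t wm)
{
	reg &= ~CURSOR_SR_MASK;			/* clear the stale field first */
	return reg | (wm << CURSOR_SR_SHIFT);	/* then install the new value */
}

int main(void)
{
	/* Old field value 0x3f; without the mask it would bleed into 0x05. */
	uint32_t reg = set_cursor_sr(0x3f000000u, 0x05);

	printf("cursor SR field now 0x%02x\n",
	       (unsigned)((reg & CURSOR_SR_MASK) >> CURSOR_SR_SHIFT));
	return 0;
}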
@@ -1467,10 +1483,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
@@ -1478,10 +1497,13 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
+ wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@@ -1571,8 +1593,7 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
+ 4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -1805,8 +1826,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
- if ((dev_priv->num_pipe == 3) &&
- g4x_compute_wm0(dev, 2,
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -1869,12 +1992,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
- /* WM3 */
+ /* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
@@ -1923,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
@@ -2323,7 +2451,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
@@ -2398,12 +2526,12 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
- u32 pcu_mbox, rc6_mask = 0;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i;
+ int i, ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@@ -2497,30 +2625,16 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->rps.max_delay = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+ if (ret && pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2534,6 +2648,20 @@ static void gen6_enable_rps(struct drm_device *dev)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
gen6_gt_force_wake_put(dev_priv);
}
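The blocks deleted above were repeated copies of the same GEN6_PCODE_MAILBOX sequence, now funnelled through the sandybridge_pcode_read()/sandybridge_pcode_write() helpers: wait for the READY bit to clear, write the data register, write the command with READY set, then wait for the firmware to clear READY again. The sketch below fakes that protocol with plain variables; the helper, the command value and the register names are illustrative, not the driver's code:

#include <stdint.h>
#include <stdio.h>

#define PCODE_READY	(1u << 31)

/* Plain variables stand in for the MMIO mailbox registers. */
static uint32_t mbox_reg, data_reg;

static int wait_for_idle(void)
{
	/* Real code polls with a timeout; here the "firmware" finishes at once. */
	mbox_reg &= ~PCODE_READY;
	return (mbox_reg & PCODE_READY) ? -1 : 0;
}

static int pcode_write(uint32_t cmd, uint32_t val)
{
	if (wait_for_idle())
		return -1;			/* mailbox stuck busy */
	data_reg = val;
	mbox_reg = PCODE_READY | cmd;		/* post the request */
	return wait_for_idle();			/* wait for completion */
}

int main(void)
{
	int ret = pcode_write(0x8, 0);		/* 0x8 is an illustrative command id */

	printf("pcode_write returned %d (data 0x%x)\n", ret, (unsigned)data_reg);
	return 0;
}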
@@ -2541,10 +2669,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
+ int gpu_freq;
+ unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
max_ia_freq = cpufreq_quick_get_max(0);
/*
@@ -2575,17 +2704,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq | gpu_freq);
}
}
@@ -2593,16 +2716,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
}
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
}
}
@@ -2628,14 +2751,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
return -ENOMEM;
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@@ -2647,6 +2770,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
int ret;
/* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2785,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
if (ret)
return;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
/*
* GPU can automatically power down the render unit if given a page
* to save state.
@@ -2668,12 +2795,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
return;
}
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2816,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
return;
}
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
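The ironlake_enable_rc6() changes above also bracket the context-save submission with dev_priv->mm.interruptible handling: the caller's value is saved, waits are forced non-interruptible, and the saved value is restored on both the error and the success path. A small sketch of that save/force/restore pattern, with a plain global standing in for the flag:

#include <stdbool.h>
#include <stdio.h>

static bool interruptible = true;	/* stand-in for mm.interruptible */

static int emit_and_wait(bool fail)
{
	bool was_interruptible = interruptible;	/* save the caller's setting */
	int ret = 0;

	interruptible = false;			/* this wait must not be aborted */
	if (fail)
		ret = -5;			/* simulated -EIO from the ring */
	/* ... submit the context-save batch and wait for the ring to idle ... */
	interruptible = was_interruptible;	/* restore on success and on error */
	return ret;
}

int main(void)
{
	emit_and_wait(true);
	printf("interruptible restored: %s\n", interruptible ? "yes" : "no");
	return 0;
}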
@@ -3304,37 +3433,72 @@ static void intel_init_emon(struct drm_device *dev)
void intel_disable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+ mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
}
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ rps.delayed_resume_work.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
void intel_enable_gt_powersave(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev);
- gen6_update_ring_freq(dev);
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+ round_jiffies_up_relative(HZ));
}
}
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3506,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@@ -3354,9 +3516,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@@ -3378,33 +3538,70 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
}
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* The below fixes the weird display corruption, a few pixels shifted
+ * downward, on (only) LVDS of some HP laptops with IVY.
+ */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -3454,11 +3651,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3473,6 +3671,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3687,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3715,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3743,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3547,27 +3756,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
+ lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3796,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
@@ -3607,6 +3838,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3620,39 +3852,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3704,6 +3956,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
+
+ /*
+ * WaDisableVLVClockGating_VBIIssue
+ * Disable clock gating on the GCFG unit to prevent a delay
+ * in the reporting of vblank events.
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3981,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +4040,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
@@ -3840,7 +4070,7 @@ void intel_init_power_wells(struct drm_device *dev)
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+ if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
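The small change above is a real fix rather than a cleanup: the old condition ANDed the register offset with HSW_PWR_WELL_STATE before passing it to I915_READ, so the poll read the wrong address; the new parenthesisation reads the power-well register first and then tests the state bit:

    /* before: reads register (power_wells[i] & HSW_PWR_WELL_STATE) */
    wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20);
    /* after: reads power_wells[i], then tests the state bit */
    wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20);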
@@ -3878,11 +4108,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@@ -3905,7 +4130,7 @@ void intel_init_pm(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -3993,6 +4218,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
DRM_ERROR("GT thread status wait timed out\n");
}
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4006,7 +4237,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4247,13 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
+}
+
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4029,8 +4267,9 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4067,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ /* something from same cacheline, but !FORCEWAKE */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4111,13 +4352,20 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4376,90 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
gen6_gt_check_fifodbg(dev_priv);
}
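All three force-wake flavours (legacy, multi-threaded and the new Valleyview one) follow the same shape: wait for the ack bit to clear, set the kernel wake bit, flush the write with a posting read from another register in the same cacheline, then wait for the ack to assert; put is the mirror image. Callers only see the function pointers installed in intel_gt_init() below. A schematic of the bracketing, not the driver's real wrapper (which also reference-counts under gt_lock):

    /* Schematic only: real callers go through reference-counted wrappers. */
    static u32 example_read_gt_register(struct drm_i915_private *dev_priv,
    				    u32 reg)
    {
    	u32 val;

    	dev_priv->gt.force_wake_get(dev_priv);	/* keep the GT awake */
    	val = I915_READ(reg);
    	dev_priv->gt.force_wake_put(dev_priv);	/* allow RC6 again */

    	return val;
    }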
+void intel_gt_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+}
+
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->gt_lock);
+ intel_gt_reset(dev);
+
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
}
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
}
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
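Both new helpers follow the same GEN6 mailbox handshake: bail out with -EAGAIN if GEN6_PCODE_READY is still set from a previous exchange, load GEN6_PCODE_DATA, write the command with the READY bit set, then poll until READY clears again (with a read-back of DATA only in the read case). They must be called under rps.hw_lock, which is what the WARN_ON asserts. A hedged usage sketch; the mailbox command number below is a placeholder, not a real GEN6 pcode command:

    /* Illustrative only: 0x5 stands in for a real pcode mailbox command. */
    static int example_pcode_roundtrip(struct drm_i915_private *dev_priv)
    {
    	u32 val = 0;
    	int ret;

    	mutex_lock(&dev_priv->rps.hw_lock);
    	ret = sandybridge_pcode_read(dev_priv, 0x5, &val);
    	if (ret == 0)
    		ret = sandybridge_pcode_write(dev_priv, 0x5, val);
    	mutex_unlock(&dev_priv->rps.hw_lock);

    	return ret;
    }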
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbbc..42ff97d667d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
static inline int ring_space(struct intel_ring_buffer *ring)
{
- int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
return space;
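The only change here is the hard-coded 8 becoming I915_RING_FREE_SPACE (64, defined in intel_ringbuffer.h further down): the driver never lets the ring fill completely, so head and tail cannot land in the same cacheline with head ahead of tail, which the hardware forbids. A standalone sketch of the same arithmetic, with a worked value:

    #include <stdio.h>

    /* Mirrors ring_space(): free bytes between tail and head, less a
     * 64-byte guard so the producer can never fully catch the consumer. */
    static int example_ring_space(unsigned int head, unsigned int tail,
    			      unsigned int size)
    {
    	int space = (int)head - (int)(tail + 64);

    	if (space < 0)
    		space += size;
    	return space;
    }

    int main(void)
    {
    	/* 128 KiB ring, head at 0x100, tail at 0x7f00 -> 98752 bytes free */
    	printf("%d\n", example_ring_space(0x100, 0x7f00, 128 * 1024));
    	return 0;
    }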
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
/*
* TLB invalidate requires a post-sync write.
*/
- flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
ret = intel_ring_begin(ring, 4);
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
- if (IS_GEN7(dev))
- I915_WRITE(GFX_MODE_GEN7,
- _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- }
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
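MI_MODE, GFX_MODE and the other chicken registers written above are masked registers: the upper 16 bits of the written value select which of the lower 16 bits actually change, so no read-modify-write is needed. The helpers used throughout the patch boil down to the following (a sketch mirroring their i915_reg.h definitions):

    /* Bit n of a masked register is only updated when bit n+16 is also set. */
    #define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
    #define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)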
@@ -547,23 +559,24 @@ static int init_render_ring(struct intel_ring_buffer *ring)
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
+
if (!ring->private)
return;
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
cleanup_pipe_control(ring);
}
static void
update_mboxes(struct intel_ring_buffer *ring,
- u32 seqno,
- u32 mmio_offset)
+ u32 mmio_offset)
{
- intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_REGISTER |
- MI_SEMAPHORE_UPDATE);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
}
/**
@@ -576,8 +589,7 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring,
- u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
@@ -590,13 +602,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_next_request_seqno(ring);
-
- update_mboxes(ring, *seqno, mbox1_reg);
- update_mboxes(ring, *seqno, mbox2_reg);
+ update_mboxes(ring, mbox1_reg);
+ update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, *seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
@@ -653,10 +663,8 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -677,7 +685,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +704,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -888,25 +895,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring,
- u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
{
- u32 seqno;
int ret;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- seqno = i915_gem_next_request_seqno(ring);
-
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- *result = seqno;
return 0;
}
@@ -964,7 +966,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
{
int ret;
@@ -975,35 +979,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- MI_BATCH_NON_SECURE_I965);
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which now has all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
return 0;
}
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1012,7 +1052,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@@ -1075,6 +1115,29 @@ err:
return ret;
}
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
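The physical status page setup packs a possibly-above-4GiB DMA address into the 32-bit HWS_PGA register: the low 32 bits go in as-is (the page is 4 KiB aligned, so bits 0-11 are free) and, on gen4+, address bits 35:32 are folded into register bits 7:4 by the (busaddr >> 28) & 0xf0 term. A worked example, assuming that reading of the register layout:

    busaddr = 0x1_2345_6000
    addr    = 0x23456000 | ((0x123456000 >> 28) & 0xf0)
            = 0x23456000 | 0x10
            = 0x23456010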
+
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@@ -1086,6 +1149,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
init_waitqueue_head(&ring->irq_queue);
@@ -1093,6 +1157,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = init_status_page(ring);
if (ret)
return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1226,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
- ret = intel_wait_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
@@ -1176,28 +1245,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
- uint32_t __iomem *virt;
- int rem = ring->size - ring->tail;
-
- if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(ring, rem);
- if (ret)
- return ret;
- }
-
- virt = ring->virtual_start + ring->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ring->tail = 0;
- ring->space = ring_space(ring);
-
- return 0;
-}
-
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
@@ -1231,7 +1278,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (request->tail == -1)
continue;
- space = request->tail - (ring->tail + 8);
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
@@ -1266,7 +1313,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
return 0;
}
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1356,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return -EBUSY;
}
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ uint32_t __iomem *virt;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ seqno = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list)->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
@@ -1320,6 +1421,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret)
return ret;
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
@@ -1327,7 +1433,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
}
if (unlikely(ring->space < n)) {
- ret = intel_wait_ring_buffer(ring, n);
+ ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}
@@ -1391,10 +1497,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1402,8 +1515,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len)
+ u32 offset, u32 len,
+ unsigned flags)
{
int ret;
@@ -1411,7 +1546,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1432,10 +1569,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB;
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1490,7 +1634,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,10 +1647,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
- if (!I915_NEED_GFX_HWS(dev)) {
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
}
return intel_init_ring_buffer(dev, ring);
@@ -1514,6 +1675,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1551,16 +1713,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
- if (!I915_NEED_GFX_HWS(dev))
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
- if (IS_I830(ring->dev))
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1729,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_hws_pga(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1618,7 +1783,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
-
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
struct intel_hw_status_page {
u32 *page_addr;
unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring,
- u32 *seqno);
+ int (*add_request)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,10 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length);
+ u32 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
@@ -181,27 +194,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
- return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-
void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
@@ -217,6 +224,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
return ring->tail;
}
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a6ac0b416964..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
- u8 retry = 5;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
* command to be complete.
*
* Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago), the delay was only ever 15ms
+ * with an additional delay of 30ms applied for TVs added later after
+ * many experiments. To accommodate both sets of delays, we do a
+ * sequence of slow checks if the device is falling behind and fails
+ * to reply within 5*15µs.
*/
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
- udelay(15);
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
+
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
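Worked out, the new loop gives a stalling device far more time than before: retry starts at 15 and is pre-decremented, so after the initial status read there are at most five polls separated by udelay(15) and nine separated by msleep(15). That is a worst-case wait of roughly

    5 * 15 us + 9 * 15 ms ~= 135 ms   (old code: 5 * 15 us = 75 us)

plus whatever msleep() oversleeps, before the response is abandoned.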
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0)
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX: the port is forced to transcoder A
+ * while it is disabled, so restore the transcoder selection here. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ temp |= SDVO_PIPE_B_SELECT;
+ }
+
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
-
- /* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- msleep(30);
-
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_sdvo_connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- pin = GMBUS_PORT_DPB;
- if (mapping->initialized)
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
- if (intel_gmbus_is_port_valid(pin)) {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
- intel_gmbus_force_bit(sdvo->i2c, true);
- } else {
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- }
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
}
static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&intel_sdvo_connector->base.base,
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- drm_connector_attach_property(connector, \
+ drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
- kfree(intel_sdvo);
- return false;
- }
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009d..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- int pixel_size;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe));
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= SPRITE_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,27 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(SPRLINOFF(pipe), offset);
- }
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPRSCALE(pipe), sprscale);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
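The new offset handling splits the pixel address into a tile-aligned surface base plus a residual linear offset, which keeps the relation below regardless of which register pair the hardware uses (SPROFFSET on HSW, SPRTILEOFF for X-tiled, SPRLINOFF for linear). Assuming intel_gen4_compute_offset_xtiled() returns a byte offset rounded to an X-tile boundary, and with x and y taken as passed in (before the helper reduces them to the intra-tile remainder):

    obj->gtt_offset + sprsurf_offset + linear_offset
        == obj->gtt_offset + y * fb->pitches[0] + x * pixel_size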
@@ -152,7 +153,8 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
- I915_WRITE(SPRSCALE(pipe), 0);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@@ -225,8 +227,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe, pixel_size;
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -239,33 +243,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
- pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
- pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
- pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
- pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
- pixel_size = 2;
break;
default:
- DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- dvscntr |= DVS_FORMAT_RGBX888;
- pixel_size = 4;
- break;
+ BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +284,22 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE) {
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_offset_xtiled(&x, &y,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- } else {
- unsigned long offset;
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
- offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- I915_WRITE(DVSLINOFF(pipe), offset);
- }
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -422,6 +421,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +437,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
- if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +447,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@@ -473,6 +483,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
goto out;
/*
+ * We may not have a scaler, e.g. HSW does not have it any more
+ */
+ if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+ return -EINVAL;
+
+ /*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
@@ -665,6 +681,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +698,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
case 7:
+ if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+ intel_plane->can_scale = false;
+ else
+ intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
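Note on the sprite hunks above: instead of programming a raw linear offset, the plane base is now split into a tile-aligned surface offset (added to obj->gtt_offset for SPRSURF/DVSSURF) plus a small x/y remainder written to the TILEOFF or LINOFF register. The helper they call, intel_gen4_compute_offset_xtiled(), lives elsewhere in i915 and is not part of this hunk; the following is only a sketch of what such a helper typically computes for X-tiled surfaces (4 KiB tiles, 512 bytes wide by 8 rows tall), with the function name chosen purely for illustration.

/* Sketch only, not the driver's implementation. */
static unsigned long
example_compute_offset_xtiled(int *x, int *y, unsigned int cpp,
			      unsigned int pitch)
{
	int tile_rows = *y / 8;		/* whole tile rows above (x, y) */
	int tiles = *x / (512 / cpp);	/* whole tiles to the left of (x, y) */

	*y %= 8;			/* remainders end up in the TILEOFF register */
	*x %= 512 / cpp;

	/* one tile row spans 8 scanlines of the pitch; each tile is 4 KiB */
	return tile_rows * pitch * 8 + tiles * 4096;
}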
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135e..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
}
j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
- ret = drm_connector_property_set_value(connector, property, val);
+ ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
ARRAY_SIZE(tv_modes),
tv_format_names);
- drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
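In the intel_tv.c hunks, the open-coded "Flush the plane changes" read-back/write of DSPADDR is replaced by intel_flush_display_plane(), which is defined in intel_display.c and not shown in this diff. Judging from the lines removed here, such a flush amounts to rewriting the plane's base-address register so the double-buffered plane state latches on the next vblank; the sketch below is illustrative only, not the i915 implementation.

/* Minimal sketch of the idea; register choice may differ per generation. */
static void example_flush_display_plane(struct drm_i915_private *dev_priv,
					enum plane plane)
{
	u32 reg = DSPADDR(plane);		/* plane base-address register */

	I915_WRITE(reg, I915_READ(reg));	/* arm the double-buffered update */
}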
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1e910117b0a2..122b571ccc7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -60,8 +60,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
}
-static int __devinit
-mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
mgag200_kick_out_firmware_fb(pdev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae33701..70dd3c5529d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
struct apertures_struct *aper = alloc_apertures(1);
+ if (!aper)
+ return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
aper->ranges[0].base = mdev->mc.vram_base;
aper->ranges[0].size = mdev->mc.vram_window;
- aper->count = 1;
remove_conflicting_framebuffers(aper, "mgafb", true);
+ kfree(aper);
if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c4..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
static int mgag200_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, mgag200_bo_ttm_destroy);
if (ret)
return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
- if (ret) {
- nouveau_namedb_destroy(&client->base);
+ if (ret)
return ret;
- }
/* prevent init/fini being called, the OS is in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
return nouveau_gpuobj_fini(&engctx->base, suspend);
}
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_engctx *engctx;
+ int ret;
+
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+ NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+ *pobject = nv_object(engctx);
+ return ret;
+}
+
void
_nouveau_engctx_dtor(struct nouveau_object *object)
{
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..6b0843c33877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+ nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_falcon *falcon = (void *)object;
+ const struct firmware *fw;
+ char name[32] = "internal";
+ int ret, i;
+ u32 caps;
+
+ /* enable engine, and determine its capabilities */
+ ret = nouveau_engine_init(&falcon->base);
+ if (ret)
+ return ret;
+
+ if (device->chipset < 0xa3 ||
+ device->chipset == 0xaa || device->chipset == 0xac) {
+ falcon->version = 0;
+ falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
+ } else {
+ caps = nv_ro32(falcon, 0x12c);
+ falcon->version = (caps & 0x0000000f);
+ falcon->secret = (caps & 0x00000030) >> 4;
+ }
+
+ caps = nv_ro32(falcon, 0x108);
+ falcon->code.limit = (caps & 0x000001ff) << 8;
+ falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+ nv_debug(falcon, "falcon version: %d\n", falcon->version);
+ nv_debug(falcon, "secret level: %d\n", falcon->secret);
+ nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+ nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+ /* wait for 'uc halted' to be signalled before continuing */
+ if (falcon->secret) {
+ nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ nv_wo32(falcon, 0x004, 0x00000010);
+ }
+
+ /* disable all interrupts */
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ /* no default ucode provided by the engine implementation, try and
+ * locate a "self-bootstrapping" firmware image for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret == 0) {
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ falcon->data.data = NULL;
+ falcon->data.size = 0;
+ release_firmware(fw);
+ }
+
+ falcon->external = true;
+ }
+
+ /* next step is to try and load "static code/data segment" firmware
+ * images for the engine
+ */
+ if (!falcon->code.data) {
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware data\n");
+ return ret;
+ }
+
+ falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->data.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->data.data)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+ device->chipset, falcon->addr >> 12);
+
+ ret = request_firmware(&fw, name, &device->pdev->dev);
+ if (ret) {
+ nv_error(falcon, "unable to load firmware code\n");
+ return ret;
+ }
+
+ falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ falcon->code.size = fw->size;
+ release_firmware(fw);
+ if (!falcon->code.data)
+ return -ENOMEM;
+ }
+
+ nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+ "static code/data segments" : "self-bootstrapping");
+
+ /* ensure any "self-bootstrapping" firmware image is in vram */
+ if (!falcon->data.data && !falcon->core) {
+ ret = nouveau_gpuobj_new(object->parent, NULL,
+ falcon->code.size, 256, 0,
+ &falcon->core);
+ if (ret) {
+ nv_error(falcon, "core allocation failed, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < falcon->code.size; i += 4)
+ nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+ }
+
+ /* upload firmware bootloader (or the full code segments) */
+ if (falcon->core) {
+ if (device->card_type < NV_C0)
+ nv_wo32(falcon, 0x618, 0x04000000);
+ else
+ nv_wo32(falcon, 0x618, 0x00000114);
+ nv_wo32(falcon, 0x11c, 0);
+ nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+ nv_wo32(falcon, 0x114, 0);
+ nv_wo32(falcon, 0x118, 0x00006610);
+ } else {
+ if (falcon->code.size > falcon->code.limit ||
+ falcon->data.size > falcon->data.limit) {
+ nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+ return -EINVAL;
+ }
+
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00100000);
+ for (i = 0; i < falcon->code.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+ } else {
+ nv_wo32(falcon, 0x180, 0x01000000);
+ for (i = 0; i < falcon->code.size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wo32(falcon, 0x188, i >> 6);
+ nv_wo32(falcon, 0x184, falcon->code.data[i]);
+ }
+ }
+ }
+
+ /* upload data segment (if necessary), zeroing the remainder */
+ if (falcon->version < 3) {
+ nv_wo32(falcon, 0xff8, 0x00000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+ for (; i < falcon->data.limit; i += 4)
+ nv_wo32(falcon, 0xff4, 0x00000000);
+ } else {
+ nv_wo32(falcon, 0x1c0, 0x01000000);
+ for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+ nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+ for (; i < falcon->data.limit / 4; i++)
+ nv_wo32(falcon, 0x1c4, 0x00000000);
+ }
+
+ /* start it running */
+ nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+ nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+ nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+ nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+ return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_falcon *falcon = (void *)object;
+
+ if (!suspend) {
+ nouveau_gpuobj_ref(NULL, &falcon->core);
+ if (falcon->external) {
+ kfree(falcon->data.data);
+ kfree(falcon->code.data);
+ falcon->code.data = NULL;
+ }
+ }
+
+ nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+ nv_wo32(falcon, 0x014, 0xffffffff);
+
+ return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 addr, bool enable,
+ const char *iname, const char *fname,
+ int length, void **pobject)
+{
+ struct nouveau_falcon *falcon;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+ fname, length, pobject);
+ falcon = *pobject;
+ if (ret)
+ return ret;
+
+ falcon->addr = addr;
+ return 0;
+}
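This new falcon base class centralises the ucode upload and boot sequence that individual engines previously open-coded. As the copy, crypt, bsp, vp and ppp conversions later in this series show, an engine either points falcon->code/data at built-in fuc arrays before init, or leaves them NULL so _nouveau_falcon_init() tries external firmware named "nouveau/nv%02x_fuc%03x" (self-bootstrapping) or the "...c"/"...d" code and data images. A condensed, illustrative constructor in that style follows; names such as example_ucode_code and the 0x104000 window are placeholders, not real symbols.

static int
example_engine_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct example_priv *priv;	/* wraps struct nouveau_falcon base */
	int ret;

	/* 0x104000 is the falcon's register window; names/unit are per engine */
	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
				    "PENGINE", "engine", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000040;
	/* optional built-in ucode; leave NULL to request external firmware */
	nv_falcon(priv)->code.data = example_ucode_code;
	nv_falcon(priv)->code.size = sizeof(example_ucode_code);
	nv_falcon(priv)->data.data = example_ucode_data;
	nv_falcon(priv)->data.size = sizeof(example_ucode_data);
	return 0;
}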
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
}
u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
}
void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
- handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
+
+ *phandle = handle;
+
return 0;
}
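The nouveau_handle_create() change delays publishing the new handle through *phandle until it is fully constructed, so a failure part-way through can no longer leave the caller holding a pointer to freed or half-initialised memory. The same publish-on-success pattern in isolation (sketch with made-up types):

#include <linux/slab.h>

struct example { int id; };

static int example_create(int id, struct example **pobj)
{
	struct example *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;

	obj->id = id;
	/* ... further setup that may fail and free obj ... */

	*pobj = obj;	/* only publish once construction has succeeded */
	return 0;
}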
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
+ if (nouveau_mm_initialised(mm)) {
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
+ }
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (WARN_ON(nodes++ == mm->heap_nodes))
- return -EBUSY;
+ kfree(heap);
}
- kfree(heap);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/bsp.h>
struct nv84_bsp_priv {
- struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
- struct nouveau_bsp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_bsp_chan *priv;
- int ret;
-
- ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_chan *priv = (void *)object;
- return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_bsp_context_ctor,
- .dtor = nv84_bsp_context_dtor,
- .init = nv84_bsp_context_init,
- .fini = nv84_bsp_context_fini,
- .rd32 = _nouveau_bsp_context_rd32,
- .wr32 = _nouveau_bsp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
* BSP engine/subdev functions
******************************************************************************/
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_bsp_priv *priv;
int ret;
- ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
- nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_bsp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_bsp_priv *priv = (void *)object;
- return nouveau_bsp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
- .dtor = nv84_bsp_dtor,
- .init = nv84_bsp_init,
- .fini = nv84_bsp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+ { 0x90b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+ struct nvc0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+ nv_engine(priv)->sclass = nvc0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+ { 0x95b1, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+ struct nve0_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000fff2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+ "PBSP", "bsp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00008000;
+ nv_engine(priv)->cclass = &nve0_bsp_cclass;
+ nv_engine(priv)->sclass = nve0_bsp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_bsp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
@@ -36,11 +35,7 @@
#include "fuc/nva3.fuc.h"
struct nva3_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
{}
};
-static void
+void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_falcon *falcon = (void *)subdev;
struct nouveau_object *engctx;
- struct nva3_copy_priv *priv = (void *)subdev;
- u32 dispatch = nv_rd32(priv, 0x10401c);
- u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040) >> 16;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+ u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+ u32 addr = nv_ro32(falcon, 0x040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044);
+ u32 data = nv_ro32(falcon, 0x044);
int chid;
engctx = nouveau_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
+ nv_error(falcon, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004, 0x00000040);
+ nv_wo32(falcon, 0x004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004, stat);
+ nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+ nv_wo32(falcon, 0x004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_copy_priv *priv;
int ret;
- ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ nv_falcon(priv)->code.data = nva3_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+ nv_falcon(priv)->data.data = nva3_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
return 0;
}
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
- struct nva3_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188, i >> 6);
- nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x10410c, 0x00000000);
- nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
- struct nva3_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014, 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = nva3_copy_init,
- .fini = nva3_copy_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
#include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
#include <engine/fifo.h>
#include <engine/copy.h>
@@ -33,11 +32,7 @@
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
- .ctor = nvc0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
};
static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
* PCOPY engine/subdev functions
******************************************************************************/
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
- struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_object *engctx;
- int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)subdev;
- u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
- u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
- u32 stat = intr & disp & ~(disp >> 16);
- u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
- u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
- u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
- int chid;
-
- engctx = nouveau_engctx_get(engine, inst);
- chid = pfifo->chid(pfifo, engctx);
-
- if (stat & 0x00000040) {
- nv_error(priv, "DISPATCH_ERROR [");
- nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, (u64)inst << 12, subc, mthd, data);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
- stat &= ~0x00000040;
- }
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret;
- if (stat) {
- nv_error(priv, "unhandled intr 0x%08x\n", stat);
- nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
- }
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
- nouveau_engctx_put(engctx);
+ nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ return 0;
}
static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy1_cclass;
nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ nv_falcon(priv)->code.data = nvc0_pcopy_code;
+ nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+ nv_falcon(priv)->data.data = nvc0_pcopy_data;
+ nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_copy_init(&priv->base);
- if (ret)
- return ret;
-
- /* disable all interrupts */
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- /* upload ucode */
- nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
- nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
- nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
- nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
- nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
- int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
- struct nvc0_copy_priv *priv = (void *)object;
-
- nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
- nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
- return nouveau_copy_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nvc0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
+ .dtor = _nouveau_falcon_dtor,
.init = nvc0_copy_init,
- .fini = nvc0_copy_fini,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
#include <engine/copy.h>
struct nve0_copy_priv {
- struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
- struct nouveau_copy_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
* PCOPY context
******************************************************************************/
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_copy_chan *priv;
- int ret;
-
- ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
- .ctor = nve0_copy_context_ctor,
- .dtor = _nouveau_copy_context_dtor,
- .init = _nouveau_copy_context_init,
- .fini = _nouveau_copy_context_fini,
- .rd32 = _nouveau_copy_context_rd32,
- .wr32 = _nouveau_copy_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
};
static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE0", "copy0", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCE1", "copy1", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy0_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy1_ctor,
- .dtor = _nouveau_copy_dtor,
- .init = _nouveau_copy_init,
- .fini = _nouveau_copy_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
#include <engine/crypt.h>
struct nv84_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv84_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
struct nv84_crypt_priv *priv = (void *)object;
int ret;
- ret = nouveau_crypt_init(&priv->base);
+ ret = nouveau_engine_init(&priv->base);
if (ret)
return ret;
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
+ .dtor = _nouveau_engine_dtor,
.init = nv84_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
+#include <core/falcon.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
@@ -36,11 +37,7 @@
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
- struct nouveau_crypt_chan base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_crypt_chan *priv;
- int ret;
-
- ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
- 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
static struct nouveau_oclass
nv98_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_crypt_context_ctor,
- .dtor = _nouveau_crypt_context_dtor,
- .init = _nouveau_crypt_context_init,
- .fini = _nouveau_crypt_context_fini,
- .rd32 = _nouveau_crypt_context_rd32,
- .wr32 = _nouveau_crypt_context_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
- nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_crypt_priv *priv;
int ret;
- ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+ "PCRYPT", "crypt", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nv98_crypt_cclass;
nv_engine(priv)->sclass = nv98_crypt_sclass;
nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
- return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
- struct nv98_crypt_priv *priv = (void *)object;
- int ret, i;
-
- ret = nouveau_crypt_init(&priv->base);
- if (ret)
- return ret;
-
- /* wait for exit interrupt to signal */
- nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(priv, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(priv, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(priv, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
- /* start it running */
- nv_wr32(priv, 0x08710c, 0x00000000);
- nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+ nv_falcon(priv)->code.data = nv98_pcrypt_code;
+ nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+ nv_falcon(priv)->data.data = nv98_pcrypt_data;
+ nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
return 0;
}
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_crypt_ctor,
- .dtor = _nouveau_crypt_dtor,
- .init = nv98_crypt_init,
- .fini = _nouveau_crypt_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = _nouveau_falcon_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+ (data & NV50_DISP_DAC_PWR_VSYNC) |
+ (data & NV50_DISP_DAC_PWR_DATA) |
+ (data & NV50_DISP_DAC_PWR_STATE);
+ const u32 doff = (or * 0x800);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+ const u32 doff = (or * 0x800);
+ int load = -EINVAL;
+ nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+ udelay(9500);
+ nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+ load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+ nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+ u32 *data = args;
+ int ret;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_DAC_PWR:
+ ret = priv->dac.power(priv, or, data[0]);
+ break;
+ case NV50_DISP_DAC_LOAD:
+ ret = priv->dac.sense(priv, or, data[0]);
+ if (ret >= 0) {
+ data[0] = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x800);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+ const u32 soff = (or * 0x030);
+ int i;
+
+ if (data && data[0]) {
+ for (i = 0; i < size; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+ } else
+ if (data) {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+ } else {
+ nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+ nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+ nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+ nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+ nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+ nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 soff = (or * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+ nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+ nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+ nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+ nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+ /* Audio InfoFrame */
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+ nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+ nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+ nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+ /* ??? */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+ const u32 hoff = (head * 0x800);
+
+ if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ return 0;
+ }
+
+ /* AVI InfoFrame */
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+ nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+ nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+ nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+ nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+ nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+ nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c84ce8..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,20 +22,740 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nv50_disp_priv {
- struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_base *base = (void *)parent;
+ struct nv50_disp_chan *chan;
+ int ret;
+
+ if (base->chan & (1 << chid))
+ return -EBUSY;
+ base->chan |= (1 << chid);
+
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ (1ULL << NVDEV_ENGINE_DMAOBJ),
+ length, pobject);
+ chan = *pobject;
+ if (ret)
+ return ret;
+
+ chan->chid = chid;
+ return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+ struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+ base->chan &= ~(1 << chan->chid);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_chan *chan = (void *)object;
+ nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 chid = chan->chid;
+ u32 data = (chid << 28) | (addr << 10) | chid;
+ return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ int length, void **pobject)
+{
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+ dmac = *pobject;
+ if (ret)
+ return ret;
+
+ dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!dmac->pushdma)
+ return -ENOENT;
+
+ switch (nv_mclass(dmac->pushdma)) {
+ case 0x0002:
+ case 0x003d:
+ if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+ return -EINVAL;
+
+ switch (dmac->pushdma->target) {
+ case NV_MEM_TARGET_VRAM:
+ dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_dmac *dmac = (void *)object;
+ nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+ nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini timeout, 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+ /* attempt to unstick channel from some unknown state */
+ if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+ nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+ if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+ nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610204, mast->push);
+ nv_wr32(priv, 0x610208, 0x00010000);
+ nv_wr32(priv, 0x61020c, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610200, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+ .ctor = nv50_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_mast_init,
+ .fini = nv50_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+ .ctor = nv50_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 3 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+ .ctor = nv50_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nv50_disp_dmac_init,
+ .fini = nv50_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+ nv_error(pioc, "timeout0: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "timeout1: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610200 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+ .ctor = nv50_disp_oimm_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head > 1)
+ return -EINVAL;
+
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+ .ctor = nv50_disp_curs_ctor,
+ .dtor = nv50_disp_pioc_dtor,
+ .init = nv50_disp_pioc_init,
+ .fini = nv50_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar. NFI what the 0x614004 caps are for..
+ */
+ tmp = nv_rd32(priv, 0x614004);
+ nv_wr32(priv, 0x610184, tmp);
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+ nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+ }
+
+ /* ... EXT caps */
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+ nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x610024) & 0x00000100) {
+ nv_wr32(priv, 0x610024, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x61002c, 0x00000370);
+ nv_wr32(priv, 0x610028, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x610024, 0x00000000);
+ nv_wr32(priv, 0x610020, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
+ .init = nv50_disp_base_init,
+ .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+ { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- {},
+ { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nouveau_engctx *ectx;
+ int ret = -EBUSY;
+
+ /* no context needed for channel objects... */
+ if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ atomic_inc(&parent->refcount);
+ *pobject = parent;
+ return 0;
+ }
+
+ /* allocate display hardware to client */
+ mutex_lock(&nv_subdev(priv)->mutex);
+ if (list_empty(&nv_engine(priv)->contexts)) {
+ ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+ 0x10000, 0x10000,
+ NVOBJ_FLAG_HEAP, &ectx);
+ *pobject = nv_object(ectx);
+ }
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+ .handle = NV_ENGCTX(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_data_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
+ },
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+ u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
+
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
+ continue;
+
+ nv_wr32(priv, 0x610020, 0x00010000 << chid);
+ addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+ data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+ nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+ chid, addr & 0xffc, data, addr);
+ }
+}
+
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
@@ -80,30 +800,428 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 4;
+ }
+ }
+
+ if (!(ctrl & (1 << head)))
+ return false;
+ i--;
+
+ data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+ struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+ u32 ctrl = 0x00000000;
+ int i;
+
+ for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 4;
+ }
+ }
+
+ if (!(ctrl & (1 << head)))
+ return 0x0000;
+ i--;
+
+ data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+ if (!data)
+ return 0x0000;
+
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = outp,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000060) >> 5) - 1;
+ if (head >= 0) {
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 1);
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000010);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+ struct dcb_output *outp, u32 pclk)
+{
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+ const u32 symbol = 100000;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000;
+ u32 clksor = nv_rd32(priv, 0x614300 + soff);
+ int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+ int TU, VTUi, VTUf, VTUa;
+ u64 link_data_rate, link_ratio, unk;
+ u32 best_diff = 64 * symbol;
+ u32 link_nr, link_bw, bits, r;
+
+ /* calculate packed data rate for each lane */
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ if (clksor & 0x000c0000)
+ link_bw = 270000;
+ else
+ link_bw = 162000;
+
+ if ((ctrl & 0xf0000) == 0x60000) bits = 30;
+ else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+ else bits = 18;
+
+ link_data_rate = (pclk * bits / 8) / link_nr;
+
+ /* calculate ratio of packed data rate to link symbol rate */
+ link_ratio = link_data_rate * symbol;
+ r = do_div(link_ratio, link_bw);
+
+ for (TU = 64; TU >= 32; TU--) {
+ /* calculate average number of valid symbols in each TU */
+ u32 tu_valid = link_ratio * TU;
+ u32 calc, diff;
+
+ /* find a hw representation for the fraction.. */
+ VTUi = tu_valid / symbol;
+ calc = VTUi * symbol;
+ diff = tu_valid - calc;
+ if (diff) {
+ if (diff >= (symbol / 2)) {
+ VTUf = symbol / (symbol - diff);
+ if (symbol - (VTUf * diff))
+ VTUf++;
+
+ if (VTUf <= 15) {
+ VTUa = 1;
+ calc += symbol - (symbol / VTUf);
+ } else {
+ VTUa = 0;
+ VTUf = 1;
+ calc += symbol;
+ }
+ } else {
+ VTUa = 0;
+ VTUf = min((int)(symbol / diff), 15);
+ calc += symbol / VTUf;
+ }
+
+ diff = calc - tu_valid;
+ } else {
+ /* no remainder, but the hw doesn't like the fractional
+ * part to be zero. decrement the integer part and
+ * have the fraction add a whole symbol back
+ */
+ VTUa = 0;
+ VTUf = 1;
+ VTUi--;
+ }
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ bestTU = TU;
+ bestVTUa = VTUa;
+ bestVTUf = VTUf;
+ bestVTUi = VTUi;
+ if (diff == 0)
+ break;
+ }
+ }
+
+ if (!bestTU) {
+ nv_error(priv, "unable to find suitable dp config\n");
+ return;
+ }
+
+ /* XXX close to vbios numbers, but not right */
+ unk = (symbol - link_ratio) * bestTU;
+ unk *= link_ratio;
+ r = do_div(unk, symbol);
+ r = do_div(unk, symbol);
+ unk += 6;
+
+ nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+ nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+ bestVTUf << 16 |
+ bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+ struct dcb_output outp;
+ u32 addr, mask, data;
+ int head;
+
+ /* finish detaching encoder? */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0)
+ exec_script(priv, head, 2);
+
+ /* check whether a vpll change is required */
+ head = ffs((super & 0x00000600) >> 9) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+ }
+
+ /* (re)attach the relevant OR to the head */
+ head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+ if (conf) {
+ if (outp.type == DCB_OUTPUT_ANALOG) {
+ addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0xffffffff;
+ data = 0x00000000;
+ } else {
+ if (outp.type == DCB_OUTPUT_DP)
+ nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+ addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+ mask = 0x00000707;
+ data = (conf & 0x0100) ? 0x0101 : 0x0000;
+ }
+
+ nv_mask(priv, addr, mask, data);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000020);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const int link = !(outp->sorconf.link & 1);
+ const int or = ffs(outp->or) - 1;
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ u8 ver, hdr;
+
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
+static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+ int head = ffs((super & 0x00000180) >> 7) - 1;
+ if (head >= 0) {
+ struct dcb_output outp;
+ u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+ if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+ if (outp.type == DCB_OUTPUT_TMDS)
+ nv50_disp_intr_unk40_tmds(priv, &outp);
+ }
+ }
+
+ nv_wr32(priv, 0x610024, 0x00000040);
+ nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+ u32 super = nv_rd32(priv, 0x610030);
+
+ nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
+ if (intr1 & 0x00000010)
+ nv50_disp_intr_unk10(priv, super);
+ if (intr1 & 0x00000020)
+ nv50_disp_intr_unk20(priv, super);
+ if (intr1 & 0x00000040)
+ nv50_disp_intr_unk40(priv, super);
+}
+
+void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
struct nv50_disp_priv *priv = (void *)subdev;
- u32 stat1 = nv_rd32(priv, 0x610024);
+ u32 intr0 = nv_rd32(priv, 0x610020);
+ u32 intr1 = nv_rd32(priv, 0x610024);
- if (stat1 & 0x00000004) {
+ if (intr0 & 0x001f0000) {
+ nv50_disp_intr_error(priv);
+ intr0 &= ~0x001f0000;
+ }
+
+ if (intr1 & 0x00000004) {
nv50_disp_intr_vblank(priv, 0);
nv_wr32(priv, 0x610024, 0x00000004);
- stat1 &= ~0x00000004;
+ intr1 &= ~0x00000004;
}
- if (stat1 & 0x00000008) {
+ if (intr1 & 0x00000008) {
nv50_disp_intr_vblank(priv, 1);
nv_wr32(priv, 0x610024, 0x00000008);
- stat1 &= ~0x00000008;
+ intr1 &= ~0x00000008;
}
+ if (intr1 & 0x00000070) {
+ nv50_disp_intr_super(priv, intr1);
+ intr1 &= ~0x00000070;
+ }
}
static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv50_disp_priv *priv;
int ret;
@@ -114,8 +1232,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_engine(priv)->sclass = nv50_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv50_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+ struct nouveau_oclass *sclass;
+ struct {
+ int nr;
+ } head;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+ int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ } dac;
+ struct {
+ int nr;
+ int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+ int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+ int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+ u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+ int head, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+ int lane, u16 type, u16 mask, u32 data,
+ struct dcb_output *);
+ u32 lvdsconf;
+ } sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+ u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+ struct dcb_output *);
+
+struct nv50_disp_base {
+ struct nouveau_parent base;
+ struct nouveau_ramht *ramht;
+ u32 chan;
+};
+
+struct nv50_disp_chan {
+ struct nouveau_namedb base;
+ int chid;
+};
+
+int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a) \
+ nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b) \
+ nouveau_namedb_fini(&(a)->base, (b))
+
+int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+ struct nv50_disp_chan base;
+ struct nouveau_dmaobj *pushdma;
+ u32 push;
+};
+
+struct nv50_disp_pioc {
+ struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+ { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+ { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv84_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv84_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x82),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+ { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+ { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv94_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nv94_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x88),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv94_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+ { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+ { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ {}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva0_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 2;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hdmi = nv84_hdmi_ctrl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x83),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+ { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+ { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+ { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+ { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+ { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ {}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+ { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
+ { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
+ { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+ { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
+ { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
+ {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+ { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nva3_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+ priv->sclass = nva3_disp_sclass;
+ priv->head.nr = 2;
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nva3_hda_eld;
+ priv->sor.hdmi = nva3_hdmi_ctrl;
+ priv->sor.dp_train = nv94_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x85),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
* Authors: Ben Skeggs
*/
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
#include <engine/software.h>
#include <engine/disp.h>
-struct nvd0_disp_priv {
- struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 name)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ struct nv50_disp_chan *chan = (void *)parent;
+ u32 addr = nv_gpuobj(object)->node->offset;
+ u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+ return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_disp_base *base = (void *)parent->parent;
+ nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&dmac->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+ nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+ nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+ nv_error(dmac, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *dmac = (void *)object;
+ int chid = dmac->base.chid;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+ nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+ nv_error(dmac, "fini: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_mast_class *args = data;
+ struct nv50_disp_dmac *mast;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 0, sizeof(*mast), (void **)&mast);
+ *pobject = nv_object(mast);
+ if (ret)
+ return ret;
+
+ nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+ int ret;
+
+ ret = nv50_disp_chan_init(&mast->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nv_wr32(priv, 0x610494, mast->push);
+ nv_wr32(priv, 0x610498, 0x00010000);
+ nv_wr32(priv, 0x61049c, 0x00000001);
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x640000, 0x00000000);
+ nv_wr32(priv, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+ nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_dmac *mast = (void *)object;
+
+ /* deactivate channel */
+ nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+ nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+ if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+ nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+ return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+ .ctor = nvd0_disp_mast_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_mast_init,
+ .fini = nvd0_disp_mast_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_sync_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 1 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+ .ctor = nvd0_disp_sync_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_ovly_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_dmac *dmac;
+ int ret;
+
+ if (size < sizeof(*data) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ 5 + args->head, sizeof(*dmac),
+ (void **)&dmac);
+ *pobject = nv_object(dmac);
+ if (ret)
+ return ret;
+
+ nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+ nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+ .ctor = nvd0_disp_ovly_ctor,
+ .dtor = nv50_disp_dmac_dtor,
+ .init = nvd0_disp_dmac_init,
+ .fini = nvd0_disp_dmac_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int chid,
+ int length, void **pobject)
+{
+ return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_pioc *pioc = (void *)object;
+ nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+ int ret;
+
+ ret = nv50_disp_chan_init(&pioc->base);
+ if (ret)
+ return ret;
+
+ /* enable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* activate channel */
+ nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+ nv_error(pioc, "init: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_pioc *pioc = (void *)object;
+ int chid = pioc->base.chid;
+
+ nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+ nv_error(pioc, "timeout: 0x%08x\n",
+ nv_rd32(priv, 0x610490 + (chid * 0x10)));
+ if (suspend)
+ return -EBUSY;
+ }
+
+ /* disable error reporting */
+ nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+ nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+ return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_oimm_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+ .ctor = nvd0_disp_oimm_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_display_curs_class *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_pioc *pioc;
+ int ret;
+
+ if (size < sizeof(*args) || args->head >= priv->head.nr)
+ return -EINVAL;
+
+ ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+ sizeof(*pioc), (void **)&pioc);
+ *pobject = nv_object(pioc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+ .ctor = nvd0_disp_curs_ctor,
+ .dtor = nvd0_disp_pioc_dtor,
+ .init = nvd0_disp_pioc_init,
+ .fini = nvd0_disp_pioc_fini,
+ .rd32 = nv50_disp_chan_rd32,
+ .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv = (void *)engine;
+ struct nv50_disp_base *base;
+ int ret;
+
+ ret = nouveau_parent_create(parent, engine, oclass, 0,
+ priv->sclass, 0, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
+
+ return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+ struct nv50_disp_base *base = (void *)object;
+ nouveau_ramht_ref(NULL, &base->ramht);
+ nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+ int ret, i;
+ u32 tmp;
+
+ ret = nouveau_parent_init(&base->base);
+ if (ret)
+ return ret;
+
+ /* The below segments of code copying values from one register to
+ * another appear to inform EVO of the display capabilities or
+ * something similar.
+ */
+
+ /* ... CRTC caps */
+ for (i = 0; i < priv->head.nr; i++) {
+ tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+ nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+ nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+ tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+ nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+ }
+
+ /* ... DAC caps */
+ for (i = 0; i < priv->dac.nr; i++) {
+ tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+ nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+ }
+
+ /* ... SOR caps */
+ for (i = 0; i < priv->sor.nr; i++) {
+ tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+ nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+ }
+
+ /* steal display away from vbios, or something like that */
+ if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+ nv_wr32(priv, 0x6100ac, 0x00000100);
+ nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+ if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+ nv_error(priv, "timeout acquiring display\n");
+ return -EBUSY;
+ }
+ }
+
+ /* point at display engine memory area (hash table, objects) */
+ nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+ /* enable supervisor interrupts, disable everything else */
+ nv_wr32(priv, 0x610090, 0x00000000);
+ nv_wr32(priv, 0x6100a0, 0x00000000);
+ nv_wr32(priv, 0x6100b0, 0x00000307);
+
+ return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nv50_disp_base *base = (void *)object;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x6100b0, 0x00000000);
+
+ return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+ .ctor = nvd0_disp_base_ctor,
+ .dtor = nvd0_disp_base_dtor,
+ .init = nvd0_disp_base_init,
+ .fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+ { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- {},
+ { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
};
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+ struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ u16 mask, type, data;
+
+ if (outp < 4) {
+ type = DCB_OUTPUT_ANALOG;
+ mask = 0;
+ } else {
+ outp -= 4;
+ switch (ctrl & 0x00000f00) {
+ case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+ case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+ case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+ case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+ case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+ case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+ default:
+ nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+ return 0x0000;
+ }
+ dcb->sorconf.link = mask;
+ }
+
+ mask = 0x00c0 & (mask << 6);
+ mask |= 0x0001 << outp;
+ mask |= 0x0100 << head;
+
+ data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+ if (!data)
+ return 0x0000;
+
+ return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[id],
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ return nvbios_exec(&init) == 0;
+ }
+
+ return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+ u32 ctrl, int id, u32 pclk)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_outp info1;
+ struct nvbios_ocfg info2;
+ struct dcb_output dcb;
+ u8 ver, hdr, cnt, len;
+ u16 data, conf;
+
+ data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+ if (data == 0x0000)
+ return false;
+
+ switch (dcb.type) {
+ case DCB_OUTPUT_TMDS:
+ conf = (ctrl & 0x00000f00) >> 8;
+ if (pclk >= 165000)
+ conf |= 0x0100;
+ break;
+ case DCB_OUTPUT_LVDS:
+ conf = priv->sor.lvdsconf;
+ break;
+ case DCB_OUTPUT_DP:
+ conf = (ctrl & 0x00000f00) >> 8;
+ break;
+ case DCB_OUTPUT_ANALOG:
+ default:
+ conf = 0x00ff;
+ break;
+ }
+
+ data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+ if (data) {
+ data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+ if (data) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = data,
+ .outp = &dcb,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (nvbios_exec(&init))
+ return 0x0000;
+ return conf;
+ }
+ }
+
+ return 0x0000;
+}
+
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 1);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+ const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
+ const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+ const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+ const u32 hoff = (head * 0x800);
+ const u32 soff = ( or * 0x800);
+ const u32 loff = (link * 0x080) + soff;
+ const u32 symbol = 100000;
+ const u32 TU = 64;
+ u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+ u32 clksor = nv_rd32(priv, 0x612300 + soff);
+ u32 datarate, link_nr, link_bw, bits;
+ u64 ratio, value;
+
+ if ((conf & 0x3c0) == 0x180) bits = 30;
+ else if ((conf & 0x3c0) == 0x140) bits = 24;
+ else bits = 18;
+ datarate = (pclk * bits) / 8;
+
+ if (dpctrl > 0x00030000) link_nr = 4;
+ else if (dpctrl > 0x00010000) link_nr = 2;
+ else link_nr = 1;
+
+ link_bw = (clksor & 0x007c0000) >> 18;
+ link_bw *= 27000;
+
+ ratio = datarate;
+ ratio *= symbol;
+ do_div(ratio, link_nr * link_bw);
+
+ value = (symbol - ratio) * TU;
+ value *= ratio;
+ do_div(value, symbol);
+ do_div(value, symbol);
+
+ value += 5;
+ value |= 0x08000000;
+
+ nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ u32 pclk;
+ int i;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+ if (mcc & (1 << head))
+ exec_script(priv, head, i, mcc, 2);
+ }
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+ nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+ if (pclk && (mask & 0x00010000)) {
+ struct nouveau_clock *clk = nouveau_clock(priv);
+ clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+ }
+
+ nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+ if (mcp & (1 << head)) {
+ if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+ u32 addr, mask, data = 0x00000000;
+ if (i < 4) {
+ addr = 0x612280 + ((i - 0) * 0x800);
+ mask = 0xffffffff;
+ } else {
+ switch (mcp & 0x00000f00) {
+ case 0x00000800:
+ case 0x00000900:
+ nvd0_display_unk2_calc_tu(priv, head, i - 4);
+ break;
+ default:
+ break;
+ }
+
+ addr = 0x612300 + ((i - 4) * 0x800);
+ mask = 0x00000707;
+ if (cfg & 0x00000100)
+ data = 0x00000101;
+ }
+ nv_mask(priv, addr, mask, data);
+ }
+ break;
+ }
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+ int pclk, i;
+
+ pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+ for (i = 0; mask && i < 8; i++) {
+ u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+ if (mcp & (1 << head))
+ exec_clkcmp(priv, head, i, mcp, 1, pclk);
+ }
+
+ nv_wr32(priv, 0x6101d4, 0x00000000);
+ nv_wr32(priv, 0x6109d4, 0x00000000);
+ nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
disp->vblank.notify(disp->vblank.data, crtc);
}
-static void
+void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
- struct nvd0_disp_priv *priv = (void *)subdev;
+ struct nv50_disp_priv *priv = (void *)subdev;
u32 intr = nv_rd32(priv, 0x610088);
int i;
- for (i = 0; i < 4; i++) {
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x61008c);
+ nv_wr32(priv, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
+ if (intr & 0x00000002) {
+ u32 stat = nv_rd32(priv, 0x61009c);
+ int chid = ffs(stat) - 1;
+ if (chid >= 0) {
+ u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+ u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+ u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+ nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+ "0x%08x 0x%08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nv_wr32(priv, 0x61009c, (1 << chid));
+ nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+ }
+
+ intr &= ~0x00000002;
+ }
+
+ if (intr & 0x00100000) {
+ u32 stat = nv_rd32(priv, 0x6100ac);
+ u32 mask = 0, crtc = ~0;
+
+ while (!mask && ++crtc < priv->head.nr)
+ mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+ if (stat & 0x00000001) {
+ nv_wr32(priv, 0x6100ac, 0x00000001);
+ nvd0_display_unk1_handler(priv, crtc, mask);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ nv_wr32(priv, 0x6100ac, 0x00000002);
+ nvd0_display_unk2_handler(priv, crtc, mask);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000004) {
+ nv_wr32(priv, 0x6100ac, 0x00000004);
+ nvd0_display_unk4_handler(priv, crtc, mask);
+ stat &= ~0x00000004;
+ }
+
+ if (stat) {
+ nv_info(priv, "unknown intr24 0x%08x\n", stat);
+ nv_wr32(priv, 0x6100ac, stat);
+ }
+
+ intr &= ~0x00100000;
+ }
+
+ for (i = 0; i < priv->head.nr; i++) {
u32 mask = 0x01000000 << i;
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvd0_disp_priv *priv;
+ struct nv50_disp_priv *priv;
int ret;
ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nvd0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
INIT_LIST_HEAD(&priv->base.vblank.list);
spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass
nvd0_disp_oclass = {
- .handle = NV_ENGINE(DISP, 0xd0),
+ .handle = NV_ENGINE(DISP, 0x90),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvd0_disp_ctor,
.dtor = _nouveau_disp_dtor,
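
Aside (not part of the patch): the nvd0_display_unk2_calc_tu() hunk above computes a DisplayPort transfer-unit fill value from the pixel clock, bits per pixel, lane count and link rate. The arithmetic can be exercised on its own with a standalone sketch like the one below; all names, units and example inputs are assumptions, only the arithmetic mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

static uint32_t
calc_tu_fill(uint32_t pclk_khz, uint32_t bits_per_pixel,
             uint32_t link_nr, uint32_t link_bw)
{
	const uint64_t symbol = 100000;	/* scaling constant used in the hunk */
	const uint64_t TU = 64;		/* transfer unit size in symbols */
	uint64_t datarate, ratio, value;

	/* payload data rate, as in the hunk: pclk * bits / 8 */
	datarate = ((uint64_t)pclk_khz * bits_per_pixel) / 8;

	/* fraction of the link occupied by pixel data, scaled by 'symbol' */
	ratio = (datarate * symbol) / ((uint64_t)link_nr * link_bw);

	/* fill value written to 0x616610 + hoff in the hunk */
	value = (symbol - ratio) * TU;
	value *= ratio;
	value /= symbol;
	value /= symbol;
	value += 5;
	value |= 0x08000000;

	return (uint32_t)value;
}

int main(void)
{
	/* example inputs: ~148.5 MHz pixel clock, 24bpp, 4 lanes;
	 * 270000 = 10 * 27000, mirroring the link_bw scaling in the hunk */
	printf("0x%08x\n", calc_tu_fill(148500, 24, 4, 270000));
	return 0;
}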
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+ { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+ { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+ { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+ { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+ { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+ { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+ {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nve0_disp_base_oclass;
+ nv_engine(priv)->cclass = &nv50_disp_cclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+ priv->sclass = nve0_disp_sclass;
+ priv->head.nr = nv_rd32(priv, 0x022448);
+ priv->dac.nr = 3;
+ priv->sor.nr = 4;
+ priv->dac.power = nv50_dac_power;
+ priv->dac.sense = nv50_dac_sense;
+ priv->sor.power = nv50_sor_power;
+ priv->sor.hda_eld = nvd0_hda_eld;
+ priv->sor.hdmi = nvd0_hdmi_ctrl;
+ priv->sor.dp_train = nvd0_sor_dp_train;
+ priv->sor.dp_train_init = nv94_sor_dp_train_init;
+ priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+ priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+ priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x91),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+ const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+ const u32 soff = (or * 0x800);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+ nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+ nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+ return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+ const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+ const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+ const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
+ const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+ struct dcb_output outp;
+ u8 ver, hdr;
+ u32 data;
+ int ret = -EINVAL;
+
+ if (size < sizeof(u32))
+ return -EINVAL;
+ data = *(u32 *)args;
+
+ if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+ return -ENODEV;
+
+ switch (mthd & ~0x3f) {
+ case NV50_DISP_SOR_PWR:
+ ret = priv->sor.power(priv, or, data);
+ break;
+ case NVA3_DISP_SOR_HDA_ELD:
+ ret = priv->sor.hda_eld(priv, or, args, size);
+ break;
+ case NV84_DISP_SOR_HDMI_PWR:
+ ret = priv->sor.hdmi(priv, head, or, data);
+ break;
+ case NV50_DISP_SOR_LVDS_SCRIPT:
+ priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+ ret = 0;
+ break;
+ case NV94_DISP_SOR_DP_TRAIN:
+ switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+ case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+ ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+ ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+ ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+ break;
+ default:
+ break;
+ }
+ break;
+ case NV94_DISP_SOR_DP_LNKCTL:
+ ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+ break;
+ case NV94_DISP_SOR_DP_DRVCTL(0):
+ case NV94_DISP_SOR_DP_DRVCTL(1):
+ case NV94_DISP_SOR_DP_DRVCTL(2):
+ case NV94_DISP_SOR_DP_DRVCTL(3):
+ ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+ type, mask, data, &outp);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ return ret;
+}
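
Aside (not part of the patch): nv50_sor_mthd() above packs the selected head, link and OR into a single DCB match mask. A standalone sketch of just that packing; the function name and example values are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or), as in the hunk:
 * one bit per OR in bits 0-5, per link in bits 6-7, per head from bit 8 up */
static uint16_t sor_dcb_mask(unsigned head, unsigned link, unsigned or)
{
	return (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
}

int main(void)
{
	/* e.g. head 1, link 0, OR 2 */
	printf("0x%04x\n", sor_dcb_mask(1, 0, 2));	/* prints 0x0244 */
	return 0;
}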
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+ static const u8 nv94[] = { 16, 8, 0, 24 };
+ if (nv_device(priv)->chipset == 0xaf)
+ return nvaf[lane];
+ return nv94[lane];
+}
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+ init.offset = info.script[2];
+ else
+ init.offset = info.script[3];
+ nvbios_exec(&init);
+
+ init.offset = info.script[0];
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ struct nvbios_dpout info;
+ u8 ver, hdr, cnt, len;
+ u16 outp;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[1],
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+
+ return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+ return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ /* -> 10 kHz units */
+ link_bw *= 2700;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (link_bw < nv_ro16(bios, info.lnkcmp))
+ info.lnkcmp += 4;
+ init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+ nvbios_exec(&init);
+ }
+
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+ if (link_bw > 16200)
+ clksor |= 0x00040000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ return 0;
+}
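
Aside (not part of the patch): the lane maps above give a per-lane byte offset into the SOR DP registers, and nv94_sor_dp_lnkctl() derives its lane-enable mask from them with lane |= 1 << (map >> 3). A standalone sketch of that derivation; the function name and printed expectations are illustrative only.

#include <stdio.h>
#include <stdint.h>

static const uint8_t nv94_lane_map[] = { 16, 8, 0, 24 };	/* bit offset per lane, from the hunk */

static uint32_t lane_enable_mask(unsigned link_nr)
{
	uint32_t mask = 0;
	unsigned i;

	for (i = 0; i < link_nr && i < 4; i++)
		mask |= 1u << (nv94_lane_map[i] >> 3);	/* bit offset -> byte index */
	return mask;
}

int main(void)
{
	printf("1 lane:  0x%x\n", lane_enable_mask(1));	/* 0x4 */
	printf("2 lanes: 0x%x\n", lane_enable_mask(2));	/* 0x6 */
	printf("4 lanes: 0x%x\n", lane_enable_mask(4));	/* 0xf */
	return 0;
}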
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+ static const u8 nvd0[] = { 16, 8, 0, 24 };
+ return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+ u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+ nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+ return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u32 soff = (or * 0x800);
+ const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+ const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+ u32 outp, lane = 0;
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout info;
+ int i;
+
+ outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+ if (outp && info.lnkcmp) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = 0x0000,
+ .outp = dcbo,
+ .crtc = head,
+ .execute = 1,
+ };
+
+ while (nv_ro08(bios, info.lnkcmp) < link_bw)
+ info.lnkcmp += 3;
+ init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+ nvbios_exec(&init);
+ }
+
+ clksor |= link_bw << 18;
+ dpctrl |= ((1 << link_nr) - 1) << 16;
+ if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+ dpctrl |= 0x00004000;
+
+ for (i = 0; i < link_nr; i++)
+ lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+ nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+ nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+ nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+ return 0;
+}
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+ u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+ struct nouveau_bios *bios = nouveau_bios(priv);
+ const u32 loff = (or * 0x800) + (link * 0x80);
+ const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+ const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+ u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+ u8 ver, hdr, cnt, len;
+ struct nvbios_dpout outp;
+ struct nvbios_dpcfg ocfg;
+
+ addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+ if (!addr)
+ return -ENODEV;
+
+ addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+ if (!addr)
+ return -EINVAL;
+
+ nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+ nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+ nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+ nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+ return 0;
+}
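
Aside (not part of the patch): nvd0_sor_dp_train() above broadcasts the 4-bit training pattern to every lane by multiplying it with 0x01010101 before masking with 0x0f0f0f0f. A standalone sketch of that trick, illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t patt;

	for (patt = 0; patt <= 2; patt++) {
		/* same pattern value replicated into one byte field per lane */
		uint32_t val = 0x01010101u * patt;
		printf("pattern %u -> 0x%08x (masked 0x%08x)\n",
		       patt, val, val & 0x0f0f0f0f);
	}
	return 0;
}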
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
#include <subdev/fb.h>
#include <engine/dmaobj.h>
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass,
- void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nouveau_dmaobj *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
struct nv_dma_class *args = data;
- struct nouveau_dmaobj *object;
int ret;
if (size < sizeof(*args))
return -EINVAL;
- ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
- object = *pobject;
+ ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+ *pobject = nv_object(dmaobj);
if (ret)
return ret;
switch (args->flags & NV_DMA_TARGET_MASK) {
case NV_DMA_TARGET_VM:
- object->target = NV_MEM_TARGET_VM;
+ dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_TARGET_VRAM:
- object->target = NV_MEM_TARGET_VRAM;
+ dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_TARGET_PCI:
- object->target = NV_MEM_TARGET_PCI;
+ dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_TARGET_PCI_US:
case NV_DMA_TARGET_AGP:
- object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
switch (args->flags & NV_DMA_ACCESS_MASK) {
case NV_DMA_ACCESS_VM:
- object->access = NV_MEM_ACCESS_VM;
+ dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_ACCESS_RD:
- object->access = NV_MEM_ACCESS_RO;
+ dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_ACCESS_WR:
- object->access = NV_MEM_ACCESS_WO;
+ dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_ACCESS_RDWR:
- object->access = NV_MEM_ACCESS_RW;
+ dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- object->start = args->start;
- object->limit = args->limit;
- return 0;
+ dmaobj->start = args->start;
+ dmaobj->limit = args->limit;
+ dmaobj->conf0 = args->conf0;
+
+ switch (nv_mclass(parent)) {
+ case NV_DEVICE_CLASS:
+ /* delayed, or no, binding */
+ break;
+ default:
+ ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+ if (ret == 0) {
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ }
+ break;
+ }
+
+ return ret;
}
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+ .ctor = nouveau_dmaobj_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+ { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+ {}
+};
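
The new common constructor decodes the target/access flags once and, unless the parent is NV_DEVICE_CLASS (delayed binding), immediately asks the engine's bind() hook for a gpuobj and hands that back to the caller in place of the software object. A minimal sketch of that construct-then-swap flow, with hypothetical stand-in types rather than the real nouveau object model:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's object types, only to show the
 * "construct, then swap in the bound object" flow used by the ctor above. */
struct swobj { int target; };
struct hwobj { unsigned words[6]; };

static int bind(const struct swobj *sw, struct hwobj **out)
{
	*out = calloc(1, sizeof(**out));
	if (!*out)
		return -1;
	(*out)->words[0] = (unsigned)sw->target;	/* stand-in for packed flags */
	return 0;
}

int main(void)
{
	struct swobj *sw = calloc(1, sizeof(*sw));
	struct hwobj *hw = NULL;
	int parent_is_device = 0;	/* NV_DEVICE_CLASS => delayed binding */

	if (sw && !parent_is_device && bind(sw, &hw) == 0) {
		free(sw);		/* driver drops its software reference... */
		sw = NULL;		/* ...and the caller receives the gpuobj */
	}
	printf("caller receives the %s object\n", hw ? "bound hardware" : "software");
	free(sw);
	free(hw);
	return 0;
}
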
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv04_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
u32 length = dmaobj->limit - dmaobj->start;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV03_CHANNEL_DMA_CLASS:
+ case NV10_CHANNEL_DMA_CLASS:
+ case NV17_CHANNEL_DMA_CLASS:
+ case NV40_CHANNEL_DMA_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
if (dmaobj->target == NV_MEM_TARGET_VM) {
if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv04_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
- .ctor = nv04_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
- { 0x0002, &nv04_dmaobj_ofuncs },
- { 0x0003, &nv04_dmaobj_ofuncs },
- { 0x003d, &nv04_dmaobj_ofuncs },
- {}
-};
-
-static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv04_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv04_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nv50_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
struct nouveau_object *parent,
struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags = nv_mclass(dmaobj);
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NV50_CHANNEL_DMA_CLASS:
+ case NV84_CHANNEL_DMA_CLASS:
+ case NV50_CHANNEL_IND_CLASS:
+ case NV84_CHANNEL_IND_CLASS:
+ case NV50_DISP_MAST_CLASS:
+ case NV84_DISP_MAST_CLASS:
+ case NV94_DISP_MAST_CLASS:
+ case NVA0_DISP_MAST_CLASS:
+ case NVA3_DISP_MAST_CLASS:
+ case NV50_DISP_SYNC_CLASS:
+ case NV84_DISP_SYNC_CLASS:
+ case NV94_DISP_SYNC_CLASS:
+ case NVA0_DISP_SYNC_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NV50_DISP_OVLY_CLASS:
+ case NV84_DISP_OVLY_CLASS:
+ case NV94_DISP_OVLY_CLASS:
+ case NVA0_DISP_OVLY_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+ dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+ dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
switch (dmaobj->target) {
case NV_MEM_TARGET_VM:
- flags |= 0x00000000;
- flags |= 0x60000000; /* COMPRESSION_USEVM */
- flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags |= 0x00010000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags |= 0x00020000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags |= 0x00030000;
- flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ flags0 |= 0x00030000;
break;
default:
return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags |= 0x00040000;
+ flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags |= 0x00080000;
+ flags0 |= 0x00080000;
break;
}
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x00, flags0);
nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
upper_32_bits(dmaobj->start));
nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
}
return ret;
}
static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
- struct nv50_dmaobj_priv *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- ret = nouveau_dmaobj_create(parent, engine, oclass,
- data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- break;
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
- .ctor = nv50_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
- { 0x0002, &nv50_dmaobj_ofuncs },
- { 0x0003, &nv50_dmaobj_ofuncs },
- { 0x003d, &nv50_dmaobj_ofuncs },
- {}
-};
-
-static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nv50_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
priv->base.bind = nv50_dmaobj_bind;
return 0;
}
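
For reference, the six 32-bit words written by nv50_dmaobj_bind() above hold the packed flags, the low halves of limit and start, and both upper halves squeezed into one word (the limit bits in the top byte). A small stand-alone version of that packing, with arbitrary example values for flags0/flags5:

#include <stdint.h>
#include <stdio.h>

/* Layout of the six words written by nv50_dmaobj_bind() above
 * (the flags0/flags5 values here are arbitrary examples). */
static void pack_dmaobj(uint32_t w[6], uint64_t start, uint64_t limit,
			uint32_t flags0, uint32_t flags5)
{
	w[0] = flags0;
	w[1] = (uint32_t)limit;			/* lower_32_bits(limit) */
	w[2] = (uint32_t)start;			/* lower_32_bits(start) */
	w[3] = (uint32_t)(limit >> 32) << 24 |	/* upper bits share one word */
	       (uint32_t)(start >> 32);
	w[4] = 0x00000000;
	w[5] = flags5;
}

int main(void)
{
	uint32_t w[6];
	int i;

	pack_dmaobj(w, 0x0000000100000000ull, 0x00000001ffffffffull,
		    0x003d0000, 0x00000000);
	for (i = 0; i < 6; i++)
		printf("0x%02x: 0x%08x\n", i * 4, w[i]);
	return 0;
}
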
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/device.h>
#include <core/gpuobj.h>
+#include <core/class.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
struct nouveau_dmaeng base;
};
-struct nvc0_dmaobj_priv {
- struct nouveau_dmaobj base;
-};
-
static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
{
- struct nvc0_dmaobj_priv *dmaobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags5 = 0x00000000;
int ret;
- ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
- *pobject = nv_object(dmaobj);
- if (ret)
- return ret;
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVA3_DISP_MAST_CLASS:
+ case NVA3_DISP_SYNC_CLASS:
+ case NVA3_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ } else {
+ dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
+ dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= 0x00020000;
+ }
+ }
- if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+ flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+ flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags0 |= 0x00000000;
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
return -EINVAL;
+ }
- return 0;
-}
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags0 |= 0x00080000;
+ break;
+ }
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
- .ctor = nvc0_dmaobj_ctor,
- .dtor = _nouveau_dmaobj_dtor,
- .init = _nouveau_dmaobj_init,
- .fini = _nouveau_dmaobj_fini,
-};
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, flags5);
+ }
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
- { 0x0002, &nvc0_dmaobj_ofuncs },
- { 0x0003, &nvc0_dmaobj_ofuncs },
- { 0x003d, &nvc0_dmaobj_ofuncs },
- {}
-};
+ return ret;
+}
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.base.sclass = nvc0_dmaobj_sclass;
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvc0_dmaobj_bind;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags0 = 0x00000000;
+ int ret;
+
+ if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+ switch (nv_mclass(parent->parent)) {
+ case NVD0_DISP_MAST_CLASS:
+ case NVD0_DISP_SYNC_CLASS:
+ case NVD0_DISP_OVLY_CLASS:
+ case NVE0_DISP_MAST_CLASS:
+ case NVE0_DISP_SYNC_CLASS:
+ case NVE0_DISP_OVLY_CLASS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return 0;
+
+ if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+ } else {
+ dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+ dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+ }
+ }
+
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+ flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00000009;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0);
+ nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+ nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+ priv->base.bind = nvd0_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
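
Note that the new nvd0 layout stores start and limit right-shifted by 8, so the object only describes 256-byte-granular addresses. A quick illustration of that round trip (the addresses are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x0000000012345600ull;	/* must be 256-byte aligned */
	uint64_t limit = 0x00000000123457ffull;

	/* nvd0_dmaobj_bind() above writes start >> 8 and limit >> 8, so only
	 * 256-byte-granular addresses survive the round trip. */
	printf("start word: 0x%08x\n", (uint32_t)(start >> 8));
	printf("limit word: 0x%08x\n", (uint32_t)(limit >> 8));
	printf("round trip: 0x%016llx\n",
	       (unsigned long long)((uint64_t)(uint32_t)(start >> 8) << 8));
	return 0;
}
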
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
#include <core/object.h>
#include <core/handle.h>
+#include <core/class.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int bar, u32 addr, u32 size, u32 pushbuf,
- u32 engmask, int len, void **ptr)
+ u64 engmask, int len, void **ptr)
{
struct nouveau_device *device = nv_device(engine);
struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case 0x0002:
- case 0x003d:
+ case NV_DMA_FROM_MEMORY_CLASS:
+ case NV_DMA_IN_MEMORY_CLASS:
break;
default:
return -EINVAL;
}
- if (dmaeng->bind) {
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
- if (ret)
- return ret;
- }
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
/* find a free fifo channel */
spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
}
u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
return ioread32_native(chan->user + addr);
}
void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
iowrite32_native(data, chan->user + addr);
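
The engmask parameter widens from u32 to u64 here, and the channel constructors below switch every (1 << NVDEV_ENGINE_*) to 1ULL, presumably because the engine/subdev indices no longer all fit below bit 32. The snippet below only demonstrates why a 32-bit mask silently drops such a bit; the index 33 is made up for the demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned engine = 33;			/* hypothetical index above bit 31 */
	uint64_t mask64 = 1ULL << engine;
	uint32_t mask32 = (uint32_t)mask64;	/* what a u32 engmask would keep */

	printf("64-bit mask: 0x%016llx\n", (unsigned long long)mask64);
	printf("32-bit mask: 0x%08x (engine bit lost)\n", mask32);
	return 0;
}
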
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
- nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ nv_error(priv, "CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
u32 ib_get = nv_rd32(priv, 0x003334);
u32 ib_put = nv_rd32(priv, 0x003330);
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x003334, ib_put);
}
} else {
- nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+ nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
if (device->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
- nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status) {
- nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_error(priv, "still angry after %d spins, halt\n", cnt);
nv_wr32(priv, 0x002140, 0);
nv_wr32(priv, 0x000140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
&chan);
*pobject = nv_object(chan);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x1000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
/* HW bug workaround:
*
* PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
ret = -EBUSY;
}
-
nv_wr32(priv, 0x00b860, me);
+
+ if (ret == 0) {
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ }
+
return ret;
}
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
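
The detach path above is reordered so the engine-context words are cleared (and the BAR flushed) only after the kickoff wait succeeds, presumably to avoid wiping a context the engine may still be saving into when the kick times out. The shape of that change, reduced to a stand-alone sketch with stand-in helpers:

#include <stdio.h>

/* Reduced shape of the reordered detach above: the engine-context words are
 * cleared only once the kickoff wait reports success. All names below are
 * stand-ins, not driver interfaces. */
static int wait_for_kickoff(void)
{
	return 0;			/* pretend the busy-wait succeeded */
}

static void clear_engine_context(unsigned ctx[6])
{
	for (int i = 0; i < 6; i++)
		ctx[i] = 0;		/* nv_wo32(..., 0x00000000) + bar flush */
}

int main(void)
{
	unsigned ctx[6] = { 1, 2, 3, 4, 5, 6 };
	int ret = wait_for_kickoff();

	if (ret == 0)			/* only wipe the context on success */
		clear_engine_context(ctx);
	printf("ret=%d ctx[0]=%u\n", ret, ctx[0]);
	return 0;
}
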
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EINVAL;
}
- nv_wo32(base->eng, addr + 0x00, 0x00000000);
- nv_wo32(base->eng, addr + 0x04, 0x00000000);
- nv_wo32(base->eng, addr + 0x08, 0x00000000);
- nv_wo32(base->eng, addr + 0x0c, 0x00000000);
- nv_wo32(base->eng, addr + 0x10, 0x00000000);
- nv_wo32(base->eng, addr + 0x14, 0x00000000);
- bar->flush(bar);
-
save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
if (suspend)
return -EBUSY;
}
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
- (1 << NVDEV_ENGINE_DMAOBJ) |
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_MPEG) |
- (1 << NVDEV_ENGINE_ME) |
- (1 << NVDEV_ENGINE_VP) |
- (1 << NVDEV_ENGINE_CRYPT) |
- (1 << NVDEV_ENGINE_BSP) |
- (1 << NVDEV_ENGINE_PPP) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ (1ULL << NVDEV_ENGINE_DMAOBJ) |
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_MPEG) |
+ (1ULL << NVDEV_ENGINE_ME) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_CRYPT) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_PPP) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
args->pushbuf,
- (1 << NVDEV_ENGINE_SW) |
- (1 << NVDEV_ENGINE_GR) |
- (1 << NVDEV_ENGINE_COPY0) |
- (1 << NVDEV_ENGINE_COPY1), &chan);
+ (1ULL << NVDEV_ENGINE_SW) |
+ (1ULL << NVDEV_ENGINE_GR) |
+ (1ULL << NVDEV_ENGINE_COPY0) |
+ (1ULL << NVDEV_ENGINE_COPY1) |
+ (1ULL << NVDEV_ENGINE_BSP) |
+ (1ULL << NVDEV_ENGINE_VP) |
+ (1ULL << NVDEV_ENGINE_PPP), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- nv_info(priv, "unknown status 0x00000100\n");
+ nv_warn(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
#include <engine/dmaobj.h>
#include <engine/fifo.h>
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
- int subdev;
- u32 mask;
+ u64 subdev;
+ u64 mask;
} fifo_engine[] = {
- _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
+ _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
_(NVDEV_ENGINE_VP , 0),
_(NVDEV_ENGINE_PPP , 0),
_(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_GR :
case NVDEV_ENGINE_COPY0:
case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+ case NVDEV_ENGINE_BSP : addr = 0x0270; break;
+ case NVDEV_ENGINE_VP : addr = 0x0250; break;
+ case NVDEV_ENGINE_PPP : addr = 0x0260; break;
default:
return -EINVAL;
}
- nv_wo32(base, addr + 0x00, 0x00000000);
- nv_wo32(base, addr + 0x04, 0x00000000);
- bar->flush(bar);
-
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
return -EBUSY;
}
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda2763..62ab231cd6b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
.b16 #nve4_gpc_mmio_tail
.b16 #nve4_tpc_mmio_head
.b16 #nve4_tpc_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0f..09ee4702c8b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
0x00000000,
/* 0x0064: chipsets */
0x000000e4,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
0x000000e7,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
+ 0x000000e6,
+ 0x0110008c,
+ 0x01580110,
0x00000000,
-/* 0x0080: nve4_gpc_mmio_head */
+/* 0x008c: nve4_gpc_mmio_head */
0x00000380,
0x04000400,
0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
0x14003100,
0x000031d0,
0x040031e0,
-/* 0x0104: nve4_gpc_mmio_tail */
-/* 0x0104: nve4_tpc_mmio_head */
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
0x00000048,
0x00000064,
0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654bd..0bcfa4d447e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
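
The block added to ctx_xfer writes a command word and then spins until bit 0x2000 of the status word clears. A C rendering of that poll loop, using a simulated register in place of the falcon's I/O space:

#include <stdint.h>
#include <stdio.h>

/* C rendering of the fuc "wait for idle" loop added to ctx_xfer above:
 * poke one I/O register, then spin until bit 0x2000 of the status register
 * clears. The simulated register stands in for the real I/O space. */
static uint32_t fake_status = 0x2000;	/* pretend the unit starts busy */

static uint32_t ioread(void)
{
	uint32_t v = fake_status;

	fake_status &= ~0x2000;		/* unit goes idle after one poll */
	return v;
}

int main(void)
{
	int polls = 0;

	/* iowr I[$r15 + 0x200] $r14 -- the trigger write is elided here */
	while (ioread() & 0x2000)	/* bra ne #ctx_xfer_idle */
		polls++;
	printf("idle after %d poll(s)\n", polls);
	return 0;
}
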
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f484..bb03d2a1d57b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
0x01fa0613,
0xf803f806,
/* 0x0829: ctx_xfer */
- 0x0611f400,
-/* 0x082f: ctx_xfer_pre */
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
-/* 0x083d: ctx_xfer_pre_load */
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
-/* 0x0856: ctx_xfer_exec */
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
-/* 0x08dd: ctx_xfer_post_save_wait */
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
-/* 0x08e9: ctx_xfer_post */
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
-/* 0x0914: ctx_xfer_no_post_mmio */
- 0x064921f5,
-/* 0x0918: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00f7f100,
+ 0x06f4b60c,
+ 0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+ 0xfecf80fe,
+ 0x00e4f100,
+ 0xf91bf420,
+ 0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x9821f510,
+ 0x3121f506,
+ 0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf5065721,
+ 0xf5066621,
+ 0xbd067821,
+ 0x5721f5f4,
+ 0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+ 0x16019806,
+ 0x041427f1,
+ 0xd00624b6,
+ 0xe7f10020,
+ 0xe3f0a500,
+ 0x021fb941,
+ 0xb68d21f4,
+ 0xfcf004e0,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0x21f405f2,
+ 0xfc17f18d,
+ 0x0213f04a,
+ 0xd00c27f0,
+ 0x21f50012,
+ 0x27f10207,
+ 0x23f047fc,
+ 0x0020d002,
+ 0xb6012cf0,
+ 0x12d00320,
+ 0x01acf000,
+ 0xf006a5f0,
+ 0x0c9800b7,
+ 0x150d9814,
+ 0xf500e7f0,
+ 0xf0015c21,
+ 0x21f508a7,
+ 0x21f50103,
+ 0x01f40207,
+ 0x0ca7f022,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd065721,
+ 0x9821f5f4,
+ 0x2621f506,
+ 0x6621f502,
+ 0xf5f4bd06,
+ 0xf4065721,
+ 0x01981011,
+ 0x0511fd80,
+ 0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+ 0xf507df21,
+/* 0x092f: ctx_xfer_done */
+ 0xf8064921,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa28665..7fe9d7cf486b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
.b8 0xe7 0 0 0
.b16 #nve4_hub_mmio_head
.b16 #nve4_hub_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
.b8 0 0 0 0
nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3b..e3421af68ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
0x00000000,
/* 0x005c: chipsets */
0x000000e4,
- 0x013c0070,
+ 0x01440078,
0x000000e7,
- 0x013c0070,
+ 0x01440078,
+ 0x000000e6,
+ 0x01440078,
0x00000000,
-/* 0x0070: nve4_hub_mmio_head */
+/* 0x0078: nve4_hub_mmio_head */
0x0417e91c,
0x04400204,
0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
0x00408840,
0x08408900,
0x00408980,
-/* 0x013c: nve4_hub_mmio_tail */
- 0x00000000,
- 0x00000000,
+/* 0x0144: nve4_hub_mmio_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
0x0613f002,
0xf80601fa,
/* 0x07fb: ctx_xfer */
- 0xf400f803,
- 0x02f40611,
-/* 0x0801: ctx_xfer_pre */
- 0x10f7f00d,
- 0x067221f5,
-/* 0x080b: ctx_xfer_pre_load */
- 0xf01c11f4,
- 0x21f502f7,
- 0x21f50631,
- 0x21f50640,
- 0xf4bd0652,
- 0x063121f5,
- 0x069221f5,
-/* 0x0824: ctx_xfer_exec */
- 0xf1160198,
- 0xb6041427,
- 0x20d00624,
- 0x00e7f100,
- 0x41e3f0a5,
- 0xf4021fb9,
- 0xe0b68d21,
- 0x01fcf004,
- 0xb6022cf0,
- 0xf2fd0124,
- 0x8d21f405,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f006a5,
- 0x140c9800,
- 0xf0150d98,
- 0x21f500e7,
- 0xa7f0015c,
- 0x0321f508,
- 0x0721f501,
- 0x2201f402,
- 0xf40ca7f0,
- 0x17f1c921,
- 0x14b60a10,
- 0x0527f006,
-/* 0x08ab: ctx_xfer_post_save_wait */
- 0xcf0012d0,
- 0x22fd0012,
- 0xfa1bf405,
-/* 0x08b7: ctx_xfer_post */
- 0xf02e02f4,
- 0x21f502f7,
- 0xf4bd0631,
- 0x067221f5,
- 0x022621f5,
- 0x064021f5,
- 0x21f5f4bd,
- 0x11f40631,
- 0x80019810,
- 0xf40511fd,
- 0x21f5070b,
-/* 0x08e2: ctx_xfer_no_post_mmio */
-/* 0x08e2: ctx_xfer_done */
- 0x00f807b1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xf100f803,
+ 0xb60c00f7,
+ 0xe7f006f4,
+ 0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+ 0xf100fecf,
+ 0xf42000e4,
+ 0x11f4f91b,
+ 0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x3121f502,
+ 0x4021f506,
+ 0x5221f506,
+ 0xf5f4bd06,
+ 0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+ 0x98069221,
+ 0x27f11601,
+ 0x24b60414,
+ 0x0020d006,
+ 0xa500e7f1,
+ 0xb941e3f0,
+ 0x21f4021f,
+ 0x04e0b68d,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0xf18d21f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00c,
+ 0x020721f5,
+ 0x47fc27f1,
+ 0xd00223f0,
+ 0x2cf00020,
+ 0x0320b601,
+ 0xf00012d0,
+ 0xa5f001ac,
+ 0x00b7f006,
+ 0x98140c98,
+ 0xe7f0150d,
+ 0x5c21f500,
+ 0x08a7f001,
+ 0x010321f5,
+ 0x020721f5,
+ 0xf02201f4,
+ 0x21f40ca7,
+ 0x1017f1c9,
+ 0x0614b60a,
+ 0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+ 0x12cf0012,
+ 0x0522fd00,
+ 0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x3121f502,
+ 0xf5f4bd06,
+ 0xf5067221,
+ 0xf5022621,
+ 0xbd064021,
+ 0x3121f5f4,
+ 0x1011f406,
+ 0xfd800198,
+ 0x0bf40511,
+ 0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+ 0x0000f807,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv03_graph_gdi_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_gdi_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_blit_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_iifc_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_chroma },
- { 0x018c, nv01_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv01_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv01_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_ifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifc_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv03_graph_sifm_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_sifm_omthds[] = {
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x0304, nv04_graph_mthd_set_operation },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_surf3d_omthds[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{}
};
static struct nouveau_omthds
nv03_graph_ttri_omthds[] = {
- { 0x0188, nv01_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
{}
};
static struct nouveau_omthds
nv01_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv01_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
static struct nouveau_omthds
nv04_graph_prim_omthds[] = {
- { 0x0184, nv01_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
+ { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
{}
};
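
Every nouveau_omthds entry above gains a second method address in front of the handler, presumably turning each entry into an inclusive [start, end] range served by one function. A minimal range-dispatch sketch under that assumption (the handler names stand in for the real function pointers):

#include <stdio.h>

struct omthd {
	unsigned start, end;
	const char *handler;		/* stands in for the function pointer */
};

static const struct omthd omthds[] = {
	{ 0x0184, 0x0184, "bind_patt" },
	{ 0x0188, 0x0188, "bind_rop" },
	{ 0x02fc, 0x02fc, "set_operation" },
	{ 0, 0, NULL }
};

static const char *lookup(unsigned mthd)
{
	for (const struct omthd *m = omthds; m->handler; m++)
		if (mthd >= m->start && mthd <= m->end)
			return m->handler;
	return NULL;
}

int main(void)
{
	printf("0x0188 -> %s\n", lookup(0x0188));
	printf("0x0200 -> %s\n", lookup(0x0200) ? lookup(0x0200) : "(none)");
	return 0;
}
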
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv17_celcius_omthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+ { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
- if (nv_device(engine)->card_type == NV_20) {
+ if (nv_device(engine)->chipset != 0x34) {
nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
printk(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
printk("\n");
- nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, subc, class, mthd, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index cc6574eeb80e..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
switch (nv_device(priv)->chipset) {
case 0x40:
- case 0x41: /* guess */
+ case 0x41:
case 0x42:
case 0x43:
- case 0x45: /* guess */
+ case 0x45:
case 0x4e:
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
break;
case 0x44:
case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
+ case 0x4c:
case 0x47:
case 0x49:
case 0x4b:
- case 0x4c:
+ case 0x63:
case 0x67:
- default:
+ case 0x68:
nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+ switch (nv_device(priv)->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+ nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
break;
}
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv10_graph_intr_name, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
return 0;
}
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+ { 0x00000001, "BUSY" }, /* set when any bit is set */
+ { 0x00000002, "DISPATCH" },
+ { 0x00000004, "UNK2" },
+ { 0x00000008, "UNK3" },
+ { 0x00000010, "UNK4" },
+ { 0x00000020, "UNK5" },
+ { 0x00000040, "M2MF" },
+ { 0x00000080, "UNK7" },
+ { 0x00000100, "CTXPROG" },
+ { 0x00000200, "VFETCH" },
+ { 0x00000400, "CCACHE_UNK4" },
+ { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+ { 0x00001000, "UNK14XX" },
+ { 0x00002000, "UNK24XX_CSCHED" },
+ { 0x00004000, "UNK1CXX" },
+ { 0x00008000, "CLIPID" },
+ { 0x00010000, "ZCULL" },
+ { 0x00020000, "ENG2D" },
+ { 0x00040000, "UNK34XX" },
+ { 0x00080000, "TPRAST" },
+ { 0x00100000, "TPROP" },
+ { 0x00200000, "TEX" },
+ { 0x00400000, "TPVP" },
+ { 0x00800000, "MP" },
+ { 0x01000000, "ROP" },
+ {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+ "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+ "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+ "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+ "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+ const char *const units[], u32 status)
+{
+ int i;
+
+ nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+ for (i = 0; units[i] && status; i++) {
+ if ((status & 7) == 1)
+ pr_cont(" %s", units[i]);
+ status >>= 3;
+ }
+ if (status)
+ pr_cont(" (invalid: 0x%x)", status);
+ pr_cont("\n");
+}
+
static int
nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
!(timeout = ptimer->read(ptimer) - start > 2000000000));
if (timeout) {
- nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
- "0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
- nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+ nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+ tmp = nv_rd32(priv, 0x400700);
+ nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
+ nouveau_bitfield_print(nv50_pgraph_status, tmp);
+ pr_cont("\n");
+
+ nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+ nv_rd32(priv, 0x400380));
+ nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+ nv_rd32(priv, 0x400384));
+ nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+ nv_rd32(priv, 0x400388));
}
nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
}
if (ustatus) {
if (display)
- nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(priv, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- nv_info(priv, "%s - No TPs claiming errors?\n", name);
+ nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}
static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x400500, 0x00010001);
if (show) {
- nv_info(priv, "");
+ nv_error(priv, "");
nouveau_bitfield_print(nv50_graph_intr_name, show);
printk("\n");
nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
"mthd 0x%04x data 0x%08x\n",
chid, (u64)inst << 12, subc, class, mthd, data);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
if (nv_rd32(priv, 0x400824) & (1 << 31))
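
Aside (not part of the patch): the nouveau_pgraph_vstatus_print() helper added above treats each PGRAPH_VSTATUS register as a sequence of 3-bit per-unit fields, printing a unit's name when its field reads 1. A minimal standalone sketch of the same decode, using hypothetical names and an invented sample value:

#include <stdio.h>

static const char *const units0[] = {
	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
};

/* Walk 3-bit fields from the LSB up; a field value of 1 marks the unit busy. */
static void decode_vstatus(const char *const units[], unsigned int status)
{
	int i;

	printf("0x%08x:", status);
	for (i = 0; units[i] && status; i++) {
		if ((status & 7) == 1)
			printf(" %s", units[i]);
		status >>= 3;
	}
	printf("\n");
}

int main(void)
{
	decode_vstatus(units0, 0x00000009);	/* prints: 0x00000009: VFETCH CCACHE */
	return 0;
}
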
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..45aff5f5085a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
- bool enable = true;
int ret, i;
- switch (device->chipset) {
- case 0xd9: /* known broken without binary driver firmware */
- enable = false;
- break;
- default:
- break;
- }
-
- ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -814,7 +805,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a100, 0x00000002);
nv_wr32(priv, 0x409100, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
- nv_info(priv, "0x409800 wait failed\n");
+ nv_warn(priv, "0x409800 wait failed\n");
nv_wr32(priv, 0x409840, 0xffffffff);
nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12eb..a1e78de46456 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
return 0x9297;
case 0xe4:
case 0xe7:
+ case 0xe6:
return 0xa097;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f192..9f82e9702b46 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_graph_priv *priv;
int ret, i;
- ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->magic_not_rop_nr = 1;
break;
case 0xe7:
+ case 0xe6:
priv->magic_not_rop_nr = 1;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
static struct nouveau_omthds
nv31_mpeg_omthds[] = {
- { 0x0190, nv31_mpeg_mthd_dma },
- { 0x01a0, nv31_mpeg_mthd_dma },
- { 0x01b0, nv31_mpeg_mthd_dma },
+ { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+ { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+ { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x00b100, stat);
nv_wr32(priv, 0x00b230, 0x00000001);
- nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_ppp base;
+ struct nouveau_engine base;
};
struct nv98_ppp_chan {
- struct nouveau_ppp_chan base;
+ struct nouveau_engctx base;
};
/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
* PPPP context
******************************************************************************/
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv98_ppp_chan *priv;
- int ret;
-
- ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_chan *priv = (void *)object;
- return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv98_ppp_context_ctor,
- .dtor = nv98_ppp_context_dtor,
- .init = nv98_ppp_context_init,
- .fini = nv98_ppp_context_fini,
- .rd32 = _nouveau_ppp_context_rd32,
- .wr32 = _nouveau_ppp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
* PPPP engine/subdev functions
******************************************************************************/
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00400002;
- nv_subdev(priv)->intr = nv98_ppp_intr;
nv_engine(priv)->cclass = &nv98_ppp_cclass;
nv_engine(priv)->sclass = nv98_ppp_sclass;
return 0;
}
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_ppp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv98_ppp_priv *priv = (void *)object;
- return nouveau_ppp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = nv98_ppp_dtor,
- .init = nv98_ppp_init,
- .fini = nv98_ppp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+ { 0x90b3, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+ .handle = NV_ENGCTX(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+ struct nvc0_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000fff2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_ppp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+ "PPPP", "ppp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+ nv_engine(priv)->sclass = nvc0_ppp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+ .handle = NV_ENGINE(PPP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_ppp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv04_software_omthds[] = {
- { 0x0150, nv04_software_set_ref },
- { 0x0500, nv04_software_flip },
+ { 0x0150, 0x0150, nv04_software_set_ref },
+ { 0x0500, 0x0500, nv04_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv10_software_omthds[] = {
- { 0x0500, nv10_software_flip },
+ { 0x0500, 0x0500, nv10_software_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nv50_software_omthds[] = {
- { 0x018c, nv50_software_mthd_dma_vblsem },
- { 0x0400, nv50_software_mthd_vblsem_offset },
- { 0x0404, nv50_software_mthd_vblsem_value },
- { 0x0408, nv50_software_mthd_vblsem_release },
- { 0x0500, nv50_software_mthd_flip },
+ { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+ { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+ { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nv50_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
static struct nouveau_omthds
nvc0_software_omthds[] = {
- { 0x0400, nvc0_software_mthd_vblsem_offset },
- { 0x0404, nvc0_software_mthd_vblsem_offset },
- { 0x0408, nvc0_software_mthd_vblsem_value },
- { 0x040c, nvc0_software_mthd_vblsem_release },
- { 0x0500, nvc0_software_mthd_flip },
+ { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+ { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+ { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+ { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+ { 0x0500, 0x0500, nvc0_software_mthd_flip },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
+#include <core/class.h>
#include <engine/vp.h>
struct nv84_vp_priv {
- struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
- struct nouveau_vp_chan base;
+ struct nouveau_engine base;
};
/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
* PVP context
******************************************************************************/
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv84_vp_chan *priv;
- int ret;
-
- ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
- 0, 0, 0, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
- struct nv84_vp_chan *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_context_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_chan *priv = (void *)object;
- return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
static struct nouveau_oclass
nv84_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv84_vp_context_ctor,
- .dtor = nv84_vp_context_dtor,
- .init = nv84_vp_context_init,
- .fini = nv84_vp_context_fini,
- .rd32 = _nouveau_vp_context_rd32,
- .wr32 = _nouveau_vp_context_wr32,
+ .ctor = _nouveau_engctx_ctor,
+ .dtor = _nouveau_engctx_dtor,
+ .init = _nouveau_engctx_init,
+ .fini = _nouveau_engctx_fini,
+ .rd32 = _nouveau_engctx_rd32,
+ .wr32 = _nouveau_engctx_wr32,
},
};
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
* PVP engine/subdev functions
******************************************************************************/
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
static int
nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_vp_priv *priv;
int ret;
- ret = nouveau_vp_create(parent, engine, oclass, &priv);
+ ret = nouveau_engine_create(parent, engine, oclass, true,
+ "PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x01020000;
- nv_subdev(priv)->intr = nv84_vp_intr;
nv_engine(priv)->cclass = &nv84_vp_cclass;
nv_engine(priv)->sclass = nv84_vp_sclass;
return 0;
}
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
- struct nv84_vp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_vp_init(&priv->base);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
- struct nv84_vp_priv *priv = (void *)object;
- return nouveau_vp_fini(&priv->base, suspend);
-}
-
struct nouveau_oclass
nv84_vp_oclass = {
.handle = NV_ENGINE(VP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_vp_ctor,
- .dtor = nv84_vp_dtor,
- .init = nv84_vp_init,
- .fini = nv84_vp_fini,
+ .dtor = _nouveau_engine_dtor,
+ .init = _nouveau_engine_init,
+ .fini = _nouveau_engine_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+ { 0x90b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+ struct nvc0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nvc0_vp_cclass;
+ nv_engine(priv)->sclass = nvc0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nvc0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+ struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+ { 0x95b2, &nouveau_object_ofuncs },
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+ struct nve0_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000fff2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+ "PVP", "vp", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00020000;
+ nv_engine(priv)->cclass = &nve0_vp_cclass;
+ nv_engine(priv)->sclass = nve0_vp_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_vp_ctor,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nve0_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
struct nv_device_class {
u64 device; /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE 0x80000000
+#define NV50_DMA_CONF0_PRIV 0x00300000
+#define NV50_DMA_CONF0_PRIV_VM 0x00000000
+#define NV50_DMA_CONF0_PRIV_US 0x00100000
+#define NV50_DMA_CONF0_PRIV__S 0x00200000
+#define NV50_DMA_CONF0_PART 0x00030000
+#define NV50_DMA_CONF0_PART_VM 0x00000000
+#define NV50_DMA_CONF0_PART_256 0x00010000
+#define NV50_DMA_CONF0_PART_1KB 0x00020000
+#define NV50_DMA_CONF0_COMP 0x00000180
+#define NV50_DMA_CONF0_COMP_NONE 0x00000000
+#define NV50_DMA_CONF0_COMP_VM 0x00000180
+#define NV50_DMA_CONF0_TYPE 0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE 0x80000000
+#define NVC0_DMA_CONF0_PRIV 0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
+#define NVC0_DMA_CONF0_PRIV_US 0x00100000
+#define NVC0_DMA_CONF0_PRIV__S 0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
+#define NVC0_DMA_CONF0_TYPE 0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE 0x80000000
+#define NVD0_DMA_CONF0_PAGE 0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
+#define NVD0_DMA_CONF0_TYPE 0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
+
struct nv_dma_class {
u32 flags;
u32 pad0;
u64 start;
u64 limit;
+ u32 conf0;
};
/* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
u32 engine;
};
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS 0x00005070
+#define NV84_DISP_CLASS 0x00008270
+#define NVA0_DISP_CLASS 0x00008370
+#define NV94_DISP_CLASS 0x00008870
+#define NVA3_DISP_CLASS 0x00008570
+#define NVD0_DISP_CLASS 0x00009070
+#define NVE0_DISP_CLASS 0x00009170
+
+#define NV50_DISP_SOR_MTHD 0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
+#define NV50_DISP_SOR_MTHD_LINK 0x00000004
+#define NV50_DISP_SOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_SOR_PWR 0x00010000
+#define NV50_DISP_SOR_PWR_STATE 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
+#define NVA3_DISP_SOR_HDA_ELD 0x00010100
+#define NV84_DISP_SOR_HDMI_PWR 0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN 0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
+
+#define NV50_DISP_DAC_MTHD 0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
+#define NV50_DISP_DAC_MTHD_OR 0x00000003
+
+#define NV50_DISP_DAC_PWR 0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
+#define NV50_DISP_DAC_PWR_DATA 0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
+#define NV50_DISP_DAC_PWR_STATE 0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
+#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS 0x0000507a
+#define NV84_DISP_CURS_CLASS 0x0000827a
+#define NVA0_DISP_CURS_CLASS 0x0000837a
+#define NV94_DISP_CURS_CLASS 0x0000887a
+#define NVA3_DISP_CURS_CLASS 0x0000857a
+#define NVD0_DISP_CURS_CLASS 0x0000907a
+#define NVE0_DISP_CURS_CLASS 0x0000917a
+
+struct nv50_display_curs_class {
+ u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS 0x0000507b
+#define NV84_DISP_OIMM_CLASS 0x0000827b
+#define NVA0_DISP_OIMM_CLASS 0x0000837b
+#define NV94_DISP_OIMM_CLASS 0x0000887b
+#define NVA3_DISP_OIMM_CLASS 0x0000857b
+#define NVD0_DISP_OIMM_CLASS 0x0000907b
+#define NVE0_DISP_OIMM_CLASS 0x0000917b
+
+struct nv50_display_oimm_class {
+ u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS 0x0000507c
+#define NV84_DISP_SYNC_CLASS 0x0000827c
+#define NVA0_DISP_SYNC_CLASS 0x0000837c
+#define NV94_DISP_SYNC_CLASS 0x0000887c
+#define NVA3_DISP_SYNC_CLASS 0x0000857c
+#define NVD0_DISP_SYNC_CLASS 0x0000907c
+#define NVE0_DISP_SYNC_CLASS 0x0000917c
+
+struct nv50_display_sync_class {
+ u32 pushbuf;
+ u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS 0x0000507d
+#define NV84_DISP_MAST_CLASS 0x0000827d
+#define NVA0_DISP_MAST_CLASS 0x0000837d
+#define NV94_DISP_MAST_CLASS 0x0000887d
+#define NVA3_DISP_MAST_CLASS 0x0000857d
+#define NVD0_DISP_MAST_CLASS 0x0000907d
+#define NVE0_DISP_MAST_CLASS 0x0000917d
+
+struct nv50_display_mast_class {
+ u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS 0x0000507e
+#define NV84_DISP_OVLY_CLASS 0x0000827e
+#define NVA0_DISP_OVLY_CLASS 0x0000837e
+#define NV94_DISP_OVLY_CLASS 0x0000887e
+#define NVA3_DISP_OVLY_CLASS 0x0000857e
+#define NVD0_DISP_OVLY_CLASS 0x0000907e
+#define NVE0_DISP_OVLY_CLASS 0x0000917e
+
+struct nv50_display_ovly_class {
+ u32 pushbuf;
+ u32 head;
+};
+
#endif
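
Aside (not part of the patch): the NV50_DISP_SOR_MTHD masks above pack the method group, head, link and OR index into a single method word, so a handler can split them back out with plain mask-and-shift. A small standalone sketch with the masks copied locally and an invented method value:

#include <stdio.h>

#define SOR_MTHD_TYPE 0x0000f000
#define SOR_MTHD_HEAD 0x00000018
#define SOR_MTHD_LINK 0x00000004
#define SOR_MTHD_OR   0x00000003

int main(void)
{
	unsigned int mthd = 0x00012005;	/* invented example value */

	/* type 0x2000 (the HDMI_PWR group per the defines above), head 0, link 1, OR 1 */
	printf("type 0x%04x head %u link %u or %u\n",
	       mthd & SOR_MTHD_TYPE,
	       (mthd & SOR_MTHD_HEAD) >> 3,
	       (mthd & SOR_MTHD_LINK) >> 2,
	       mthd & SOR_MTHD_OR);
	return 0;
}
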
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
int nouveau_client_create_(const char *name, u64 device, const char *cfg,
const char *dbg, int, void **);
+#define nouveau_client_destroy(p) \
+ nouveau_namedb_destroy(&(p)->base)
+
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
int nouveau_engctx_init(struct nouveau_engctx *);
int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
void _nouveau_engctx_dtor(struct nouveau_object *);
int _nouveau_engctx_init(struct nouveau_object *);
int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+ bool external;
+};
+
+struct nouveau_falcon {
+ struct nouveau_engine base;
+
+ u32 addr;
+ u8 version;
+ u8 secret;
+
+ struct nouveau_gpuobj *core;
+ bool external;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } code;
+
+ struct {
+ u32 limit;
+ u32 *data;
+ u32 size;
+ } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
+ nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
+ sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p) \
+ nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_init(nv_object(falcon)); \
+})
+#define nouveau_falcon_fini(p,s) ({ \
+ struct nouveau_falcon *falcon = (p); \
+ _nouveau_falcon_fini(nv_object(falcon), (s)); \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, u32, bool, const char *,
+ const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int _nouveau_falcon_init(struct nouveau_object *);
+int _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
void _nouveau_gpuobj_dtor(struct nouveau_object *);
int _nouveau_gpuobj_init(struct nouveau_object *);
int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
int heap_nodes;
};
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+ return mm->block_size != 0;
+}
+
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
int nouveau_mm_fini(struct nouveau_mm *);
int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9217fd..5982935ee23a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
}
struct nouveau_omthds {
- u32 method;
+ u32 start;
+ u32 limit;
int (*call)(struct nouveau_object *, u32, void *, u32);
};
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
- u8 (*rd08)(struct nouveau_object *, u32 offset);
- u16 (*rd16)(struct nouveau_object *, u32 offset);
- u32 (*rd32)(struct nouveau_object *, u32 offset);
- void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
- void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
- void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+ u8 (*rd08)(struct nouveau_object *, u64 offset);
+ u16 (*rd16)(struct nouveau_object *, u64 offset);
+ u32 (*rd32)(struct nouveau_object *, u64 offset);
+ void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+ void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+ void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
};
static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
{
struct nouveau_omthds *method = nv_oclass(obj)->omthds;
while (method && method->call) {
- if (method->method == mthd)
- return method->call(obj, mthd, &data, sizeof(data));
+ if (mthd >= method->start && mthd <= method->limit)
+ return method->call(obj, mthd, data, size);
method++;
}
return -EINVAL;
}
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+ return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
{
u8 data = nv_ofuncs(obj)->rd08(obj, addr);
nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
}
static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
{
u16 data = nv_ofuncs(obj)->rd16(obj, addr);
nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
}
static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
{
u32 data = nv_ofuncs(obj)->rd32(obj, addr);
nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +154,28 @@ nv_ro32(void *obj, u32 addr)
}
static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
{
nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
nv_ofuncs(obj)->wr08(obj, addr, data);
}
static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
{
nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
nv_ofuncs(obj)->wr16(obj, addr, data);
}
static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
{
nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
nv_ofuncs(obj)->wr32(obj, addr, data);
}
static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
{
u32 temp = nv_ro32(obj, addr);
nv_wo32(obj, addr, (temp & ~mask) | data);
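
Aside (not part of the patch): with the struct nouveau_omthds change above, each entry now covers the inclusive method range [start, limit] rather than a single method, and nv_exec() matches against that range — which is why the omthds tables earlier in this series gained a second column. A minimal standalone sketch of the same lookup, with hypothetical names:

#include <stdio.h>

struct omthds {
	unsigned int start;
	unsigned int limit;
	int (*call)(unsigned int mthd);
};

static int handle_vblsem(unsigned int mthd)
{
	printf("vblsem method 0x%04x\n", mthd);
	return 0;
}

/* One entry now services a whole span of methods. */
static const struct omthds methods[] = {
	{ 0x0400, 0x040c, handle_vblsem },
	{}
};

static int dispatch(unsigned int mthd)
{
	const struct omthds *m;

	for (m = methods; m->call; m++) {
		if (mthd >= m->start && mthd <= m->limit)
			return m->call(mthd);
	}
	return -1;	/* the real code returns -EINVAL */
}

int main(void)
{
	dispatch(0x0404);	/* matched by the 0x0400..0x040c entry */
	dispatch(0x0500);	/* no match, returns -1 */
	return 0;
}
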
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
struct nouveau_object base;
struct nouveau_sclass *sclass;
- u32 engine;
+ u64 engine;
int (*context_attach)(struct nouveau_object *,
struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_BSP_H__
#define __NOUVEAU_BSP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
- struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
#ifndef __NOUVEAU_COPY_H__
#define __NOUVEAU_COPY_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
- struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d) \
- nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
extern struct nouveau_oclass nva3_copy_oclass;
extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
#ifndef __NOUVEAU_CRYPT_H__
#define __NOUVEAU_CRYPT_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
- struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_crypt_oclass;
extern struct nouveau_oclass nv98_crypt_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
extern struct nouveau_oclass nv04_disp_oclass;
extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
+ u32 conf0;
};
-#define nouveau_dmaobj_create(p,e,c,a,s,d) \
- nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p) \
- nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p) \
- nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s) \
- nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, void *data, u32 size,
- int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
struct nouveau_dmaeng {
struct nouveau_engine base;
- int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
- struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+ /* creates a "physical" dma object from a struct nouveau_dmaobj */
+ int (*bind)(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **);
};
#define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
extern struct nouveau_oclass nv04_dmaeng_oclass;
extern struct nouveau_oclass nv50_dmaeng_oclass;
extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
struct nouveau_object *,
struct nouveau_oclass *,
int bar, u32 addr, u32 size, u32 push,
- u32 engmask, int len, void **);
+ u64 engmask, int len, void **);
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_init _nouveau_namedb_init
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
#ifndef __NOUVEAU_PPP_H__
#define __NOUVEAU_PPP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
- struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
#ifndef __NOUVEAU_VP_H__
#define __NOUVEAU_VP_H__
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
- struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
- nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d) \
- nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d) \
- nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s) \
- nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
- struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d) \
- nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d) \
- nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s) \
- nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be4..5bd1ca8cd20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
u8 chip;
u8 minor;
u8 micro;
+ u8 patch;
} version;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
uint8_t bus;
uint8_t location;
uint8_t or;
+ uint8_t link;
bool duallink_possible;
union {
struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+ struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+ struct dcb_output *);
int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
(struct nouveau_bios *, void *, int index, u16 entry));
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+ u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr__, u8 *sub,
+ struct nvbios_disp *);
+
+struct nvbios_outp {
+ u16 type;
+ u16 mask;
+ u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+ u16 match;
+ u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+ u16 type;
+ u16 mask;
+ u8 flags;
+ u32 script[5];
+ u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+ u8 drv;
+ u8 pre;
+ u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a36..e6563b5cb08e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
u8 param;
};
-u16 dcb_gpio_table(struct nouveau_bios *);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
-int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e97..ca2f6bf37f46 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
u32 nested;
u16 repeat;
u16 repend;
+ u32 ramcfg;
};
int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
PLL_UNK42 = 0x42,
PLL_VPLL0 = 0x80,
PLL_VPLL1 = 0x81,
+ PLL_VPLL2 = 0x82,
+ PLL_VPLL3 = 0x83,
PLL_MAX = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
} type;
u64 stolen;
u64 size;
+
int ranks;
+ int parts;
+ int (*init)(struct nouveau_fb *);
int (*get)(struct nouveau_fb *, u64 size, u32 align,
u32 size_nc, u32 type, struct nouveau_mem **);
void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
int regions;
void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
+ void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
void (*fini)(struct nouveau_fb *, int i,
struct nouveau_fb_tile *);
void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
#define nouveau_fb_create(p,e,c,d) \
nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int nouveau_fb_created(struct nouveau_fb *);
+int nouveau_fb_preinit(struct nouveau_fb *);
void nouveau_fb_destroy(struct nouveau_fb *);
int nouveau_fb_init(struct nouveau_fb *);
#define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
extern struct nouveau_oclass nv04_fb_oclass;
extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
extern struct nouveau_oclass nv50_fb_oclass;
extern struct nouveau_oclass nvc0_fb_oclass;
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+int nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *);
+
+int nv41_fb_vram_init(struct nouveau_fb *);
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_vram_init(struct nouveau_fb *);
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+ u32 pitch, u32 flags, struct nouveau_fb_tile *);
void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15d..b75e8f18e52c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
struct nouveau_subdev base;
/* hardware interfaces */
- void (*reset)(struct nouveau_gpio *);
+ void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
}
static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_barobj *barobj = (void *)object;
return ioread32_native(barobj->iomem + addr);
}
static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_barobj *barobj = (void *)object;
iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..f621f69fa1a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
struct pci_dev *pdev = nv_device(bios)->pdev;
struct device_node *dn;
const u32 *data;
- int size, i;
+ int size;
dn = pci_device_to_OF_node(pdev);
if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
- for (i = 0; bios->data && i < bios->size; i += cnt) {
- cnt = min((bios->size - i), (u32)4096);
- ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
- if (ret != cnt)
- break;
+ if (bios->data) {
+ /* disobey the acpi spec - much faster on at least w530 ... */
+ ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+ if (ret != bios->size ||
+ nvbios_checksum(bios->data, bios->size)) {
+ /* ... that didn't work, ok, i'll be good now */
+ for (i = 0; i < bios->size; i += cnt) {
+ cnt = min((bios->size - i), (u32)4096);
+ ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+ if (ret != cnt)
+ break;
+ }
+ }
}
}
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
}
static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return bios->data[addr];
}
static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le16(&bios->data[addr]);
}
static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_bios *bios = (void *)object;
return get_unaligned_le32(&bios->data[addr]);
}
static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
struct nouveau_bios *bios = (void *)object;
bios->data[addr] = data;
}
static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le16(data, &bios->data[addr]);
}
static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_bios *bios = (void *)object;
put_unaligned_le32(data, &bios->data[addr]);
@@ -439,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+ bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
} else
if (bmp_version(bios)) {
bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -447,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
}
- nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+ nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
bios->version.major, bios->version.chip,
- bios->version.minor, bios->version.micro);
+ bios->version.minor, bios->version.micro, bios->version.patch);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c51197157749..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
return 0x0000;
}
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+{
+ u16 dcb = dcb_outp(bios, idx, ver, len);
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+ outp->or = (conn & 0x0f000000) >> 24;
+ outp->location = (conn & 0x00300000) >> 20;
+ outp->bus = (conn & 0x000f0000) >> 16;
+ outp->connector = (conn & 0x0000f000) >> 12;
+ outp->heads = (conn & 0x00000f00) >> 8;
+ outp->i2c_index = (conn & 0x000000f0) >> 4;
+ outp->type = (conn & 0x0000000f);
+ outp->link = 0;
+ } else {
+ dcb = 0x0000;
+ }
+
+ if (*ver >= 0x40) {
+ u32 conf = nv_ro32(bios, dcb + 0x04);
+ switch (outp->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ outp->link = (conf & 0x00000030) >> 4;
+ outp->sorconf.link = outp->link; /*XXX*/
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+ return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+ return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *len, struct dcb_output *outp)
+{
+ u16 dcb, idx = 0;
+ while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+ if (dcb_outp_hasht(outp) == type) {
+ if ((dcb_outp_hashm(outp) & mask) == mask)
+ break;
+ }
+ }
+ return dcb;
+}
+
int
dcb_outp_foreach(struct nouveau_bios *bios, void *data,
int (*exec)(struct nouveau_bios *, void *, int, u16))
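
A rough usage sketch for the new dcb_outp_match() helper, assuming the mask layout from dcb_outp_hashm() above ((heads << 8) | (link << 6) | or); the wrapper function and the mask value are illustrative only:

static void
example_find_dp_output(struct nouveau_bios *bios)
{
        struct dcb_output outp;
        u8 ver, len;
        u16 mask = (0x01 << 8) | (0x01 << 6) | 0x01; /* head 0, link A, OR 0 */
        u16 data = dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &len, &outp);

        if (data) {
                /* outp now describes the first DCB entry whose hash covers
                 * every bit set in mask (connector, i2c_index, etc.) */
        }
}
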
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+ struct bit_entry U;
+
+ if (!bit_entry(bios, 'U', &U)) {
+ if (U.version == 1) {
+ u16 data = nv_ro16(bios, U.offset);
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x20:
+ case 0x21:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ *sub = nv_ro08(bios, data + 0x04);
+ return data;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub)
+{
+ u8 hdr, cnt;
+ u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+ if (data && idx < cnt)
+ return data + hdr + (idx * *len);
+ *ver = 0x00;
+ return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *len, u8 *sub,
+ struct nvbios_disp *info)
+{
+ u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+ if (data && *len >= 2) {
+ info->data = nv_ro16(bios, data + 0);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ struct nvbios_disp info;
+ u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+ if (data) {
+ *cnt = nv_ro08(bios, info.data + 0x05);
+ *len = 0x06;
+ data = info.data;
+ }
+ return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *hdr >= 0x0a) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro32(bios, data + 0x02);
+ if (*ver <= 0x20) /* match any link */
+ info->mask |= 0x00c0;
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->script[2] = 0x0000;
+ if (*hdr >= 0x0c)
+ info->script[2] = nv_ro16(bios, data + 0x0a);
+ return data;
+ }
+ return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_outp *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+ return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ info->match = nv_ro16(bios, data + 0x00);
+ info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+ info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+ }
+ return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_ocfg *info)
+{
+ u16 data, idx = 0;
+ while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+ if (info->match == type)
+ break;
+ }
+ return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+ while (cmp) {
+ if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+ return nv_ro16(bios, cmp + 0x02);
+ cmp += 0x04;
+ }
+ return 0x0000;
+}
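
The new disp helpers are meant to be chained: match an output table entry, pick an output configuration under it, then resolve the clock-dependent script. A hedged usage sketch under that assumption; the wrapper function, mask, match value and clock are made up for illustration and DCB_OUTPUT_TMDS is taken from subdev/bios/dcb.h:

static void
example_lookup_tmds_script(struct nouveau_bios *bios)
{
        struct nvbios_outp outp;
        struct nvbios_ocfg ocfg;
        u8 ver, hdr, cnt, len;
        u16 data, cfg, script;

        /* output table entry for a TMDS encoder on OR 0, link A */
        data = nvbios_outp_match(bios, DCB_OUTPUT_TMDS, 0x0041,
                                 &ver, &hdr, &cnt, &len, &outp);
        if (!data)
                return;

        /* configuration whose match value is 0x0000, then the script for
         * a 165 MHz pixel clock */
        cfg = nvbios_ocfg_match(bios, data, 0x0000,
                                &ver, &hdr, &cnt, &len, &ocfg);
        if (cfg) {
                script = nvbios_oclk_match(bios, ocfg.clkcmp[0], 165000);
                /* script would then be run through the init-table
                 * interpreter (nvbios_init / nvbios_exec) */
                (void)script;
        }
}
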
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
#include "subdev/bios.h"
#include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
#include "subdev/bios/dp.h"
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- struct bit_entry bit_d;
+ struct bit_entry d;
- if (!bit_entry(bios, 'd', &bit_d)) {
- if (bit_d.version == 1) {
- u16 data = nv_ro16(bios, bit_d.offset);
+ if (!bit_entry(bios, 'd', &d)) {
+ if (d.version == 1 && d.length >= 2) {
+ u16 data = nv_ro16(bios, d.offset);
if (data) {
- *ver = nv_ro08(bios, data + 0);
- *hdr = nv_ro08(bios, data + 1);
- *len = nv_ro08(bios, data + 2);
- *cnt = nv_ro08(bios, data + 3);
- return data;
+ *ver = nv_ro08(bios, data + 0x00);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x01);
+ *len = nv_ro08(bios, data + 0x02);
+ *cnt = nv_ro08(bios, data + 0x03);
+ return data;
+ default:
+ break;
+ }
}
}
}
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
return 0x0000;
}
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ if (data && idx < *cnt) {
+ u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+ switch (*ver * !!outp) {
+ case 0x21:
+ case 0x30:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *len = nv_ro08(bios, data + 0x05);
+ *cnt = nv_ro08(bios, outp + 0x04);
+ break;
+ case 0x40:
+ *hdr = nv_ro08(bios, data + 0x04);
+ *cnt = 0;
+ *len = 0;
+ break;
+ default:
+ break;
+ }
+ return outp;
+ }
+ *ver = 0x00;
+ return 0x0000;
+}
+
u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 hdr, cnt;
- u16 table = dp_table(bios, ver, &hdr, &cnt, len);
- if (table && idx < cnt)
- return nv_ro16(bios, table + hdr + (idx * *len));
- return 0xffff;
+ u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+ if (data && *ver) {
+ info->type = nv_ro16(bios, data + 0x00);
+ info->mask = nv_ro16(bios, data + 0x02);
+ switch (*ver) {
+ case 0x21:
+ case 0x30:
+ info->flags = nv_ro08(bios, data + 0x05);
+ info->script[0] = nv_ro16(bios, data + 0x06);
+ info->script[1] = nv_ro16(bios, data + 0x08);
+ info->lnkcmp = nv_ro16(bios, data + 0x0a);
+ info->script[2] = nv_ro16(bios, data + 0x0c);
+ info->script[3] = nv_ro16(bios, data + 0x0e);
+ info->script[4] = nv_ro16(bios, data + 0x10);
+ break;
+ case 0x40:
+ info->flags = nv_ro08(bios, data + 0x04);
+ info->script[0] = nv_ro16(bios, data + 0x05);
+ info->script[1] = nv_ro16(bios, data + 0x07);
+ info->lnkcmp = nv_ro16(bios, data + 0x09);
+ info->script[2] = nv_ro16(bios, data + 0x0b);
+ info->script[3] = nv_ro16(bios, data + 0x0d);
+ info->script[4] = nv_ro16(bios, data + 0x0f);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
}
u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
- u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpout *info)
{
- u8 idx = 0;
- u16 data;
- while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
- if (data) {
- u32 hash = nv_ro32(bios, data);
- if (dcb_hash_match(outp, hash))
- return data;
+ u16 data, idx = 0;
+ while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+ if (data && info->type == type) {
+ if ((info->mask & mask) == mask)
+ break;
}
}
+ return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+ if (*ver >= 0x40) {
+ outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+ *hdr = *hdr + (*len * * cnt);
+ *len = nv_ro08(bios, outp + 0x06);
+ *cnt = nv_ro08(bios, outp + 0x07);
+ }
+
+ if (idx < *cnt)
+ return outp + *hdr + (idx * *len);
+
return 0x0000;
}
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+ if (data) {
+ switch (*ver) {
+ case 0x21:
+ info->drv = nv_ro08(bios, data + 0x02);
+ info->pre = nv_ro08(bios, data + 0x03);
+ info->unk = nv_ro08(bios, data + 0x04);
+ break;
+ case 0x30:
+ case 0x40:
+ info->drv = nv_ro08(bios, data + 0x01);
+ info->pre = nv_ro08(bios, data + 0x02);
+ info->unk = nv_ro08(bios, data + 0x03);
+ break;
+ default:
+ data = 0x0000;
+ break;
+ }
+ }
+ return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+ u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ struct nvbios_dpcfg *info)
+{
+ u8 idx = 0xff;
+ u16 data;
+
+ if (*ver >= 0x30) {
+ const u8 vsoff[] = { 0, 4, 7, 9 };
+ idx = (un * 10) + vsoff[vs] + pe;
+ } else {
+ while ((data = nvbios_dpcfg_entry(bios, outp, idx,
+ ver, hdr, cnt, len))) {
+ if (nv_ro08(bios, data + 0x00) == vs &&
+ nv_ro08(bios, data + 0x01) == pe)
+ break;
+ idx++;
+ }
+ }
+
+ return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c84e93fa6d95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
#include <subdev/bios/gpio.h>
u16
-dcb_gpio_table(struct nouveau_bios *bios)
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 ver, hdr, cnt, len;
- u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+ u16 data = 0x0000;
+ u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
- if (ver >= 0x30 && hdr >= 0x0c)
- return nv_ro16(bios, dcb + 0x0a);
- if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
- return nv_ro16(bios, dcb - 0x0f);
+ if (*ver >= 0x30 && *hdr >= 0x0c)
+ data = nv_ro16(bios, dcb + 0x0a);
+ else
+ if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+ data = nv_ro16(bios, dcb - 0x0f);
+
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ if (*ver < 0x30) {
+ *hdr = 3;
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x01);
+ } else
+ if (*ver <= 0x41) {
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ } else {
+ data = 0x0000;
+ }
+ }
}
- return 0x0000;
+ return data;
}
u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u16 gpio = dcb_gpio_table(bios);
- if (gpio) {
- *ver = nv_ro08(bios, gpio);
- if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
- return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
- else if (ent < nv_ro08(bios, gpio + 2))
- return gpio + nv_ro08(bios, gpio + 1) +
- (ent * nv_ro08(bios, gpio + 3));
- }
+ u8 hdr, cnt;
+ u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ if (gpio && ent < cnt)
+ return gpio + hdr + (ent * *len);
return 0x0000;
}
-int
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
- u8 ver, hdr, cnt, len;
- u16 entry;
- int i = -1;
-
- while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
- if (ver < 0x40) {
- u16 data = nv_ro16(bios, entry);
+ u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+ if (data) {
+ if (*ver < 0x40) {
+ u16 info = nv_ro16(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- .param = !!(data & 0x8000),
+ .line = (info & 0x001f) >> 0,
+ .func = (info & 0x07e0) >> 5,
+ .log[0] = (info & 0x1800) >> 11,
+ .log[1] = (info & 0x6000) >> 13,
+ .param = !!(info & 0x8000),
};
} else
- if (ver < 0x41) {
- u32 data = nv_ro32(bios, entry);
+ if (*ver < 0x41) {
+ u32 info = nv_ro32(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000001f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data & 0x18000000) >> 27,
- .log[1] = (data & 0x60000000) >> 29,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000001f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info & 0x18000000) >> 27,
+ .log[1] = (info & 0x60000000) >> 29,
+ .param = !!(info & 0x80000000),
};
} else {
- u32 data = nv_ro32(bios, entry + 0);
- u8 data1 = nv_ro32(bios, entry + 4);
+ u32 info = nv_ro32(bios, data + 0);
+ u8 info1 = nv_ro32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000003f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data1 & 0x30) >> 4,
- .log[1] = (data1 & 0xc0) >> 6,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000003f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info1 & 0x30) >> 4,
+ .log[1] = (info1 & 0xc0) >> 6,
+ .param = !!(info & 0x80000000),
};
}
+ }
+
+ return data;
+}
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+ u8 hdr, cnt, i = 0;
+ u16 data;
+
+ while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
if ((line == 0xff || line == gpio->line) &&
(func == 0xff || func == gpio->func))
- return 0;
+ return data;
}
/* DCB 2.2, fixed TVDAC GPIO data */
- if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
- u8 conf = nv_ro08(bios, entry - 5);
- u8 addr = nv_ro08(bios, entry - 4);
+ if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+ if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+ u8 conf = nv_ro08(bios, data - 5);
+ u8 addr = nv_ro08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
.log[0] = !!(conf & 0x02),
.log[1] = !(conf & 0x02),
};
- return 0;
+ *ver = 0x00;
+ return data;
}
}
}
- return -EINVAL;
+ return 0x0000;
}
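
With the reworked interface, dcb_gpio_parse() walks raw table entries while dcb_gpio_match() keeps the old "find by function" behaviour. A small sketch of the latter, assuming the DCB_GPIO_* ids from subdev/bios/gpio.h; the wrapper function is illustrative:

static void
example_find_tvdac_gpio(struct nouveau_bios *bios)
{
        struct dcb_gpio_func func;
        u8 ver, len;
        u16 data;

        /* 0xff for the line means "any line"; only the function must match */
        data = dcb_gpio_match(bios, 0, DCB_GPIO_TVDAC0, 0xff, &ver, &len, &func);
        if (data) {
                /* func.line, func.log[0]/log[1] and func.param describe how
                 * the pin is wired and which polarity means "on" */
        }
}
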
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
#include <core/device.h>
#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
}
static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+ /* This appears to be the behaviour of the VBIOS parser, and *is*
+ * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
+ * avoid fucking up the memory controller (somehow) by reading it
+ * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+ *
+ * Preserving the non-caching behaviour on earlier chipsets just
+ * in case *not* re-reading the strap causes similar breakage.
+ */
+ if (!init->ramcfg || init->bios->version.major < 0x70)
+ init->ramcfg = init_rd32(init, 0x101000);
+ return (init->ramcfg & 0x00000003c) >> 2;
+}
+
+static u8
init_ram_restrict(struct nvbios_init *init)
{
- u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+ u8 strap = init_ram_restrict_strap(init);
u16 table = init_ram_restrict_table(init);
if (table)
return nv_ro08(init->bios, table + strap);
@@ -743,9 +760,10 @@ static void
init_dp_condition(struct nvbios_init *init)
{
struct nouveau_bios *bios = init->bios;
+ struct nvbios_dpout info;
u8 cond = nv_ro08(bios, init->offset + 1);
u8 unkn = nv_ro08(bios, init->offset + 2);
- u8 ver, len;
+ u8 ver, hdr, cnt, len;
u16 data;
trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +777,12 @@ init_dp_condition(struct nvbios_init *init)
case 1:
case 2:
if ( init->outp &&
- (data = dp_outp_match(bios, init->outp, &ver, &len))) {
- if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
- init_exec_set(init, false);
- if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+ (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+ (init->outp->or << 0) |
+ (init->outp->sorconf.link << 6),
+ &ver, &hdr, &cnt, &len, &info)))
+ {
+ if (!(info.flags & cond))
init_exec_set(init, false);
break;
}
@@ -1514,7 +1534,6 @@ init_io(struct nvbios_init *init)
mdelay(10);
init_wr32(init, 0x614100, 0x10000018);
init_wr32(init, 0x614900, 0x10000018);
- return;
}
value = init_rdport(init, port) & mask;
@@ -1778,7 +1797,7 @@ init_gpio(struct nvbios_init *init)
init->offset += 1;
if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
/**
@@ -1992,6 +2011,47 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ struct nouveau_gpio *gpio = nouveau_gpio(bios);
+ struct dcb_gpio_func func;
+ u8 count = nv_ro08(bios, init->offset + 1);
+ u8 idx = 0, ver, len;
+ u16 data, i;
+
+ trace("GPIO_NE\t");
+ init->offset += 2;
+
+ for (i = init->offset; i < init->offset + count; i++)
+ cont("0x%02x ", nv_ro08(bios, i));
+ cont("\n");
+
+ while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+ if (func.func != DCB_GPIO_UNUSED) {
+ for (i = init->offset; i < init->offset + count; i++) {
+ if (func.func == nv_ro08(bios, i))
+ break;
+ }
+
+ trace("\tFUNC[0x%02x]", func.func);
+ if (i == (init->offset + count)) {
+ cont(" *");
+ if (init_exec(init) && gpio && gpio->reset)
+ gpio->reset(gpio, func.func);
+ }
+ cont("\n");
+ }
+ }
+
+ init->offset += count;
+}
+
static struct nvbios_init_opcode {
void (*exec)(struct nvbios_init *);
} init_opcode[] = {
@@ -2056,6 +2116,7 @@ static struct nvbios_init_opcode {
[0x98] = { init_auxch },
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
+ [0xa9] = { init_gpio_ne },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
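
For readers following the new 0xa9 handler above, this is the opcode layout implied by init_gpio_ne(); it is inferred from the parser, not from VBIOS documentation, so treat it as an assumption:

/*
 * INIT_GPIO_NE (0xa9), as parsed by init_gpio_ne():
 *
 *   offset + 0      opcode (0xa9)
 *   offset + 1      count: number of GPIO function ids that follow
 *   offset + 2 ...  count bytes, each a DCB GPIO function id to skip
 *
 * Every GPIO function present in the DCB GPIO table that is *not* listed
 * (and is not DCB_GPIO_UNUSED) gets reset via gpio->reset(gpio, func.func),
 * so "NE" reads as "not equal", i.e. an exclusion list.
 */
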
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
-#include <core/device.h>
#include <core/option.h>
#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
static const u64 disable_map[] = {
[NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
[NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
[NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
[NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
[NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
[NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
[NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
[NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
[NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
/* determine frequency of timing crystal */
if ( device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset <= 0x25))
+ (device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
}
static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
return nv_rd08(object->engine, addr);
}
static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
return nv_rd16(object->engine, addr);
}
static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object->engine, addr);
}
static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
nv_wr08(object->engine, addr, data);
}
static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
nv_wr16(object->engine, addr, data);
}
static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
nv_wr32(object->engine, addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
- device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x86:
device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x92:
device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
break;
case 0x94:
device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x96:
device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0x98:
device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa0:
device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
break;
case 0xaa:
device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xac:
device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
break;
case 0xa3:
device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa5:
device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xa8:
device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xaf:
device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc4:
device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc3:
device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xce:
device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xcf:
device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc1:
device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xc8:
device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
break;
case 0xd9:
device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
- device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
- device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
- device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..03a652876e73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
#include <engine/graph.h>
#include <engine/disp.h>
#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>

int
nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
case 0xe7:
device->cname = "GK107";
@@ -92,13 +98,44 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
- device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
* Authors: Ben Skeggs
*/
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/vga.h>
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
static int
nv50_devinit_init(struct nouveau_object *object)
{
+ struct nouveau_bios *bios = nouveau_bios(object);
struct nv50_devinit_priv *priv = (void *)object;
+ struct nvbios_outp info;
+ struct dcb_output outp;
+ u8 ver = 0xff, hdr, cnt, len;
+ int ret, i = 0;

if (!priv->base.post) {
if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
}
}
- return nouveau_devinit_init(&priv->base);
+ ret = nouveau_devinit_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* if we ran the init tables, execute first script pointer for each
+ * display table output entry that has a matching dcb entry.
+ */
+ while (priv->base.post && ver) {
+ u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+ if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+ struct nvbios_init init = {
+ .subdev = nv_subdev(priv),
+ .bios = bios,
+ .offset = info.script[0],
+ .outp = &outp,
+ .crtc = -1,
+ .execute = 1,
+ };
+
+ nvbios_exec(&init);
+ }
+ };
+
+ return 0;
}
static int
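
Note on the hunk above: once the VBIOS init tables have been executed, the driver now walks the display output table and, for every entry that also has a matching DCB entry, runs that output's first init script. The standalone sketch below models only that control flow; the names outp_entry, parse_outp, match_dcb and run_script are illustrative stand-ins, not nouveau APIs (the real code uses nvbios_outp_parse, dcb_outp_match and nvbios_exec as shown in the diff).

#include <stdbool.h>
#include <stdio.h>

struct outp_entry { int type; int mask; int script0; };

static bool parse_outp(int idx, int *ver, struct outp_entry *e)
{
	/* pretend the table has two entries */
	static const struct outp_entry table[] = {
		{ .type = 1, .mask = 0x1, .script0 = 0x0100 },
		{ .type = 2, .mask = 0x2, .script0 = 0x0200 },
	};
	if (idx >= 2) {
		*ver = 0;	/* parser signals end-of-table by zeroing the version */
		return false;
	}
	*ver = 0x20;
	*e = table[idx];
	return true;
}

static bool match_dcb(const struct outp_entry *e)
{
	return e->mask != 0;	/* stand-in for dcb_outp_match() */
}

static void run_script(int offset)
{
	printf("executing init script at 0x%04x\n", offset);
}

int main(void)
{
	int ver = 0xff, i = 0;
	struct outp_entry e;

	/* same loop shape as the added nv50_devinit_init() code */
	while (ver) {
		if (parse_outp(i++, &ver, &e) && match_dcb(&e))
			run_script(e.script0);
	}
	return 0;
}
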
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d6d16007ec1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
}
int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
{
- int ret, i;
+ static const char *name[] = {
+ [NV_MEM_TYPE_UNKNOWN] = "unknown",
+ [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+ [NV_MEM_TYPE_SGRAM ] = "SGRAM",
+ [NV_MEM_TYPE_SDRAM ] = "SDRAM",
+ [NV_MEM_TYPE_DDR1 ] = "DDR1",
+ [NV_MEM_TYPE_DDR2 ] = "DDR2",
+ [NV_MEM_TYPE_DDR3 ] = "DDR3",
+ [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
+ [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
+ [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
+ [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
+ };
+ int ret, tags;
- ret = nouveau_subdev_init(&pfb->base);
- if (ret)
- return ret;
+ tags = pfb->ram.init(pfb);
+ if (tags < 0 || !pfb->ram.size) {
+ nv_fatal(pfb, "error detecting memory configuration!!\n");
+ return (tags < 0) ? tags : -ERANGE;
+ }
- for (i = 0; i < pfb->tile.regions; i++)
- pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+ if (!nouveau_mm_initialised(&pfb->vram)) {
+ ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+ if (ret)
+ return ret;
+ }
- return 0;
-}
+ if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+ ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+ if (ret)
+ return ret;
+ }
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
- struct nouveau_fb *pfb = (void *)object;
- return nouveau_fb_init(pfb);
+ nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+ nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+ nv_info(pfb, " ZCOMP: %d tags\n", tags);
+ return 0;
}
void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
for (i = 0; i < pfb->tile.regions; i++)
pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
- if (pfb->tags.block_size)
- nouveau_mm_fini(&pfb->tags);
-
- if (pfb->vram.block_size)
- nouveau_mm_fini(&pfb->vram);
+ nouveau_mm_fini(&pfb->tags);
+ nouveau_mm_fini(&pfb->vram);
nouveau_subdev_destroy(&pfb->base);
}
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
struct nouveau_fb *pfb = (void *)object;
nouveau_fb_destroy(pfb);
}
-
int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
{
- static const char *name[] = {
- [NV_MEM_TYPE_UNKNOWN] = "unknown",
- [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
- [NV_MEM_TYPE_SGRAM ] = "SGRAM",
- [NV_MEM_TYPE_SDRAM ] = "SDRAM",
- [NV_MEM_TYPE_DDR1 ] = "DDR1",
- [NV_MEM_TYPE_DDR2 ] = "DDR2",
- [NV_MEM_TYPE_DDR3 ] = "DDR3",
- [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
- [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
- [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
- [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
- };
+ int ret, i;
- if (pfb->ram.size == 0) {
- nv_fatal(pfb, "no vram detected!!\n");
- return -ERANGE;
- }
+ ret = nouveau_subdev_init(&pfb->base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pfb->tile.regions; i++)
+ pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
- nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
- nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
return 0;
}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = (void *)object;
+ return nouveau_fb_init(pfb);
+}
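
The base.c rework above splits framebuffer bring-up in two: the new nouveau_fb_preinit() calls a per-chipset pfb->ram.init() hook to detect memory type, size and the number of compression tags, sizes the vram and tags allocators, and logs the configuration, while nouveau_fb_init() is reduced to subdev init plus tile programming. The self-contained sketch below models only that ordering and error policy; fb_dev, detect_ram and fb_preinit are made-up names, not nouveau's.

#include <errno.h>
#include <stdio.h>

struct fb_dev {
	unsigned long long ram_size;		/* bytes, filled in by ram_init */
	int (*ram_init)(struct fb_dev *);	/* returns tag count or -errno */
};

static int detect_ram(struct fb_dev *fb)
{
	fb->ram_size = 64ULL << 20;		/* pretend 64 MiB were detected */
	return 8192;				/* pretend 8192 compression tags */
}

static int fb_preinit(struct fb_dev *fb)
{
	int tags = fb->ram_init(fb);

	/* same error policy as nouveau_fb_preinit() in the hunk above */
	if (tags < 0 || !fb->ram_size)
		return tags < 0 ? tags : -ERANGE;

	printf("RAM size: %llu MiB, %d tags\n", fb->ram_size >> 20, tags);
	return 0;
}

int main(void)
{
	struct fb_dev fb = { .ram_init = detect_ram };

	return fb_preinit(&fb) ? 1 : 0;
}
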
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+ if (boot0 & 0x00000100) {
+ pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ pfb->ram.size *= 1024 * 1024;
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ pfb->ram.size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ pfb->ram.size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ pfb->ram.size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ pfb->ram.size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ pfb->ram.type = NV_MEM_TYPE_SGRAM;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+ return 0;
+}
+
+static int
nv04_fb_init(struct nouveau_object *object)
{
struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv04_fb_priv *priv;
- u32 boot0;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
- if (boot0 & 0x00000100) {
- priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
- priv->base.ram.size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- priv->base.ram.size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- priv->base.ram.size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- priv->base.ram.size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- priv->base.ram.size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- priv->base.ram.type = NV_MEM_TYPE_SGRAM;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- return nouveau_fb_created(&priv->base);
+ priv->base.ram.init = nv04_fb_vram_init;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
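
To make the decoding above concrete: if NV04_PFB_BOOT_0 reads back with bit 0x100 set and bits 15:12 equal to 0x7, nv04_fb_vram_init() computes (7 * 2 + 2) = 16 and scales it to 16 MiB; if bit 0x100 is clear, the RAM_AMOUNT field selects one of the fixed 4/8/16/32 MiB sizes directly. Bits 5:3 (mask 0x38) at or below 0x10 report SGRAM, anything higher SDRAM. The logic is unchanged from the removed constructor code; only where it runs (via the new ram.init hook, before nouveau_fb_preinit validates the result) differs.
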
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
struct nouveau_fb base;
};
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 cfg0 = nv_rd32(pfb, 0x100200);
+ if (cfg0 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+ else
+ pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+void
nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
tile->pitch = pitch;
}
-static void
+void
nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
}
static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv10_fb_priv *priv;
int ret;
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- if (device->chipset == 0x1a || device->chipset == 0x1f) {
- struct pci_dev *bridge;
- u32 mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- nv_fatal(device, "no bridge device\n");
- return 0;
- }
-
- if (device->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- priv->base.ram.size = mib * 1024 * 1024;
- } else {
- u32 cfg0 = nv_rd32(priv, 0x100200);
- if (cfg0 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- else
- priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
- }
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv10_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv10_fb_tile_init;
priv->base.tile.fini = nv10_fb_tile_fini;
priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct pci_dev *bridge;
+ u32 mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ nv_fatal(pfb, "no bridge device\n");
+ return -ENODEV;
+ }
+
+ if (nv_device(pfb)->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.size = mib * 1024 * 1024;
+ return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv1a_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv1a_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv10_fb_tile_init;
+ priv->base.tile.fini = nv10_fb_tile_fini;
+ priv->base.tile.prog = nv10_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x1a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv1a_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
struct nouveau_fb base;
};
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+ }
+ pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- struct nouveau_device *device = nv_device(pfb);
- int bpp = (flags & 2) ? 32 : 16;
-
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- size /= 256;
if (flags & 4) {
- if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
- /* Enable Z compression */
- tile->zcomp = tile->tag->offset;
- if (device->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= 0x00100000;
- else
- tile->zcomp |= 0x00200000;
- } else {
- tile->zcomp |= 0x80000000;
- if (bpp != 16)
- tile->zcomp |= 0x04000000;
- }
- }
-
+ pfb->tile.comp(pfb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+ else tile->zcomp = 0x04000000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+ tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x08000000;
+#endif
+ }
+}
+
+void
nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
nouveau_mm_free(&pfb->tags, &tile->tag);
}
-static void
+void
nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100240 + (i * 0x10));
nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv20_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
- if (device->chipset >= 0x25)
- ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
- else
- ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
- if (ret)
- return ret;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv20_fb_tile_comp;
priv->base.tile.fini = nv20_fb_tile_fini;
priv->base.tile.prog = nv20_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
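
As a worked example of the tag arithmetic introduced above: for a 1 MiB tile region (size = 0x100000) on a board with two memory partitions, tiles = DIV_ROUND_UP(0x100000, 0x40) = 0x4000 and tags = round_up(0x4000 / 2, 0x40) = 0x2000, so 8192 tags are reserved from pfb->tags and tile->zcomp is programmed with the allocation's offset, the Z16/Z24S8 format bit and, on NV20, the 0x80000000 enable bit (the NV25 variant below encodes the format in bits 20/21 instead and needs no explicit enable).
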
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+ else tile->zcomp = 0x00200000; /* Z24S8 */
+ tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x01000000;
+#endif
+ }
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv25_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv20_fb_tile_init;
+ priv->base.tile.comp = nv25_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = _nouveau_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nouveau_fb_tile *tile)
{
- tile->addr = addr | 1;
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) {
+ tile->addr = (0 << 4);
+ } else {
+ if (pfb->tile.comp) /* z compression */
+ pfb->tile.comp(pfb, i, size, flags, tile);
+ tile->addr = (1 << 4);
+ }
+
+ tile->addr |= 0x00000001; /* enable */
+ tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- tile->addr = 0;
- tile->limit = 0;
- tile->pitch = 0;
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+ else tile->zcomp |= 0x02000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x10000000;
+#endif
+ }
}
static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
return x;
}
-static int
+int
nv30_fb_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nv30_fb_priv *priv;
- u32 pbus1218;
int ret;
ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
- }
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- priv->base.tile.prog = nv10_fb_tile_prog;
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv30_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+ else tile->zcomp |= 0x08000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv35_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv35_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+ struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
+{
+ u32 tiles = DIV_ROUND_UP(size, 0x40);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+ if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+ else tile->zcomp |= 0x20000000; /* Z24S8 */
+ tile->zcomp |= ((tile->tag->offset ) >> 6);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x80000000;
+#endif
+ }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv36_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv20_fb_vram_init;
+ priv->base.tile.regions = 8;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv36_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x36),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv36_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv30_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
struct nouveau_fb base;
};
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
- if ((device->chipset & 0xf0) == 0x60)
- return 1;
-
- return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
{
- nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
- nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
- nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+ u32 pbus1218 = nv_rd32(pfb, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ }
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
- nv_wr32(priv, 0x100800, 0x00000001);
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
}
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+ struct nouveau_fb_tile *tile)
{
- nv_wr32(priv, 0x100850, 0x80000000);
- nv_wr32(priv, 0x100800, 0x00000001);
+ u32 tiles = DIV_ROUND_UP(size, 0x80);
+ u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
+ if ( (flags & 2) &&
+ !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+ tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
+ tile->zcomp |= ((tile->tag->offset ) >> 8);
+ tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+ tile->zcomp |= 0x40000000;
+#endif
+ }
}
static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
if (ret)
return ret;
- switch (nv_device(priv)->chipset) {
- case 0x40:
- case 0x45:
- nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
- break;
- default:
- if (nv44_graph_class(nv_device(priv)))
- nv44_fb_init_gart(priv);
- else
- nv40_fb_init_gart(priv);
- break;
- }
-
+ nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
return 0;
}
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_device *device = nv_device(parent);
struct nv40_fb_priv *priv;
int ret;
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (device->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(priv, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (device->chipset == 0x49 || device->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(priv, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (device->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(priv, 0x100474);
- if (pfb474 & 0x00000004)
- priv->base.ram.type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- priv->base.ram.type = NV_MEM_TYPE_DDR1;
- } else {
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
priv->base.memtype_valid = nv04_fb_memtype_valid;
- switch (device->chipset) {
- case 0x40:
- case 0x45:
- priv->base.tile.regions = 8;
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- case 0x4c:
- priv->base.tile.regions = 15;
- break;
- default:
- priv->base.tile.regions = 12;
- break;
- }
+ priv->base.ram.init = nv40_fb_vram_init;
+ priv->base.tile.regions = 8;
priv->base.tile.init = nv30_fb_tile_init;
- priv->base.tile.fini = nv30_fb_tile_fini;
- if (device->chipset == 0x40)
- priv->base.tile.prog = nv10_fb_tile_prog;
- else
- priv->base.tile.prog = nv40_fb_tile_prog;
-
- return nouveau_fb_created(&priv->base);
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv20_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+ nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+ struct nv41_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv41_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x41),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv41_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+ struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb474 = nv_rd32(pfb, 0x100474);
+ if (pfb474 & 0x00000004)
+ pfb->ram.type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ tile->addr = 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+ nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+ nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+ nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+ nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+ struct nv44_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x100850, 0x80000000);
+ nv_wr32(priv, 0x100800, 0x00000001);
+ return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv44_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv44_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x44),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv44_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+ struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+ u32 flags, struct nouveau_fb_tile *tile)
+{
+ /* for performance, select alternate bank offset for zeta */
+ if (!(flags & 4)) tile->addr = (0 << 3);
+ else tile->addr = (1 << 3);
+
+ tile->addr |= 0x00000001; /* mode = vram */
+ tile->addr |= addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv46_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv44_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x46),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv46_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv47_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv41_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x47),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv47_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+ u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv49_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv49_fb_vram_init;
+ priv->base.tile.regions = 15;
+ priv->base.tile.init = nv30_fb_tile_init;
+ priv->base.tile.comp = nv40_fb_tile_comp;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv41_fb_tile_prog;
+
+ return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x49),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv49_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv41_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+ struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+ pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv4e_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.memtype_valid = nv04_fb_memtype_valid;
+ priv->base.ram.init = nv4e_fb_vram_init;
+ priv->base.tile.regions = 12;
+ priv->base.tile.init = nv46_fb_tile_init;
+ priv->base.tile.fini = nv20_fb_tile_fini;
+ priv->base.tile.prog = nv44_fb_tile_prog;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x4e),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv4e_fb_ctor,
+ .dtor = _nouveau_fb_dtor,
+ .init = nv44_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..487cb8c6c204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
return types[(memtype & 0xff00) >> 8] != 0;
}
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(pfb, 0x100200);
+ r4 = nv_rd32(pfb, 0x100204);
+ rt = nv_rd32(pfb, 0x100250);
+ ru = nv_rd32(pfb, 0x001540);
+ nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != pfb->ram.size) {
+ nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+ (u32)(pfb->ram.size >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_device *device = nv_device(pfb);
+ struct nouveau_bios *bios = nouveau_bios(device);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 size;
+ int ret;
+
+ pfb->ram.size = nv_rd32(pfb, 0x10020c);
+ pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+ ((pfb->ram.size & 0x000000ff) << 32);
+
+ size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ switch (device->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf: /* IGPs, no reordering, no real VRAM */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+ if (ret)
+ return ret;
+
+ pfb->ram.type = NV_MEM_TYPE_STOLEN;
+ pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ break;
+ default:
+ switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+ case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+ pfb->ram.type = NV_MEM_TYPE_DDR3;
+ else
+ pfb->ram.type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+ case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+ case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+ nv50_fb_vram_rblock(pfb) >> 12);
+ if (ret)
+ return ret;
+
+ pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ break;
+ }
+
+ return nv_rd32(pfb, 0x100320);
+}
+
static int
nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
kfree(mem);
}
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru, rblock_size;
-
- r0 = nv_rd32(priv, 0x100200);
- r4 = nv_rd32(priv, 0x100204);
- rt = nv_rd32(priv, 0x100250);
- ru = nv_rd32(priv, 0x001540);
- nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != priv->base.ram.size) {
- nv_warn(priv, "memory controller reports %d MiB VRAM\n",
- (u32)(priv->base.ram.size >> 20));
- }
-
- rblock_size = rowsize;
- if (rt & 1)
- rblock_size *= 3;
-
- nv_debug(priv, "rblock %d bytes\n", rblock_size);
- return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_device *device = nv_device(parent);
- struct nouveau_bios *bios = nouveau_bios(device);
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- struct nv50_fb_priv *priv;
- u32 tags;
- int ret;
-
- ret = nouveau_fb_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- switch (nv_rd32(priv, 0x100714) & 0x00000007) {
- case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
- priv->base.ram.type = NV_MEM_TYPE_DDR3;
- else
- priv->base.ram.type = NV_MEM_TYPE_DDR2;
- break;
- case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
- case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
- case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- priv->base.ram.size = nv_rd32(priv, 0x10020c);
- priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
- ((priv->base.ram.size & 0x000000ff) << 32);
-
- tags = nv_rd32(priv, 0x100320);
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
-
- nv_debug(priv, "%d compression tags\n", tags);
-
- size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- switch (device->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf: /* IGPs, no reordering, no real VRAM */
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
- if (ret)
- return ret;
-
- priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
- priv->base.ram.type = NV_MEM_TYPE_STOLEN;
- break;
- default:
- ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
- nv50_vram_rblock(priv) >> 12);
- if (ret)
- return ret;
-
- priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
- break;
- }
-
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (priv->r100c08_page) {
- priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, priv->r100c08))
- nv_warn(priv, "failed 0x100c08 page map\n");
- } else {
- nv_warn(priv, "failed 0x100c08 page alloc\n");
- }
-
- priv->base.memtype_valid = nv50_fb_memtype_valid;
- priv->base.ram.get = nv50_fb_vram_new;
- priv->base.ram.put = nv50_fb_vram_del;
- return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
-
- if (priv->r100c08_page) {
- pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- }
-
- nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nv50_fb_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_fb_init(&priv->base);
- if (ret)
- return ret;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- switch (device->chipset) {
- case 0x50:
- nv_wr32(priv, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(priv, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(priv, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(priv, 0x100c90, 0x001d07ff);
- break;
- }
-
- return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
- .handle = NV_SUBDEV(FB, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_fb_ctor,
- .dtor = nv50_fb_dtor,
- .init = nv50_fb_init,
- .fini = _nouveau_fb_fini,
- },
-};
-
static const struct nouveau_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
{}
};
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_device *device = nv_device(pfb);
- struct nv50_fb_priv *priv = (void *)pfb;
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv50_fb_priv *priv = (void *)subdev;
const struct nouveau_enum *en, *cl;
u32 trap[6], idx, chan;
u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
}
nv_wr32(priv, 0x100c90, idx | 0x80000000);
- if (!display)
- return;
-
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
else
printk("0x%08x\n", st1);
}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_device *device = nv_device(parent);
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ ret = nouveau_fb_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (priv->r100c08_page) {
+ priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+ nv_warn(priv, "failed 0x100c08 page map\n");
+ } else {
+ nv_warn(priv, "failed 0x100c08 page alloc\n");
+ }
+
+ priv->base.memtype_valid = nv50_fb_memtype_valid;
+ priv->base.ram.init = nv50_fb_vram_init;
+ priv->base.ram.get = nv50_fb_vram_new;
+ priv->base.ram.put = nv50_fb_vram_del;
+ nv_subdev(priv)->intr = nv50_fb_intr;
+ return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nv50_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (device->chipset) {
+ case 0x50:
+ nv_wr32(priv, 0x100c90, 0x000707ff);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(priv, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(priv, 0x100c90, 0x089d1fff);
+ break;
+ default:
+ nv_wr32(priv, 0x100c90, 0x001d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+ .handle = NV_SUBDEV(FB, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fb_ctor,
+ .dtor = nv50_fb_dtor,
+ .init = nv50_fb_init,
+ .fini = _nouveau_fb_fini,
+ },
+};
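
The nv50_fb_vram_rblock() hunk above derives the reorder-block size purely from the 0x100200/0x100204/0x100250/0x001540 memory-controller registers. A minimal user-space sketch of that arithmetic (not part of the patch; the register values below are made up) for readers who want to poke at the bitfield decode:

/* Editorial sketch: the rblock arithmetic from nv50_fb_vram_rblock()
 * replayed with hypothetical register values.  Real values come from the
 * PFB/PMC registers named above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r0 = 0x00000004;   /* assume the second row-bank is present */
	uint32_t r4 = 0x01234000;   /* colbits=4, rowbitsa=11, rowbitsb=10, banks=8 */
	uint32_t rt = 0x00000001;   /* odd -> rblock is tripled */
	uint32_t ru = 0x00030000;   /* two partitions enabled */
	int i, parts = 0;

	for (i = 0; i < 8; i++)
		if (ru & (0x00010000u << i))
			parts++;

	int colbits  = (r4 & 0x0000f000) >> 12;
	int rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	int rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	int banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

	uint64_t rowsize   = (uint64_t)parts * banks * (1ull << colbits) * 8;
	uint64_t predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	uint32_t rblock = (uint32_t)rowsize;
	if (rt & 1)
		rblock *= 3;

	printf("parts=%d rowsize=%llu predicted=%llu rblock=%u\n",
	       parts, (unsigned long long)rowsize,
	       (unsigned long long)predicted, rblock);
	return 0;
}
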
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
}
static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+ struct nouveau_bios *bios = nouveau_bios(pfb);
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 parts = nv_rd32(pfb, 0x022438);
+ u32 pmask = nv_rd32(pfb, 0x022554);
+ u32 bsize = nv_rd32(pfb, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
+
+ nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+ nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ pfb->ram.type = nouveau_fb_bios_memtype(bios);
+ pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+ /* read amount of vram attached to each memory controller */
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+ pfb->ram.size += (u64)psize << 20;
+ }
+ }
+
+ /* if all controllers have the same amount attached, there's no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&pfb->vram, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&pfb->vram);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_mem **pmem)
{
@@ -86,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->memtype = type;
mem->size = size;
- mutex_lock(&mm->mutex);
+ mutex_lock(&pfb->base.mutex);
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
else
ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
if (ret) {
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
pfb->ram.put(pfb, &mem);
return ret;
}
@@ -101,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
}
static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
- struct nouveau_bios *bios = nouveau_bios(priv);
- struct nouveau_fb *pfb = &priv->base;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(priv, 0x022438);
- u32 pmask = nv_rd32(priv, 0x022554);
- u32 bsize = nv_rd32(priv, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
- nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- priv->base.ram.type = nouveau_fb_bios_memtype(bios);
- priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
- priv->base.ram.size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&pfb->vram, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&pfb->vram);
- return ret;
- }
-
- return 0;
-}
-
-static int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.memtype_valid = nvc0_fb_memtype_valid;
+ priv->base.ram.init = nvc0_fb_vram_init;
priv->base.ram.get = nvc0_fb_vram_new;
priv->base.ram.put = nv50_fb_vram_del;
- ret = nvc0_vram_detect(priv);
- if (ret)
- return ret;
-
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page)
return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (pci_dma_mapping_error(device->pdev, priv->r100c10))
return -EFAULT;
- return nouveau_fb_created(&priv->base);
+ return nouveau_fb_preinit(&priv->base);
}
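
The nvc0_fb_vram_init() hunk splits the VRAM address space in two when the memory partitions are not all the same size: the lowest common amount is addressed from 0, and the remainder from 8GiB plus that common size. A standalone sketch of that layout arithmetic, assuming two partitions of 1GiB and 512MiB (hypothetical values, not part of the patch):

/* Editorial sketch: the non-uniform address-space split performed by
 * nvc0_fb_vram_init().  "bsize" is the smallest per-partition amount in
 * MiB; all sizes are in 4KiB pages, matching the nouveau_mm units.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t rsvd_head = (256 * 1024) >> 12;    /* vga memory */
	const uint32_t rsvd_tail = (1024 * 1024) >> 12;   /* vbios etc */
	uint32_t parts = 2;
	uint32_t bsize = 512;                             /* MiB, smallest partition */
	uint64_t total = (1024ull + 512ull) << 20;        /* bytes across both parts */

	/* low region: lowest common amount, addressed from 0 */
	uint32_t low_off = rsvd_head;
	uint32_t low_len = (bsize << 8) * parts;          /* MiB -> 4KiB pages, x parts */

	/* high region: the remainder, addressed from 8GiB + common_size */
	uint64_t high_off = (0x0200000000ull >> 12) + (bsize << 8);
	uint64_t high_len = (total >> 12) - (bsize << 8) - rsvd_tail;

	printf("low:  off=%u len=%u pages\n", low_off, low_len);
	printf("high: off=%llu len=%llu pages\n",
	       (unsigned long long)high_off, (unsigned long long)high_len);
	return 0;
}
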
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf0..9fb0f9b92d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
struct dcb_gpio_func *func)
{
+ struct nouveau_bios *bios = nouveau_bios(gpio);
+ u8 ver, len;
+ u16 data;
+
if (line == 0xff && tag == 0xff)
return -EINVAL;
- if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+ data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+ if (data)
return 0;
/* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
int ret = nouveau_subdev_init(&gpio->base);
if (ret == 0 && gpio->reset) {
if (dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd9..bf13a1200f26 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
};
static void
-nv50_gpio_reset(struct nouveau_gpio *gpio)
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nv50_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
static const u32 regs[] = { 0xe100, 0xe28c };
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
u32 val = (unk1 << 16) | unk0;
u32 reg = regs[line >> 4]; line &= 0x0f;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e0..83e8b8f16e6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
};
static void
-nvd0_gpio_reset(struct nouveau_gpio *gpio)
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nvd0_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000003f);
u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
u8 unk0 = (data & 0x00ff0000) >> 16;
u8 unk1 = (data & 0x1f000000) >> 24;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
udelay(1);
if (!timeout--) {
- AUX_ERR("begin idle timeout 0x%08x", ctrl);
+ AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
return -EBUSY;
}
} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
list_add(&iobj->head, &imem->list);
+ mutex_unlock(&imem->base.mutex);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
- if (iobj->head.prev)
- list_del(&iobj->head);
+ struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+ mutex_lock(&subdev->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&subdev->mutex);
+
return nouveau_object_destroy(&iobj->base);
}
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
}
}
+ mutex_unlock(&imem->base.mutex);
+
return 0;
}
@@ -104,17 +114,26 @@ int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
- int i;
+ int i, ret = 0;
if (suspend) {
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- iobj->suspend[i / 4] = nv_ro32(iobj, i);
- } else
- return -ENOMEM;
+ if (!iobj->suspend) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < iobj->size; i += 4)
+ iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
+
+ mutex_unlock(&imem->base.mutex);
+
+ if (ret)
+ return ret;
}
return nouveau_subdev_fini(&imem->base, suspend);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instobj_priv *node = (void *)object;
return nv_ro32(object->engine, node->mem->offset + addr);
}
static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instobj_priv *node = (void *)object;
nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
}
static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
return nv_wr32(object, 0x700000 + addr, data);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
}
static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
}
static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
{
struct nv50_instmem_priv *priv = (void *)object->engine;
struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
struct nouveau_mc *pmc = nouveau_mc(subdev);
const struct nouveau_mc_intr *map = pmc->intr_map;
struct nouveau_subdev *unit;
- u32 stat;
+ u32 stat, intr;
- stat = nv_rd32(pmc, 0x000100);
+ intr = stat = nv_rd32(pmc, 0x000100);
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(subdev, map->unit);
if (unit && unit->intr)
unit->intr(unit);
- stat &= ~map->stat;
+ intr &= ~map->stat;
}
map++;
}
- if (stat) {
+ if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
}
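
One plausible reading of the stat/intr split in the mc/base.c hunk: with the new FB entries (0x0000d101, 0x0040d101) overlapping bits already claimed by other units, handled bits can no longer be cleared from the value the dispatch loop tests, or later overlapping entries would be skipped; a separate copy tracks what remains unhandled. A toy sketch of that dispatch pattern, with invented masks and unit names (not part of the patch):

/* Editorial sketch: dispatch loop that clears handled bits only from a
 * separate "intr" copy so overlapping masks all get their handler called.
 */
#include <stdio.h>
#include <stdint.h>

struct intr_map { uint32_t mask; const char *unit; };

int main(void)
{
	static const struct intr_map map[] = {
		{ 0x00000100, "FIFO" },
		{ 0x00001000, "GR"   },
		{ 0x0000d101, "FB"   },	/* overlaps the two entries above */
		{ 0, NULL }
	};
	uint32_t stat = 0x00001100;	/* pretend FIFO and GR bits asserted */
	uint32_t intr = stat;
	const struct intr_map *m;

	for (m = map; stat && m->mask; m++) {
		if (stat & m->mask) {
			printf("dispatch to %s\n", m->unit);
			intr &= ~m->mask;	/* clear only the handled copy */
		}
	}
	if (intr)
		printf("unknown intr 0x%08x\n", intr);
	return 0;
}
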
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0000d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
{ 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0040d101, NVDEV_SUBDEV_FB },
{},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303a..e286e132c7e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nv_ro08(bios, data))) {
- nv_info(mxm, "no VBIOS data, nothing to do\n");
+ nv_debug(mxm, "no VBIOS data, nothing to do\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_length = (offset + length) - mm_offset;
int ret;
- vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
return ret;
}
+ *pvm = vm;
+
return 0;
}
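
The vm/base.c hunk moves the *pvm assignment past the last point of failure, so a caller never ends up holding a pointer to an object whose setup failed. A minimal sketch of that publish-on-success pattern, with invented types and helpers (not part of the patch):

/* Editorial sketch: only write the out-pointer once every initialisation
 * step has succeeded; on error the caller's pointer is left untouched.
 */
#include <errno.h>
#include <stdlib.h>

struct thing { int dummy; };

static int thing_setup(struct thing *t) { t->dummy = 1; return 0; }

int thing_create(struct thing **pthing)
{
	struct thing *t = calloc(1, sizeof(*t));
	int ret;

	if (!t)
		return -ENOMEM;

	ret = thing_setup(t);
	if (ret) {
		free(t);
		return ret;	/* *pthing was never written */
	}

	*pthing = t;		/* publish only on full success */
	return 0;
}
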
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cbf1fc60a386..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, -ENODEV);
client = nv_client(abi16->client);
-
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return nouveau_abi16_put(abi16, -EINVAL);
-
device = nv_device(abi16->device);
imem = nouveau_instmem(device);
pfb = nouveau_fb(device);
+ /* hack to allow channel engine type specification on kepler */
+ if (device->card_type >= NV_E0) {
+ if (init->fb_ctxdma_handle != ~0)
+ init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ else
+ init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+ /* allow flips to be executed if this is a graphics channel */
+ init->tt_ctxdma_handle = 0;
+ if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ init->tt_ctxdma_handle = 1;
+ }
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return nouveau_abi16_put(abi16, -EINVAL);
+
/* allocate "abi16 channel" data and make up a handle for it */
init->channel = ffsll(~abi16->handles);
if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
- if (device->card_type >= NV_E0) {
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
- init->tt_ctxdma_handle = 0;
- }
-
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret;
if (unlikely(!abi16))
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
acpi_handle rom_handle;
} nouveau_dsm_priv;
+bool nouveau_is_optimus(void) {
+ return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void) {
+ return nouveau_dsm_priv.dsm_detected;
+}
+
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
- /* perhaps the _DSM functions are mutually exclusive, but prepare for
- * the future */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
- if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.dsm_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
has_optimus = 1;
}
- if (vga_count == 2 && has_dsm && guid_valid) {
+ /* find the optimus DSM or the old v1 DSM */
+ if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.dsm_detected = true;
+ nouveau_dsm_priv.optimus_detected = true;
ret = true;
- }
-
- if (has_optimus == 1) {
+ } else if (vga_count == 2 && has_dsm && guid_valid) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
- printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
- nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
+
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
+static inline bool nouveau_is_optimus(void) { return false; };
+static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
return 0;
}
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
- if ((hash & 0x000000f0) != (dcb->location << 4))
- return false;
- if ((hash & 0x0000000f) != dcb->type)
- return false;
- if (!(hash & (dcb->or << 16)))
- return false;
-
- switch (dcb->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (hash & 0x00c00000) {
- if (!(hash & (dcb->sorconf.link << 22)))
- return false;
- }
- default:
- return true;
- }
-}
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
- struct dcb_output *dcbent, int crtc)
-{
- /*
- * The display script table is located by the BIT 'U' table.
- *
- * It contains an array of pointers to various tables describing
- * a particular output type. The first 32-bits of the output
- * tables contains similar information to a DCB entry, and is
- * used to decide whether that particular table is suitable for
- * the output you want to access.
- *
- * The "record header length" field here seems to indicate the
- * offset of the first configuration entry in the output tables.
- * This is 10 on most cards I've seen, but 12 has been witnessed
- * on DP cards, and there's another script pointer within the
- * header.
- *
- * offset + 0 ( 8 bits): version
- * offset + 1 ( 8 bits): header length
- * offset + 2 ( 8 bits): record length
- * offset + 3 ( 8 bits): number of records
- * offset + 4 ( 8 bits): record header length
- * offset + 5 (16 bits): pointer to first output script table
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvbios *bios = &drm->vbios;
- uint8_t *table = &bios->data[bios->display.script_table_ptr];
- uint8_t *otable = NULL;
- uint16_t script;
- int i;
-
- if (!bios->display.script_table_ptr) {
- NV_ERROR(drm, "No pointer to output script table\n");
- return 1;
- }
-
- /*
- * Nothing useful has been in any of the pre-2.0 tables I've seen,
- * so until they are, we really don't need to care.
- */
- if (table[0] < 0x20)
- return 1;
-
- if (table[0] != 0x20 && table[0] != 0x21) {
- NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
- table[0]);
- return 1;
- }
-
- /*
- * The output script tables describing a particular output type
- * look as follows:
- *
- * offset + 0 (32 bits): output this table matches (hash of DCB)
- * offset + 4 ( 8 bits): unknown
- * offset + 5 ( 8 bits): number of configurations
- * offset + 6 (16 bits): pointer to some script
- * offset + 8 (16 bits): pointer to some script
- *
- * headerlen == 10
- * offset + 10 : configuration 0
- *
- * headerlen == 12
- * offset + 10 : pointer to some script
- * offset + 12 : configuration 0
- *
- * Each config entry is as follows:
- *
- * offset + 0 (16 bits): unknown, assumed to be a match value
- * offset + 2 (16 bits): pointer to script table (clock set?)
- * offset + 4 (16 bits): pointer to script table (reset?)
- *
- * There doesn't appear to be a count value to say how many
- * entries exist in each script table, instead, a 0 value in
- * the first 16-bit word seems to indicate both the end of the
- * list and the default entry. The second 16-bit word in the
- * script tables is a pointer to the script to execute.
- */
-
- NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
- dcbent->type, dcbent->location, dcbent->or);
- for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
- break;
- }
-
- if (!otable) {
- NV_DEBUG(drm, "failed to match any output table\n");
- return 1;
- }
-
- if (pclk < -2 || pclk > 0) {
- /* Try to find matching script table entry */
- for (i = 0; i < otable[5]; i++) {
- if (ROM16(otable[table[4] + i*6]) == type)
- break;
- }
-
- if (i == otable[5]) {
- NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
- "using first\n",
- type, dcbent->type, dcbent->or);
- i = 0;
- }
- }
-
- if (pclk == 0) {
- script = ROM16(otable[6]);
- if (!script) {
- NV_DEBUG(drm, "output script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -1) {
- script = ROM16(otable[8]);
- if (!script) {
- NV_DEBUG(drm, "output script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk == -2) {
- if (table[4] >= 12)
- script = ROM16(otable[10]);
- else
- script = 0;
- if (!script) {
- NV_DEBUG(drm, "output script 2 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk > 0) {
- script = ROM16(otable[table[4] + i*6 + 2]);
- if (script)
- script = clkcmptable(bios, script, pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 0 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- } else
- if (pclk < 0) {
- script = ROM16(otable[table[4] + i*6 + 4]);
- if (script)
- script = clkcmptable(bios, script, -pclk);
- if (!script) {
- NV_DEBUG(drm, "clock script 1 not found\n");
- return 1;
- }
-
- NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
- nouveau_bios_run_init_table(dev, script, dcbent, crtc);
- }
-
- return 0;
-}
-
-
int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
{
/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
}
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_entry *bitentry)
-{
- /*
- * Parses the pointer to the G80 output script tables
- *
- * Starting at bitentry->offset:
- *
- * offset + 0 (16 bits): output script table pointer
- */
-
- struct nouveau_drm *drm = nouveau_drm(dev);
- uint16_t outputscripttableptr;
-
- if (bitentry->length != 3) {
- NV_ERROR(drm, "Do not understand BIT U table\n");
- return -EINVAL;
- }
-
- outputscripttableptr = ROM16(bios->data[bitentry->offset]);
- bios->display.script_table_ptr = outputscripttableptr;
- return 0;
-}
-
struct bit_table {
const char id;
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
- parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
return 0;
}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- int i, ret = 0;
+ int ret = 0;
/* Reset the BIOS head to 0. */
bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
bios->fp.lvds_init_run = false;
}
- if (nv_device(drm->device)->card_type >= NV_50) {
- for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
- nouveau_bios_run_display_table(dev, 0, 0,
- &bios->dcb.entry[i], -1);
- }
- }
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
} state;
struct {
- struct dcb_output *output;
- int crtc;
- uint16_t script_table_ptr;
- } display;
-
- struct {
uint16_t fptablepointer; /* also used by tmds */
uint16_t fpxlatetableptr;
int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
int nouveau_run_vbios_init(struct drm_device *);
struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
- struct dcb_output *, int crtc);
bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
int head, int pxclk);
int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab6..69d7b1d0b9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+ align >> PAGE_SHIFT, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ interruptible, no_wait_gpu);
if (ret)
return ret;
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+ no_wait_gpu, new_mem);
nouveau_fence_unref(&fence);
return ret;
}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
no_wait_gpu, new_mem);
}
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1098,8 +1094,7 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* CPU copy if we have no accelerated method available */
if (!drm->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
goto out;
}
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
if (nv_device(drm->device)->card_type < NV_50) {
@@ -1279,7 +1276,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = true;
+ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
break;
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
+ return nouveau_bo_validate(nvbo, false, false);
}
static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
}
static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
{
return nouveau_fence_done(sync_obj);
}
static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
return nouveau_fence_wait(sync_obj, lazy, intr);
}
static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
{
return 0;
}
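A minimal usage sketch of the narrowed interface above (illustrative only; pin_for_scanout is an invented name): with no_wait_reserve gone from both ttm_bo_validate() and nouveau_bo_validate(), callers now pass just the interruptible and no_wait_gpu flags.

static int pin_for_scanout(struct nouveau_bo *nvbo)
{
	/* interruptible = false, no_wait_gpu = false */
	return nouveau_bo_validate(nvbo, false, false);
}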
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
+ if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
}
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_fb *pfb = nouveau_fb(device);
struct nouveau_software_chan *swch;
struct nouveau_object *object;
- struct nv_dma_class args;
+ struct nv_dma_class args = {};
int ret, i;
/* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
/* allocate software object class (used for fences on <= nv05, and
* to signal flip completion), bind it to a subchannel.
*/
- if (chan != chan->drm->cechan) {
+ if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
ret = nouveau_object_new(nv_object(client), chan->handle,
NvSw, nouveau_abi16_swclass(chan->drm),
NULL, 0, &object);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d3595b23434a..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev = nv_connector->base.dev;
drm = nouveau_drm(dev);
gpio = nouveau_gpio(drm->device);
- NV_DEBUG(drm, "\n");
if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -128,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- int i;
+ struct nouveau_i2c_port *port = NULL;
+ int i, panel = -ENODEV;
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (panel == 0) {
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+ msleep(300);
+ }
+ }
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_port *port = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -151,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
- return port;
+ break;
}
+
+ port = NULL;
}
- return NULL;
+ /* eDP panel not detected, restore panel power GPIO to previous
+ * state to avoid confusing the SOR for other output types.
+ */
+ if (!port && panel == 0)
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+ return port;
}
static struct nouveau_encoder *
@@ -221,7 +242,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +950,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
int type, ret = 0;
bool dummy;
- NV_DEBUG(drm, "\n");
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index)
@@ -1043,7 +1062,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
/* Init DVI-I specific properties */
if (nv_connector->type == DCB_CONNECTOR_DVI_I)
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
@@ -1051,31 +1070,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_hborder_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->underscan_vborder_property,
0);
}
/* Add hue and saturation options */
if (disp->vibrant_hue_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
90);
if (disp->color_vibrance_property)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
150);
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (nv_device(drm->device)->card_type >= NV_50) {
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
@@ -1088,18 +1107,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
if (disp->dithering_mode) {
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->dithering_mode);
}
if (disp->dithering_depth) {
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->dithering_depth);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
#define __NOUVEAU_CONNECTOR_H__
#include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
struct nouveau_i2c_port;
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
return &crtc->base;
}
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width,
- uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
nv_fb->r_dma = NvEvoVRAM_LP;
switch (fb->depth) {
- case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
- case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
- case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
case 24:
- case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
- case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
default:
NV_ERROR(drm, "unknown depth %d\n", fb->depth);
return -EINVAL;
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* power on internal panel if it's not already. the init tables of
- * some vbios default this to off for some reason, causing the
- * panel to not work after resume
- */
- if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
-
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
@@ -324,7 +315,7 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen == 1) {
+ if (gen >= 1) {
disp->vibrant_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"vibrant hue", 2);
@@ -366,10 +357,7 @@ nouveau_display_create(struct drm_device *dev)
if (nv_device(drm->device)->card_type < NV_50)
ret = nv04_display_create(dev);
else
- if (nv_device(drm->device)->card_type < NV_D0)
ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
if (ret)
goto disp_create_err;
@@ -400,11 +388,12 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
if (disp->dtor)
disp->dtor(dev);
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
nouveau_drm(dev)->display = NULL;
kfree(disp);
}
@@ -659,10 +648,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
- if (nv_device(drm->device)->card_type >= NV_D0)
- ret = nvd0_display_flip_next(crtc, fb, chan, 0);
- else
- ret = nv50_display_flip_next(crtc, fb, chan);
+ ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret) {
mutex_unlock(&chan->cli->mutex);
goto fail_unreserve;
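For quick reference, the raw EVO surface-format values now written inline in nouveau_framebuffer_init() map to framebuffer depth as follows (numbers copied from the hunk above; summary only):

	/* fb->depth -> nv_fb->r_format */
	/*     8     -> 0x1e00 */
	/*    15     -> 0xe900 */
	/*    16     -> 0xe800 */
	/*  24, 32   -> 0xcf00 */
	/*    30     -> 0xd100 */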
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include <core/class.h>
+
#include <subdev/gpio.h>
#include <subdev/i2c.h>
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct bit_entry d;
- u8 *table;
- int i;
-
- if (bit_table(dev, 'd', &d)) {
- NV_ERROR(drm, "BIT 'd' table not found\n");
- return NULL;
- }
-
- if (d.version != 1) {
- NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
- return NULL;
- }
-
- table = ROMPTR(dev, d.data[0]);
- if (!table) {
- NV_ERROR(drm, "displayport table pointer invalid\n");
- return NULL;
- }
-
- switch (table[0]) {
- case 0x20:
- case 0x21:
- case 0x30:
- case 0x40:
- break;
- default:
- NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
- return NULL;
- }
-
- for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
- if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
- return table;
- }
-
- NV_ERROR(drm, "displayport encoder table not found\n");
- return NULL;
-}
-
/******************************************************************************
* link training
*****************************************************************************/
struct dp_state {
struct nouveau_i2c_port *auxch;
- struct dp_train_func *func;
+ struct nouveau_object *core;
struct dcb_output *dcb;
int crtc;
u8 *dpcd;
@@ -97,13 +54,20 @@ static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink[2];
+ u32 data;
NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
/* set desired link configuration on the source */
- dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
- dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+ if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
/* inform the sink of the new configuration */
sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
u8 sink_tp;
NV_DEBUG(drm, "training pattern %d\n", pattern);
- dp->func->train_set(dev, dp->dcb, pattern);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
int i;
for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
- dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
}
return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
}
static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30) {
- if (enable) script = ROM16(entry[12]);
- else script = ROM16(entry[14]);
- } else
- if (table[0] == 0x40) {
- if (enable) script = ROM16(entry[11]);
- else script = ROM16(entry[13]);
- }
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[6]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[5]);
- }
-
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+ NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+ NV94_DISP_SOR_DP_TRAIN_OP_INIT);
}
static void
dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
{
- u16 script = 0x0000;
- u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
- if (table) {
- if (table[0] >= 0x20 && table[0] <= 0x30)
- script = ROM16(entry[8]);
- else
- if (table[0] == 0x40)
- script = ROM16(entry[7]);
- }
+ struct dcb_output *dcb = dp->dcb;
+ const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+ const u32 moff = (dp->crtc << 3) | (link << 2) | or;
- nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+ nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+ NV94_DISP_SOR_DP_TRAIN_OP_FINI);
}
static bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
if (!dp.auxch)
return false;
- dp.func = func;
+ dp.core = core;
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
*/
gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
- /* enable down-spreading, if possible */
- dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
- /* execute pre-train script from vbios */
- dp_link_train_init(dev, &dp);
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
/* start off at highest link rate supported by encoder and display */
while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
void
nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
- struct dp_train_func *func)
+ struct nouveau_object *core)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nv_wraux(auxch, DP_SET_POWER, &status, 1);
if (mode == DRM_MODE_DPMS_ON)
- nouveau_dp_link_train(encoder, datarate, func);
+ nouveau_dp_link_train(encoder, datarate, core);
}
static void
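As a worked example of the method-offset packing used throughout the new DP code above (values hypothetical, layout taken from the expressions in the hunks):

	/* e.g. dcb->or = 0x2, dcb->sorconf.link = 0x1, dp->crtc = 1 */
	or   = ffs(0x2) - 1;              /* = 1 */
	link = !(0x1 & 1);                /* = 0 */
	moff = (1 << 3) | (0 << 2) | 1;   /* = 0x09 */
	/* NV94_DISP_SOR_DP_LNKCTL + moff then addresses that SOR/link/head combination */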
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2ea570a..8b090f1eb51d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
-#include "nouveau_ttm.h"
-
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
@@ -86,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
struct nouveau_cli *cli;
int ret;
+ *pcli = NULL;
ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
- if (ret)
+ if (ret) {
+ if (cli)
+ nouveau_client_destroy(&cli->base);
+ *pcli = NULL;
return ret;
+ }
mutex_init(&cli->mutex);
return 0;
@@ -149,7 +152,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
- arg1 = 0;
+ arg1 = 1;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -191,8 +194,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
nouveau_bo_move_init(drm);
}
-static int __devinit
-nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
+static int nouveau_drm_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
{
struct nouveau_device *device;
struct apertures_struct *aper;
@@ -224,6 +227,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ kfree(aper);
ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
nouveau_config, nouveau_debug, &device);
@@ -395,17 +399,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
}
int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- pm_state.event == PM_EVENT_PRETHAW)
- return 0;
-
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "suspending fbcon...\n");
nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +435,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
goto fail_client;
nouveau_agp_fini(drm);
-
- pci_save_state(pdev);
- if (pm_state.event == PM_EVENT_SUSPEND) {
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
return 0;
fail_client:
@@ -457,24 +449,33 @@ fail_client:
return ret;
}
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_cli *cli;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(drm, "re-enabling device...\n");
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
+ ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
- pci_set_master(pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli;
+
+ NV_INFO(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
@@ -500,6 +501,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
return 0;
}
+int nouveau_pmops_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return nouveau_do_resume(drm_dev);
+}
+
+
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
@@ -652,14 +689,22 @@ nouveau_drm_pci_table[] = {
{}
};
+static const struct dev_pm_ops nouveau_pm_ops = {
+ .suspend = nouveau_pmops_suspend,
+ .resume = nouveau_pmops_resume,
+ .freeze = nouveau_pmops_freeze,
+ .thaw = nouveau_pmops_thaw,
+ .poweroff = nouveau_pmops_freeze,
+ .restore = nouveau_pmops_resume,
+};
+
static struct pci_driver
nouveau_drm_pci_driver = {
.name = "nouveau",
.id_table = nouveau_drm_pci_table,
.probe = nouveau_drm_probe,
.remove = nouveau_drm_remove,
- .suspend = nouveau_drm_suspend,
- .resume = nouveau_drm_resume,
+ .driver.pm = &nouveau_pm_ops,
};
static int __init
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
return nv_device(nouveau_drm(dev)->device);
}
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
/* nouveau_dp.c */
bool nouveau_dp_detect(struct drm_encoder *);
void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
- struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+ struct nouveau_object *);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_bo_validate(nvbo, true, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- if (nv_device(drm->device)->chipset < 0xa3 ||
- nv_device(drm->device)->chipset == 0xaa ||
- nv_device(drm->device)->chipset == 0xac)
- return false;
- return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- if (!hdmi_sor(encoder))
- return 0x616500 + (nv_crtc->index * 0x800);
- return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
- u32 tmp = hdmi_rd32(encoder, reg);
- hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
- return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- u32 or = nv_encoder->or * 0x800;
-
- if (hdmi_sor(encoder))
- nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_connector *nv_connector;
- u32 or = nv_encoder->or * 0x800;
- int i;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid)) {
- nouveau_audio_disconnect(encoder);
- return;
- }
-
- if (hdmi_sor(encoder)) {
- nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
- nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
- }
- }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
- /* calculate checksum for the infoframe */
- u8 sum = 0, i;
- for (i = 0; i < frame[2]; i++)
- sum += frame[i];
- frame[3] = 256 - sum;
-
- /* disable infoframe, and write header */
- hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
- hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
- /* register scans tell me the audio infoframe has only one set of
- * subpack regs, according to tegra (gee nvidia, it'd be nice if we
- * could get those docs too!), the hdmi block pads out the rest of
- * the packet on its own.
- */
- if (ctrl == 0x020)
- frame[2] = 6;
-
- /* write out checksum and data, weird weird 7 byte register pairs */
- for (i = 0; i < frame[2] + 1; i += 7) {
- u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
- u32 *subpack = (u32 *)&frame[3 + i];
- hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
- hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
- }
-
- /* enable the infoframe */
- hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
- const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
- const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
- u8 frame[20];
-
- frame[0x00] = 0x82; /* AVI infoframe */
- frame[0x01] = 0x02; /* version */
- frame[0x02] = 0x0d; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
- frame[0x05] = (C << 6) | (M << 4) | R;
- frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
- frame[0x07] = VIC;
- frame[0x08] = PR;
- frame[0x09] = bar_top & 0xff;
- frame[0x0a] = bar_top >> 8;
- frame[0x0b] = bar_bottom & 0xff;
- frame[0x0c] = bar_bottom >> 8;
- frame[0x0d] = bar_left & 0xff;
- frame[0x0e] = bar_left >> 8;
- frame[0x0f] = bar_right & 0xff;
- frame[0x10] = bar_right >> 8;
- frame[0x11] = 0x00;
- frame[0x12] = 0x00;
- frame[0x13] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
- const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
- u8 frame[12];
-
- frame[0x00] = 0x84; /* Audio infoframe */
- frame[0x01] = 0x01; /* version */
- frame[0x02] = 0x0a; /* length */
- frame[0x03] = 0x00;
- frame[0x04] = (CT << 4) | CC;
- frame[0x05] = (SF << 2) | ceaSS;
- frame[0x06] = FMT;
- frame[0x07] = CA;
- frame[0x08] = (DM_INH << 7) | (LSV << 3);
- frame[0x09] = 0x00;
- frame[0x0a] = 0x00;
- frame[0x0b] = 0x00;
-
- nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
- nouveau_audio_disconnect(encoder);
-
- /* disable audio and avi infoframes */
- hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
- hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
- /* disable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- u32 max_ac_packet, rekey;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!mode || !nv_connector || !nv_connector->edid ||
- !drm_detect_hdmi_monitor(nv_connector->edid)) {
- nouveau_hdmi_disconnect(encoder);
- return;
- }
-
- nouveau_hdmi_video_infoframe(encoder, mode);
- nouveau_hdmi_audio_infoframe(encoder, mode);
-
- hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
- hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
- hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
- nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* value matches nvidia binary driver, and tegra constant */
- rekey = 56;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* enable hdmi */
- hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
- 0x1f000000 | /* unknown */
- max_ac_packet << 16 |
- rekey);
-
- nouveau_audio_mode_set(encoder, mode);
-}
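The deleted helper above computes each infoframe's checksum byte so that, once the result is stored at offset 3, the first frame[2] bytes sum to zero modulo 256. A standalone sketch that mirrors its arithmetic (illustrative only):

static u8 hdmi_infoframe_csum(const u8 *frame)
{
	u8 sum = 0, i;

	/* frame[2] is the payload length byte, as in the removed code */
	for (i = 0; i < frame[2]; i++)
		sum += frame[i];
	return 256 - sum;	/* stored at frame[3] by the caller */
}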
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
nv_subdev(pmc)->intr(nv_subdev(pmc));
-
- if (dev->mode_config.num_crtc) {
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
- }
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 5566172774df..a701ff5ffa5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -698,10 +698,10 @@ static int
nouveau_hwmon_init(struct drm_device *dev)
{
struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
struct device *hwmon_dev;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..b8e05ae38212 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
return ret;
nvbo = *pnvbo;
- /* we restrict allowed domains on nv50+ to only the types
- * that were requested at creation time. not possibly on
- * earlier chips without busting the ABI.
- */
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
@@ -197,6 +193,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
if (nvbo->gem) {
if (nvbo->gem->dev == dev) {
drm_gem_object_reference(nvbo->gem);
+ dma_buf_put(dma_buf);
return nvbo->gem;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- nouveau_drm_resume(pdev);
+ nouveau_pmops_resume(&pdev->dev);
drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
- nouveau_drm_suspend(pdev, pmm);
+ nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
kfree(nv_crtc);
}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
if (ret)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
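The reworked error path above (and the matching nv10_fence.c change later in this diff) keeps cursor/fence buffer setup and teardown symmetric; as a sketch of the ordering, using only the helpers named in the hunks:

	/* setup:    nouveau_bo_new() -> nouveau_bo_pin() -> nouveau_bo_map()          */
	/* unwind:   map failed -> nouveau_bo_unpin(), then nouveau_bo_ref(NULL, ...)  */
	/* teardown: nouveau_bo_unmap() -> nouveau_bo_unpin() -> nouveau_bo_ref(NULL, ...) */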
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
static inline bool is_powersaving_dpms(int mode)
{
- return (mode != DRM_MODE_DPMS_ON);
+ return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
struct nv04_display *disp;
int i, ret;
- NV_DEBUG(drm, "\n");
-
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
void
nv04_display_destroy(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(drm, "\n");
-
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,11 +155,20 @@ nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
int
nv10_fence_create(struct nouveau_drm *drm)
{
@@ -183,8 +192,11 @@ nv10_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
@@ -192,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
break;
}
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
tv_enc->select_subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_mode_property,
tv_enc->tv_norm);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
tv_enc->flicker);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_saturation_property,
tv_enc->saturation);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_hue_property,
tv_enc->hue);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
conf->tv_overscan_property,
tv_enc->overscan);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- NV_DEBUG(drm, "\n");
-
- for (i = 0; i < 256; i++) {
- writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
- }
-
- if (nv_crtc->lut.depth == 30) {
- writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
- writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
- writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
- }
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int index = nv_crtc->index, ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
- NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
- if (blanked) {
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
- if (ret) {
- NV_ERROR(drm, "no space while blanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- } else {
- if (nv_crtc->cursor.visible)
- nv_crtc->cursor.show(nv_crtc, false);
- else
- nv_crtc->cursor.hide(nv_crtc, false);
-
- ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
- if (ret) {
- NV_ERROR(drm, "no space while unblanking crtc\n");
- return ret;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
- OUT_RING(evo, nv_crtc->lut.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF :
- NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
- if (nv_device(drm->device)->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
- }
-
- nv_crtc->fb.blanked = blanked;
- return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- int head = nv_crtc->index, ret;
- u32 mode = 0x00;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
- OUT_RING (evo, mode);
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
- }
-
- return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
- int adj;
- u32 hue, vib;
-
- NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
- nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(drm, "no space while setting color vibrance\n");
- return ret;
- }
-
- adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
- vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
- hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING (evo, (hue << 20) | (vib << 8));
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
- }
-
- return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_connector *connector;
- struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
- /* The safest approach is to find an encoder with the right crtc, that
- * is also linked to a connector. */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
- }
-
- return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_connector *nv_connector;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *umode = &crtc->mode;
- struct drm_display_mode *omode;
- int scaling_mode, ret;
- u32 ctrl = 0, oX, oY;
-
- NV_DEBUG(drm, "\n");
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(drm, "no native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
- } else {
- scaling_mode = nv_connector->scaling_mode;
- }
-
- /* start off at the resolution we programmed the crtc for; this
- * effectively handles NONE/FULL scaling
- */
- if (scaling_mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary; this will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- if (umode->hdisplay != oX || umode->vdisplay != oY ||
- umode->flags & DRM_MODE_FLAG_INTERLACE ||
- umode->flags & DRM_MODE_FLAG_DBLSCAN)
- ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
- ret = RING_SPACE(evo, 5);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- OUT_RING (evo, ctrl);
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING (evo, oY << 16 | oX);
- OUT_RING (evo, oY << 16 | oX);
-
- if (update) {
- nv50_display_flip_stop(crtc);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
- }
-
- return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_clock *clk = nouveau_clock(device);
-
- return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
- NV_DEBUG(drm, "\n");
-
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- drm_crtc_cleanup(&nv_crtc->base);
- kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_bo *cursor = NULL;
- struct drm_gem_object *gem;
- int ret = 0, i;
-
- if (!buffer_handle) {
- nv_crtc->cursor.hide(nv_crtc, true);
- return 0;
- }
-
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
- if (!gem)
- return -ENOENT;
- cursor = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(cursor);
- if (ret)
- goto out;
-
- /* The simple approach will do for now. */
- for (i = 0; i < 64 * 64; i++)
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
- nouveau_bo_unmap(cursor);
-
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
- nv_crtc->cursor.show(nv_crtc, true);
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->cursor.set_pos(nv_crtc, x, y);
- return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- /* We need to know the depth before we upload, but it's possible to
- * get called before a framebuffer is bound. If this is the case,
- * mark the lut values as dirty by setting depth==0, and it'll be
- * uploaded on the first mode_set_base()
- */
- if (!nv_crtc->base.fb) {
- nv_crtc->lut.depth = 0;
- return;
- }
-
- nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
- .save = nv50_crtc_save,
- .restore = nv50_crtc_restore,
- .cursor_set = nv50_crtc_cursor_set,
- .cursor_move = nv50_crtc_cursor_move,
- .gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = nouveau_crtc_page_flip,
- .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_display_flip_stop(crtc);
- drm_vblank_pre_modeset(dev, nv_crtc->index);
- nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- nv50_crtc_blank(nv_crtc, false);
- drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_display_sync(dev);
- nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *passed_fb,
- int x, int y, bool atomic)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
- int ret;
-
- NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
- /* no fb bound */
- if (!atomic && !crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- /* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
- */
- if (atomic) {
- drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
- } else {
- drm_fb = crtc->fb;
- fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
- }
-
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
- nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
- nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
- if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- OUT_RING (evo, fb->r_dma);
- }
-
- ret = RING_SPACE(evo, 12);
- if (ret)
- return ret;
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING (evo, nv_crtc->fb.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
- OUT_RING (evo, fb->r_pitch);
- OUT_RING (evo, fb->r_format);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING (evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING (evo, (y << 16) | x);
-
- if (nv_crtc->lut.depth != fb->base.depth) {
- nv_crtc->lut.depth = fb->base.depth;
- nv50_crtc_lut_load(crtc);
- }
-
- return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 head = nv_crtc->index * 0x400;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- int ret;
-
- /* hw timing description looks like this:
- *
- * <sync> <back porch> <---------display---------> <front porch>
- *        ______
- *       |____________|---------------------------|____________|
- *
- *       ^ synce      ^ blanke                     ^ blanks     ^ active
- *
- * interlaced modes also have 2 additional values pointing at the end
- * and start of the next field's blanking period.
- */
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = RING_SPACE(evo, 18);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0804 + head, 2);
- OUT_RING (evo, 0x00800000 | mode->clock);
- OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_NV04(evo, 0, 0x0810 + head, 6);
- OUT_RING (evo, 0x00000000); /* border colour */
- OUT_RING (evo, (vactive << 16) | hactive);
- OUT_RING (evo, ( vsynce << 16) | hsynce);
- OUT_RING (evo, (vblanke << 16) | hblanke);
- OUT_RING (evo, (vblanks << 16) | hblanks);
- OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_NV04(evo, 0, 0x082c + head, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0900 + head, 1);
- OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
- OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
- OUT_RING (evo, 0x00000000); /* screen position */
- }
-
- nv_crtc->set_dither(nv_crtc, false);
- nv_crtc->set_scale(nv_crtc, false);
- nv_crtc->set_color_vibrance(nv_crtc, false);
-
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
- if (ret)
- return ret;
-
- ret = nv50_display_sync(crtc->dev);
- if (ret)
- return ret;
-
- return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- int ret;
-
- nv50_display_flip_stop(crtc);
- ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
- if (ret)
- return ret;
-
- return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
- .dpms = nv50_crtc_dpms,
- .prepare = nv50_crtc_prepare,
- .commit = nv50_crtc_commit,
- .mode_fixup = nv50_crtc_mode_fixup,
- .mode_set = nv50_crtc_mode_set,
- .mode_set_base = nv50_crtc_mode_set_base,
- .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
- .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_crtc *nv_crtc = NULL;
- int ret, i;
-
- NV_DEBUG(drm, "\n");
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
- nv_crtc->color_vibrance = 50;
- nv_crtc->vibrant_hue = 0;
- nv_crtc->lut.depth = 0;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
- ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
-
- ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- nv50_cursor_init(nv_crtc);
-out:
- if (ret)
- nv50_crtc_destroy(&nv_crtc->base);
- return ret;
-}
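The LUT and cursor buffers in nv50_crtc_create() above follow the same allocate/pin/map sequence, dropping the buffer reference if any step fails. A minimal sketch of that sequence as a standalone helper (hypothetical; not part of this commit, using only the nouveau_bo_* calls visible in the deleted code):

    static int
    nv50_crtc_alloc_bo(struct drm_device *dev, u32 size, struct nouveau_bo **pnvbo)
    {
            int ret;

            /* allocate a VRAM buffer object, then pin and map it */
            ret = nouveau_bo_new(dev, size, 0x100, TTM_PL_FLAG_VRAM,
                                 0, 0x0000, NULL, pnvbo);
            if (ret)
                    return ret;

            ret = nouveau_bo_pin(*pnvbo, TTM_PL_FLAG_VRAM);
            if (ret == 0)
                    ret = nouveau_bo_map(*pnvbo);
            if (ret)
                    nouveau_bo_ref(NULL, pnvbo);    /* drop the ref on failure */
            return ret;
    }

With such a helper, both call sites in nv50_crtc_create() would reduce to a single check on the returned value.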
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while unhiding cursor\n");
- return;
- }
-
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NvEvoVRAM);
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
- OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = true;
- }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG(drm, "\n");
-
- if (update && !nv_crtc->cursor.visible)
- return;
-
- ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
- if (ret) {
- NV_ERROR(drm, "no space while hiding cursor\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
- OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
- OUT_RING(evo, 0);
- if (nv_device(drm->device)->chipset != 0x50) {
- BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
- OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
- }
-
- if (update) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- nv_crtc->cursor.visible = false;
- }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
- struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
- nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
- ((y & 0xFFFF) << 16) | (x & 0xFFFF));
- /* Needed to make the cursor move. */
- nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
- if (offset == nv_crtc->cursor.offset)
- return;
-
- nv_crtc->cursor.offset = offset;
- if (nv_crtc->cursor.visible) {
- nv_crtc->cursor.visible = false;
- nv_crtc->cursor.show(nv_crtc, true);
- }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
- nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
- nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
- nv_crtc->cursor.hide = nv50_cursor_hide;
- nv_crtc->cursor.show = nv50_cursor_show;
- return 0;
-}
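The cursor show/hide paths above, like the rest of the deleted nv50 code, drive the EVO channel with one fixed sequence: reserve space with RING_SPACE(), emit a method header with BEGIN_NV04(), write its data with OUT_RING(), and close an update with NV50_EVO_UPDATE plus FIRE_RING(). A minimal, illustrative push using only those calls (a sketch, not code from this commit):

    /* sketch: the common "commit an update" push used above */
    ret = RING_SPACE(evo, 2);                       /* reserve two words */
    if (ret == 0) {
            BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); /* method header */
            OUT_RING (evo, 0);                      /* method data */
            FIRE_RING(evo);                         /* submit to hardware */
    }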
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- enum drm_connector_status status = connector_status_disconnected;
- uint32_t dpms_state, load_pattern, load_state;
- int or = nv_encoder->or;
-
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
- dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return status;
- }
-
- /* Use bios provided value if possible. */
- if (drm->vbios.dactestval) {
- load_pattern = drm->vbios.dactestval;
- NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
- load_pattern);
- } else {
- load_pattern = 340;
- NV_DEBUG(drm, "Using default load_pattern of %d\n",
- load_pattern);
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
- NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
- mdelay(45); /* give it some time to process */
- load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
- nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
- if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
- NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
- status = connector_status_connected;
-
- if (status == connector_status_connected)
- NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
- else
- NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
- return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
- return;
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
- if (mode != DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
- switch (mode) {
- case DRM_MODE_DPMS_STANDBY:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- case DRM_MODE_DPMS_OFF:
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
- val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
- break;
- default:
- break;
- }
-
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- uint32_t mode_ctl = 0, mode_ctl2 = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
- nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
- /* Lacking a working tv-out, this is not 100% certain. */
- if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
- mode_ctl |= 0x40;
- else
- if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
- mode_ctl |= 0x100;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
- ret = RING_SPACE(evo, 3);
- if (ret) {
- NV_ERROR(drm, "no space while connecting DAC\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
- OUT_RING(evo, mode_ctl);
- OUT_RING(evo, mode_ctl2);
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
- .dpms = nv50_dac_dpms,
- .save = nv50_dac_save,
- .restore = nv50_dac_restore,
- .mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_disconnect,
- .commit = nv50_dac_commit,
- .mode_set = nv50_dac_mode_set,
- .get_crtc = nv50_dac_crtc_get,
- .detect = nv50_dac_detect,
- .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- if (!encoder)
- return;
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
- .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
-
- drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+ /*
+ * Copyright 2011 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
#include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
#include "nouveau_fence.h"
+#include "nv50_display.h"
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <subdev/timer.h>
+#include <core/class.h>
-static void nv50_display_bh(unsigned long);
-
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
+ (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+ struct nouveau_object *user;
+ u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+ const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+ int ret;
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0)
- return 2;
+ ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+ oclass, data, size, &chan->user);
+ if (ret)
+ return ret;
- return 4;
+ chan->handle = handle;
+ return 0;
}
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 mask = 0;
- int i;
-
- if (device->chipset < 0x90 ||
- device->chipset == 0x92 ||
- device->chipset == 0xa0) {
- for (i = 0; i < 2; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- } else {
- for (i = 0; i < 4; i++)
- mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
- }
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ if (chan->handle)
+ nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
- for (i = 0; i < 3; i++)
- mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
- return mask & 3;
-}
+struct nv50_pioc {
+ struct nv50_chan base;
+};
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
{
- return 0;
+ nv50_chan_destroy(core, &pioc->base);
}
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, struct nv50_pioc *pioc)
{
+ return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
}
-int
-nv50_display_sync(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- int ret;
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
- ret = RING_SPACE(evo, 6);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
+struct nv50_dmac {
+ struct nv50_chan base;
+ dma_addr_t handle;
+ u32 *ptr;
+};
- nv_wo32(disp->ramin, 0x2000, 0x00000000);
- FIRE_RING (evo);
-
- if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
- return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+ if (dmac->ptr) {
+ struct pci_dev *pdev = nv_device(core)->pdev;
+ pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
- return 0;
+ nv50_chan_destroy(core, &dmac->base);
}
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_channel *evo;
- int ret, i;
- u32 val;
-
- NV_DEBUG(drm, "\n");
-
- nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
- /*
- * I think the 0x006101XX range is some kind of main control area
- * that enables things.
- */
- /* CRTC? */
- for (i = 0; i < 2; i++) {
- val = nv_rd32(device, 0x00616100 + (i * 0x800));
- nv_wr32(device, 0x00610190 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616104 + (i * 0x800));
- nv_wr32(device, 0x00610194 + (i * 0x10), val);
- val = nv_rd32(device, 0x00616108 + (i * 0x800));
- nv_wr32(device, 0x00610198 + (i * 0x10), val);
- val = nv_rd32(device, 0x0061610c + (i * 0x800));
- nv_wr32(device, 0x0061019c + (i * 0x10), val);
- }
-
- /* DAC */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061a000 + (i * 0x800));
- nv_wr32(device, 0x006101d0 + (i * 0x04), val);
- }
-
- /* SOR */
- for (i = 0; i < nv50_sor_nr(dev); i++) {
- val = nv_rd32(device, 0x0061c000 + (i * 0x800));
- nv_wr32(device, 0x006101e0 + (i * 0x04), val);
- }
-
- /* EXT */
- for (i = 0; i < 3; i++) {
- val = nv_rd32(device, 0x0061e000 + (i * 0x800));
- nv_wr32(device, 0x006101f0 + (i * 0x04), val);
- }
-
- for (i = 0; i < 3; i++) {
- nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
- NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
- }
-
- /* The precise purpose is unknown; I suspect it has something to do
- * with text mode.
- */
- if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
- nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
- nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
- if (!nv_wait(device, 0x006194e8, 2, 0)) {
- NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
- NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
-
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
- NV_ERROR(drm, "timeout: "
- "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
- NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- return -EBUSY;
- }
- }
-
- nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
- ret = nv50_evo_init(dev);
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- evo = nv50_display(dev)->master;
-
- nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
- ret = RING_SPACE(evo, 3);
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
if (ret)
return ret;
- BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING (evo, NvEvoSync);
- return nv50_display_sync(dev);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+ NV50_DMA_CONF0_PART_256,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
}
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- struct drm_crtc *drm_crtc;
- int ret, i;
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- NV_DEBUG(drm, "\n");
+ ret = nouveau_object_new(client, parent, NvEvoFB16,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- nv50_crtc_blank(crtc, true);
- }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- ret = RING_SPACE(evo, 2);
- if (ret == 0) {
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- }
- FIRE_RING(evo);
+ ret = nouveau_object_new(client, parent, NvEvoFB32,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+ NVD0_DMA_CONF0_PAGE_LP,
+ }, sizeof(struct nv_dma_class), &object);
+ return ret;
+}
- /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
- * cleaning up?
- */
- list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
- uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+ void *data, u32 size, u64 syncbuf,
+ struct nv50_dmac *dmac)
+{
+ struct nouveau_fb *pfb = nouveau_fb(core);
+ struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+ struct nouveau_object *object;
+ u32 pushbuf = *(u32 *)data;
+ int ret;
- if (!crtc->base.enabled)
- continue;
+ dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+ &dmac->handle);
+ if (!dmac->ptr)
+ return -ENOMEM;
- nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
- if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
- NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
- "0x%08x\n", mask, mask);
- NV_ERROR(drm, "0x610024 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_INTR_1));
- }
- }
+ ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+ NV_DMA_FROM_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_PCI_US |
+ NV_DMA_ACCESS_RD,
+ .start = dmac->handle + 0x0000,
+ .limit = dmac->handle + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 2; i++) {
- nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
- if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
- NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
- NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
- NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
- nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
- }
- }
+ ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ if (ret)
+ return ret;
- nv50_evo_fini(dev);
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = syncbuf + 0x0000,
+ .limit = syncbuf + 0x0fff,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- for (i = 0; i < 3; i++) {
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
- }
- }
+ ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+ NV_DMA_IN_MEMORY_CLASS,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = 0,
+ .limit = pfb->ram.size - 1,
+ }, sizeof(struct nv_dma_class), &object);
+ if (ret)
+ return ret;
- /* disable interrupts. */
- nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+ if (nv_device(core)->card_type < NV_C0)
+ ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ if (nv_device(core)->card_type < NV_D0)
+ ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+ else
+ ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+ return ret;
}
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+ struct nv50_dmac base;
+};
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+struct nv50_sync {
+ struct nv50_dmac base;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_ovly {
+ struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
+struct nv50_head {
+ struct nouveau_crtc base;
+ struct nv50_curs curs;
+ struct nv50_sync sync;
+ struct nv50_ovly ovly;
+ struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+ struct nouveau_object *core;
+ struct nv50_mast mast;
+
+ u32 modeset;
+
+ struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *ct;
- struct nv50_display *priv;
- int ret, i;
+ return nouveau_display(dev)->priv;
+}
- NV_DEBUG(drm, "\n");
+#define nv50_mast(d) (&nv50_disp(d)->mast)
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = priv;
- nouveau_display(dev)->dtor = nv50_display_destroy;
- nouveau_display(dev)->init = nv50_display_init;
- nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
- /* Create CRTC objects */
- for (i = 0; i < 2; i++) {
- ret = nv50_crtc_create(dev, i);
- if (ret)
- return ret;
- }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+ struct nv50_dmac *dmac = evoc;
+ u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
- /* We set up the encoders from the BIOS table */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_output *entry = &dcb->entry[i];
+ if (put + nr >= (PAGE_SIZE / 4) - 8) {
+ dmac->ptr[put] = 0x20000000;
- if (entry->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
- entry->type, ffs(entry->or) - 1);
- continue;
+ nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+ if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ NV_ERROR(dmac->base.user, "channel stalled\n");
+ return NULL;
}
- connector = nouveau_connector_create(dev, entry->connector);
- if (IS_ERR(connector))
- continue;
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nv50_sor_create(connector, entry);
- break;
- case DCB_OUTPUT_ANALOG:
- nv50_dac_create(connector, entry);
- break;
- default:
- NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
- continue;
- }
+ put = 0;
}
- list_for_each_entry_safe(connector, ct,
- &dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
- }
+ return dmac->ptr + put;
+}
- tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+ struct nv50_dmac *dmac = evoc;
+ nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
- ret = nv50_evo_create(dev);
- if (ret) {
- nv50_display_destroy(dev);
- return ret;
- }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
- return 0;
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
}
-void
-nv50_display_destroy(struct drm_device *dev)
+static int
+evo_sync(struct drm_device *dev)
{
- struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ u32 *push = evo_wait(mast, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ return 0;
+ }
- nv50_evo_destroy(dev);
- kfree(disp);
+ return -EBUSY;
}
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
- return nv50_display(dev)->crtc[crtc].sem.bo;
+ return nv50_disp(dev)->sync;
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_display *disp = nv50_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
- int ret;
-
- ret = RING_SPACE(evo, 8);
- if (ret) {
- WARN_ON(1);
- return;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
+
+ push = evo_wait(sync, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
}
-
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0094, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan)
+ struct nouveau_channel *chan, u32 swap_interval)
{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
- struct nouveau_channel *evo = dispc->sync;
+ struct nv50_sync *sync = nv50_sync(crtc);
+ u32 *push;
int ret;
- ret = RING_SPACE(evo, chan ? 25 : 27);
- if (unlikely(ret))
- return ret;
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(sync, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
ret = RING_SPACE(chan, 10);
- if (ret) {
- WIND_RING(evo);
+ if (ret)
return ret;
- }
- if (nv_device(drm->device)->chipset < 0xc0) {
- BEGIN_NV04(chan, 0, 0x0060, 2);
+ if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, dispc->sem.offset);
- BEGIN_NV04(chan, 0, 0x006c, 1);
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_NV04(chan, 0, 0x0064, 2);
- OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, sync->sem.offset);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, 0x0060, 1);
- if (nv_device(drm->device)->chipset < 0x84)
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram);
} else {
u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ offset += sync->sem.offset;
+
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0xf00d0000 | sync->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
+
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
- 0xf00d0000 | dispc->sem.value);
+ nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+ 0xf00d0000 | sync->sem.value);
+ evo_sync(crtc->dev);
}
- /* queue the flip on the crtc's "display sync" channel */
- BEGIN_NV04(evo, 0, 0x0100, 1);
- OUT_RING (evo, 0xfffe0000);
- if (chan) {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000100);
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, sync->sem.offset);
+ evo_data(push, 0xf00d0000 | sync->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ evo_mthd(push, 0x0800, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
} else {
- BEGIN_NV04(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000010);
- /* allows gamma somehow, PDISP will bitch at you if
- * you don't wait for vblank before changing this..
- */
- BEGIN_NV04(evo, 0, 0x00e0, 1);
- OUT_RING (evo, 0x40000000);
- }
- BEGIN_NV04(evo, 0, 0x0088, 4);
- OUT_RING (evo, dispc->sem.offset);
- OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
- OUT_RING (evo, 0x74b1e000);
- OUT_RING (evo, NvEvoSync);
- BEGIN_NV04(evo, 0, 0x00a0, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x00c0, 1);
- OUT_RING (evo, nv_fb->r_dma);
- BEGIN_NV04(evo, 0, 0x0110, 2);
- OUT_RING (evo, 0x00000000);
- OUT_RING (evo, 0x00000000);
- BEGIN_NV04(evo, 0, 0x0800, 5);
- OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
- OUT_RING (evo, 0);
- OUT_RING (evo, (fb->height << 16) | fb->width);
- OUT_RING (evo, nv_fb->r_pitch);
- OUT_RING (evo, nv_fb->r_format);
- BEGIN_NV04(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0x00000000);
- FIRE_RING (evo);
-
- dispc->sem.offset ^= 0x10;
- dispc->sem.value++;
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ }
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, sync);
+
+ sync->sem.offset ^= 0x10;
+ sync->sem.value++;
return 0;
}
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
- u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_connector *nv_connector = NULL;
- struct drm_encoder *encoder;
- struct nvbios *bios = &drm->vbios;
- u32 script = 0, or;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
+
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (nv_encoder->dcb != dcb)
- continue;
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+ evo_data(push, mode);
+ } else
+ if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ } else {
+ evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+ evo_data(push, mode);
+ }
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- break;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
}
- or = ffs(dcb->or) - 1;
- switch (dcb->type) {
- case DCB_OUTPUT_LVDS:
- script = (mc >> 8) & 0xf;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- script |= 0x0100;
- if (bios->fp.if_is_24bit)
- script |= 0x0200;
- } else {
- /* determine number of lvds links */
- if (nv_connector && nv_connector->edid &&
- nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- /* http://www.spwg.org */
- if (((u8 *)nv_connector->edid)[121] == 2)
- script |= 0x0100;
- } else
- if (pxclk >= bios->fp.duallink_transition_clk) {
- script |= 0x0100;
- }
+ return 0;
+}
- /* determine panel depth */
- if (script & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- script |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- script |= 0x0200;
- }
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct nouveau_connector *nv_connector;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
- if (nv_connector && nv_connector->edid &&
- (nv_connector->edid->revision >= 4) &&
- (nv_connector->edid->input & 0x70) >= 0x20)
- script |= 0x0200;
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DCB_OUTPUT_TMDS:
- script = (mc >> 8) & 0xf;
- if (pxclk >= 165000)
- script |= 0x0100;
- break;
- case DCB_OUTPUT_DP:
- script = (mc >> 8) & 0xf;
- break;
- case DCB_OUTPUT_ANALOG:
- script = 0xff;
- break;
default:
- NV_ERROR(drm, "modeset on unsupported output type!\n");
break;
}
- return script;
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ /*XXX: SCALE_CTRL_ACTIVE??? */
+ evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ } else {
+ evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+ }
+
+ evo_kick(push, mast);
+
+ if (update) {
+ nv50_display_flip_stop(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ }
+ }
+
+ return 0;
}
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), mc;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push, hue, vib;
+ int adj;
+
+ adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+ vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+ hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ } else {
+ evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (hue << 20) | (vib << 8));
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
+ return 0;
+}
- /* Determine which CRTC we're dealing with, only 1 ever will be
- * signalled at the same time with the current nouveau code.
- */
- crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
-
- /* Find which encoder was connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ int x, int y, bool update)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push;
+
+ push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, (y << 16) | x);
+ if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nvfb->r_dma);
+ }
+ } else {
+ evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nvfb->nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nvfb->r_pitch);
+ evo_data(push, nvfb->r_format);
+ evo_data(push, nvfb->r_dma);
+ evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, (y << 16) | x);
+ }
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ if (update) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
}
+ evo_kick(push, mast);
+ }
- or = i;
+ nv_crtc->fb.tile_flags = nvfb->r_dma;
+ return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x85000000);
+ evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ }
+ evo_kick(push, mast);
}
+}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push = evo_wait(mast, 16);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x05000000);
+ evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+ evo_kick(push, mast);
+ }
+}
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+ if (show)
+ nv50_crtc_cursor_show(nv_crtc);
+ else
+ nv50_crtc_cursor_hide(nv_crtc);
+
+ if (update) {
+ u32 *push = evo_wait(mast, 2);
+ if (push) {
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ nv50_display_flip_stop(crtc);
+
+ push = evo_wait(mast, 2);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x40000000);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x03000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ u32 *push;
+
+ push = evo_wait(mast, 32);
+ if (push) {
+ if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM_LP);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ } else
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0xc0000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, NvEvoVRAM);
+ } else {
+ evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, nv_crtc->fb.tile_flags);
+ evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+ evo_data(push, 0x83000000);
+ evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, NvEvoVRAM);
+ evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0xffffff00);
}
- or = i;
+ evo_kick(push, mast);
+ }
+
+ nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ nvfb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(nvfb->nvbo);
}
- /* There was no encoder to disable */
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nv50_mast *mast = nv50_mast(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 *push;
+ int ret;
+
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ }
- /* Disable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
- if (dcb->type == type && (dcb->or & (1 << or))) {
- nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
- disp->irq.dcb = dcb;
- goto ack;
+ push = evo_wait(mast, 64);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00800000 | mode->clock);
+ evo_data(push, (ilace == 2) ? 2 : 0);
+ evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ } else {
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+ evo_data(push, 0x00000000);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
+ evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ evo_data(push, 0x00000000); /* ??? */
+ evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ evo_data(push, mode->clock * 1000);
+ evo_data(push, 0x00200000); /* ??? */
+ evo_data(push, mode->clock * 1000);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
}
+
+ evo_kick(push, mast);
}
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ nv50_crtc_set_dither(nv_crtc, false);
+ nv50_crtc_set_scale(nv_crtc, false);
+ nv50_crtc_set_color_vibrance(nv_crtc, false);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ if (!crtc->fb) {
+ NV_DEBUG(drm, "No FB bound\n");
+ return 0;
+ }
+
+ ret = nv50_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic state)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nv50_display_flip_stop(crtc);
+ nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+ return 0;
}
static void
-nv50_display_unk20_handler(struct drm_device *dev)
+nv50_crtc_lut_load(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
- struct dcb_output *dcb;
- int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- dcb = disp->irq.dcb;
- if (dcb) {
- nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
- disp->irq.dcb = NULL;
- }
-
- /* CRTC clock change requested? */
- crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
- if (crtc >= 0) {
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
- pclk &= 0x003fffff;
- if (pclk)
- nv50_crtc_set_clock(dev, crtc, pclk);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
- tmp &= ~0x000000f;
- nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
- }
-
- /* Nothing needs to be done for the encoder */
- crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
- if (crtc < 0)
- goto ack;
- pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
- /* Find which encoder is connected to the CRTC */
- for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
- mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
- NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_ANALOG; break;
- case 1: type = DCB_OUTPUT_TV; break;
- default:
- NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
- goto ack;
+ for (i = 0; i < 256; i++) {
+ u16 r = nv_crtc->lut.r[i] >> 2;
+ u16 g = nv_crtc->lut.g[i] >> 2;
+ u16 b = nv_crtc->lut.b[i] >> 2;
+
+ if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ writew(r + 0x0000, lut + (i * 0x08) + 0);
+ writew(g + 0x0000, lut + (i * 0x08) + 2);
+ writew(b + 0x0000, lut + (i * 0x08) + 4);
+ } else {
+ writew(r + 0x6000, lut + (i * 0x20) + 0);
+ writew(g + 0x6000, lut + (i * 0x20) + 2);
+ writew(b + 0x6000, lut + (i * 0x20) + 4);
+ }
+ }
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool visible = (handle != 0);
+ int i, ret = 0;
+
+ if (visible) {
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(!gem))
+ return -ENOENT;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret == 0) {
+ for (i = 0; i < 64 * 64; i++) {
+ u32 v = nouveau_bo_rd32(nvbo, i);
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+ }
+ nouveau_bo_unmap(nvbo);
}
- or = i;
+ drm_gem_object_unreference_unlocked(gem);
}
- for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
- if (nv_device(drm->device)->chipset < 0x90 ||
- nv_device(drm->device)->chipset == 0x92 ||
- nv_device(drm->device)->chipset == 0xa0)
- mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
- else
- mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
+ if (visible != nv_crtc->cursor.visible) {
+ nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+ nv_crtc->cursor.visible = visible;
+ }
- NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
- if (!(mc & (1 << crtc)))
- continue;
+ return ret;
+}
- switch ((mc & 0x00000f00) >> 8) {
- case 0: type = DCB_OUTPUT_LVDS; break;
- case 1: type = DCB_OUTPUT_TMDS; break;
- case 2: type = DCB_OUTPUT_TMDS; break;
- case 5: type = DCB_OUTPUT_TMDS; break;
- case 8: type = DCB_OUTPUT_DP; break;
- case 9: type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
- goto ack;
- }
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nv50_curs *curs = nv50_curs(crtc);
+ struct nv50_chan *chan = nv50_chan(curs);
+ nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nv_wo32(chan->user, 0x0080, 0x00000000);
+ return 0;
+}
- or = i;
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 end = max(start + size, (u32)256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
}
- if (type == DCB_OUTPUT_ANY)
- goto ack;
+ nv50_crtc_lut_load(crtc);
+}
- /* Enable the encoder */
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)))
- break;
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_disp *disp = nv50_disp(crtc->dev);
+ struct nv50_head *head = nv50_head(crtc);
+ nv50_dmac_destroy(disp->core, &head->ovly.base);
+ nv50_pioc_destroy(disp->core, &head->oimm.base);
+ nv50_dmac_destroy(disp->core, &head->sync.base);
+ nv50_pioc_destroy(disp->core, &head->curs.base);
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ if (nv_crtc->cursor.nvbo)
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_unmap(nv_crtc->lut.nvbo);
+ if (nv_crtc->lut.nvbo)
+ nouveau_bo_unpin(nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_head *head;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ head->base.index = index;
+ head->base.set_dither = nv50_crtc_set_dither;
+ head->base.set_scale = nv50_crtc_set_scale;
+ head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+ head->base.color_vibrance = 50;
+ head->base.vibrant_hue = 0;
+ head->base.cursor.set_offset = nv50_cursor_set_offset;
+ head->base.cursor.set_pos = nv50_cursor_set_pos;
+ for (i = 0; i < 256; i++) {
+ head->base.lut.r[i] = i << 8;
+ head->base.lut.g[i] = i << 8;
+ head->base.lut.b[i] = i << 8;
}
- if (i == drm->vbios.dcb.entries) {
- NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
- goto ack;
+ crtc = &head->base.base;
+ drm_crtc_init(dev, crtc, &nv50_crtc_func);
+ drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.lut.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.lut.nvbo);
}
- script = nv50_display_script_select(dev, dcb, mc, pclk);
- nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
+ if (ret)
+ goto out;
- if (type == DCB_OUTPUT_DP) {
- int link = !(dcb->dpconf.sor.link & 1);
- if ((mc & 0x000f0000) == 0x00020000)
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
- else
- nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+ nv50_crtc_lut_load(crtc);
+
+ /* allocate cursor resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+ &(struct nv50_display_curs_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_curs_class),
+ &head->curs.base);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &head->base.cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(head->base.cursor.nvbo);
+ if (ret)
+ nouveau_bo_unpin(head->base.lut.nvbo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
}
- if (dcb->type != DCB_OUTPUT_ANALOG) {
- tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
- tmp &= ~0x00000f0f;
- if (script & 0x0100)
- tmp |= 0x00000101;
- nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
- } else {
- nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ if (ret)
+ goto out;
+
+ /* allocate page flip / sync resources */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+ &(struct nv50_display_sync_class) {
+ .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+ .head = index,
+ }, sizeof(struct nv50_display_sync_class),
+ disp->sync->bo.offset, &head->sync.base);
+ if (ret)
+ goto out;
+
+ head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+ /* allocate overlay resources */
+ ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+ &(struct nv50_display_oimm_class) {
+ .head = index,
+ }, sizeof(struct nv50_display_oimm_class),
+ &head->oimm.base);
+ if (ret)
+ goto out;
+
+ ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+ &(struct nv50_display_ovly_class) {
+ .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+ .head = index,
+ }, sizeof(struct nv50_display_ovly_class),
+ disp->sync->bo.offset, &head->ovly.base);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv50_crtc_destroy(crtc);
+ return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int or = nv_encoder->or;
+ u32 dpms_ctrl;
+
+ dpms_ctrl = 0x00000000;
+ if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000001;
+ if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+ dpms_ctrl |= 0x00000004;
+
+ nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
}
- disp->irq.dcb = dcb;
- disp->irq.pclk = pclk;
- disp->irq.script = script;
+ return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ u32 *push;
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(mast, 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ u32 syncs = 0x00000000;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000001;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000002;
+
+ evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+ evo_data(push, 1 << nv_crtc->index);
+ evo_data(push, syncs);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs);
+ evo_data(push, magic);
+ evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, 1 << nv_crtc->index);
+ }
+
+ evo_kick(push, mast);
+ }
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
- nv_wr32(device, 0x610030, 0x80000000);
+ nv_encoder->crtc = encoder->crtc;
}
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this, however,
- * the VBIOS scripts on at least one board I have only switch it off on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0400 + (or * 0x080), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0180 + (or * 0x020), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
+ }
+
+ nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ int ret, or = nouveau_encoder(encoder)->or;
+ u32 load = 0;
+
+ ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+ if (ret || load != 7)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+ .dpms = nv50_dac_dpms,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_disconnect,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .disable = nv50_dac_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+ .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+ .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- u32 tmp;
- if (dcb->type != DCB_OUTPUT_TMDS)
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+ nv_connector->base.eld,
+ nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+ nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+ NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+ (max_ac_packet << 16) | rekey);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
- nv_encoder->dcb->or & (1 << or)) {
- tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
- tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
+ nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+ nv50_audio_disconnect(encoder);
+
+ nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_encoder *partner;
+ int or = nv_encoder->or;
+
+ nv_encoder->last_dpms = mode;
+
+ list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+ if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+ continue;
+
+ if (nv_partner != nv_encoder &&
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
+ if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+ return;
break;
}
}
+
+ nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (nv_connector && nv_connector->native_mode) {
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *nv_connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+ }
+
+ return true;
}
static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct dcb_output *dcb = disp->irq.dcb;
- u16 script = disp->irq.script;
- u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ const int or = nv_encoder->or;
+ u32 *push;
+
+ if (nv_encoder->crtc) {
+ nv50_crtc_prepare(nv_encoder->crtc);
+
+ push = evo_wait(mast, 4);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ evo_mthd(push, 0x0600 + (or * 0x40), 1);
+ evo_data(push, 0x00000000);
+ } else {
+ evo_mthd(push, 0x0200 + (or * 0x20), 1);
+ evo_data(push, 0x00000000);
+ }
+
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, mast);
+ }
- NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
- disp->irq.dcb = NULL;
- if (!dcb)
- goto ack;
+ nv50_hdmi_disconnect(encoder);
+ }
- nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
- nv50_display_unk40_dp_set_tmds(dev, dcb);
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ nv_encoder->crtc = NULL;
+}
-ack:
- nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
- nv_wr32(device, 0x610030, 0x80000000);
- nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+ nv50_sor_disconnect(encoder);
+ if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+ evo_sync(encoder->dev);
}
static void
-nv50_display_bh(unsigned long data)
+nv50_sor_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
+{
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nv50_mast *mast = nv50_mast(encoder->dev);
+ struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct nvbios *bios = &drm->vbios;
+ u32 *push, lvds = 0;
+ u8 owner = 1 << nv_crtc->index;
+ u8 proto = 0xf;
+ u8 depth = 0x0;
- for (;;) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (mode->clock < 165000)
+ proto = 0x1;
+ else
+ proto = 0x5;
+ } else {
+ proto = 0x2;
+ }
- NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ nv50_hdmi_mode_set(encoder, mode);
+ break;
+ case DCB_OUTPUT_LVDS:
+ proto = 0x0;
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
- nv50_display_unk10_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
- nv50_display_unk20_handler(dev);
- else
- if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
- nv50_display_unk40_handler(dev);
+ if (bios->fp_no_ddc) {
+ if (bios->fp.dual_link)
+ lvds |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ lvds |= 0x0200;
+ } else {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (((u8 *)nv_connector->edid)[121] == 2)
+ lvds |= 0x0100;
+ } else
+ if (mode->clock >= bios->fp.duallink_transition_clk) {
+ lvds |= 0x0100;
+ }
+
+ if (lvds & 0x0100) {
+ if (bios->fp.strapless_is_24bit & 2)
+ lvds |= 0x0200;
+ } else {
+ if (bios->fp.strapless_is_24bit & 1)
+ lvds |= 0x0200;
+ }
+
+ if (nv_connector->base.display_info.bpc == 8)
+ lvds |= 0x0200;
+ }
+
+ nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ break;
+ case DCB_OUTPUT_DP:
+ if (nv_connector->base.display_info.bpc == 6) {
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ depth = 0x2;
+ } else
+ if (nv_connector->base.display_info.bpc == 8) {
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ depth = 0x5;
+ } else {
+ nv_encoder->dp.datarate = mode->clock * 30 / 8;
+ depth = 0x6;
+ }
+
+ if (nv_encoder->dcb->sorconf.link & 1)
+ proto = 0x8;
else
- break;
+ proto = 0x9;
+ break;
+ default:
+ BUG_ON(1);
+ break;
}
- nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ push = evo_wait(nv50_mast(dev), 8);
+ if (push) {
+ if (nv50_vers(mast) < NVD0_DISP_CLASS) {
+ evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+ evo_data(push, (depth << 16) | (proto << 8) | owner);
+ } else {
+ u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+ u32 syncs = 0x00000001;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ syncs |= 0x00000008;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ syncs |= 0x00000010;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ magic |= 0x00000001;
+
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, syncs | (depth << 6));
+ evo_data(push, magic);
+ evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+ evo_data(push, owner | (proto << 8));
+ }
+
+ evo_kick(push, mast);
+ }
+
+ nv_encoder->crtc = encoder->crtc;
}
static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
- u32 addr, data;
- int chid;
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
- for (chid = 0; chid < 5; chid++) {
- if (!(channels & (1 << chid)))
- continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+ .dpms = nv50_sor_dpms,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .disable = nv50_sor_disconnect,
+ .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+ .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ nv_encoder->dcb = dcbe;
+ nv_encoder->or = ffs(dcbe->or) - 1;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
- addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
- data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
- NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
- "(0x%04x 0x%02x)\n", chid,
- addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+ encoder = to_drm_encoder(nv_encoder);
+ encoder->possible_crtcs = dcbe->heads;
+ encoder->possible_clones = 0;
+ drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
- nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+ u32 *push = evo_wait(nv50_mast(dev), 32);
+ if (push) {
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return evo_sync(dev);
}
+
+ return -EBUSY;
}
void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
+{
+ struct nv50_disp *disp = nv50_disp(dev);
+
+ nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+ nouveau_bo_unmap(disp->sync);
+ if (disp->sync)
+ nouveau_bo_unpin(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
{
+ static const u16 oclass[] = {
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ };
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- uint32_t delayed = 0;
-
- while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
- uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
- uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
- uint32_t clock;
+ struct dcb_table *dcb = &drm->vbios.dcb;
+ struct drm_connector *connector, *tmp;
+ struct nv50_disp *disp;
+ struct dcb_output *dcbe;
+ int crtcs, ret, i;
- NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
- if (!intr0 && !(intr1 & ~delayed))
- break;
+ nouveau_display(dev)->priv = disp;
+ nouveau_display(dev)->dtor = nv50_display_destroy;
+ nouveau_display(dev)->init = nv50_display_init;
+ nouveau_display(dev)->fini = nv50_display_fini;
- if (intr0 & 0x001f0000) {
- nv50_display_error_handler(dev);
- intr0 &= ~0x001f0000;
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret) {
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_unpin(disp->sync);
}
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
- if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
- intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
- }
+ if (ret)
+ goto out;
+
+ /* attempt to allocate a supported evo display class */
+ ret = -ENODEV;
+ for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ 0xd1500000, oclass[i], NULL, 0,
+ &disp->core);
+ }
- clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
- NV50_PDISPLAY_INTR_1_CLK_UNK20 |
- NV50_PDISPLAY_INTR_1_CLK_UNK40));
- if (clock) {
- nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
- tasklet_schedule(&disp->tasklet);
- delayed |= clock;
- intr1 &= ~clock;
- }
+ if (ret)
+ goto out;
+
+ /* allocate master evo channel */
+ ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+ &(struct nv50_display_mast_class) {
+ .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+ }, sizeof(struct nv50_display_mast_class),
+ disp->sync->bo.offset, &disp->mast.base);
+ if (ret)
+ goto out;
+
+ /* create crtc objects to represent the hw heads */
+ if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+ crtcs = nv_rd32(device, 0x022448);
+ else
+ crtcs = 2;
+
+ for (i = 0; i < crtcs; i++) {
+ ret = nv50_crtc_create(dev, disp->core, i);
+ if (ret)
+ goto out;
+ }
+
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+ connector = nouveau_connector_create(dev, dcbe->connector);
+ if (IS_ERR(connector))
+ continue;
- if (intr0) {
- NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+ if (dcbe->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
- if (intr1) {
- NV_ERROR(drm,
- "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
- nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+ switch (dcbe->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ case DCB_OUTPUT_DP:
+ nv50_sor_create(connector, dcbe);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ nv50_dac_create(connector, dcbe);
+ break;
+ default:
+ NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+ dcbe->type, ffs(dcbe->or) - 1);
+ continue;
}
}
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->encoder_ids[0])
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+
+out:
+ if (ret)
+ nv50_display_destroy(dev);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
#include "nouveau_display.h"
#include "nouveau_crtc.h"
#include "nouveau_reg.h"
-#include "nv50_evo.h"
-struct nv50_display_crtc {
- struct nouveau_channel *sync;
- struct {
- struct nouveau_bo *bo;
- u32 offset;
- u16 value;
- } sem;
-};
+int nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
-struct nv50_display {
- struct nouveau_channel *master;
-
- struct nouveau_gpuobj *ramin;
- u32 dmao;
- u32 hash;
-
- struct nv50_display_crtc crtc[2];
-
- struct tasklet_struct tasklet;
- struct {
- struct dcb_output *dcb;
- u16 script;
- u32 pclk;
- } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32 nv50_display_active_crtcs(struct drm_device *);
-
-int nv50_display_sync(struct drm_device *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
- struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
-
-int nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
- u64 size);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **);
-
-int nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *, u32 swap_interval);
struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
- void __iomem *iomem = object->oclass->ofuncs->rd08;
- iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
- struct nouveau_channel *evo = *pevo;
-
- if (!evo)
- return;
- *pevo = NULL;
-
- nouveau_bo_unmap(evo->push.buffer);
- nouveau_bo_ref(NULL, &evo->push.buffer);
-
- if (evo->object)
- iounmap(evo->object->oclass->ofuncs);
-
- kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
- struct drm_device *dev = evo->fence;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- u32 dmao = disp->dmao;
- u32 hash = disp->hash;
- u32 flags5;
-
- if (nv_device(drm->device)->chipset < 0xc0) {
- /* not supported on 0x50, specified in format mthd */
- if (nv_device(drm->device)->chipset == 0x50)
- memtype = 0;
- flags5 = 0x00010000;
- } else {
- if (memtype & 0x80000000)
- flags5 = 0x00000000; /* large pages */
- else
- flags5 = 0x00020000;
- }
-
- nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
- nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
- nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
- upper_32_bits(base));
- nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
- nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
- nv_wo32(disp->ramin, hash + 0x00, handle);
- nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
- evo->handle);
-
- disp->dmao += 0x20;
- disp->hash += 0x08;
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
- struct nouveau_channel **pevo)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret;
-
- evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!evo)
- return -ENOMEM;
- *pevo = evo;
-
- evo->drm = drm;
- evo->handle = chid;
- evo->fence = dev;
- evo->user_get = 4;
- evo->user_put = 0;
-
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
- &evo->push.buffer);
- if (ret == 0)
- ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- ret = nouveau_bo_map(evo->push.buffer);
- if (ret) {
- NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pevo);
- return ret;
- }
-
- evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
- evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
- evo->object->parent = nv_object(disp->ramin)->parent;
- evo->object->engine = nv_object(disp->ramin)->engine;
- evo->object->oclass =
- kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
- evo->object->oclass->ofuncs =
- kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
- evo->object->oclass->ofuncs->rd08 =
- ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
- return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle, ret, i;
- u64 pushbuf = evo->push.buffer->bo.offset;
- u32 tmp;
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x009f0000) == 0x00020000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
- tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
- if ((tmp & 0x003f0000) == 0x00030000)
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
- /* initialise fifo */
- nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
- NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_EVO_DMA_CB_VALID);
- nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
- nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
- nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
- nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
- NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- return -EBUSY;
- }
-
- /* enable error reporting on the channel */
- nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.max &= ~7;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
- if (ret)
- return ret;
-
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
-
- return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
- struct nouveau_drm *drm = evo->drm;
- struct nouveau_device *device = nv_device(drm->device);
- int id = evo->handle;
-
- nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
- nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
- nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
- if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
- NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
- nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
- }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sem.bo) {
- nouveau_bo_unmap(disp->crtc[i].sem.bo);
- nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
- }
- nv50_evo_channel_del(&disp->crtc[i].sync);
- }
- nv50_evo_channel_del(&disp->master);
- nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo;
- int ret, i, j;
-
- /* setup object management on it, any other evo channel will
- * use this also as there's no per-channel support on the
- * hardware
- */
- ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
- if (ret) {
- NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
- goto err;
- }
-
- disp->hash = 0x0000;
- disp->dmao = 0x1000;
-
- /* create primary evo channel, the one we use for modesetting
-	 * purposes
- */
- ret = nv50_evo_channel_new(dev, 0, &disp->master);
- if (ret)
- return ret;
- evo = disp->master;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
- disp->ramin->addr + 0x2000, 0x1000, NULL);
- if (ret)
- goto err;
-
- /* create some default objects for the scanout memtypes we support */
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- /* create "display sync" channels and other structures we need
- * to implement page flipping
- */
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- u64 offset;
-
- ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
- if (ret)
- goto err;
-
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &dispc->sem.bo);
- if (!ret) {
- ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(dispc->sem.bo);
- if (ret)
- nouveau_bo_ref(NULL, &dispc->sem.bo);
- offset = dispc->sem.bo->bo.offset;
- }
-
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
- offset, 4096, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x7a : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
- (nv_device(drm->device)->chipset < 0xc0 ?
- 0x70 : 0xfe),
- 0, pfb->ram.size, NULL);
- if (ret)
- goto err;
-
- for (j = 0; j < 4096; j += 4)
- nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
- dispc->sem.offset = 0;
- }
-
- return 0;
-
-err:
- nv50_evo_destroy(dev);
- return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int ret, i;
-
- ret = nv50_evo_channel_init(disp->master);
- if (ret)
- return ret;
-
- for (i = 0; i < 2; i++) {
- ret = nv50_evo_channel_init(disp->crtc[i].sync);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
- struct nv50_display *disp = nv50_display(dev);
- int i;
-
- for (i = 0; i < 2; i++) {
- if (disp->crtc[i].sync)
- nv50_evo_channel_fini(disp->crtc[i].sync);
- }
-
- if (disp->master)
- nv50_evo_channel_fini(disp->master);
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE 0x00000080
-#define NV50_EVO_UNK84 0x00000084
-#define NV50_EVO_UNK84_NOTIFY 0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
-#define NV50_EVO_DMA_NOTIFY 0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
-#define NV50_EVO_UNK8C 0x0000008C
-
-#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL 0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
-
-#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL 0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
-
-#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800 0x00000800
-#define NV50_EVO_CRTC_CLOCK 0x00000804
-#define NV50_EVO_CRTC_INTERLACE 0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
-#define NV50_EVO_CRTC_UNK0820 0x00000820
-#define NV50_EVO_CRTC_UNK0824 0x00000824
-#define NV50_EVO_CRTC_UNK082C 0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
-#define NV50_EVO_CRTC_FB_SIZE 0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
-#define NV50_EVO_CRTC_FB_DMA 0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
-#define NV50_EVO_CRTC_FB_POS 0x000008c0
-#define NV50_EVO_CRTC_REAL_RES 0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
- ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
-#define NV50_EVO_CRTC_UNK900 0x00000900
-#define NV50_EVO_CRTC_UNK904 0x00000904
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (!ret)
+ if (!ret) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
@@ -119,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nouveau_dev(dev);
- u32 crtc_mask = nv50_display_active_crtcs(dev);
+ u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
struct nouveau_mem_exec_func exec = {
.dev = dev,
.precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
- static const u8 nv50[] = { 16, 8, 0, 24 };
- if (nv_device(drm->device)->chipset == 0xaf)
- return nvaf[lane];
- return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- config = entry + table[4];
- while (config[0] != swing || config[1] != preem) {
- config += table[5];
- if (config >= entry + table[4] + entry[4] * table[5])
- return;
- }
-
- nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
- nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
- u8 *table, *entry, mask;
- int i;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- entry = ROMPTR(dev, entry[10]);
- if (entry) {
- while (link_bw < ROM16(entry[0]) * 10)
- entry += 4;
-
- nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
- }
-
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- if (link_bw > 162000)
- clksor |= 0x00040000;
-
- nv_wr32(device, 0x614300 + (or * 0x800), clksor);
- nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
- mask = 0;
- for (i = 0; i < link_nr; i++)
- mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
- nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
- if (clksor & 0x000c0000)
- *bw = 270000;
- else
- *bw = 162000;
-
- if (dpctrl > 0x00030000) *nr = 4;
- else if (dpctrl > 0x00010000) *nr = 2;
- else *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 symbol = 100000;
- int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
- int TU, VTUi, VTUf, VTUa;
- u64 link_data_rate, link_ratio, unk;
- u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, r;
-
- /* calculate packed data rate for each lane */
- nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
- link_data_rate = (clk * bpp / 8) / link_nr;
-
- /* calculate ratio of packed data rate to link symbol rate */
- link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
-
- for (TU = 64; TU >= 32; TU--) {
- /* calculate average number of valid symbols in each TU */
- u32 tu_valid = link_ratio * TU;
- u32 calc, diff;
-
- /* find a hw representation for the fraction.. */
- VTUi = tu_valid / symbol;
- calc = VTUi * symbol;
- diff = tu_valid - calc;
- if (diff) {
- if (diff >= (symbol / 2)) {
- VTUf = symbol / (symbol - diff);
- if (symbol - (VTUf * diff))
- VTUf++;
-
- if (VTUf <= 15) {
- VTUa = 1;
- calc += symbol - (symbol / VTUf);
- } else {
- VTUa = 0;
- VTUf = 1;
- calc += symbol;
- }
- } else {
- VTUa = 0;
- VTUf = min((int)(symbol / diff), 15);
- calc += symbol / VTUf;
- }
-
- diff = calc - tu_valid;
- } else {
- /* no remainder, but the hw doesn't like the fractional
- * part to be zero. decrement the integer part and
- * have the fraction add a whole symbol back
- */
- VTUa = 0;
- VTUf = 1;
- VTUi--;
- }
-
- if (diff < best_diff) {
- best_diff = diff;
- bestTU = TU;
- bestVTUa = VTUa;
- bestVTUf = VTUf;
- bestVTUi = VTUi;
- if (diff == 0)
- break;
- }
- }
-
- if (!bestTU) {
- NV_ERROR(drm, "DP: unable to find suitable config\n");
- return;
- }
-
- /* XXX close to vbios numbers, but not right */
- unk = (symbol - link_ratio) * bestTU;
- unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
- unk += 6;
-
- nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
- nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
- bestVTUf << 16 |
- bestVTUi << 8 |
- unk);
-}
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- if (!nv_encoder->crtc)
- return;
- nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
- NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
- ret = RING_SPACE(evo, 4);
- if (ret) {
- NV_ERROR(drm, "no space while disconnecting SOR\n");
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING (evo, 0);
- BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
-
- nouveau_hdmi_mode_set(encoder, NULL);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_device *device = nouveau_dev(encoder->dev);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_encoder *enc;
- uint32_t val;
- int or = nv_encoder->or;
-
- NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
- nv_encoder->last_dpms = mode;
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
- if (nvenc == nv_encoder ||
- (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
- nvenc->dcb->type != DCB_OUTPUT_LVDS &&
- nvenc->dcb->type != DCB_OUTPUT_DP) ||
- nvenc->dcb->or != nv_encoder->dcb->or)
- continue;
-
- if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
- return;
- }
-
- /* wait for it to be done */
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
- }
-
- val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
- if (mode == DRM_MODE_DPMS_ON)
- val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
- else
- val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
- nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
- NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
- if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
- NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
- NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
- NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
- nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
- }
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nv50_sor_dp_link_set,
- .train_set = nv50_sor_dp_train_set,
- .train_adj = nv50_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *connector;
-
- NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
- connector = nouveau_encoder_connector_get(nv_encoder);
- if (!connector) {
- NV_ERROR(drm, "Encoder has no connector\n");
- return false;
- }
-
- if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
- connector->native_mode)
- drm_mode_copy(adjusted_mode, connector->native_mode);
-
- return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nv50_sor_disconnect(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- /* avoid race between link training and supervisor intr */
- nv50_display_sync(encoder->dev);
- }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
- struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- uint32_t mode_ctl = 0;
- int ret;
-
- NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
- nv_encoder->or, nv_encoder->dcb->type, crtc->index);
- nv_encoder->crtc = encoder->crtc;
-
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctl = 0x0100;
- else
- mode_ctl = 0x0500;
- } else
- mode_ctl = 0x0200;
-
- nouveau_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_DP:
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- mode_ctl |= 0x00020000;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- mode_ctl |= 0x00050000;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctl |= 0x00000800;
- else
- mode_ctl |= 0x00000900;
- break;
- default:
- break;
- }
-
- if (crtc->index == 1)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
- else
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
- nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(drm, "no space while connecting SOR\n");
- nv_encoder->crtc = NULL;
- return;
- }
- BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
- .dpms = nv50_sor_dpms,
- .save = nv50_sor_save,
- .restore = nv50_sor_restore,
- .mode_fixup = nv50_sor_mode_fixup,
- .prepare = nv50_sor_prepare,
- .commit = nv50_sor_commit,
- .mode_set = nv50_sor_mode_set,
- .get_crtc = nv50_sor_crtc_get,
- .detect = NULL,
- .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
- NV_DEBUG(drm, "\n");
-
- drm_encoder_cleanup(encoder);
-
- kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
- .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
- struct nouveau_encoder *nv_encoder = NULL;
- struct drm_device *dev = connector->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_encoder *encoder;
- int type;
-
- NV_DEBUG(drm, "\n");
-
- switch (entry->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_DP:
- type = DRM_MODE_ENCODER_TMDS;
- break;
- case DCB_OUTPUT_LVDS:
- type = DRM_MODE_ENCODER_LVDS;
- break;
- default:
- return -EINVAL;
- }
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- encoder = to_drm_encoder(nv_encoder);
-
- nv_encoder->dcb = entry;
- nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
- drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
- encoder->possible_crtcs = entry->heads;
- encoder->possible_clones = 0;
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
struct nvc0_fence_chan *fctx = chan->fence;
int i;
- if (nv_device(chan->drm->device)->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
- } else
- if (nv_device(chan->drm->device)->card_type >= NV_50) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
- }
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (nv_device(chan->drm->device)->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(chan->drm->dev, i);
- else
- bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
{
struct nvc0_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
- if (ret == 0)
+ if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
-
-struct evo {
- int idx;
- dma_addr_t handle;
- u32 *ptr;
- struct {
- u32 offset;
- u16 value;
- } sem;
-};
-
-struct nvd0_display {
- struct nouveau_gpuobj *mem;
- struct nouveau_bo *sync;
- struct evo evo[9];
-
- struct tasklet_struct tasklet;
- u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
- return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
- return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- int ret = 0;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
- nv_wr32(device, 0x610704 + (id * 0x10), data);
- nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
- if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
- ret = -EBUSY;
- nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
- return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
- if (put + nr >= (PAGE_SIZE / 4)) {
- disp->evo[id].ptr[put] = 0x20000000;
-
- nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
- if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
- NV_ERROR(drm, "evo %d dma stalled\n", id);
- return NULL;
- }
-
- put = 0;
- }
-
- return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
-
- nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d) *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 flags;
-
- flags = 0x00000000;
- if (ch == EVO_MASTER)
- flags |= 0x01000000;
-
- nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
- nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
- nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
-
- nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
- if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
- NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
- nv_rd32(device, 0x610490 + (ch * 0x0010)));
- return -EBUSY;
- }
-
- nv_mask(device, 0x610090, (1 << ch), (1 << ch));
- nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
- return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
-
- if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
- return;
-
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
- nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
- nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
- nv_mask(device, 0x610090, (1 << ch), 0x00000000);
- nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
- return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 *push = evo_wait(dev, ch, 8);
- if (push) {
- nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000 | EVO_MAST_NTFY);
- evo_mthd(push, 0x0080, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, ch);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
- return 0;
- }
-
- return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
- return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u32 *push;
-
- push = evo_wait(crtc->dev, evo->idx, 8);
- if (push) {
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0094, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
- }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct nouveau_channel *chan, u32 swap_interval)
-{
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nvd0_display *disp = nvd0_display(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
- u64 offset;
- u32 *push;
- int ret;
-
- swap_interval <<= 4;
- if (swap_interval == 0)
- swap_interval |= 0x100;
-
- push = evo_wait(crtc->dev, evo->idx, 128);
- if (unlikely(push == NULL))
- return -EBUSY;
-
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
- ret = RING_SPACE(chan, 10);
- if (ret)
- return ret;
-
-
- offset = nvc0_fence_crtc(chan, nv_crtc->index);
- offset += evo->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | evo->sem.value);
- OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x1001);
- FIRE_RING (chan);
- } else {
- nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
- 0xf00d0000 | evo->sem.value);
- evo_sync(crtc->dev, EVO_MASTER);
- }
-
- /* queue the flip */
- evo_mthd(push, 0x0100, 1);
- evo_data(push, 0xfffe0000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, swap_interval);
- if (!(swap_interval & 0x00000100)) {
- evo_mthd(push, 0x00e0, 1);
- evo_data(push, 0x40000000);
- }
- evo_mthd(push, 0x0088, 4);
- evo_data(push, evo->sem.offset);
- evo_data(push, 0xf00d0000 | evo->sem.value);
- evo_data(push, 0x74b1e000);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x00a0, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
- evo_mthd(push, 0x0110, 2);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0400, 5);
- evo_data(push, nv_fb->nvbo->bo.offset >> 8);
- evo_data(push, 0);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nv_fb->r_pitch);
- evo_data(push, nv_fb->r_format);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, evo->idx);
-
- evo->sem.offset ^= 0x10;
- evo->sem.value++;
- return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_connector *nv_connector;
- struct drm_connector *connector;
- u32 *push, mode = 0x00;
- u32 mthd;
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- connector = &nv_connector->base;
- if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
- if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
- mode = DITHERING_MODE_DYNAMIC2X2;
- } else {
- mode = nv_connector->dithering_mode;
- }
-
- if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
- if (connector->display_info.bpc >= 8)
- mode |= DITHERING_DEPTH_8BPC;
- } else {
- mode |= nv_connector->dithering_depth;
- }
-
- if (nv_device(drm->device)->card_type < NV_E0)
- mthd = 0x0490 + (nv_crtc->index * 0x0300);
- else
- mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, mthd, 1);
- evo_data(push, mode);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, dev, EVO_MASTER);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
- struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
- struct drm_device *dev = nv_crtc->base.dev;
- struct drm_crtc *crtc = &nv_crtc->base;
- struct nouveau_connector *nv_connector;
- int mode = DRM_MODE_SCALE_NONE;
- u32 oX, oY, *push;
-
- /* start off at the resolution we programmed the crtc for, this
- * effectively handles NONE/FULL scaling
- */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode)
- mode = nv_connector->scaling_mode;
-
- if (mode != DRM_MODE_SCALE_NONE)
- omode = nv_connector->native_mode;
- else
- omode = umode;
-
- oX = omode->hdisplay;
- oY = omode->vdisplay;
- if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
- oY *= 2;
-
- /* add overscan compensation if necessary, will keep the aspect
- * ratio the same as the backend mode unless overridden by the
- * user setting both hborder and vborder properties.
- */
- if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
- (nv_connector->underscan == UNDERSCAN_AUTO &&
- nv_connector->edid &&
- drm_detect_hdmi_monitor(nv_connector->edid)))) {
- u32 bX = nv_connector->underscan_hborder;
- u32 bY = nv_connector->underscan_vborder;
- u32 aspect = (oY << 19) / oX;
-
- if (bX) {
- oX -= (bX * 2);
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- } else {
- oX -= (oX >> 4) + 32;
- if (bY) oY -= (bY * 2);
- else oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- }
-
- /* handle CENTER/ASPECT scaling, taking into account the areas
- * removed already for overscan compensation
- */
- switch (mode) {
- case DRM_MODE_SCALE_CENTER:
- oX = min((u32)umode->hdisplay, oX);
- oY = min((u32)umode->vdisplay, oY);
- /* fall-through */
- case DRM_MODE_SCALE_ASPECT:
- if (oY < oX) {
- u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
- oX = ((oY * aspect) + (aspect / 2)) >> 19;
- } else {
- u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
- oY = ((oX * aspect) + (aspect / 2)) >> 19;
- }
- break;
- default:
- break;
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_data(push, (oY << 16) | oX);
- evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
- evo_kick(push, dev, EVO_MASTER);
- if (update) {
- nvd0_display_flip_stop(crtc);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- }
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
- int x, int y, bool update)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
- u32 *push;
-
- push = evo_wait(fb->dev, EVO_MASTER, 16);
- if (push) {
- evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nvfb->nvbo->bo.offset >> 8);
- evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
- evo_data(push, (fb->height << 16) | fb->width);
- evo_data(push, nvfb->r_pitch);
- evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
- evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (y << 16) | x);
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
- evo_kick(push, fb->dev, EVO_MASTER);
- }
-
- nv_crtc->fb.tile_flags = nvfb->r_dma;
- return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
- struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, EVO_MASTER, 16);
- if (push) {
- if (show) {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x85000000);
- evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- } else {
- evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x05000000);
- evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- }
-
- if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- }
-
- evo_kick(push, dev, EVO_MASTER);
- }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- nvd0_display_flip_stop(crtc);
-
- push = evo_wait(crtc->dev, EVO_MASTER, 2);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x03000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 *push;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 32);
- if (push) {
- evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
- evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
- evo_data(push, 0x83000000);
- evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
- evo_data(push, 0x00000000);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
- evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
- int ret;
-
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
- }
-
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
- struct drm_display_mode *mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector;
- u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
- u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
- u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
- u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
- u32 vblan2e = 0, vblan2s = 1;
- u32 *push;
- int ret;
-
- hactive = mode->htotal;
- hsynce = mode->hsync_end - mode->hsync_start - 1;
- hbackp = mode->htotal - mode->hsync_end;
- hblanke = hsynce + hbackp;
- hfrontp = mode->hsync_start - mode->hdisplay;
- hblanks = mode->htotal - hfrontp - 1;
-
- vactive = mode->vtotal * vscan / ilace;
- vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
- vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
- vblanke = vsynce + vbackp;
- vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
- vblanks = vactive - vfrontp - 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vblan2e = vactive + vsynce + vbackp;
- vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
- vactive = (vactive * 2) + 1;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- push = evo_wait(crtc->dev, EVO_MASTER, 64);
- if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
- evo_data(push, 0x00000000);
- evo_data(push, (vactive << 16) | hactive);
- evo_data(push, ( vsynce << 16) | hsynce);
- evo_data(push, (vblanke << 16) | hblanke);
- evo_data(push, (vblanks << 16) | hblanks);
- evo_data(push, (vblan2e << 16) | vblan2s);
- evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
- evo_data(push, 0x00000000); /* ??? */
- evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
- evo_data(push, mode->clock * 1000);
- evo_data(push, 0x00200000); /* ??? */
- evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
- evo_data(push, 0x00000311);
- evo_data(push, 0x00000100);
- evo_kick(push, crtc->dev, EVO_MASTER);
- }
-
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, false);
- nvd0_crtc_set_scale(nv_crtc, false);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct nouveau_drm *drm = nouveau_drm(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
-
- if (!crtc->fb) {
- NV_DEBUG(drm, "No FB bound\n");
- return 0;
- }
-
- ret = nvd0_crtc_swap_fbs(crtc, old_fb);
- if (ret)
- return ret;
-
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
- nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
- return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic state)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nvd0_display_flip_stop(crtc);
- nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
- return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- int i;
-
- for (i = 0; i < 256; i++) {
- writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
- writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
- writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
- }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- bool visible = (handle != 0);
- int i, ret = 0;
-
- if (visible) {
- if (width != 64 || height != 64)
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, handle);
- if (unlikely(!gem))
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_map(nvbo);
- if (ret == 0) {
- for (i = 0; i < 64 * 64; i++) {
- u32 v = nouveau_bo_rd32(nvbo, i);
- nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
- }
- nouveau_bo_unmap(nvbo);
- }
-
- drm_gem_object_unreference_unlocked(gem);
- }
-
- if (visible != nv_crtc->cursor.visible) {
- nvd0_crtc_cursor_show(nv_crtc, visible, true);
- nv_crtc->cursor.visible = visible;
- }
-
- return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ch = EVO_CURS(nv_crtc->index);
-
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
- evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
- return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = max(start + size, (u32)256);
- u32 i;
-
- for (i = start; i < end; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
- nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
- .dpms = nvd0_crtc_dpms,
- .prepare = nvd0_crtc_prepare,
- .commit = nvd0_crtc_commit,
- .mode_fixup = nvd0_crtc_mode_fixup,
- .mode_set = nvd0_crtc_mode_set,
- .mode_set_base = nvd0_crtc_mode_set_base,
- .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
- .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
- .cursor_set = nvd0_crtc_cursor_set,
- .cursor_move = nvd0_crtc_cursor_move,
- .gamma_set = nvd0_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = nvd0_crtc_destroy,
- .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
- struct nouveau_crtc *nv_crtc;
- struct drm_crtc *crtc;
- int ret, i;
-
- nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
- if (!nv_crtc)
- return -ENOMEM;
-
- nv_crtc->index = index;
- nv_crtc->set_dither = nvd0_crtc_set_dither;
- nv_crtc->set_scale = nvd0_crtc_set_scale;
- nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
- nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
-
- crtc = &nv_crtc->base;
- drm_crtc_init(dev, crtc, &nvd0_crtc_func);
- drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
- drm_mode_crtc_set_gamma_size(crtc, 256);
-
- ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- }
-
- if (ret)
- goto out;
-
- ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(nv_crtc->lut.nvbo);
- if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
- }
-
- if (ret)
- goto out;
-
- nvd0_crtc_lut_load(crtc);
-
-out:
- if (ret)
- nvd0_crtc_destroy(crtc);
- return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x80000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
-
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
- nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- u32 syncs, magic, *push;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
- push = evo_wait(encoder->dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
- evo_data(push, 1 << nv_crtc->index);
- evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = NULL;
- }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
- enum drm_connector_status status = connector_status_disconnected;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or;
- u32 load;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
- udelay(9500);
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
- load = nv_rd32(device, 0x61a00c + (or * 0x800));
- if ((load & 0x38000000) == 0x38000000)
- status = connector_status_connected;
-
- nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
- return status;
-}
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
- .dpms = nvd0_dac_dpms,
- .mode_fixup = nvd0_dac_mode_fixup,
- .prepare = nvd0_dac_disconnect,
- .commit = nvd0_dac_commit,
- .mode_set = nvd0_dac_mode_set,
- .disable = nvd0_dac_disconnect,
- .get_crtc = nvd0_display_crtc_get,
- .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
- .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
- drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int i, or = nv_encoder->or * 0x30;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_monitor_audio(nv_connector->edid))
- return;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
- drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
- if (nv_connector->base.eld[0]) {
- u8 *eld = nv_connector->base.eld;
-
- for (i = 0; i < eld[2] * 4; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
- for (i = eld[2] * 4; i < 0x60; i++)
- nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
- nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
- }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int or = nv_encoder->or * 0x30;
-
- nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
- u32 rekey = 56; /* binary driver, and tegra constant */
- u32 max_ac_packet;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (!drm_detect_hdmi_monitor(nv_connector->edid))
- return;
-
- max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
- max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- /* AVI InfoFrame */
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x61671c + head, 0x000d0282);
- nv_wr32(device, 0x616720 + head, 0x0000006f);
- nv_wr32(device, 0x616724 + head, 0x00000000);
- nv_wr32(device, 0x616728 + head, 0x00000000);
- nv_wr32(device, 0x61672c + head, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_wr32(device, 0x6167ac + head, 0x00000010);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
- max_ac_packet << 16);
-
- /* NFI, audio doesn't work without it though.. */
- nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
- nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- int head = nv_crtc->index * 0x800;
-
- nvd0_audio_disconnect(encoder);
-
- nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
- nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
- nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
- static const u8 nvd0[] = { 16, 8, 0, 24 };
- return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
- u8 lane, u8 swing, u8 preem)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
- u32 mask = 0x000000ff << shift;
- u8 *table, *entry, *config = NULL;
-
- switch (swing) {
- case 0: preem += 0; break;
- case 1: preem += 4; break;
- case 2: preem += 7; break;
- case 3: preem += 9; break;
- }
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) {
- config = entry + table[4];
- config += table[5] * preem;
- } else
- if (table[0] == 0x40) {
- config = table + table[1];
- config += table[2] * table[3];
- config += table[6] * preem;
- }
- }
-
- if (!config) {
- NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
- return;
- }
-
- nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
- nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
- nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
- nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
- int link_nr, u32 link_bw, bool enhframe)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
- u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
- u32 script = 0x0000, lane_mask = 0;
- u8 *table, *entry;
- int i;
-
- link_bw /= 27000;
-
- table = nouveau_dp_bios_data(dev, dcb, &entry);
- if (table) {
- if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
- else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
- else entry = NULL;
-
- while (entry) {
- if (entry[0] >= link_bw)
- break;
- entry += 3;
- }
-
- nouveau_bios_run_init_table(dev, script, dcb, crtc);
- }
-
- clksor |= link_bw << 18;
- dpctrl |= ((1 << link_nr) - 1) << 16;
- if (enhframe)
- dpctrl |= 0x00004000;
-
- for (i = 0; i < link_nr; i++)
- lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
- nv_wr32(device, 0x612300 + soff, clksor);
- nv_wr32(device, 0x61c10c + loff, dpctrl);
- nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
- u32 *link_nr, u32 *link_bw)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
- const u32 loff = (or * 0x800) + (link * 0x80);
- const u32 soff = (or * 0x800);
- u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
- u32 clksor = nv_rd32(device, 0x612300 + soff);
-
- if (dpctrl > 0x00030000) *link_nr = 4;
- else if (dpctrl > 0x00010000) *link_nr = 2;
- else *link_nr = 1;
-
- *link_bw = (clksor & 0x007c0000) >> 18;
- *link_bw *= 27000;
-}
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
- u32 crtc, u32 datarate)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- const u32 symbol = 100000;
- const u32 TU = 64;
- u32 link_nr, link_bw;
- u64 ratio, value;
-
- nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
- ratio = datarate;
- ratio *= symbol;
- do_div(ratio, link_nr * link_bw);
-
- value = (symbol - ratio) * TU;
- value *= ratio;
- do_div(value, symbol);
- do_div(value, symbol);
-
- value += 5;
- value |= 0x08000000;
-
- nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
- struct drm_encoder *partner;
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- nv_encoder->last_dpms = mode;
-
- list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
- if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
- continue;
-
- if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->dcb->or) {
- if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
- return;
- break;
- }
- }
-
- dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
- dpms_ctrl |= 0x80000000;
-
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
- nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
- nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- struct dp_train_func func = {
- .link_set = nvd0_sor_dp_link_set,
- .train_set = nvd0_sor_dp_train_set,
- .train_adj = nvd0_sor_dp_train_adj
- };
-
- nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
- }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- if (nv_connector && nv_connector->native_mode) {
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- int id = adjusted_mode->base.id;
- *adjusted_mode = *nv_connector->native_mode;
- adjusted_mode->base.id = id;
- }
- }
-
- return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- u32 *push;
-
- if (nv_encoder->crtc) {
- nvd0_crtc_prepare(nv_encoder->crtc);
-
- push = evo_wait(dev, EVO_MASTER, 4);
- if (push) {
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nvd0_hdmi_disconnect(encoder);
-
- nv_encoder->crtc = NULL;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
- }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
- nvd0_sor_disconnect(encoder);
- if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
- evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
- struct nvbios *bios = &drm->vbios;
- u32 mode_ctrl = (1 << nv_crtc->index);
- u32 syncs, magic, *push;
- u32 or_config;
-
- syncs = 0x00000001;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- syncs |= 0x00000008;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- syncs |= 0x00000010;
-
- magic = 0x31ec6000 | (nv_crtc->index << 25);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- magic |= 0x00000001;
-
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_TMDS:
- if (nv_encoder->dcb->sorconf.link & 1) {
- if (mode->clock < 165000)
- mode_ctrl |= 0x00000100;
- else
- mode_ctrl |= 0x00000500;
- } else {
- mode_ctrl |= 0x00000200;
- }
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (mode->clock >= 165000)
- or_config |= 0x0100;
-
- nvd0_hdmi_mode_set(encoder, mode);
- break;
- case DCB_OUTPUT_LVDS:
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- if (bios->fp_no_ddc) {
- if (bios->fp.dual_link)
- or_config |= 0x0100;
- if (bios->fp.if_is_24bit)
- or_config |= 0x0200;
- } else {
- if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
- if (((u8 *)nv_connector->edid)[121] == 2)
- or_config |= 0x0100;
- } else
- if (mode->clock >= bios->fp.duallink_transition_clk) {
- or_config |= 0x0100;
- }
-
- if (or_config & 0x0100) {
- if (bios->fp.strapless_is_24bit & 2)
- or_config |= 0x0200;
- } else {
- if (bios->fp.strapless_is_24bit & 1)
- or_config |= 0x0200;
- }
-
- if (nv_connector->base.display_info.bpc == 8)
- or_config |= 0x0200;
-
- }
- break;
- case DCB_OUTPUT_DP:
- if (nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000002 << 6;
- } else {
- nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000005 << 6;
- }
-
- if (nv_encoder->dcb->sorconf.link & 1)
- mode_ctrl |= 0x00000800;
- else
- mode_ctrl |= 0x00000900;
-
- or_config = (mode_ctrl & 0x00000f00) >> 8;
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
- nv_encoder->dp.datarate);
- }
-
- push = evo_wait(dev, EVO_MASTER, 8);
- if (push) {
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
- evo_data(push, syncs);
- evo_data(push, magic);
- evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
- evo_data(push, mode_ctrl);
- evo_data(push, or_config);
- evo_kick(push, dev, EVO_MASTER);
- }
-
- nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
- .dpms = nvd0_sor_dpms,
- .mode_fixup = nvd0_sor_mode_fixup,
- .prepare = nvd0_sor_prepare,
- .commit = nvd0_sor_commit,
- .mode_set = nvd0_sor_mode_set,
- .disable = nvd0_sor_disconnect,
- .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
- .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
- struct drm_device *dev = connector->dev;
- struct nouveau_encoder *nv_encoder;
- struct drm_encoder *encoder;
-
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
- nv_encoder->or = ffs(dcbe->or) - 1;
- nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
- encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
- drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
- drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- int type, or, i, link = -1;
-
- if (id < 4) {
- type = DCB_OUTPUT_ANALOG;
- or = id;
- } else {
- switch (mc & 0x00000f00) {
- case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
- case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
- case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
- case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
- case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
- default:
- NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
- return NULL;
- }
-
- or = id - 4;
- }
-
- for (i = 0; i < drm->vbios.dcb.entries; i++) {
- struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
- if (dcb->type == type && (dcb->or & (1 << or)) &&
- (link < 0 || link == !(dcb->sorconf.link & 1)))
- return dcb;
- }
-
- NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
- return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_output *dcb;
- u32 or, tmp, pclk;
- int i;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
- if (!(mcc & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcc);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
- }
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
- NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
- crtc, pclk, mask);
- if (pclk && (mask & 0x00010000)) {
- nv50_crtc_set_clock(dev, crtc, pclk);
- }
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
- or = ffs(dcb->or) - 1;
-
- nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
- nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
- switch (dcb->type) {
- case DCB_OUTPUT_ANALOG:
- nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
- break;
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- if (cfg & 0x00000100)
- tmp = 0x00000101;
- else
- tmp = 0x00000000;
-
- nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
- break;
- default:
- break;
- }
-
- break;
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct dcb_output *dcb;
- int pclk, i;
-
- pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
- for (i = 0; mask && i < 8; i++) {
- u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
- u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
- if (!(mcp & (1 << crtc)))
- continue;
-
- dcb = lookup_dcb(dev, i, mcp);
- if (!dcb)
- continue;
-
- nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
- }
-
- nv_wr32(device, 0x6101d4, 0x00000000);
- nv_wr32(device, 0x6109d4, 0x00000000);
- nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvd0_display *disp = nvd0_display(dev);
- u32 mask = 0, crtc = ~0;
- int i;
-
- if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
- NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
- NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(device, 0x6101d0),
- nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
- for (i = 0; i < 8; i++) {
- NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
- i < 4 ? "DAC" : "SOR", i,
- nv_rd32(device, 0x640180 + (i * 0x20)),
- nv_rd32(device, 0x660180 + (i * 0x20)));
- }
- }
-
- while (!mask && ++crtc < dev->mode_config.num_crtc)
- mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
- if (disp->modeset & 0x00000001)
- nvd0_display_unk1_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000002)
- nvd0_display_unk2_handler(dev, crtc, mask);
- if (disp->modeset & 0x00000004)
- nvd0_display_unk4_handler(dev, crtc, mask);
-}
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- u32 intr = nv_rd32(device, 0x610088);
-
- if (intr & 0x00000001) {
- u32 stat = nv_rd32(device, 0x61008c);
- nv_wr32(device, 0x61008c, stat);
- intr &= ~0x00000001;
- }
-
- if (intr & 0x00000002) {
- u32 stat = nv_rd32(device, 0x61009c);
- int chid = ffs(stat) - 1;
- if (chid >= 0) {
- u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
- u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
- u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
- NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
- "0x%08x 0x%08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
- nv_wr32(device, 0x61009c, (1 << chid));
- nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
- }
-
- intr &= ~0x00000002;
- }
-
- if (intr & 0x00100000) {
- u32 stat = nv_rd32(device, 0x6100ac);
-
- if (stat & 0x00000007) {
- disp->modeset = stat;
- tasklet_schedule(&disp->tasklet);
-
- nv_wr32(device, 0x6100ac, (stat & 0x00000007));
- stat &= ~0x00000007;
- }
-
- if (stat) {
- NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
- nv_wr32(device, 0x6100ac, stat);
- }
-
- intr &= ~0x00100000;
- }
-
- intr &= ~0x0f000000; /* vblank, handled in core */
- if (intr)
- NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
- int i;
-
- /* fini cursors + overlays + flips */
- for (i = 1; i >= 0; i--) {
- evo_fini_pio(dev, EVO_CURS(i));
- evo_fini_pio(dev, EVO_OIMM(i));
- evo_fini_dma(dev, EVO_OVLY(i));
- evo_fini_dma(dev, EVO_FLIP(i));
- }
-
- /* fini master */
- evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- int ret, i;
- u32 *push;
-
- if (nv_rd32(device, 0x6100ac) & 0x00000100) {
- nv_wr32(device, 0x6100ac, 0x00000100);
- nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
- if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
- NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
- nv_rd32(device, 0x6194e8));
- return -EBUSY;
- }
- }
-
- /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
- * work at all unless you do the SOR part below.
- */
- for (i = 0; i < 3; i++) {
- u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
- nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
- }
-
- for (i = 0; i < 4; i++) {
- u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
- nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
- }
-
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
- u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
- u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
- nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
- nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
- nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
- }
-
- /* point at our hash table / objects, enable interrupts */
- nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
- nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
- /* init master */
- ret = evo_init_dma(dev, EVO_MASTER);
- if (ret)
- goto error;
-
- /* init flips + overlays + cursors */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
- (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
- (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
- (ret = evo_init_pio(dev, EVO_CURS(i))))
- goto error;
- }
-
- push = evo_wait(dev, EVO_MASTER, 32);
- if (!push) {
- ret = -EBUSY;
- goto error;
- }
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x00000000);
- evo_mthd(push, 0x0084, 1);
- evo_data(push, 0x80000000);
- evo_mthd(push, 0x008c, 1);
- evo_data(push, 0x00000000);
- evo_kick(push, dev, EVO_MASTER);
-
-error:
- if (ret)
- nvd0_display_fini(dev);
- return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
- struct nvd0_display *disp = nvd0_display(dev);
- struct pci_dev *pdev = dev->pdev;
- int i;
-
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
- }
-
- nouveau_gpuobj_ref(NULL, &disp->mem);
- nouveau_bo_unmap(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
-
- nouveau_display(dev)->priv = NULL;
- kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bar *bar = nouveau_bar(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct dcb_table *dcb = &drm->vbios.dcb;
- struct drm_connector *connector, *tmp;
- struct pci_dev *pdev = dev->pdev;
- struct nvd0_display *disp;
- struct dcb_output *dcbe;
- int crtcs, ret, i;
-
- disp = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- nouveau_display(dev)->priv = disp;
- nouveau_display(dev)->dtor = nvd0_display_destroy;
- nouveau_display(dev)->init = nvd0_display_init;
- nouveau_display(dev)->fini = nvd0_display_fini;
-
- /* create crtc objects to represent the hw heads */
- crtcs = nv_rd32(device, 0x022448);
- for (i = 0; i < crtcs; i++) {
- ret = nvd0_crtc_create(dev, i);
- if (ret)
- goto out;
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location != DCB_LOC_ON_CHIP) {
- NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
-
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- nvd0_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- nvd0_dac_create(connector, dcbe);
- break;
- default:
- NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
- dcbe->type, ffs(dcbe->or) - 1);
- continue;
- }
- }
-
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
- continue;
-
- NV_WARN(drm, "%s has no encoders, removing\n",
- drm_get_connector_name(connector));
- connector->funcs->destroy(connector);
- }
-
- /* setup interrupt handling */
- tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
- /* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
- if (!ret)
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
- }
-
- if (ret)
- goto out;
-
- /* hash table and dma objects for the memory areas we care about */
- ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
- if (ret)
- goto out;
-
- /* create evo dma channels */
- for (i = 0; i < EVO_DMA_NR; i++) {
- struct evo *evo = &disp->evo[i];
- u64 offset = disp->sync->bo.offset;
- u32 dmao = 0x1000 + (i * 0x100);
- u32 hash = 0x0000 + (i * 0x040);
-
- evo->idx = i;
- evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
- evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
- if (!evo->ptr) {
- ret = -ENOMEM;
- goto out;
- }
-
- nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
- nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
- nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
- nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
- nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
- ((dmao + 0x00) << 9));
-
- nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
- nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
- nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
- nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
- ((dmao + 0x20) << 9));
-
- nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
- nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
- nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
- nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
- ((dmao + 0x40) << 9));
-
- nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
- nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
- nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
- nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
- nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
- nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
- ((dmao + 0x60) << 9));
- }
-
- bar->flush(bar);
-
-out:
- if (ret)
- nvd0_display_destroy(dev);
- return ret;
-}
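
The transfer-unit computation in the nvd0_sor_dp_calc_tu() hunk removed above boils down to a small piece of fixed-point arithmetic. The sketch below reproduces that arithmetic as a standalone program; the datarate, lane count and per-lane bandwidth are made-up example figures in the driver's own units, not values read back from any real panel.

/*
 * Standalone sketch of the TU arithmetic from the nvd0_sor_dp_calc_tu()
 * hunk removed above.  All input numbers are example assumptions.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t symbol = 100000;      /* scaling factor used by the driver */
	const uint64_t TU = 64;              /* transfer unit size in symbols */
	uint64_t link_nr  = 4;               /* assumed lane count */
	uint64_t link_bw  = 270000;          /* assumed 2.7 GHz link, driver units */
	uint64_t datarate = 148500 * 3;      /* assumed 1080p60 at 24bpp (clock * bytes/pixel) */
	uint64_t ratio, value;

	/* fraction of the link carrying pixel data, scaled by 'symbol' */
	ratio = datarate * symbol;
	ratio /= link_nr * link_bw;

	/* (symbol - ratio) * TU * ratio / symbol^2, as in the removed code */
	value = (symbol - ratio) * TU;
	value *= ratio;
	value /= symbol;
	value /= symbol;

	value += 5;                          /* small fudge factor added by the driver */
	value |= 0x08000000;                 /* enable bit */

	printf("TU register value: 0x%08llx\n", (unsigned long long)value);
	return 0;
}
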
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 24d932f53203..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
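
The hunk above forces RADEON_PLL_USE_FRAC_FB_DIV on DCE32 for pixel clocks above 165 MHz; a fractional feedback divider lets the PLL land closer to the requested dot clock than an integer-only divider can. The sketch below is illustrative only: the reference clock, dividers and tenth-step fractional granularity are assumptions for the example, not the values atombios_adjust_pll() actually computes.

/*
 * Illustrative sketch: achievable dot clock with integer vs. fractional
 * feedback dividers.  All divider values below are example assumptions.
 */
#include <stdio.h>

int main(void)
{
	const double ref_clk = 27000.0;   /* assumed 27 MHz reference, in kHz */
	const double target  = 241500.0;  /* assumed high dot clock, in kHz */
	const int ref_div = 2, post_div = 2;

	/* integer-only feedback divider: nearest whole multiple */
	int fb_int = (int)(target * ref_div * post_div / ref_clk + 0.5);
	double pclk_int = ref_clk * fb_int / (ref_div * post_div);

	/* fractional divider: allow tenth steps of the feedback value */
	int fb_frac10 = (int)(target * ref_div * post_div * 10.0 / ref_clk + 0.5);
	double pclk_frac = ref_clk * fb_frac10 / 10.0 / (ref_div * post_div);

	printf("integer fb=%d    -> %.1f kHz (error %+.1f kHz)\n",
	       fb_int, pclk_int, pclk_int - target);
	printf("frac    fb=%d.%d -> %.1f kHz (error %+.1f kHz)\n",
	       fb_frac10 / 10, fb_frac10 % 10, pclk_frac, pclk_frac - target);
	return 0;
}
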
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e8..064023bed480 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
-#define DP_LINK_STATUS_SIZE 6
-#define DP_DPCD_SIZE 8
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/***** general DP utility functions *****/
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
- int s = (lane & 1) * 4;
- u8 l = dp_link_status(link_status, i);
- return (l >> s) & 0xf;
-}
-
-static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- int lane;
- u8 lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
-
-static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- u8 lane_align;
- u8 lane_status;
- int lane;
-
- lane_align = dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
- return false;
- }
- return true;
-}
-
-static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane;
for (lane = 0; lane < lane_count; lane++) {
- u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
- u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
return (link_rate * lane_num * 8) / bpp;
}
-static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
-{
- switch (dpcd[DP_MAX_LINK_RATE]) {
- case DP_LINK_BW_1_62:
- default:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- }
-}
-
-static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static u8 dp_get_dp_link_rate_coded(int link_rate)
-{
- switch (link_rate) {
- case 162000:
- default:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- }
-}
-
/***** radeon specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
- int max_link_rate = dp_get_max_link_rate(dpcd);
- int max_lane_num = dp_get_max_lane_number(dpcd);
+ int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
- return dp_get_max_link_rate(dpcd);
+ return drm_dp_max_link_rate(dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- u8 msg[25];
+ u8 msg[DP_DPCD_SIZE];
int ret, i;
- ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE, 0);
if (ret > 0) {
- memcpy(dig_connector->dpcd, msg, 8);
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < 8; i++)
+ for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
- if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
int enc_id;
int dp_clock;
int dp_lane_count;
- int rd_interval;
bool tp3_supported;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
- tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(100);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
- if (dp_info->rd_interval == 0)
- udelay(400);
- else
- mdelay(dp_info->rd_interval * 4);
+ drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
- if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
- dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
- memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+ memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
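
The hunks above swap radeon's private DPCD parsing for the shared drm_dp_helper routines, but the lane/rate selection they feed still rests on the dp_get_max_dp_pix_clock() formula kept above. A minimal standalone sketch of that selection follows; the pixel clock, bpp and sink capabilities are example assumptions.

/*
 * Minimal sketch of lane-count selection using the retained
 * dp_get_max_dp_pix_clock() formula.  Inputs are example assumptions.
 */
#include <stdio.h>

/* same formula as the hunk above: max pixel clock (kHz) a link can carry */
static int max_dp_pix_clock(int link_rate, int lane_num, int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

int main(void)
{
	const int pix_clock = 148500;     /* assumed 1080p60, kHz */
	const int bpp = 24;               /* assumed 8 bpc RGB */
	const int max_link_rate = 270000; /* e.g. what drm_dp_max_link_rate() would report */
	const int max_lane_num = 4;       /* e.g. what drm_dp_max_lane_count() would report */
	int lane_num;

	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
		if (max_dp_pix_clock(max_link_rate, lane_num, bpp) >= pix_clock) {
			printf("picked %d lane(s) at %d kHz link rate\n",
			       lane_num, max_link_rate);
			return 0;
		}
	}
	printf("mode exceeds sink capability\n");
	return 1;
}
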
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 010bae19554a..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- radeon_dp_set_link_config(connector, mode);
+ radeon_dp_set_link_config(connector, adjusted_mode);
}
return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 219942c660d7..a2d478e8692a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -1650,7 +1660,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -1821,7 +1831,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
rdev->config.evergreen.max_pipes = 4;
- rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 4;
if (rdev->pdev->device == 0x9648)
rdev->config.evergreen.max_simds = 3;
else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1854,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1876,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1924,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
- rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_pipes = 2;
rdev->config.evergreen.max_tile_pipes = 2;
rdev->config.evergreen.max_simds = 2;
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,10 +2044,22 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG, gb_addr_config);
- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
- EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -2305,22 +2327,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2330,10 +2350,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -2357,15 +2374,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2375,13 +2391,71 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ evergreen_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ evergreen_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev);
+ return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* Interrupts */
@@ -2403,8 +2477,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
+ tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ WREG32(CAYMAN_DMA1_CNTL, tmp);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2535,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2563,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2587,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2685,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_CAYMAN)
+ WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3193,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3116,9 +3226,19 @@ restart_ih:
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ if (rdev->family >= CHIP_CAYMAN) {
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ }
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3144,6 +3264,143 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
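For illustration, a minimal standalone sketch (not part of the patch) of the padding rule used above: the write pointer is advanced to the next position that is 5 modulo 8, so the 3-dword INDIRECT_BUFFER packet ends exactly on an 8 DW boundary. The main() and the sample wptr value are hypothetical.

#include <stdio.h>

/* mirrors the alignment loop in evergreen_dma_ring_ib_execute() */
static unsigned int align_for_ib(unsigned int wptr)
{
	while ((wptr & 7) != 5)
		wptr++;
	return wptr;
}

int main(void)
{
	unsigned int wptr = 18;			/* hypothetical write pointer */
	unsigned int ib_start = align_for_ib(wptr);

	/* the 3-dword IB packet occupies ib_start..ib_start+2 (here 21..23),
	 * so the following packet starts on an 8-dword boundary
	 */
	printf("pad %u -> %u, next packet starts at dword %u\n",
	       wptr, ib_start, ib_start + 3);
	return 0;
}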
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
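As a rough illustration of the chunking done by evergreen_copy_dma() above, the standalone sketch below (not part of the patch) redoes the size and loop-count arithmetic for a hypothetical transfer; it assumes the usual 4 KiB GPU page size.

#include <stdio.h>

#define GPU_PAGE_SHIFT	12		/* assumed 4 KiB GPU pages */
#define MAX_COPY_DW	0xfffff		/* per-packet dword limit used above */

int main(void)
{
	unsigned int num_gpu_pages = 4096;	/* hypothetical 16 MiB copy */
	unsigned int size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned int num_loops = (size_in_dw + MAX_COPY_DW - 1) / MAX_COPY_DW;

	/* the driver reserves 5 ring dwords per COPY packet plus 11 for
	 * the semaphore sync and fence emitted around the copy
	 */
	printf("%u dwords, %u COPY packets, %u ring dwords reserved\n",
	       size_in_dw, num_loops, num_loops * 5 + 11);
	return 0;
}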
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3207,6 +3464,12 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3221,12 +3484,23 @@ static int evergreen_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
r = evergreen_cp_resume(rdev);
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3273,11 +3547,9 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- ring->ready = false;
+ r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3626,9 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3366,6 +3641,7 @@ int evergreen_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3393,6 +3669,7 @@ void evergreen_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c042e497e450..7a445666e71f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
/* height is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
+ /* always assume 8x8 htile */
+ /* align is htile align * 8; htile align varies according to
+ * the number of pipes, tile width and nby
+ */
switch (track->npipes) {
case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 64 * 8);
break;
case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 64 * 8);
nby = round_up(nby, 32 * 8);
break;
case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 32 * 8);
break;
case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
nbx = round_up(nbx, 32 * 8);
nby = round_up(nby, 16 * 8);
break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
}
}
/* compute number of htile */
- nbx = nbx / 8;
- nby = nby / 8;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_SURFACE:
/* 8x8 only */
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size, info;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ info = radeon_get_ib_value(p, idx+1);
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem to mem copies require a dw aligned count */
+ if (size % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ /* GDS is ok */
+ if (((info & 0x60000000) >> 29) != 1) {
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ if (((info & 0x60000000) >> 29) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ } else if (((info & 0x60000000) >> 29) != 2) {
+ DRM_ERROR("bad CP DMA SRC_SEL\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ /* GDS is ok */
+ if (((info & 0x00300000) >> 20) != 1) {
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ if (((info & 0x00300000) >> 20) == 0) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad CP DMA DST_SEL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2540,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2715,6 +2858,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/*
+ * DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x00700000) >> 20)
+
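A minimal standalone sketch (not part of the patch) of how those masks split a DMA packet header; the sample header value and main() are purely illustrative.

#include <stdio.h>

#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x000fffff)
#define GET_DMA_T(h)		(((h) & 0x00800000) >> 23)
#define GET_DMA_NEW(h)		(((h) & 0x04000000) >> 26)
#define GET_DMA_MISC(h)		(((h) & 0x00700000) >> 20)

int main(void)
{
	/* hypothetical header: COPY (0x3), new-style, linear, count 0x100 */
	unsigned int header = 0x34000100;

	printf("cmd %u count %u tiled %u new %u misc %u\n",
	       GET_DMA_CMD(header), GET_DMA_COUNT(header),
	       GET_DMA_T(header), GET_DMA_NEW(header),
	       GET_DMA_MISC(header));
	return 0;
}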
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset, dst2_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 7;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ dst2_offset = ib[idx+2];
+ dst2_offset <<= 8;
+ src_offset = ib[idx+8];
+ src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+7];
+ dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+7];
+ src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst2_offset = ib[idx+2];
+ dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+ src_offset = ib[idx+3];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib->length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
+
/* vm parser */
static bool evergreen_vm_reg_valid(u32 reg)
{
@@ -2724,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
+ case WAIT_UNTIL:
case GRBM_GFX_INDEX:
case CP_STRMOUT_CNTL:
case CP_COHER_CNTL:
@@ -2843,6 +3436,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2917,6 +3511,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+ /* non mem to mem copies require a dw aligned count */
+ if ((command & 0x1fffff) % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
return -EINVAL;
}
@@ -2958,3 +3610,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
return ret;
}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib: radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ u32 idx = 0;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+
+ do {
+ header = ib->ptr[idx];
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ if (tiled)
+ idx += count + 7;
+ else
+ idx += count + 3;
+ break;
+ case DMA_PACKET_COPY:
+ if (tiled) {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ idx += 5;
+ }
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (idx < ib->length_dw);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2bc0f6a1b428..0bfd0e9e469b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
/* Registers */
@@ -355,6 +357,54 @@
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x5e78
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -651,6 +701,7 @@
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT1_CNTL2 0x1434
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -672,6 +723,8 @@
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define WAIT_UNTIL 0x8040
@@ -689,8 +742,9 @@
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
/* display watermarks */
@@ -854,6 +908,37 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* ASYNC DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xD0B8
+
+#define CAYMAN_DMA1_CNTL 0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
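As a quick illustration (not part of the patch), the standalone sketch below shows how DMA_PACKET() composes header dwords; the two values printed are the fence and trap headers emitted by evergreen_dma_fence_ring_emit() earlier in this patch.

#include <stdio.h>

#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
					 (((t) & 0x1) << 23) |		\
					 (((s) & 0x1) << 22) |		\
					 (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_FENCE	0x6
#define DMA_PACKET_TRAP		0x7

int main(void)
{
	unsigned int fence_hdr = DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0);
	unsigned int trap_hdr = DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0);

	printf("FENCE header 0x%08X\n", fence_hdr);	/* 0x60000000 */
	printf("TRAP  header 0x%08X\n", trap_hdr);	/* 0x70000000 */
	return 0;
}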
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -951,6 +1036,53 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
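The layout comment above puts SRC_SEL in bits 30:29 and DST_SEL in bits 21:20 of the third dword of the packet; the standalone sketch below (not part of the patch) extracts them the same way the (info & 0x60000000) >> 29 and (info & 0x00300000) >> 20 tests in the CS checker do. The sample dword is hypothetical.

#include <stdio.h>

/* field extraction mirroring the CP_DMA checks in evergreen_cs.c */
#define CP_DMA_SRC_SEL(info)	(((info) & 0x60000000) >> 29)	/* 0=SRC_ADDR 1=GDS 2=DATA */
#define CP_DMA_DST_SEL(info)	(((info) & 0x00300000) >> 20)	/* 0=DST_ADDR 1=GDS */

int main(void)
{
	/* hypothetical dword: SRC_SEL=2 (DATA), DST_SEL=1 (GDS) */
	unsigned int info = 0x40100000;

	printf("src_sel %u dst_sel %u\n",
	       CP_DMA_SRC_SEL(info), CP_DMA_DST_SEL(info));
	return 0;
}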
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
@@ -1896,4 +2028,15 @@
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+
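DMA_RB_SIZE holds the ring size as a log2 dword count, which is why cayman_dma_resume() later in this patch computes rb_bufsz = drm_order(ring->ring_size / 4) and shifts it into place. Below is a standalone sketch (not part of the patch) of that encoding for a hypothetical 64 KiB ring; order_base_2() stands in for the kernel's drm_order().

#include <stdio.h>

#define DMA_RB_ENABLE	(1 << 0)
#define DMA_RB_SIZE(x)	((x) << 1)	/* log2 of the ring size in dwords */

/* stand-in for drm_order(): smallest order with 2^order >= n */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int ring_size = 64 * 1024;			/* bytes */
	unsigned int rb_bufsz = order_base_2(ring_size / 4);	/* 16384 dwords -> 14 */
	unsigned int rb_cntl = DMA_RB_SIZE(rb_bufsz) | DMA_RB_ENABLE;

	printf("rb_bufsz %u rb_cntl 0x%08X\n", rb_bufsz, rb_cntl);	/* 14, 0x0000001D */
	return 0;
}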
#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 81e6a568c29d..835992d8d067 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
@@ -1059,7 +1072,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
WREG32(SCRATCH_UMSK, 0xff);
@@ -1076,7 +1089,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
#endif
WREG32(cp_rb_cntl[i], rb_cntl);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
@@ -1118,22 +1131,199 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1143,19 +1333,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14F8));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14D8));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14FC));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14DC));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -1180,16 +1358,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1199,13 +1375,113 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ cayman_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ cayman_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev);
+ return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ else
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int cayman_startup(struct radeon_device *rdev)
@@ -1289,6 +1565,18 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1303,6 +1591,23 @@ static int cayman_startup(struct radeon_device *rdev)
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = cayman_cp_load_microcode(rdev);
if (r)
return r;
@@ -1310,6 +1615,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1663,7 @@ int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
cayman_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1730,14 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1433,6 +1750,7 @@ int cayman_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1463,6 +1781,7 @@ void cayman_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
@@ -1538,30 +1857,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
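+ /* each page table entry takes two dwords; the DMA write
+ * count field is 20 bits wide, so clamp to the largest even value
+ */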
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
}
-
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
}
}
}
@@ -1596,3 +1942,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..48e5022ee921 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,61 @@
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
-#endif
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_MODE 0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
}
}
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
{
- if (reg < rdev->rmmio_size)
+ if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..becb03e8b32f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
* reset, it's up to the caller to determine if the GPU needs one. We
* might add an helper function to check that.
*/
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
u32 tmp;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- rv515_mc_stop(rdev, &save);
- if (r600_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- /* Wait a little for things to settle down */
- mdelay(1);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,66 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
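+ /* the DMA soft reset bit differs between r6xx (bit 12) and rv770+ (bit 20) */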
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct rv515_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ r600_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ r600_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ mdelay(1);
+
rv515_mc_resume(rdev, &save);
return 0;
}
@@ -1370,9 +1425,34 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 dma_status_reg;
+
+ dma_status_reg = RREG32(DMA_STATUS_REG);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
int r600_asic_reset(struct radeon_device *rdev)
{
- return r600_gpu_soft_reset(rdev);
+ return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -1382,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -1424,13 +1507,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
-
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1671,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1949,7 @@ void r600_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2275,132 @@ void r600_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
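+ *
+ * For illustration, the simplest packet (as emitted by
+ * r600_dma_ring_test() below) is a 4 dword embedded-data write:
+ *   DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)    header, 1 data dword
+ *   dst_addr & 0xfffffffc                    dst address, low 32 bits
+ *   upper_32_bits(dst_addr) & 0xff           dst address, bits 39:32
+ *   0xDEADBEEF                               the data dword itself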
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
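+ /* rb_bufsz is log2 of the ring size in dwords, which is what DMA_RB_SIZE encodes */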
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
/*
* GPU scratch registers helpers function.
@@ -2252,6 +2457,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -2315,6 +2578,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
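+ /* the 's' field of the SEMAPHORE packet selects signal (1) or wait (0) */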
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2334,6 +2650,80 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
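+ /* each copy packet is 4 dwords; reserve 8 more for the semaphore wait (3) and fence + trap (5) */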
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2739,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
static int r600_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -2394,6 +2784,12 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2403,12 +2799,20 @@ static int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
+
r = r600_cp_load_microcode(rdev);
if (r)
return r;
@@ -2416,6 +2820,10 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2879,7 @@ int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2544,6 +2952,9 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2556,6 +2967,7 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2572,6 +2984,7 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2674,6 +3087,104 @@ free_scratch:
return r;
}
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
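+ /* next_rptr must point just past the INDIRECT_BUFFER packet,
+ * which is padded below to end on an 8 DW boundary (i.e. to
+ * start at offset 5 mod 8)
+ */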
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
/*
* Interrupts
*
@@ -2865,6 +3376,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3519,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3554,19 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3611,7 @@ int r600_irq_set(struct radeon_device *rdev)
}
WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3991,10 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d00..be85f75aedda 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static int r600_count_pipe_bits(uint32_t val)
{
- int i, ret = 0;
- for (i = 0; i < 32; i++) {
- ret += val & 1;
- val >>= 1;
- }
- return ret;
+ return hweight32(val);
}
static void r600_gfx_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..69ec24ab8d63 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
nby = round_up(nby, track->npipes * 8);
} else {
- /* htile widht & nby (8 or 4) make 2 bits number */
- tmp = track->htile_surface & 3;
+ /* always assume 8x8 htile */
/* align is htile align * 8, htile align vary according to
* number of pipe and tile width and nby
*/
switch (track->npipes) {
case 8:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 64 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
break;
case 4:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 64 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 2:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 32 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
break;
case 1:
- switch (tmp) {
- case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
- nbx = round_up(nbx, 32 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
- case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 16 * 8);
- break;
- case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
- nbx = round_up(nbx, 16 * 8);
- nby = round_up(nby, 8 * 8);
- break;
- default:
- return -EINVAL;
- }
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
break;
default:
dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
}
}
/* compute number of htile */
- nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
- nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
- size = nbx * nby * 4;
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_HTILE_SURFACE:
track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
track->db_dirty = true;
break;
case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
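+ /* BYTE_COUNT occupies the low 21 bits of COMMAND */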
+ size = command & 0x1fffff;
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ }
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2276,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2429,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
@@ -2496,3 +2545,209 @@ void r600_cs_legacy_init(void)
{
r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
+
+/*
+ * DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ unsigned idx;
+
+ *cs_reloc = NULL;
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ idx = p->dma_reloc_idx;
+ if (idx >= p->nrelocs) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs_ptr[idx];
+ p->dma_reloc_idx++;
+ return 0;
+}
+
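+/* decode the DMA packet header fields: cmd [31:28], count [15:0], tiled bit [23] */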
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ u32 header, cmd, count, tiled;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 5;
+ } else {
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = ib[idx+1];
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = ib[idx+5];
+ dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = ib[idx+5];
+ src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = ib[idx+1];
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 7;
+ } else {
+ if (p->family >= CHIP_RV770) {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ } else {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ p->idx += 4;
+ }
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("Constant Fill is 7xx only !\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
#define R600_CONFIG_F0_BASE 0x542C
#define R600_CONFIG_APER_SIZE 0x5430
+#define R600_BIF_FB_EN 0x5490
+#define R600_FB_READ_EN (1 << 0)
+#define R600_FB_WRITE_EN (1 << 1)
+
+#define R600_CITF_CNTL 0x200c
+#define R600_BLACKOUT_MASK 0x00000003
+
+#define R700_MC_CITF_CNTL 0x25c0
+
#define R600_ROM_CNTL 0x1600
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
+/* async DMA */
+#define DMA_TILING_CONFIG 0x3ec4
+#define DMA_CONFIG 0x3e4c
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_MODE 0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
+#define DMA_PACKET_NOP 0xf
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
-# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..a08f657329a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 5
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,11 +122,21 @@ extern int radeon_lockup_timeout;
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX 3
+/* cayman add a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* reset flags */
+#define RADEON_RESET_GFX (1 << 0)
+#define RADEON_RESET_COMPUTE (1 << 1)
+#define RADEON_RESET_DMA (1 << 2)
+
/*
* Errata workarounds.
*/
@@ -220,12 +230,13 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
@@ -642,6 +653,8 @@ struct radeon_ring {
u32 ptr_reg_mask;
u32 nop;
u32 idx;
+ u64 last_semaphore_signal_addr;
+ u64 last_semaphore_wait_addr;
};
/*
@@ -787,6 +800,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
/*
* CS.
*/
@@ -824,6 +846,7 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ unsigned dma_reloc_idx;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
@@ -883,7 +906,9 @@ struct radeon_wb {
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1539,6 +1564,8 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
+ /* protects concurrent MM_INDEX/DATA based register access */
+ spinlock_t mmio_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -1614,8 +1641,10 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -1631,9 +1660,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1689,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
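The new RREG32_IDX()/WREG32_IDX() macros force an access through the MM_INDEX/MM_DATA pair, which the mmio_idx_lock added in radeon_device.c serializes. A minimal sketch of the always_indirect read path, assuming the r100_mm_rreg() signature declared above (the body is illustrative, not the literal driver code):

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect)
{
	if (reg < rdev->rmmio_size && !always_indirect) {
		/* register is directly visible in the MMIO BAR */
		return readl(rdev->rmmio + reg);
	} else {
		unsigned long flags;
		uint32_t ret;

		/* the MM_INDEX write and MM_DATA read must form one critical section */
		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		writel(reg, rdev->rmmio + RADEON_MM_INDEX);
		ret = readl(rdev->rmmio + RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
		return ret;
	}
}

The write side would mirror this with a writel() to RADEON_MM_DATA under the same lock.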
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..0b202c07fe50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &rv770_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &rv770_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
}
},
.irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = &r600_copy_blit,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
}
},
.irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = NULL,
- .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .copy = NULL,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &si_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &si_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
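With the DMA rings wired up, every asic's copy table now points at a real dma callback instead of NULL. A hedged sketch of how a caller might dispatch a copy through that table; the helper name radeon_copy_via_dma() is invented for illustration, the real driver goes through the accessor macros in radeon_asic.h:

static int radeon_copy_via_dma(struct radeon_device *rdev,
			       uint64_t src_offset, uint64_t dst_offset,
			       unsigned num_gpu_pages,
			       struct radeon_fence **fence)
{
	/* fall back to the blit path if this asic has no async DMA engine */
	if (!rdev->asic->copy.dma) {
		if (!rdev->asic->copy.blit)
			return -EINVAL;
		return rdev->asic->copy.blit(rdev, src_offset, dst_offset,
					     num_gpu_pages, fence);
	}
	/* e.g. r600_copy_dma()/evergreen_copy_dma()/si_copy_dma() from the tables above */
	return rdev->asic->copy.dma(rdev, src_offset, dst_offset,
				    num_gpu_pages, fence);
}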
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
struct rv515_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
+ bool crtc_enabled[2];
};
int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -388,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* evergreen
@@ -416,6 +435,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +448,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* cayman
@@ -449,6 +477,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +509,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..3e403bdda58f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (of_machine_is_compatible("PowerMac3,5")) {
+ /* PowerMac G4 Silver radeon 7500 */
+ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_MAC_G4_SILVER:
+ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -2419,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
+ /* RV100 board with external TMDS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
@@ -3246,11 +3305,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
while (ram--) {
addr = ram * 1024 * 1024;
/* write to each page */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- WREG32(RADEON_MM_DATA, 0xdeadbeef);
+ WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
/* read back and verify */
- WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
- if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+ if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c2..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
return connector->status;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
+ /* bridge chips are always aux */
+ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+ if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
}
@@ -1599,7 +1601,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
break;
@@ -1608,13 +1610,13 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1627,14 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1653,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1669,7 +1671,7 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1692,23 +1694,23 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1732,17 +1734,17 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1771,17 +1773,17 @@ radeon_add_atom_connector(struct drm_device *dev,
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.coherent_mode_property,
1);
if (ASIC_IS_AVIVO(rdev)) {
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_hborder_property,
0);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
}
@@ -1806,7 +1808,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1821,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -1843,7 +1845,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1924,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1940,7 +1942,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
/* no HPD on analog connectors */
@@ -1959,7 +1961,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
}
@@ -1983,10 +1985,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
*/
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
@@ -2002,7 +2004,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
- drm_connector_attach_property(&radeon_connector->base,
+ drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
}
}
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
- u32 ret;
-
- if (addr < 0x10000)
- ret = DRM_READ32(dev_priv->mmio, addr);
- else {
- DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
- ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
- }
-
- return ret;
-}
-
static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..5407459e56d2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return 0;
}
chunk = &p->chunks[p->chunk_relocs_idx];
+ p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
} else
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
+ case RADEON_CS_RING_DMA:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->priority > 0)
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+ } else if (p->rdev->family >= CHIP_R600) {
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ } else {
+ return -EINVAL;
+ }
+ break;
}
return 0;
}
@@ -266,13 +279,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- if ((p->rdev->flags & RADEON_IS_AGP)) {
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
return -ENOMEM;
}
}
@@ -570,7 +585,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
- bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..0d67674b64b1 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
- EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
+ u32 reg;
switch (radeon_crtc->crtc_id) {
case 0:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ reg = RADEON_CRTC_GEN_CNTL;
break;
case 1:
- WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ reg = RADEON_CRTC2_GEN_CNTL;
break;
default:
return;
}
- WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+ WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
}
}
@@ -240,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
y = 0;
}
- if (ASIC_IS_AVIVO(rdev)) {
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..0d6562bb0c93 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ if (efi_enabled(EFI_BOOT) &&
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;
/* first check CRTCs */
@@ -897,6 +898,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -910,10 +930,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
+ unsigned d3_delay = dev->pdev->d3_delay;
+
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ dev->pdev->d3_delay = 20;
+
radeon_resume_kms(dev);
+
+ dev->pdev->d3_delay = d3_delay;
+
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
@@ -1059,6 +1088,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
+ spin_lock_init(&rdev->mmio_idx_lock);
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
@@ -1163,6 +1193,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
+ bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1205,8 +1236,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_empty_locked(rdev, i);
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* defer the GPU reset to resume time */
+ force_completion = true;
+ }
+ }
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1337,7 +1376,6 @@ retry:
}
radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1357,11 +1395,14 @@ retry:
}
}
} else {
+ radeon_fence_driver_force_completion(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
+ drm_helper_resume_force_mode(rdev->ddev);
+
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..05c96fa0b051 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->old_rbo = rbo;
obj = new_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
if (rbo->tbo.sync_obj)
work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -695,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE)) {
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -1106,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL)
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
+ }
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
drm_gem_object_unreference_unlocked(obj);
- return NULL;
+ return ERR_PTR(ret);
}
return &radeon_fb->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a4..d9bf96ee299a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,14 @@
* 2.22.0 - r600 only: RESOLVE_BOX allowed
* 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
* 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * 2.25.0 - eg+: new info request for num SE and num SH
+ * 2.26.0 - r600-eg: fix htile size computation
+ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * 2.29.0 - R500 FP16 color clear registers
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 24
+#define KMS_DRIVER_MINOR 29
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +286,15 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
-static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -295,13 +303,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
kfree(ap);
+
+ return 0;
}
-static int __devinit
-radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int radeon_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
+ int ret;
+
/* Get rid of things like offb */
- radeon_kick_out_firmware_fb(pdev);
+ ret = radeon_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &kms_driver);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
- while(1) {
- int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
if (r == -EDEADLK) {
- mutex_unlock(&rdev->ring_lock);
- r = radeon_gpu_reset(rdev);
- mutex_lock(&rdev->ring_lock);
- if (!r)
- continue;
- }
- if (r) {
- dev_err(rdev->dev, "error waiting for ring to become"
- " idle (%d)\n", r);
+ return -EDEADLK;
}
- return;
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ ring, r);
}
+ return 0;
}
/**
@@ -772,7 +766,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
int r;
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- if (rdev->wb.use_event) {
+ if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- int ring;
+ int ring, r;
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_empty_locked(rdev, ring);
+ r = radeon_fence_wait_empty_locked(rdev, ring);
+ if (r) {
+ /* no need to trigger GPU reset as we are unloading */
+ radeon_fence_driver_force_completion(rdev);
+ }
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keeps waiting on a fence
+ * that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ }
+}
+
/*
* Fence debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa6..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
{
struct radeon_bo_va *bo_va;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ if (use_aux) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+ } else {
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ }
+
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_MAX_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.num_ses;
+ else
+ value = 1;
+ break;
+ case RADEON_INFO_MAX_SH_PER_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_sh_per_se;
+ else
+ return -EINVAL;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
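The two new requests ride on the existing radeon info ioctl. A rough userspace sketch, assuming the usual libdrm drmCommandWriteRead() path and the drm_radeon_info layout in which the value field carries a user pointer for the result (helper name, include paths and error handling are illustrative):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>	/* libdrm uAPI header; exact include path may differ */

/* hypothetical helper: fetch one 32-bit value via DRM_RADEON_INFO */
static int radeon_query_u32(int fd, uint32_t request, uint32_t *out)
{
	struct drm_radeon_info info;
	uint32_t value = 0;

	memset(&info, 0, sizeof(info));
	info.request = request;
	info.value = (uintptr_t)&value;	/* kernel writes the result through this pointer */

	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) != 0)
		return -1;
	*out = value;
	return 0;
}

/* usage: radeon_query_u32(fd, RADEON_INFO_MAX_SE, &num_se); */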
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
+ /* Just don't bother on RN50: those chips are often connected to remoting
+ * console hw, and load detection frequently fails on them. So to make
+ * everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf08..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
- CT_SAM440EP
+ CT_SAM440EP,
+ CT_MAC_G4_SILVER
};
enum radeon_dvo_chip {
@@ -427,7 +428,7 @@ struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
- u8 dpcd[8];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef86..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -88,10 +88,20 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (domain & RADEON_GEM_DOMAIN_GTT)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- if (domain & RADEON_GEM_DOMAIN_CPU)
- rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ }
+ }
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
@@ -140,7 +150,7 @@ int radeon_bo_create(struct radeon_device *rdev,
/* Kernel allocations are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL,
+ &bo->placement, page_align, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
@@ -240,7 +250,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -269,7 +279,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -355,7 +365,7 @@ int radeon_bo_list_validate(struct list_head *head)
retry:
radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false, false);
+ true, false);
if (unlikely(r)) {
if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
domain |= RADEON_GEM_DOMAIN_GTT;
@@ -384,7 +394,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (!bo->tiling_flags)
return 0;
@@ -510,7 +520,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
if (pitch)
@@ -520,7 +530,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
- BUG_ON(!atomic_read(&bo->tbo.reserved));
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -575,7 +585,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2e..5fc86b03043b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
- return !!atomic_read(&bo->tbo.reserved);
+ return ttm_bo_is_reserved(&bo->tbo);
}
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4fb..0bfa656aa87d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
- int i;
+ int i, r;
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
- if (ring->ready)
- radeon_fence_wait_empty_locked(rdev, i);
+ if (!ring->ready) {
+ continue;
+ }
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* needs a GPU reset; don't do the reset here */
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return;
+ }
}
radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..26c23bb651c6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
bo = dma_buf->priv;
if (bo->gem_base.dev == dev) {
drm_gem_object_reference(&bo->gem_base);
+ dma_buf_put(dma_buf);
return &bo->gem_base;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 47634f27f2e5..cd72062d5a91 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;
+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -459,7 +462,7 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *r
*
* @ring: radeon_ring structure holding ring information
*
- * Reset the driver's copy of the wtpr (all asics).
+ * Reset the driver's copy of the wptr (all asics).
*/
void radeon_ring_undo(struct radeon_ring *ring)
{
@@ -503,7 +506,7 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
}
/**
- * radeon_ring_force_activity - update lockup variables
+ * radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
@@ -770,22 +773,30 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
+ u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
- seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
- seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- i = ring->rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ /* print 32 dwords before the current rptr, as often the last executed
+ * packet is the root issue
+ */
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
@@ -794,11 +805,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
/* we assume caller has already allocated space on waiters ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* for debugging lockups only, used by the debugfs files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
#include "radeon_reg.h"
#include "radeon.h"
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA 0
+
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
- int r;
+ int r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+ ring = radeon_copy_dma_ring_index(rdev);
+ break;
+ case RADEON_TEST_COPY_BLIT:
+ ring = radeon_copy_blit_ring_index(rdev);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ return;
+ }
size = 1024 * 1024;
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
}
}
+void radeon_test_moves(struct radeon_device *rdev)
+{
+ if (rdev->asic->copy.dma)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+ if (rdev->asic->copy.blit)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db2..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -320,7 +320,7 @@ out_cleanup:
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -359,7 +360,7 @@ out_cleanup:
static int radeon_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, new_mem);
+ no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
return r;
}
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}
-static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int radeon_sync_obj_flush(void *sync_obj)
{
return 0;
}
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
return radeon_fence_ref((struct radeon_fence *)sync_obj);
}
-static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool radeon_sync_obj_signaled(void *sync_obj)
{
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c15..a072fa8c46b0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..435ed3551364 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
void rv515_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,116 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- /* Stop all video */
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ /* disable VGA render */
WREG32(R_000300_VGA_RENDER_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, 0);
- WREG32(R_006880_D2CRTC_CONTROL, 0);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
- WREG32(R_000330_D1VGA_CONTROL, 0);
- WREG32(R_000338_D2VGA_CONTROL, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_RV770)
+ blackout = RREG32(R700_MC_CITF_CNTL);
+ else
+ blackout = RREG32(R600_CITF_CNTL);
+ if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+ /* Block CPU access */
+ WREG32(R600_BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout |= R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, blackout);
+ else
+ WREG32(R600_CITF_CNTL, blackout);
+ }
+ }
+ /* wait for the MC to settle */
+ udelay(100);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
- WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
- /* Unlock host access */
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->family >= CHIP_RV770) {
+ if (i == 1) {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ } else {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ }
+ }
+ WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+ WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ if (rdev->family >= CHIP_R600) {
+ /* unblackout the MC */
+ if (rdev->family >= CHIP_RV770)
+ tmp = RREG32(R700_MC_CITF_CNTL);
+ else
+ tmp = RREG32(R600_CITF_CNTL);
+ tmp &= ~R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, tmp);
+ else
+ WREG32(R600_CITF_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+ }
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -884,9 +887,83 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -932,6 +1009,12 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -941,11 +1024,20 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
r = rv770_cp_load_microcode(rdev);
if (r)
return r;
@@ -953,6 +1045,10 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1091,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
r700_cp_stop(rdev);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1162,9 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1078,6 +1177,7 @@ int rv770_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1093,6 +1193,7 @@ void rv770_fini(struct radeon_device *rdev)
{
r600_blit_fini(rdev);
r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
+#define DMA_TILING_CONFIG 0x3ec8
+#define DMA_TILING_CONFIG2 0xd0b8
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
#define WAIT_UNTIL 0x8040
+/* async DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+
#define SRBM_STATUS 0x0E50
/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
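For reference, a minimal stand-alone sketch of how the r7xx async DMA copy packet written by rv770_copy_dma() is laid out, using the DMA_PACKET() encoding added to rv770d.h above. It is not part of the patch; the helper name emit_r7xx_dma_copy() is hypothetical and only illustrates the five-dword packet format, with at most 0xFFFF dwords per packet as in the copy loop.

#include <stdint.h>

/* Field widths taken from the DMA_PACKET() macro in rv770d.h (sketch only). */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
                                  (((t) & 0x1) << 23) | \
                                  (((s) & 0x1) << 22) | \
                                  (((n) & 0xFFFF) << 0))
#define DMA_PACKET_COPY 0x3

/* Build one copy packet: header, dst/src low addresses, dst/src high bytes. */
static void emit_r7xx_dma_copy(uint32_t *ring, uint64_t dst, uint64_t src,
                               uint32_t count_dw)
{
	ring[0] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, count_dw); /* count_dw <= 0xFFFF */
	ring[1] = (uint32_t)dst & 0xfffffffc;   /* dst address, dword aligned */
	ring[2] = (uint32_t)src & 0xfffffffc;   /* src address, dword aligned */
	ring[3] = (uint32_t)(dst >> 32) & 0xff; /* dst address bits 39:32 */
	ring[4] = (uint32_t)(src >> 32) & 0xff; /* src address bits 39:32 */
}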
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 4422d630b33b..ae8b48205a6c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
udelay(50);
}
@@ -2007,7 +2012,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB0_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -2040,7 +2045,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB1_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
@@ -2066,7 +2071,7 @@ static int si_cp_resume(struct radeon_device *rdev)
ring->wptr = 0;
WREG32(CP_RB2_WPTR, ring->wptr);
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
@@ -2121,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2140,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
@@ -2168,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2180,13 +2179,81 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ evergreen_mc_stop(rdev, &save);
+ if (radeon_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ si_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ si_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev);
+ return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* MC */
@@ -2426,9 +2493,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* enable context1-15 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
si_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2612,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
+ u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -2633,6 +2712,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -2809,30 +2934,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 2 + count * 2;
- if (ndw > 0x3FFE)
- ndw = 0x3FFE;
-
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (; ndw > 2; ndw -= 2, --count, pe += 8) {
- uint64_t value;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+ radeon_ring_write(ring, pe); /* dst addr */
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ radeon_ring_write(ring, r600_flags); /* mask */
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, value); /* value */
+ radeon_ring_write(ring, upper_32_bits(value));
+ radeon_ring_write(ring, incr); /* increment size */
+ radeon_ring_write(ring, 0);
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
}
}
@@ -2880,6 +3061,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
/*
* RLC
*/
@@ -3048,6 +3255,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
+ tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3378,7 @@ int si_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3399,9 @@ int si_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3415,15 @@ int si_irq_set(struct radeon_device *rdev)
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3483,9 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3911,16 @@ restart_ih:
break;
}
break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
@@ -3707,9 +3944,17 @@ restart_ih:
break;
}
break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
+ case 244: /* DMA trap event */
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -3733,6 +3978,80 @@ restart_ih:
return IRQ_HANDLED;
}
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
/*
* startup/shutdown callbacks
*/
@@ -3804,6 +4123,18 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -3834,6 +4165,22 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
r = si_cp_load_microcode(rdev);
if (r)
return r;
@@ -3841,6 +4188,10 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4233,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
si_cp_enable(rdev, false);
- rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ cayman_dma_stop(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -3962,6 +4311,14 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3974,6 +4331,7 @@ int si_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
@@ -4002,6 +4360,7 @@ void si_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
#endif
si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
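For reference, si_copy_dma() above splits a copy into DMA packets of at most 0xFFFFF bytes and reserves five ring dwords per packet plus headroom for the semaphore wait and fence (the "num_loops * 5 + 11" in the lock call). Below is a minimal stand-alone sketch of that arithmetic, not part of the patch, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12); the helper name is hypothetical.

#include <stdint.h>

#define SI_DMA_MAX_COPY_BYTES 0xFFFFFu              /* per-packet byte limit */
#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

/* Dwords to reserve on the DMA ring for a copy of num_gpu_pages pages. */
static unsigned si_dma_copy_reserve_dw(unsigned num_gpu_pages)
{
	uint32_t size_in_bytes = (uint32_t)num_gpu_pages << 12; /* assumed 4 KiB pages */
	unsigned num_loops = DIV_ROUND_UP(size_in_bytes, SI_DMA_MAX_COPY_BYTES);

	return num_loops * 5 + 11; /* 5 dwords per packet + sync/fence headroom */
}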
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index a8871afc5b4e..c056aae814f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
#define SRBM_STATUS 0xE50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
@@ -91,7 +107,18 @@
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +131,9 @@
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
@@ -835,6 +865,54 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -922,4 +1000,63 @@
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
#define PACKET3_SWITCH_BUFFER 0x8B
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_TILING_CONFIG 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
#endif
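For reference, a minimal stand-alone sketch of how the SI async DMA SRBM_WRITE packet used by si_dma_vm_flush() is encoded with the DMA_PACKET() macro defined above. It is not part of the patch; the helper name emit_si_dma_srbm_write() is hypothetical, and the 0xf byte-enable in the second dword follows the usage shown in the si.c changes.

#include <stdint.h>

/* Field widths taken from the DMA_PACKET() macro in sid.h (sketch only). */
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
                                     (((b) & 0x1) << 26) | \
                                     (((t) & 0x1) << 23) | \
                                     (((s) & 0x1) << 22) | \
                                     (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_SRBM_WRITE 0x9

/* Emit a 3-dword SRBM register write: header, byte-enables + register, value. */
static void emit_si_dma_srbm_write(uint32_t *ring, uint32_t reg, uint32_t val)
{
	ring[0] = DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0);
	ring[1] = (0xf << 16) | (reg >> 2); /* enable all 4 bytes, dword-indexed reg */
	ring[2] = val;
}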
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
connector->encoder = encoder;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
return 0;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 1c350fc4e449..d1d5306ebf24 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -33,7 +33,7 @@
* Hardware initialization
*/
-static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
{
static const u32 ldmt1r[] = {
[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
@@ -67,7 +67,7 @@ static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
return 0;
}
-static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
enum shmob_drm_clk_source clksrc)
{
struct clk *clk;
@@ -330,12 +330,12 @@ static const struct dev_pm_ops shmob_drm_pm_ops = {
* Platform driver
*/
-static int __devinit shmob_drm_probe(struct platform_device *pdev)
+static int shmob_drm_probe(struct platform_device *pdev)
{
return drm_platform_init(&shmob_drm_driver, pdev);
}
-static int __devexit shmob_drm_remove(struct platform_device *pdev)
+static int shmob_drm_remove(struct platform_device *pdev)
{
drm_platform_exit(&shmob_drm_driver, pdev);
@@ -344,7 +344,7 @@ static int __devexit shmob_drm_remove(struct platform_device *pdev)
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
- .remove = __devexit_p(shmob_drm_remove),
+ .remove = shmob_drm_remove,
.driver = {
.owner = THIS_MODULE,
.name = "shmob-drm",
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000000..be1daf7344d3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
+config DRM_TEGRA
+ tristate "NVIDIA Tegra DRM"
+ depends on DRM && OF && ARCH_TEGRA
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..80f73d1315d0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := drm.o fb.o dc.o host1x.o
+tegra-drm-y += output.o rgb.o hdmi.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000000..656b2e3334a6
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,833 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <mach/clk.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_dc_window {
+ fixed20_12 x;
+ fixed20_12 y;
+ fixed20_12 w;
+ fixed20_12 h;
+ unsigned int outx;
+ unsigned int outy;
+ unsigned int outw;
+ unsigned int outh;
+ unsigned int stride;
+ unsigned int fmt;
+};
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+ unsigned int bpp)
+{
+ fixed20_12 outf = dfixed_init(out);
+ u32 dda_inc;
+ int max;
+
+ if (v)
+ max = 15;
+ else {
+ switch (bpp) {
+ case 2:
+ max = 8;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ /* fallthrough */
+ case 4:
+ max = 4;
+ break;
+ }
+ }
+
+ outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+ inf.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(inf, outf);
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+ return dfixed_frac(in);
+}
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+ struct drm_display_mode *mode)
+{
+ /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+ unsigned int h_ref_to_sync = 0;
+ unsigned int v_ref_to_sync = 0;
+ unsigned long value;
+
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = (v_ref_to_sync << 16) | h_ref_to_sync;
+ tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+ value = ((mode->vsync_end - mode->vsync_start) << 16) |
+ ((mode->hsync_end - mode->hsync_start) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+ value = ((mode->vtotal - mode->vsync_end) << 16) |
+ ((mode->htotal - mode->hsync_end) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+ value = ((mode->vsync_start - mode->vdisplay) << 16) |
+ ((mode->hsync_start - mode->hdisplay) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+ value = (mode->vdisplay << 16) | mode->hdisplay;
+ tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+ return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ unsigned long *div)
+{
+ unsigned long pclk = mode->clock * 1000, rate;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_output *output = NULL;
+ struct drm_encoder *encoder;
+ long err;
+
+ list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc) {
+ output = encoder_to_output(encoder);
+ break;
+ }
+
+ if (!output)
+ return -ENODEV;
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+ return err;
+ }
+
+ rate = clk_get_rate(dc->clk);
+ *div = (rate * 2 / pclk) - 2;
+
+ DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+ return 0;
+}
+
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int h_dda, v_dda, bpp;
+ struct tegra_dc_window win;
+ unsigned long div, value;
+ int err;
+
+ err = tegra_crtc_setup_clk(crtc, mode, &div);
+ if (err) {
+ dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+ return err;
+ }
+
+ /* program display mode */
+ tegra_dc_set_timings(dc, mode);
+
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+ value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+ /* setup window parameters */
+ memset(&win, 0, sizeof(win));
+ win.x.full = dfixed_const(0);
+ win.y.full = dfixed_const(0);
+ win.w.full = dfixed_const(mode->hdisplay);
+ win.h.full = dfixed_const(mode->vdisplay);
+ win.outx = 0;
+ win.outy = 0;
+ win.outw = mode->hdisplay;
+ win.outh = mode->vdisplay;
+
+ switch (crtc->fb->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
+
+ case DRM_FORMAT_RGB565:
+ win.fmt = WIN_COLOR_DEPTH_B5G6R5;
+ break;
+
+ default:
+ win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+ WARN_ON(1);
+ break;
+ }
+
+ bpp = crtc->fb->bits_per_pixel / 8;
+ win.stride = crtc->fb->pitches[0];
+
+ /* program window registers */
+ value = WINDOW_A_SELECT;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+ tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
+ tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(win.outy) | H_POSITION(win.outx);
+ tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+ value = V_SIZE(win.outh) | H_SIZE(win.outw);
+ tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+ value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
+ H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
+ tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+ h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
+ v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(win.x);
+ v_dda = compute_initial_dda(win.y);
+
+ tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
+ tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
+ tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
+ DC_WINBUF_ADDR_H_OFFSET);
+ tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
+
+ value = WIN_ENABLE;
+
+ if (bpp < 24)
+ value |= COLOR_EXPAND;
+
+ tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
+ tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+
+ return 0;
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned int syncpt;
+ unsigned long value;
+
+ /* hardware initialization */
+ tegra_periph_reset_deassert(dc->clk);
+ usleep_range(10000, 20000);
+
+ if (dc->pipe)
+ syncpt = SYNCPT_VBLANK1;
+ else
+ syncpt = SYNCPT_VBLANK0;
+
+ /* initialize display controller */
+ tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ unsigned long update_mask;
+ unsigned long value;
+
+ update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+
+ tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+ .dpms = tegra_crtc_dpms,
+ .mode_fixup = tegra_crtc_mode_fixup,
+ .mode_set = tegra_crtc_mode_set,
+ .prepare = tegra_crtc_prepare,
+ .commit = tegra_crtc_commit,
+ .load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_drm_irq(int irq, void *data)
+{
+ struct tegra_dc *dc = data;
+ unsigned long status;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+ if (status & FRAME_END_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+ */
+ }
+
+ if (status & VBLANK_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+ */
+ drm_handle_vblank(dc->base.dev, dc->pipe);
+ }
+
+ if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+ */
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
+ tegra_dc_readl(dc, name))
+
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+ DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+ DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+ DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE);
+ DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+ DUMP_REG(DC_CMD_INT_STATUS);
+ DUMP_REG(DC_CMD_INT_MASK);
+ DUMP_REG(DC_CMD_INT_ENABLE);
+ DUMP_REG(DC_CMD_INT_TYPE);
+ DUMP_REG(DC_CMD_INT_POLARITY);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+ DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+ DUMP_REG(DC_CMD_STATE_ACCESS);
+ DUMP_REG(DC_CMD_STATE_CONTROL);
+ DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+ DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+ DUMP_REG(DC_COM_CRC_CONTROL);
+ DUMP_REG(DC_COM_CRC_CHECKSUM);
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+ DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+ DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+ DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+ DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+ DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+ DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+ DUMP_REG(DC_COM_SPI_CONTROL);
+ DUMP_REG(DC_COM_SPI_START_BYTE);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+ DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+ DUMP_REG(DC_COM_HSPI_CS_DC);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+ DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+ DUMP_REG(DC_COM_GPIO_CTRL);
+ DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+ DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+ DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+ DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+ DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+ DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+ DUMP_REG(DC_DISP_REF_TO_SYNC);
+ DUMP_REG(DC_DISP_SYNC_WIDTH);
+ DUMP_REG(DC_DISP_BACK_PORCH);
+ DUMP_REG(DC_DISP_ACTIVE);
+ DUMP_REG(DC_DISP_FRONT_PORCH);
+ DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+ DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+ DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+ DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+ DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+ DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+ DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+ DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+ DUMP_REG(DC_DISP_M0_CONTROL);
+ DUMP_REG(DC_DISP_M1_CONTROL);
+ DUMP_REG(DC_DISP_DI_CONTROL);
+ DUMP_REG(DC_DISP_PP_CONTROL);
+ DUMP_REG(DC_DISP_PP_SELECT_A);
+ DUMP_REG(DC_DISP_PP_SELECT_B);
+ DUMP_REG(DC_DISP_PP_SELECT_C);
+ DUMP_REG(DC_DISP_PP_SELECT_D);
+ DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+ DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+ DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+ DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+ DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+ DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+ DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+ DUMP_REG(DC_DISP_BORDER_COLOR);
+ DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+ DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+ DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+ DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+ DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+ DUMP_REG(DC_DISP_CURSOR_POSITION);
+ DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+ DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+ DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+ DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+ DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+ DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+ DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+ DUMP_REG(DC_DISP_SD_CONTROL);
+ DUMP_REG(DC_DISP_SD_CSC_COEFF);
+ DUMP_REG(DC_DISP_SD_LUT(0));
+ DUMP_REG(DC_DISP_SD_LUT(1));
+ DUMP_REG(DC_DISP_SD_LUT(2));
+ DUMP_REG(DC_DISP_SD_LUT(3));
+ DUMP_REG(DC_DISP_SD_LUT(4));
+ DUMP_REG(DC_DISP_SD_LUT(5));
+ DUMP_REG(DC_DISP_SD_LUT(6));
+ DUMP_REG(DC_DISP_SD_LUT(7));
+ DUMP_REG(DC_DISP_SD_LUT(8));
+ DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+ DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+ DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+ DUMP_REG(DC_DISP_SD_BL_TF(0));
+ DUMP_REG(DC_DISP_SD_BL_TF(1));
+ DUMP_REG(DC_DISP_SD_BL_TF(2));
+ DUMP_REG(DC_DISP_SD_BL_TF(3));
+ DUMP_REG(DC_DISP_SD_BL_CONTROL);
+ DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+ DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+ DUMP_REG(DC_WIN_WIN_OPTIONS);
+ DUMP_REG(DC_WIN_BYTE_SWAP);
+ DUMP_REG(DC_WIN_BUFFER_CONTROL);
+ DUMP_REG(DC_WIN_COLOR_DEPTH);
+ DUMP_REG(DC_WIN_POSITION);
+ DUMP_REG(DC_WIN_SIZE);
+ DUMP_REG(DC_WIN_PRESCALED_SIZE);
+ DUMP_REG(DC_WIN_H_INITIAL_DDA);
+ DUMP_REG(DC_WIN_V_INITIAL_DDA);
+ DUMP_REG(DC_WIN_DDA_INC);
+ DUMP_REG(DC_WIN_LINE_STRIDE);
+ DUMP_REG(DC_WIN_BUF_STRIDE);
+ DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+ DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+ DUMP_REG(DC_WIN_DV_CONTROL);
+ DUMP_REG(DC_WIN_BLEND_NOKEY);
+ DUMP_REG(DC_WIN_BLEND_1WIN);
+ DUMP_REG(DC_WIN_BLEND_2WIN_X);
+ DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+ DUMP_REG(DC_WIN_BLEND32WIN_XY);
+ DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+ DUMP_REG(DC_WINBUF_START_ADDR);
+ DUMP_REG(DC_WINBUF_START_ADDR_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_U);
+ DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+ DUMP_REG(DC_WINBUF_START_ADDR_V);
+ DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+ DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+ DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+ DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+ unsigned int i;
+ char *name;
+ int err;
+
+ name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+ dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ if (!dc->debugfs)
+ return -ENOMEM;
+
+ dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dc->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ dc->debugfs_files[i].data = dc;
+
+ err = drm_debugfs_create_files(dc->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ dc->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ dc->minor = minor;
+
+ return 0;
+
+free:
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+remove:
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+ drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+ dc->minor);
+ dc->minor = NULL;
+
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+
+ debugfs_remove(dc->debugfs);
+ dc->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ dc->pipe = drm->mode_config.num_crtc;
+
+ drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&dc->base, 256);
+ drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+ err = tegra_dc_rgb_init(drm, dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_init(dc, drm->primary);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+ dev_name(dc->dev), dc);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ devm_free_irq(dc->dev, dc->irq, dc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_dc_debugfs_exit(dc);
+ if (err < 0)
+ dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+ }
+
+ err = tegra_dc_rgb_exit(dc);
+ if (err) {
+ dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+ .drm_init = tegra_dc_drm_init,
+ .drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct resource *regs;
+ struct tegra_dc *dc;
+ int err;
+
+ dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dc->list);
+ dc->dev = &pdev->dev;
+
+ dc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dc->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dc->clk);
+ }
+
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!dc->regs) {
+ dev_err(&pdev->dev, "failed to remap registers\n");
+ return -ENXIO;
+ }
+
+ dc->irq = platform_get_irq(pdev, 0);
+ if (dc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
+ err = tegra_dc_rgb_probe(dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ return err;
+ }
+
+ err = host1x_register_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, dc);
+
+ return 0;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_dc *dc = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_disable_unprepare(dc->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_dc_of_match[] = {
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra20-dc", },
+ { },
+};
+
+struct platform_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegra-dc",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_dc_of_match,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000000..99977b5d5c36
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT (1 << 2)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+
+#define DC_CMD_STATE_ACCESS 0x040
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+#define NC_HOST_TRIG (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PIN_PM0_CONTROL 0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PIN_PM1_CONTROL 0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
+
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE_0_ENABLE (1 << 8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
+#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) << 0)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (4 << 0)
+#define DISP_DATA_FORMAT_DF3S (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB (0 << 8)
+#define DISP_ALIGNMENT_LSB (1 << 8)
+#define DISP_ORDER_RED_BLUE (0 << 9)
+#define DISP_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 (0 << 0)
+#define BASE_COLOR_SIZE111 (1 << 0)
+#define BASE_COLOR_SIZE222 (2 << 0)
+#define BASE_COLOR_SIZE333 (3 << 0)
+#define BASE_COLOR_SIZE444 (4 << 0)
+#define BASE_COLOR_SIZE555 (5 << 0)
+#define BASE_COLOR_SIZE565 (6 << 0)
+#define BASE_COLOR_SIZE332 (7 << 0)
+#define BASE_COLOR_SIZE888 (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK (0 << 0)
+#define DE_SELECT_ACTIVE (1 << 0)
+#define DE_SELECT_ACTIVE_IS (2 << 0)
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+#define DC_DISP_SD_CONTROL 0x4c2
+#define DC_DISP_SD_CSC_COEFF 0x4c3
+#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
+#define DC_DISP_DC_PIXEL_COUNT 0x4ce
+#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS 0x4d7
+#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL 0x4dc
+#define DC_DISP_SD_HW_K_VALUES 0x4dd
+#define DC_DISP_SD_MAN_K_VALUES 0x4de
+
+#define DC_WIN_WIN_OPTIONS 0x700
+#define COLOR_EXPAND (1 << 6)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP (0 << 0)
+#define BYTE_SWAP_SWAP2 (1 << 0)
+#define BYTE_SWAP_SWAP4 (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST (0 << 0)
+#define BUFFER_CONTROL_VI (1 << 0)
+#define BUFFER_CONTROL_EPP (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH 0x703
+#define WIN_COLOR_DEPTH_P1 0
+#define WIN_COLOR_DEPTH_P2 1
+#define WIN_COLOR_DEPTH_P4 2
+#define WIN_COLOR_DEPTH_P8 3
+#define WIN_COLOR_DEPTH_B4G4R4A4 4
+#define WIN_COLOR_DEPTH_B5G5R5A 5
+#define WIN_COLOR_DEPTH_B5G6R5 6
+#define WIN_COLOR_DEPTH_AB5G5R5 7
+#define WIN_COLOR_DEPTH_B8G8R8A8 12
+#define WIN_COLOR_DEPTH_R8G8B8A8 13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422 16
+#define WIN_COLOR_DEPTH_YUV422 17
+#define WIN_COLOR_DEPTH_YCbCr420P 18
+#define WIN_COLOR_DEPTH_YUV420P 19
+#define WIN_COLOR_DEPTH_YCbCr422P 20
+#define WIN_COLOR_DEPTH_YUV422P 21
+#define WIN_COLOR_DEPTH_YCbCr422R 22
+#define WIN_COLOR_DEPTH_YUV422R 23
+#define WIN_COLOR_DEPTH_YCbCr422RA 24
+#define WIN_COLOR_DEPTH_YUV422RA 25
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0x1fff) << 0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0x1fff) << 0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INC 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_DV_CONTROL 0x70e
+
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define DC_WIN_BLEND_1WIN 0x710
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND32WIN_XY 0x713
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..3a503c9e4686
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <mach/clk.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include "drm.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct device *dev = drm->dev;
+ struct host1x *host1x;
+ int err;
+
+ host1x = dev_get_drvdata(dev);
+ drm->dev_private = host1x;
+ host1x->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ err = host1x_drm_init(host1x, drm);
+ if (err < 0)
+ return err;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ return 0;
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(host1x->fbdev);
+}
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+ .load = tegra_drm_load,
+ .unload = tegra_drm_unload,
+ .open = tegra_drm_open,
+ .lastclose = tegra_drm_lastclose,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_cma_dumb_destroy,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000000..741b5dc2742c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DRM_H
+#define TEGRA_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fixed.h>
+
+struct tegra_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj;
+};
+
+static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct tegra_framebuffer, base);
+}
+
+struct host1x {
+ struct drm_device *drm;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ int syncpt;
+ int irq;
+
+ struct mutex drm_clients_lock;
+ struct list_head drm_clients;
+ struct list_head drm_active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+
+ struct drm_fbdev_cma *fbdev;
+ struct tegra_framebuffer fb;
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+ int (*drm_exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct host1x *host1x;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x *host1x);
+
+extern int host1x_register_client(struct host1x *host1x,
+ struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+ struct host1x_client client;
+
+ struct host1x *host1x;
+ struct device *dev;
+
+ struct drm_crtc base;
+ int pipe;
+
+ struct clk *clk;
+
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct tegra_dc, base);
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+ unsigned long reg)
+{
+ writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+ unsigned long reg)
+{
+ return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_output_ops {
+ int (*enable)(struct tegra_output *output);
+ int (*disable)(struct tegra_output *output);
+ int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+ unsigned long pclk);
+ int (*check_mode)(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+ TEGRA_OUTPUT_RGB,
+ TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+ struct device_node *of_node;
+ struct device *dev;
+
+ const struct tegra_output_ops *ops;
+ enum tegra_output_type type;
+
+ struct i2c_adapter *ddc;
+ const struct edid *edid;
+ unsigned int hpd_irq;
+ int hpd_gpio;
+
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+ return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+ return container_of(c, struct tegra_output, connector);
+}
+
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->enable)
+ return output->ops->enable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+ if (output && output->ops && output->ops->disable)
+ return output->ops->disable(output);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ if (output && output->ops && output->ops->setup_clock)
+ return output->ops->setup_clock(output, clk, pclk);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ if (output && output->ops && output->ops->check_mode)
+ return output->ops->check_mode(output, mode, status);
+
+ return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from fb.c */
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_host1x_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000000..97993c6835fd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(host1x->fbdev);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = tegra_drm_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+ struct drm_fbdev_cma *fbdev;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+ fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
+
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ drm_fbdev_cma_restore_mode(fbdev);
+#endif
+
+ host1x->fbdev = fbdev;
+
+ return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+ struct host1x *host1x = drm->dev_private;
+
+ drm_fbdev_cma_fini(host1x->fbdev);
+}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000000..e060c7e6434d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_hdmi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ struct regulator *vdd;
+ struct regulator *pll;
+
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct clk *clk_parent;
+ struct clk *clk;
+
+ unsigned int audio_source;
+ unsigned int audio_freq;
+ bool stereo;
+ bool dvi;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+ AUTO = 0,
+ SPDIF,
+ HDA,
+};
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+ unsigned long reg)
+{
+ return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+ unsigned int pclk;
+ unsigned int n;
+ unsigned int cts;
+ unsigned int aval;
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+ { 25200000, 4096, 25200, 24000 },
+ { 27000000, 4096, 27000, 24000 },
+ { 74250000, 4096, 74250, 24000 },
+ { 148500000, 4096, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+ { 25200000, 5880, 26250, 25000 },
+ { 27000000, 5880, 28125, 25000 },
+ { 74250000, 4704, 61875, 20000 },
+ { 148500000, 4704, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+ { 25200000, 6144, 25200, 24000 },
+ { 27000000, 6144, 27000, 24000 },
+ { 74250000, 6144, 74250, 24000 },
+ { 148500000, 6144, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+ { 25200000, 11760, 26250, 25000 },
+ { 27000000, 11760, 28125, 25000 },
+ { 74250000, 9408, 61875, 20000 },
+ { 148500000, 9408, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+ { 25200000, 12288, 25200, 24000 },
+ { 27000000, 12288, 27000, 24000 },
+ { 74250000, 12288, 74250, 24000 },
+ { 148500000, 12288, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+ { 25200000, 23520, 26250, 25000 },
+ { 27000000, 23520, 28125, 25000 },
+ { 74250000, 18816, 61875, 20000 },
+ { 148500000, 18816, 123750, 20000 },
+ { 0, 0, 0, 0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+ { 25200000, 24576, 25200, 24000 },
+ { 27000000, 24576, 27000, 24000 },
+ { 74250000, 24576, 74250, 24000 },
+ { 148500000, 24576, 148500, 24000 },
+ { 0, 0, 0, 0 },
+};
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+};
+
+static const struct tmds_config tegra2_tmds_config[] = {
+ { /* slow pixel clock modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* high pixel clock modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 1080p modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+};
+
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+ const struct tegra_hdmi_audio_config *table;
+
+ switch (audio_freq) {
+ case 32000:
+ table = tegra_hdmi_audio_32k;
+ break;
+
+ case 44100:
+ table = tegra_hdmi_audio_44_1k;
+ break;
+
+ case 48000:
+ table = tegra_hdmi_audio_48k;
+ break;
+
+ case 88200:
+ table = tegra_hdmi_audio_88_2k;
+ break;
+
+ case 96000:
+ table = tegra_hdmi_audio_96k;
+ break;
+
+ case 176400:
+ table = tegra_hdmi_audio_176_4k;
+ break;
+
+ case 192000:
+ table = tegra_hdmi_audio_192k;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ while (table->pclk) {
+ if (table->pclk == pclk)
+ return table;
+
+ table++;
+ }
+
+ return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+ const unsigned int freqs[] = {
+ 32000, 44100, 48000, 88200, 96000, 176400, 192000
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned int f = freqs[i];
+ unsigned int eight_half;
+ unsigned long value;
+ unsigned int delta;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 480000)
+ delta = 6;
+ else
+ delta = 9;
+
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ value = AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+ struct device_node *node = hdmi->dev->of_node;
+ const struct tegra_hdmi_audio_config *config;
+ unsigned int offset = 0;
+ unsigned long value;
+
+ switch (hdmi->audio_source) {
+ case HDA:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ break;
+
+ case SPDIF:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ break;
+
+ default:
+ value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ break;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ } else {
+ value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+ value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+ AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ }
+
+ config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ if (!config) {
+ dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
+ hdmi->audio_freq, pclk);
+ return -EINVAL;
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+ AUDIO_N_VALUE(config->n - 1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ value = ACR_SUBPACK_CTS(config->cts);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+ value &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ switch (hdmi->audio_freq) {
+ case 32000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+ break;
+
+ case 44100:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+ break;
+
+ case 48000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+ break;
+
+ case 88200:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+ break;
+
+ case 96000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+ break;
+
+ case 176400:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+ break;
+
+ case 192000:
+ offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+ break;
+ }
+
+ tegra_hdmi_writel(hdmi, config->aval, offset);
+ }
+
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+ return 0;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
+ unsigned int offset, u8 type,
+ u8 version, void *data, size_t size)
+{
+ unsigned long value;
+ u8 *ptr = data;
+ u32 subpack[2];
+ size_t i;
+ u8 csum;
+
+ /*
+ * The first byte of the payload is the checksum; it is chosen so that
+ * all infoframe bytes, including the three header bytes, sum to zero
+ * modulo 256.
+ */
+ csum = type + version + size - 1;
+
+ for (i = 1; i < size; i++)
+ csum += ptr[i];
+
+ ptr[0] = 0x100 - csum;
+
+ value = INFOFRAME_HEADER_TYPE(type) |
+ INFOFRAME_HEADER_VERSION(version) |
+ INFOFRAME_HEADER_LEN(size - 1);
+ tegra_hdmi_writel(hdmi, value, offset);
+
+ /*
+ * The audio infoframe only has one set of subpack registers. The HDMI
+ * block pads the rest of the data as per the spec, so we have to fix up
+ * the length before filling in the subpacks.
+ */
+ if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+ size = 6;
+
+ /*
+ * Each subpack is 7 bytes, divided into:
+ * subpack_low - bytes 0 - 3
+ * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 0; i < size; i++) {
+ size_t index = i % 7;
+
+ if (index == 0)
+ memset(subpack, 0x0, sizeof(subpack));
+
+ ((u8 *)subpack)[index] = ptr[i];
+
+ if (index == 6 || (i + 1 == size)) {
+ unsigned int reg = offset + 1 + (i / 7) * 2;
+
+ tegra_hdmi_writel(hdmi, subpack[0], reg);
+ tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+ }
+ }
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ unsigned int h_front_porch;
+ unsigned int hsize = 16;
+ unsigned int vsize = 9;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ return;
+ }
+
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ memset(&frame, 0, sizeof(frame));
+ frame.r = HDMI_AVI_R_SAME;
+
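+ /*
+ * Pick the CEA-861 video identification code (VIC) and picture aspect
+ * ratio from the active video size; the horizontal front porch is used
+ * to distinguish the different refresh-rate variants of a resolution.
+ */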
+ switch (mode->vdisplay) {
+ case 480:
+ if (mode->hdisplay == 640) {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 1;
+ } else {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 3;
+ }
+ break;
+
+ case 576:
+ if (((hsize * 10) / vsize) > 14) {
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 18;
+ } else {
+ frame.m = HDMI_AVI_M_4_3;
+ frame.vic = 17;
+ }
+ break;
+
+ case 720:
+ case 1470: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ if (h_front_porch == 110)
+ frame.vic = 4;
+ else
+ frame.vic = 19;
+ break;
+
+ case 1080:
+ case 2205: /* stereo mode */
+ frame.m = HDMI_AVI_M_16_9;
+
+ switch (h_front_porch) {
+ case 88:
+ frame.vic = 16;
+ break;
+
+ case 528:
+ frame.vic = 31;
+ break;
+
+ default:
+ frame.vic = 32;
+ break;
+ }
+ break;
+
+ default:
+ frame.m = HDMI_AVI_M_16_9;
+ frame.vic = 0;
+ break;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+
+ if (hdmi->dvi) {
+ tegra_hdmi_writel(hdmi, 0,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
+ frame.cc = HDMI_AUDIO_CC_2;
+
+ tegra_hdmi_write_infopack(hdmi,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+ HDMI_INFOFRAME_TYPE_AUDIO,
+ HDMI_AUDIO_VERSION,
+ &frame, sizeof(frame));
+
+ tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+ HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_stereo_infoframe frame;
+ unsigned long value;
+
+ if (!hdmi->stereo) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ return;
+ }
+
+ memset(&frame, 0, sizeof(frame));
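+ /*
+ * The registration identifiers carry the HDMI IEEE OUI (0x00-0x0c-0x03,
+ * transmitted LSB first) and HDMI_Video_Format 2 marks the frame as
+ * describing a 3D format.
+ */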
+ frame.regid0 = 0x03;
+ frame.regid1 = 0x0c;
+ frame.regid2 = 0x00;
+ frame.hdmi_video_format = 2;
+
+ /* TODO: 74 MHz limit? */
+ if (1) {
+ frame._3d_structure = 0;
+ } else {
+ frame._3d_structure = 8;
+ frame._3d_ext_data = 0;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_VENDOR_VERSION, &frame, 6);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+ const struct tmds_config *tmds)
+{
+ unsigned long value;
+
+ tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+ tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+ value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+ unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+ struct drm_display_mode *mode = &dc->base.mode;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct device_node *node = hdmi->dev->of_node;
+ unsigned int pulse_start, div82, pclk;
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+ unsigned long value;
+ int retries = 1000;
+ int err;
+
+ pclk = mode->clock * 1000;
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_back_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(hdmi->pll);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+ return err;
+ }
+
+ /*
+ * This assumes that the display controller will divide its parent
+ * clock by 2 to generate the pixel clock.
+ */
+ err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_set_rate(hdmi->clk, pclk);
+ if (err < 0)
+ return err;
+
+ err = clk_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ tegra_periph_reset_assert(hdmi->clk);
+ usleep_range(1000, 2000);
+ tegra_periph_reset_deassert(hdmi->clk);
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+ DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+ tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ if (dc->pipe)
+ value = HDMI_SRC_DISPLAYB;
+ else
+ value = HDMI_SRC_DISPLAYA;
+
+ if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+ (mode->vdisplay == 576)))
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_FULL,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+ else
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
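+ /*
+ * The SOR reference clock divider appears to be programmed as a
+ * fixed-point value with two fractional bits, i.e. the HDMI clock rate
+ * in MHz with a granularity of 250 kHz.
+ */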
+ div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+ if (!hdmi->dvi) {
+ err = tegra_hdmi_setup_audio(hdmi, pclk);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
+
+ if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+ /*
+ * TODO: add ELD support
+ */
+ }
+
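+ /*
+ * The maximum audio/auxiliary packet count per line appears to be
+ * derived from the number of pixels left in the horizontal blanking
+ * interval after the rekey window and a fixed overhead, at 32 pixels
+ * per packet.
+ */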
+ rekey = HDMI_REKEY_DEFAULT;
+ value = HDMI_CTRL_REKEY(rekey);
+ value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+ h_front_porch - rekey - 18) / 32);
+
+ if (!hdmi->dvi)
+ value |= HDMI_CTRL_ENABLE;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (hdmi->dvi)
+ tegra_hdmi_writel(hdmi, 0x0,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ else
+ tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+ HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+ /* TMDS CONFIG */
+ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+ num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+ tmds = tegra3_tmds_config;
+ } else {
+ num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+ tmds = tegra2_tmds_config;
+ }
+
+ for (i = 0; i < num_tmds; i++) {
+ if (pclk <= tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+ break;
+ }
+ }
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_CTL_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+ value = 0x1c800;
+ value &= ~SOR_CSTM_ROTCLK(~0);
+ value |= SOR_CSTM_ROTCLK(2);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+ tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /*
+ * Start the SOR: request the normal (powered-up) state, trigger the new
+ * power setting and then mark it done, waiting for the hardware to
+ * acknowledge before continuing.
+ */
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
+ do {
+ BUG_ON(--retries < 0);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+ value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ SOR_STATE_ASY_DEPOL_POS;
+
+ /* setup sync polarities */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+ value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+ value = DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+ /* TODO: add HDCP support */
+
+ return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ tegra_periph_reset_assert(hdmi->clk);
+ clk_disable(hdmi->clk);
+ regulator_disable(hdmi->pll);
+ regulator_disable(hdmi->vdd);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ struct clk *base;
+ int err;
+
+ err = clk_set_parent(clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(output->dev, "failed to set parent: %d\n", err);
+ return err;
+ }
+
+ base = clk_get_parent(hdmi->clk_parent);
+
+ /*
+ * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+ * respectively, each of which divides the base pll_d by 2.
+ */
+ err = clk_set_rate(base, pclk * 2);
+ if (err < 0)
+ dev_err(output->dev,
+ "failed to set base clock rate to %lu Hz\n",
+ pclk * 2);
+
+ return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long pclk = mode->clock * 1000;
+ struct clk *parent;
+ long err;
+
+ parent = clk_get_parent(hdmi->clk_parent);
+
+ err = clk_round_rate(parent, pclk * 4);
+ if (err < 0)
+ *status = MODE_NOCLOCK;
+ else
+ *status = MODE_OK;
+
+ return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+ .enable = tegra_output_hdmi_enable,
+ .disable = tegra_output_hdmi_disable,
+ .setup_clock = tegra_output_hdmi_setup_clock,
+ .check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name) \
+ seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
+ tegra_hdmi_readl(hdmi, name))
+
+ DUMP_REG(HDMI_CTXSW);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+ DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+ DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+ DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+ DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+ DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+ DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+ DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+ DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+ DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+ DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+ DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+ DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+ DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+ DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+ struct drm_minor *minor)
+{
+ unsigned int i;
+ int err;
+
+ hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+ if (!hdmi->debugfs)
+ return -ENOMEM;
+
+ hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!hdmi->debugfs_files) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ hdmi->debugfs_files[i].data = hdmi;
+
+ err = drm_debugfs_create_files(hdmi->debugfs_files,
+ ARRAY_SIZE(debugfs_files),
+ hdmi->debugfs, minor);
+ if (err < 0)
+ goto free;
+
+ hdmi->minor = minor;
+
+ return 0;
+
+free:
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+remove:
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+ drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+ hdmi->minor);
+ hdmi->minor = NULL;
+
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+
+ debugfs_remove(hdmi->debugfs);
+ hdmi->debugfs = NULL;
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+ struct drm_device *drm)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ hdmi->output.type = TEGRA_OUTPUT_HDMI;
+ hdmi->output.dev = client->dev;
+ hdmi->output.ops = &hdmi_ops;
+
+ err = tegra_output_init(drm, &hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+ if (err < 0)
+ dev_err(client->dev, "debugfs setup failed: %d\n", err);
+ }
+
+ return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ int err;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_hdmi_debugfs_exit(hdmi);
+ if (err < 0)
+ dev_err(client->dev, "debugfs cleanup failed: %d\n",
+ err);
+ }
+
+ err = tegra_output_disable(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(&hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+ .drm_init = tegra_hdmi_drm_init,
+ .drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi;
+ struct resource *regs;
+ int err;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = &pdev->dev;
+ hdmi->audio_source = AUTO;
+ hdmi->audio_freq = 44100;
+ hdmi->stereo = false;
+ hdmi->dvi = false;
+
+ hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdmi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(hdmi->clk);
+ }
+
+ err = clk_prepare(hdmi->clk);
+ if (err < 0)
+ return err;
+
+ hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(hdmi->clk_parent))
+ return PTR_ERR(hdmi->clk_parent);
+
+ err = clk_prepare(hdmi->clk_parent);
+ if (err < 0)
+ return err;
+
+ err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+ return err;
+ }
+
+ hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(hdmi->vdd)) {
+ dev_err(&pdev->dev, "failed to get VDD regulator\n");
+ return PTR_ERR(hdmi->vdd);
+ }
+
+ hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+ if (IS_ERR(hdmi->pll)) {
+ dev_err(&pdev->dev, "failed to get PLL regulator\n");
+ return PTR_ERR(hdmi->pll);
+ }
+
+ hdmi->output.dev = &pdev->dev;
+
+ err = tegra_output_parse_dt(&hdmi->output);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!hdmi->regs)
+ return -EADDRNOTAVAIL;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ hdmi->irq = err;
+
+ hdmi->client.ops = &hdmi_client_ops;
+ INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.dev = &pdev->dev;
+
+ err = host1x_register_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_unregister_client(host1x, &hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ clk_unprepare(hdmi->clk_parent);
+ clk_unprepare(hdmi->clk);
+
+ return 0;
+}
+
+static struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+ .driver = {
+ .name = "tegra-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_hdmi_of_match,
+ },
+ .probe = tegra_hdmi_probe,
+ .remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000000..1477f36eb45a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HDMI_INFOFRAME_TYPE_AVI 0x82
+#define HDMI_INFOFRAME_TYPE_SPD 0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned s:2; /* scan information */
+ unsigned b:2; /* bar info data valid */
+ unsigned a:1; /* active info present */
+ unsigned y:2; /* RGB or YCbCr */
+ unsigned res1:1;
+
+ /* PB2 */
+ unsigned r:4; /* active format aspect ratio */
+ unsigned m:2; /* picture aspect ratio */
+ unsigned c:2; /* colorimetry */
+
+ /* PB3 */
+ unsigned sc:2; /* scan information */
+ unsigned q:2; /* quantization range */
+ unsigned ec:3; /* extended colorimetry */
+ unsigned itc:1; /* IT content */
+
+ /* PB4 */
+ unsigned vic:7; /* video format id code */
+ unsigned res4:1;
+
+ /* PB5 */
+ unsigned pr:4; /* pixel repetition factor */
+ unsigned cn:2; /* IT content type */
+ unsigned yq:2; /* ycc quantization range */
+
+ /* PB6-7 */
+ u16 top_bar_end_line;
+
+ /* PB8-9 */
+ u16 bot_bar_start_line;
+
+ /* PB10-11 */
+ u16 left_bar_end_pixel;
+
+ /* PB12-13 */
+ u16 right_bar_start_pixel;
+} __packed;
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB 0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT 0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE 0x0
+#define HDMI_AVI_S_OVERSCAN 0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE 0x0
+#define HDMI_AVI_C_SMPTE 0x1
+#define HDMI_AVI_C_ITU_R 0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3 0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME 0x8
+#define HDMI_AVI_R_4_3_CENTER 0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ unsigned cc:3; /* channel count */
+ unsigned res1:1;
+ unsigned ct:4; /* coding type */
+
+ /* PB2 */
+ unsigned ss:2; /* sample size */
+ unsigned sf:3; /* sample frequency */
+ unsigned res2:3;
+
+ /* PB3 */
+ unsigned cxt:5; /* coding extension type */
+ unsigned res3:3;
+
+ /* PB4 */
+ u8 ca; /* channel/speaker allocation */
+
+ /* PB5 */
+ unsigned res5:3;
+ unsigned lsv:4; /* level shift value */
+ unsigned dm_inh:1; /* downmix inhibit */
+
+ /* PB6-10 reserved */
+ u8 res6;
+ u8 res7;
+ u8 res8;
+ u8 res9;
+ u8 res10;
+} __packed;
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2 0x1
+#define HDMI_AUDIO_CC_3 0x2
+#define HDMI_AUDIO_CC_4 0x3
+#define HDMI_AUDIO_CC_5 0x4
+#define HDMI_AUDIO_CC_6 0x5
+#define HDMI_AUDIO_CC_7 0x6
+#define HDMI_AUDIO_CC_8 0x7
+
+#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM 0x1
+#define HDMI_AUDIO_CT_AC3 0x2
+#define HDMI_AUDIO_CT_MPEG1 0x3
+#define HDMI_AUDIO_CT_MP3 0x4
+#define HDMI_AUDIO_CT_MPEG2 0x5
+#define HDMI_AUDIO_CT_AAC_LC 0x6
+#define HDMI_AUDIO_CT_DTS 0x7
+#define HDMI_AUDIO_CT_ATRAC 0x8
+#define HDMI_AUDIO_CT_DSD 0x9
+#define HDMI_AUDIO_CT_E_AC3 0xa
+#define HDMI_AUDIO_CT_DTS_HD 0xb
+#define HDMI_AUDIO_CT_MLP 0xc
+#define HDMI_AUDIO_CT_DST 0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT 0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SF_32K 0x1
+#define HDMI_AUDIO_SF_44_1K 0x2
+#define HDMI_AUDIO_SF_48K 0x3
+#define HDMI_AUDIO_SF_88_2K 0x4
+#define HDMI_AUDIO_SF_96K 0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K 0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT 0x1
+#define HDMI_AUDIO_SS_20BIT 0x2
+#define HDMI_AUDIO_SS_24BIT 0x3
+
+#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC 0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+ /* PB0 */
+ u8 csum;
+
+ /* PB1 */
+ u8 regid0;
+
+ /* PB2 */
+ u8 regid1;
+
+ /* PB3 */
+ u8 regid2;
+
+ /* PB4 */
+ unsigned res1:5;
+ unsigned hdmi_video_format:3;
+
+ /* PB5 */
+ unsigned res2:4;
+ unsigned _3d_structure:4;
+
+ /* PB6*/
+ unsigned res3:4;
+ unsigned _3d_ext_data:4;
+} __packed;
+
+#define HDMI_VENDOR_VERSION 0x01
+
+/* register definitions */
+#define HDMI_CTXSW 0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
+#define ACR_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCAPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 000000000000..5d17b113a6fc
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+
+struct host1x_drm_client {
+ struct host1x_client *client;
+ struct device_node *np;
+ struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
+{
+ struct host1x_drm_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&client->list);
+ client->np = of_node_get(np);
+
+ list_add_tail(&client->list, &host1x->drm_clients);
+
+ return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *drm,
+ struct host1x_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&drm->list);
+ list_add_tail(&drm->list, &host1x->drm_active);
+ drm->client = client;
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x *host1x,
+ struct host1x_drm_client *client)
+{
+ mutex_lock(&host1x->drm_clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->drm_clients_lock);
+
+ of_node_put(client->np);
+ kfree(client);
+
+ return 0;
+}
+
+static int host1x_parse_dt(struct host1x *host1x)
+{
+ static const char * const compat[] = {
+ "nvidia,tegra20-dc",
+ "nvidia,tegra20-hdmi",
+ "nvidia,tegra30-dc",
+ "nvidia,tegra30-hdmi",
+ };
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(compat); i++) {
+ struct device_node *np;
+
+ for_each_child_of_node(host1x->dev->of_node, np) {
+ if (of_device_is_compatible(np, compat[i]) &&
+ of_device_is_available(np)) {
+ err = host1x_add_drm_client(host1x, np);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_host1x_probe(struct platform_device *pdev)
+{
+ struct host1x *host1x;
+ struct resource *regs;
+ int err;
+
+ host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+ if (!host1x)
+ return -ENOMEM;
+
+ mutex_init(&host1x->drm_clients_lock);
+ INIT_LIST_HEAD(&host1x->drm_clients);
+ INIT_LIST_HEAD(&host1x->drm_active);
+ mutex_init(&host1x->clients_lock);
+ INIT_LIST_HEAD(&host1x->clients);
+ host1x->dev = &pdev->dev;
+
+ err = host1x_parse_dt(host1x);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+ return err;
+ }
+
+ host1x->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host1x->clk))
+ return PTR_ERR(host1x->clk);
+
+ err = clk_prepare_enable(host1x->clk);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ err = -ENXIO;
+ goto err;
+ }
+
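+ /*
+ * The first interrupt appears to be the syncpoint interrupt, the
+ * second the general host1x interrupt.
+ */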
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ goto err;
+
+ host1x->syncpt = err;
+
+ err = platform_get_irq(pdev, 1);
+ if (err < 0)
+ goto err;
+
+ host1x->irq = err;
+
+ host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
+ if (!host1x->regs) {
+ err = -EADDRNOTAVAIL;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, host1x);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(host1x->clk);
+ return err;
+}
+
+static int tegra_host1x_remove(struct platform_device *pdev)
+{
+ struct host1x *host1x = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(host1x->clk);
+
+ return 0;
+}
+
+int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
+{
+ struct host1x_client *client;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_init) {
+ int err = client->ops->drm_init(client, drm);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM setup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&host1x->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+int host1x_drm_exit(struct host1x *host1x)
+{
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+ struct host1x_client *client;
+
+ if (!host1x->drm)
+ return 0;
+
+ mutex_lock(&host1x->clients_lock);
+
+ list_for_each_entry_reverse(client, &host1x->clients, list) {
+ if (client->ops && client->ops->drm_exit) {
+ int err = client->ops->drm_exit(client);
+ if (err < 0) {
+ dev_err(host1x->dev,
+ "DRM cleanup failed for %s: %d\n",
+ dev_name(client->dev), err);
+ mutex_unlock(&host1x->clients_lock);
+ return err;
+ }
+ }
+ }
+
+ mutex_unlock(&host1x->clients_lock);
+
+ drm_platform_exit(&tegra_drm_driver, pdev);
+ host1x->drm = NULL;
+
+ return 0;
+}
+
+int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ mutex_lock(&host1x->clients_lock);
+ list_add_tail(&client->list, &host1x->clients);
+ mutex_unlock(&host1x->clients_lock);
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+ if (drm->np == client->dev->of_node)
+ host1x_activate_drm_client(host1x, drm, client);
+
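+ /*
+ * Once every DRM client described in the device tree has registered,
+ * the list of pending clients is empty and the DRM device can be
+ * brought up.
+ */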
+ if (list_empty(&host1x->drm_clients)) {
+ struct platform_device *pdev = to_platform_device(host1x->dev);
+
+ err = drm_platform_init(&tegra_drm_driver, pdev);
+ if (err < 0) {
+ dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+ return err;
+ }
+ }
+
+ client->host1x = host1x;
+
+ return 0;
+}
+
+int host1x_unregister_client(struct host1x *host1x,
+ struct host1x_client *client)
+{
+ struct host1x_drm_client *drm, *tmp;
+ int err;
+
+ list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+ if (drm->client == client) {
+ err = host1x_drm_exit(host1x);
+ if (err < 0) {
+ dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+ err);
+ return err;
+ }
+
+ host1x_remove_drm_client(host1x, drm);
+ break;
+ }
+ }
+
+ mutex_lock(&host1x->clients_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&host1x->clients_lock);
+
+ return 0;
+}
+
+static struct of_device_id tegra_host1x_of_match[] = {
+ { .compatible = "nvidia,tegra30-host1x", },
+ { .compatible = "nvidia,tegra20-host1x", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
+
+struct platform_driver tegra_host1x_driver = {
+ .driver = {
+ .name = "tegra-host1x",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_host1x_of_match,
+ },
+ .probe = tegra_host1x_probe,
+ .remove = tegra_host1x_remove,
+};
+
+static int __init tegra_host1x_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&tegra_host1x_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_driver_register(&tegra_dc_driver);
+ if (err < 0)
+ goto unregister_host1x;
+
+ err = platform_driver_register(&tegra_hdmi_driver);
+ if (err < 0)
+ goto unregister_dc;
+
+ return 0;
+
+unregister_dc:
+ platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+ platform_driver_unregister(&tegra_host1x_driver);
+ return err;
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+ platform_driver_unregister(&tegra_hdmi_driver);
+ platform_driver_unregister(&tegra_dc_driver);
+ platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000000..8140fc6c34d8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct edid *edid = NULL;
+ int err = 0;
+
+ if (output->edid)
+ edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ else if (output->ddc)
+ edid = drm_get_edid(connector, output->ddc);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ err = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_mode_status status = MODE_OK;
+ int err;
+
+ err = tegra_output_check_mode(output, mode, &status);
+ if (err < 0)
+ return MODE_ERROR;
+
+ return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+
+ return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = tegra_connector_get_modes,
+ .mode_valid = tegra_connector_mode_valid,
+ .best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_connector_status status = connector_status_unknown;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ if (gpio_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ status = connector_status_connected;
+ }
+
+ return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = tegra_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ int err;
+
+ err = tegra_output_enable(output);
+ if (err < 0)
+ dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = tegra_encoder_dpms,
+ .mode_fixup = tegra_encoder_mode_fixup,
+ .prepare = tegra_encoder_prepare,
+ .commit = tegra_encoder_commit,
+ .mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+ struct tegra_output *output = data;
+
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+ enum of_gpio_flags flags;
+ struct device_node *ddc;
+ size_t size;
+ int err;
+
+ if (!output->of_node)
+ output->of_node = output->dev->of_node;
+
+ output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+ ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+ if (ddc) {
+ output->ddc = of_find_i2c_adapter_by_node(ddc);
+ if (!output->ddc) {
+ err = -EPROBE_DEFER;
+ of_node_put(ddc);
+ return err;
+ }
+
+ of_node_put(ddc);
+ }
+
+ if (!output->edid && !output->ddc)
+ return -ENODEV;
+
+ output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+ "nvidia,hpd-gpio", 0,
+ &flags);
+
+ return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector, encoder, err;
+
+ if (gpio_is_valid(output->hpd_gpio)) {
+ unsigned long flags;
+
+ err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+ "HDMI hotplug detect");
+ if (err < 0) {
+ dev_err(output->dev, "gpio_request_one(): %d\n", err);
+ return err;
+ }
+
+ err = gpio_to_irq(output->hpd_gpio);
+ if (err < 0) {
+ dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+ goto free_hpd;
+ }
+
+ output->hpd_irq = err;
+
+ flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT;
+
+ err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+ flags, "hpd", output);
+ if (err < 0) {
+ dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ output->hpd_irq, err);
+ goto free_hpd;
+ }
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ switch (output->type) {
+ case TEGRA_OUTPUT_RGB:
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ break;
+
+ case TEGRA_OUTPUT_HDMI:
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ break;
+
+ default:
+ connector = DRM_MODE_CONNECTOR_Unknown;
+ encoder = DRM_MODE_ENCODER_NONE;
+ break;
+ }
+
+ drm_connector_init(drm, &output->connector, &connector_funcs,
+ connector);
+ drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+ drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+ drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+ drm_sysfs_connector_add(&output->connector);
+
+ output->encoder.possible_crtcs = 0x3;
+
+ return 0;
+
+free_hpd:
+ gpio_free(output->hpd_gpio);
+
+ return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+ if (gpio_is_valid(output->hpd_gpio)) {
+ free_irq(output->hpd_irq, output);
+ gpio_free(output->hpd_gpio);
+ }
+
+ if (output->ddc)
+ put_device(&output->ddc->dev);
+
+ return 0;
+}
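
The connector and encoder callbacks in output.c recover the enclosing tegra_output from the embedded drm_connector/drm_encoder via connector_to_output()/encoder_to_output(). A standalone, userspace-compilable sketch of that container_of pattern follows; all names in it are hypothetical and not part of the driver.

/* Minimal illustration of the container_of pattern; hypothetical types. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct connector { int status; };

struct output {
	int type;
	struct connector connector;	/* embedded, like drm_connector above */
};

static struct output *connector_to_output(struct connector *c)
{
	return container_of(c, struct output, connector);
}

int main(void)
{
	struct output out = { .type = 1 };

	/* given only the embedded member, recover the outer structure */
	printf("%d\n", connector_to_output(&out.connector)->type);
	return 0;
}
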
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000000..ed4416f20260
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+ struct tegra_output output;
+ struct clk *clk_parent;
+ struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+ unsigned long offset;
+ unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+ const struct reg_entry *table,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+ struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+ tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+ return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+ struct clk *clk, unsigned long pclk)
+{
+ struct tegra_rgb *rgb = to_rgb(output);
+
+ return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+ struct drm_display_mode *mode,
+ enum drm_mode_status *status)
+{
+ /*
+ * FIXME: For now, always assume that the mode is okay. There are
+ * unresolved issues with clk_round_rate(), which doesn't always
+ * reliably report whether a frequency can be set or not.
+ */
+
+ *status = MODE_OK;
+
+ return 0;
+}
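
Once the clk_round_rate() issues mentioned in the FIXME above are resolved, a ->check_mode() implementation could plausibly validate the pixel clock along the following lines. This is a hedged sketch only; example_rgb_check_mode and the exact acceptance policy are hypothetical, not the driver's code.

/* Hypothetical sketch: reject modes whose pixel clock the clock tree
 * cannot reproduce exactly. */
static int example_rgb_check_mode(struct tegra_output *output,
				  struct drm_display_mode *mode,
				  enum drm_mode_status *status)
{
	struct tegra_rgb *rgb = to_rgb(output);
	unsigned long pclk = mode->clock * 1000;	/* mode->clock is in kHz */
	long rounded = clk_round_rate(rgb->clk, pclk);

	*status = (rounded == (long)pclk) ? MODE_OK : MODE_CLOCK_RANGE;

	return 0;
}
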
+
+static const struct tegra_output_ops rgb_ops = {
+ .enable = tegra_output_rgb_enable,
+ .disable = tegra_output_rgb_disable,
+ .setup_clock = tegra_output_rgb_setup_clock,
+ .check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ struct tegra_rgb *rgb;
+ int err;
+
+ np = of_get_child_by_name(dc->dev->of_node, "rgb");
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return -ENOMEM;
+
+ rgb->clk = devm_clk_get(dc->dev, NULL);
+ if (IS_ERR(rgb->clk)) {
+ dev_err(dc->dev, "failed to get clock\n");
+ return PTR_ERR(rgb->clk);
+ }
+
+ rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ if (IS_ERR(rgb->clk_parent)) {
+ dev_err(dc->dev, "failed to get parent clock\n");
+ return PTR_ERR(rgb->clk_parent);
+ }
+
+ err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+
+ err = tegra_output_parse_dt(&rgb->output);
+ if (err < 0)
+ return err;
+
+ dc->rgb = &rgb->output;
+
+ return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct tegra_rgb *rgb = to_rgb(dc->rgb);
+ int err;
+
+ if (!dc->rgb)
+ return -ENODEV;
+
+ rgb->output.type = TEGRA_OUTPUT_RGB;
+ rgb->output.ops = &rgb_ops;
+
+ err = tegra_output_init(dc->base.dev, &rgb->output);
+ if (err < 0) {
+ dev_err(dc->dev, "output setup failed: %d\n", err);
+ return err;
+ }
+
+ /*
+	 * By default, outputs can be attached to any display controller.
+ * RGB outputs are an exception, so we make sure they can be attached
+ * to only their parent display controller.
+ */
+ rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+ return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+ if (dc->rgb) {
+ int err;
+
+ err = tegra_output_disable(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output failed to disable: %d\n", err);
+ return err;
+ }
+
+ err = tegra_output_exit(dc->rgb);
+ if (err < 0) {
+ dev_err(dc->dev, "output cleanup failed: %d\n", err);
+ return err;
+ }
+
+ dc->rgb = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b5..52b20b12c83a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
if (interruptible) {
return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->reserved) == 0);
+ !ttm_bo_is_reserved(bo));
} else {
- wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
return 0;
}
}
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
struct ttm_bo_global *glob = bo->glob;
int ret;
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
/**
* Deadlock avoidance for multi-bo reserving.
*/
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
return ret;
}
+ atomic_set(&bo->reserved, 1);
if (use_sequence) {
/**
* Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait_reserve, no_wait_gpu, mem);
+ no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -433,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bo->mem = tmp_mem;
bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
+ *mem = tmp_mem;
}
goto out_err;
@@ -487,40 +489,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
/*
- * Make processes trying to reserve really pick it up.
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
*/
- smp_mb__after_atomic_dec();
- wake_up_all(&bo->event_queue);
+ smp_mb__before_atomic_dec();
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
- struct ttm_bo_driver *driver;
+ struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
- void *sync_obj_arg;
int put_count;
int ret;
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
- if (!bo->sync_obj) {
-
- spin_lock(&glob->lru_lock);
-
- /**
- * Lock inversion between bo:reserve and bdev::fence_lock here,
- * but that's OK, since we're only trylocking.
- */
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY))
- goto queue;
-
+ if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
@@ -530,22 +525,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_list_ref_sub(bo, put_count, true);
return;
- } else {
- spin_lock(&glob->lru_lock);
}
-queue:
- driver = bdev->driver;
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
+ spin_unlock(&bdev->fence_lock);
+
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bdev->fence_lock);
if (sync_obj) {
- driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
schedule_delayed_work(&bdev->wq,
@@ -553,68 +548,84 @@ queue:
}
/**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, do nothing.
*
+ * Must be called with lru_lock and reservation held; this function
+ * will drop both before returning.
+ *
* @interruptible Any sleeps should occur interruptibly.
- * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
* @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
*/
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait_reserve,
- bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
- int ret = 0;
+ int ret;
-retry:
spin_lock(&bdev->fence_lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
- if (unlikely(ret != 0))
- return ret;
+ if (ret && !no_wait_gpu) {
+ void *sync_obj;
-retry_reserve:
- spin_lock(&glob->lru_lock);
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(list_empty(&bo->ddestroy))) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- return 0;
- }
-
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
- if (unlikely(ret != 0))
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
return ret;
- goto retry_reserve;
- }
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
- BUG_ON(ret != 0);
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- /**
- * We can re-check for sync object without taking
- * the bo::lock since setting the sync object requires
- * also bo::reserved. A busy object at this point may
- * be caused by another thread recently starting an accelerated
- * eviction.
- */
+ /*
+ * We raced, and lost, someone else holds the reservation now,
+ * and is probably busy in ttm_bo_cleanup_memtype_use.
+ *
+ * Even if it's not the case, because we finished waiting any
+ * delayed destruction would succeed, so just return success
+ * here.
+ */
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ } else
+ spin_unlock(&bdev->fence_lock);
- if (unlikely(bo->sync_obj)) {
+ if (ret || unlikely(list_empty(&bo->ddestroy))) {
atomic_set(&bo->reserved, 0);
wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +668,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
- !remove_all);
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ if (!ret)
+ ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+ !remove_all);
+ else
+ spin_unlock(&glob->lru_lock);
+
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;
@@ -697,6 +712,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ write_lock(&bdev->vm_lock);
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
drm_mm_put_block(bo->vm_node);
@@ -708,18 +724,14 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
- write_lock(&bdev->vm_lock);
}
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo = *p_bo;
- struct ttm_bo_device *bdev = bo->bdev;
*p_bo = NULL;
- write_lock(&bdev->vm_lock);
kref_put(&bo->kref, ttm_bo_release);
- write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
@@ -738,7 +750,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
@@ -756,7 +768,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -769,7 +781,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +792,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait_reserve, no_wait_gpu);
+ no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
@@ -794,49 +806,33 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
- int ret, put_count = 0;
+ int ret = -EBUSY, put_count;
-retry:
spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
+ list_for_each_entry(bo, &man->lru, lru) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
}
- bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (ret) {
spin_unlock(&glob->lru_lock);
- ret = ttm_bo_cleanup_refs(bo, interruptible,
- no_wait_reserve, no_wait_gpu);
- kref_put(&bo->list_kref, ttm_bo_release_list);
-
return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_reserve))
- ret = ttm_bo_wait_unreserved(bo, interruptible);
+ kref_get(&bo->list_kref);
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+ no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
-
- /**
- * We *need* to retry after releasing the lru lock.
- */
-
- if (unlikely(ret != 0))
- return ret;
- goto retry;
+ return ret;
}
put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +842,7 @@ retry:
ttm_bo_list_ref_sub(bo, put_count, true);
- ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +867,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +879,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_mem_evict_first(bdev, mem_type,
+ interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -950,7 +945,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1036,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait_reserve, no_wait_gpu);
+ interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
@@ -1054,26 +1049,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
- if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
- return -EBUSY;
-
- return wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/*
* FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1078,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, placement, &mem,
+ interruptible, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false,
+ interruptible, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node)
ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1112,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu)
{
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1128,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible,
+ no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1179,7 +1167,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
@@ -1200,7 +1187,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
pr_err("Illegal buffer object size\n");
@@ -1233,7 +1219,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->seq_valid = false;
@@ -1257,7 +1242,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1306,7 +1291,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
@@ -1321,8 +1305,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL, NULL);
+ interruptible, persistent_swap_storage, acc_size,
+ NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1344,7 +1328,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1577,7 +1561,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
goto out_no_addr_mm;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- bdev->nice_mode = true;
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
bdev->glob = glob;
@@ -1721,7 +1704,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
- void *sync_obj_arg;
int ret = 0;
if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1711,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
while (bo->sync_obj) {
- if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1725,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return -EBUSY;
sync_obj = driver->sync_obj_ref(bo->sync_obj);
- sync_obj_arg = bo->sync_obj_arg;
spin_unlock(&bdev->fence_lock);
- ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+ ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1734,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
return ret;
}
spin_lock(&bdev->fence_lock);
- if (likely(bo->sync_obj == sync_obj &&
- bo->sync_obj_arg == sync_obj_arg)) {
+ if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1777,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
- if (atomic_dec_and_test(&bo->cpu_writers))
- wake_up_all(&bo->event_queue);
+ atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
@@ -1817,40 +1796,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
- while (ret == -EBUSY) {
- if (unlikely(list_empty(&glob->swap_lru))) {
- spin_unlock(&glob->lru_lock);
- return -EBUSY;
- }
-
- bo = list_first_entry(&glob->swap_lru,
- struct ttm_buffer_object, swap);
- kref_get(&bo->list_kref);
+ list_for_each_entry(bo, &glob->swap_lru, swap) {
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
+ break;
+ }
- if (!list_empty(&bo->ddestroy)) {
- spin_unlock(&glob->lru_lock);
- (void) ttm_bo_cleanup_refs(bo, false, false, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- continue;
- }
+ if (ret) {
+ spin_unlock(&glob->lru_lock);
+ return ret;
+ }
- /**
- * Reserve buffer. Since we unlock while sleeping, we need
- * to re-check that nobody removed us from the swap-list while
- * we slept.
- */
+ kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret == -EBUSY)) {
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait_unreserved(bo, false);
- kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- }
+ if (!list_empty(&bo->ddestroy)) {
+ ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
}
- BUG_ON(ret != 0);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
@@ -1876,7 +1840,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false, false);
+ false, false);
if (unlikely(ret != 0))
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e0..8be35c809c7b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
+ if (ret) {
+ /* if we fail here don't nuke the mm node
+ * as the bo still owns it */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret)
+ if (ret) {
+ /* failing here, means keep old copy as-is */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
mb();
out2:
@@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
- fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+ fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
@@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_lock(&bdev->fence_lock);
+ if (bo->sync_obj)
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ else
+ fbo->sync_obj = NULL;
+ spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
@@ -611,8 +623,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- void *sync_obj_arg,
- bool evict, bool no_wait_reserve,
+ bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
@@ -630,7 +641,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_lock(&bdev->vm_lock);
bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
+ if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+ bo = NULL;
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c5..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
- ret = ttm_bo_wait_cpu(bo, false);
- if (ret)
- return ret;
- goto retry;
+ return -EBUSY;
}
}
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
driver = bdev->driver;
glob = bo->glob;
- spin_lock(&bdev->fence_lock);
spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = entry->new_sync_obj_arg;
ttm_bo_unreserve_locked(bo);
entry->reserved = false;
}
- spin_unlock(&glob->lru_lock);
spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467ca..dbc2def887cd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
- init_waitqueue_head(&glob->queue);
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956a..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
*/
struct ttm_object_device {
- rwlock_t object_lock;
+ spinlock_t object_lock;
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
- write_lock(&tdev->object_lock);
kref_init(&base->refcount);
- ret = drm_ht_just_insert_please(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
out_err0:
return ret;
}
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
container_of(kref, struct ttm_base_object, refcount);
struct ttm_object_device *tdev = base->tfile->tdev;
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
- write_unlock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ spin_unlock(&tdev->object_lock);
+
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
}
- write_lock(&tdev->object_lock);
}
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
- struct ttm_object_device *tdev = base->tfile->tdev;
*p_base = NULL;
- /*
- * Need to take the lock here to avoid racing with
- * users trying to look up the object.
- */
-
- write_lock(&tdev->object_lock);
kref_put(&base->refcount, ttm_release_base);
- write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
struct drm_hash_item *hash;
int ret;
- read_lock(&tdev->object_lock);
- ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
- kref_get(&base->refcount);
+ ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
}
- read_unlock(&tdev->object_lock);
+ rcu_read_unlock();
if (unlikely(ret != 0))
return NULL;
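
The lookup side above pairs drm_ht_find_item_rcu() with kref_get_unless_zero() so a reader never resurrects an object whose final reference has already been dropped. A condensed sketch of that pattern follows; example_lookup is a hypothetical name, the real entry point being ttm_base_object_lookup().

/* Hypothetical condensation of the RCU-safe lookup introduced above. */
static struct ttm_base_object *
example_lookup(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;

	rcu_read_lock();
	if (drm_ht_find_item_rcu(&tdev->object_hash, key, &hash) == 0) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		/* refuse the object if its final unref already ran */
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
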
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
return NULL;
tdev->mem_glob = mem_glob;
- rwlock_init(&tdev->object_lock);
+ spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- write_lock(&tdev->object_lock);
+ spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- write_unlock(&tdev->object_lock);
+ spin_unlock(&tdev->object_lock);
kfree(tdev);
}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf6745..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
- char rbuf[3];
+ char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
+ rbuf = kmalloc(2, GFP_KERNEL);
+ if (rbuf == NULL)
+ goto error;
+
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->ddev->usbdev,
usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- i--;
goto error;
}
block[i] = rbuf[1];
}
+ kfree(rbuf);
return block;
error:
kfree(block);
+ kfree(rbuf);
return NULL;
}
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
+ /*
+ * We only read the main block, but if the monitor reports extension
+ * blocks then the drm edid code expects them to be present, so patch
+ * the extension count to 0.
+ */
+ edid->checksum += edid->extensions;
+ edid->extensions = 0;
+
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
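
The checksum adjustment above works because a valid EDID base block is defined so that its 128 bytes sum to 0 modulo 256: byte 126 holds the extension count and byte 127 the checksum, so clearing the former must add its old value to the latter. A standalone sketch of the same arithmetic, with hypothetical helper names:

/* Standalone illustration of the EDID checksum compensation. */
#include <stdint.h>
#include <stddef.h>

#define EDID_BLOCK_SIZE 128

static void clear_extensions(uint8_t block[EDID_BLOCK_SIZE])
{
	block[127] += block[126];	/* checksum += extensions */
	block[126] = 0;			/* extensions = 0 */
}

static int block_checksum_ok(const uint8_t block[EDID_BLOCK_SIZE])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < EDID_BLOCK_SIZE; i++)
		sum += block[i];

	return sum == 0;	/* invariant preserved by clear_extensions() */
}
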
@@ -84,7 +97,8 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder*
+udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
@@ -97,8 +111,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
return encoder;
}
-int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
- uint64_t val)
+static int udl_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
{
return 0;
}
@@ -110,13 +125,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
.best_encoder = udl_best_single_encoder,
};
-struct drm_connector_funcs udl_connector_funcs = {
+static struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +153,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_sysfs_connector_add(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
- vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+ vmwgfx_surface.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ * 1. Red, bump W, luminance and depth are stored in the first channel.
+ * 2. Green, bump V and stencil are stored in the second channel.
+ * 3. Blue and bump U are stored in the third channel.
+ * 4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ * 1. For compressed formats, only the data channel is used and its size
+ * is equal to that of a singular block in the compression scheme.
+ * 2. For buffer formats, only the data channel is used and its size is
+ * exactly one byte in length.
+ * 3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+ SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
+ SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
+ data */
+ SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
+ data */
+ SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
+ U and V */
+ SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
+ data */
+ SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
+ data */
+ SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
+ channel */
+ SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
+ data */
+ SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
+ data */
+ SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
+ data */
+ SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
+ data */
+ SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
+ SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
+ channel */
+ SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
+ data */
+ SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
+ data */
+ SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
+ data depending on the
+ compression method used */
+ SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
+ floating point
+ representation in
+ all channels */
+ SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
+ data. */
+ SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
+ SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
+ SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
+ SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
+ SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
+ e.g., NV12. */
+ SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
+ Y, U, V, e.g., YV12. */
+
+ SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V,
+ SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_LUMINANCE,
+ SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
+ SVGA3DBLOCKDESC_W,
+ SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
+ SVGA3DBLOCKDESC_V |
+ SVGA3DBLOCKDESC_W |
+ SVGA3DBLOCKDESC_Q,
+ SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
+ SVGA3DBLOCKDESC_IEEE_FP,
+ SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
+ SVGA3DBLOCKDESC_GREEN,
+ SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
+ SVGA3DBLOCKDESC_BLUE,
+ SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
+ SVGA3DBLOCKDESC_ALPHA,
+ SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
+ SVGA3DBLOCKDESC_STENCIL,
+ SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
+ SVGA3DBLOCKDESC_Y,
+ SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
+ SVGA3DBLOCKDESC_Y |
+ SVGA3DBLOCKDESC_U_VIDEO |
+ SVGA3DBLOCKDESC_V_VIDEO,
+ SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
+ SVGA3DBLOCKDESC_EXP,
+ SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+ SVGA3DBLOCKDESC_SRGB,
+ SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_2PLANAR_YUV,
+ SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
+ SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
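
Because the composite values above are plain bitwise ORs of the per-channel bits, code consuming a descriptor can test for an individual channel with a simple mask. A hypothetical helper as a sketch (not part of this header):

/* Hypothetical sketch: test whether a descriptor carries a given channel. */
static inline bool example_block_has_channel(enum svga3d_block_desc desc,
					     enum svga3d_block_desc channel)
{
	return (desc & channel) == channel;
}

/* e.g. example_block_has_channel(SVGA3DBLOCKDESC_RGBA, SVGA3DBLOCKDESC_ALPHA)
 * evaluates to true. */
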
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ * 1. Block description.
+ * 2. Dimensions of a block in the surface.
+ * 3. Size of block in bytes.
+ * 4. Bit depth of the pixel data.
+ * 5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type) \
+ struct { \
+ union { \
+ type blue; \
+ type u; \
+ type uv_video; \
+ type u_video; \
+ }; \
+ union { \
+ type green; \
+ type v; \
+ type stencil; \
+ type v_video; \
+ }; \
+ union { \
+ type red; \
+ type w; \
+ type luminance; \
+ type y; \
+ type depth; \
+ type data; \
+ }; \
+ union { \
+ type alpha; \
+ type q; \
+ type exp; \
+ }; \
+ }
+
+struct svga3d_surface_desc {
+ enum svga3d_block_desc block_desc;
+ surf_size_struct block_size;
+ u32 bytes_per_block;
+ u32 pitch_bytes_per_block;
+
+ struct {
+ u32 total;
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_depth;
+
+ struct {
+ SVGA3D_CHANNEL_DEF(uint8);
+ } bit_offset;
+};
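
The descriptor's block dimensions and per-block byte counts are what surface size calculations key off. A hedged sketch of a pitch computation under that layout; example_surface_pitch is hypothetical and not part of this header.

/* Hypothetical helper, assuming the descriptor layout defined above:
 * pitch is the number of horizontal blocks times the per-block pitch. */
static inline u32 example_surface_pitch(const struct svga3d_surface_desc *desc,
					u32 width)
{
	u32 blocks_wide = DIV_ROUND_UP(width, desc->block_size.width);

	return blocks_wide * desc->pitch_bytes_per_block;
}
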
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+ {SVGA3DBLOCKDESC_NONE,
+ {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
+ {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
+ {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
+ {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
+ {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
+ {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
+ {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+ {SVGA3DBLOCKDESC_LUMINANCE,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
+
+ {SVGA3DBLOCKDESC_LA,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
+ {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
+ {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
+
+ {SVGA3DBLOCKDESC_RGBA_FP,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
+
+ {SVGA3DBLOCKDESC_UVL,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
+
+ {SVGA3DBLOCKDESC_UVWA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
+
+ {SVGA3DBLOCKDESC_ALPHA,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
+
+ {SVGA3DBLOCKDESC_BUFFER,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
+ {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
+
+ {SVGA3DBLOCKDESC_YUV,
+ {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
+ {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
+
+ {SVGA3DBLOCKDESC_NV12,
+ {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
+
+ {SVGA3DBLOCKDESC_AYUV,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
+ {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
+
+ {SVGA3DBLOCKDESC_UVW,
+ {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
+ {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
+
+ {SVGA3DBLOCKDESC_UVWQ,
+ {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
+ {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+ {SVGA3DBLOCKDESC_R_FP,
+ {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
+ {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
+
+ {SVGA3DBLOCKDESC_RGB_FP,
+ {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
+ {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG_FP,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
+ {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_GREEN,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
+
+ {SVGA3DBLOCKDESC_UV,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
+
+ {SVGA3DBLOCKDESC_RED,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
+
+ {SVGA3DBLOCKDESC_U,
+ {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
+
+ {SVGA3DBLOCKDESC_RED,
+ {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBE,
+ {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
+ {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
+
+ {SVGA3DBLOCKDESC_RG,
+ {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
+ {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
+
+ {SVGA3DBLOCKDESC_COMPRESSED,
+ {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
+ {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+ {SVGA3DBLOCKDESC_RGBA,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGBA_SRGB,
+ {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_RGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
+
+ {SVGA3DBLOCKDESC_RGB_SRGB,
+ {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
+ {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+ {SVGA3DBLOCKDESC_DEPTH,
+ {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
+ {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
+
+ {SVGA3DBLOCKDESC_DS,
+ {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
+ {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+ uint64_t tmp = (uint64_t) a*b;
+ return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
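
The helper above saturates on 32-bit overflow instead of wrapping, so the size
computations further down fail safe at 0xffffffff rather than silently yielding
a small value. A minimal host-side sketch of the same behaviour, using standard
C99 types instead of the kernel's u32 (illustrative only):

	#include <stdint.h>

	static uint32_t clamped_umul32_sketch(uint32_t a, uint32_t b)
	{
		uint64_t tmp = (uint64_t)a * b;	/* widen before multiplying */
		return (tmp > UINT32_MAX) ? UINT32_MAX : (uint32_t)tmp;
	}

	/* clamped_umul32_sketch(65536, 65536) == 0xffffffff  (saturated) */
	/* clamped_umul32_sketch(1024, 1024)   == 1048576     (exact)     */
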
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+ if (format < ARRAY_SIZE(svga3d_surface_descs))
+ return &svga3d_surface_descs[format];
+
+ return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ * Given a base level size and the mip level, compute the size of
+ * the mip level.
+ *
+ * Results:
+ * See above.
+ *
+ * Side effects:
+ * None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+ surf_size_struct size;
+
+ size.width = max_t(u32, base_level.width >> mip_level, 1);
+ size.height = max_t(u32, base_level.height >> mip_level, 1);
+ size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+ return size;
+}
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *pixel_size,
+ surf_size_struct *block_size)
+{
+ block_size->width = DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+ return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size)
+{
+ u32 pitch;
+ surf_size_struct blocks;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+ pitch = blocks.width * desc->pitch_bytes_per_block;
+
+ return pitch;
+}
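
Because the pitch is counted in rows of blocks rather than rows of pixels,
block-compressed formats come out much narrower than their pixel width
suggests. A worked example using the BC1 entries from the table above
(block_size {4, 4, 1}, pitch_bytes_per_block 8) for a 256-pixel-wide surface:

	blocks.width = DIV_ROUND_UP(256, 4) = 64
	pitch        = 64 * 8               = 512 bytes per block row
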
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ * Return the number of bytes of buffer space required to store
+ * one image of a surface, optionally using the specified pitch.
+ *
+ * If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ * This function is overflow-safe. If the result would have
+ * overflowed, MAX_UINT32 is returned instead.
+ *
+ * Results:
+ * Byte count.
+ *
+ * Side effects:
+ * None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+ const surf_size_struct *size,
+ u32 pitch)
+{
+ surf_size_struct image_blocks;
+ u32 slice_size, total_size;
+
+ svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+ if (svga3dsurface_is_planar_surface(desc)) {
+ total_size = clamped_umul32(image_blocks.width,
+ image_blocks.height);
+ total_size = clamped_umul32(total_size, image_blocks.depth);
+ total_size = clamped_umul32(total_size, desc->bytes_per_block);
+ return total_size;
+ }
+
+ if (pitch == 0)
+ pitch = svga3dsurface_calculate_pitch(desc, size);
+
+ slice_size = clamped_umul32(image_blocks.height, pitch);
+ total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+ return total_size;
+}
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+ surf_size_struct base_level_size,
+ u32 num_mip_levels,
+ bool cubemap)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ u32 total_size = 0;
+ u32 mip;
+
+ for (mip = 0; mip < num_mip_levels; mip++) {
+ surf_size_struct size =
+ svga3dsurface_get_mip_size(base_level_size, mip);
+ total_size += svga3dsurface_get_image_buffer_size(desc,
+ &size, 0);
+ }
+
+ if (cubemap)
+ total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+ return total_size;
+}
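
To make the accumulation concrete, here is a worked mip-chain size for
SVGA3D_R8G8B8A8_UNORM (one 4-byte block per pixel, per the table above) with a
256x256x1 base level, three mip levels and cubemap == false:

	mip 0: 256 * 256 * 4 = 262144 bytes
	mip 1: 128 * 128 * 4 =  65536 bytes
	mip 2:  64 *  64 * 4 =  16384 bytes
	total                = 344064 bytes

With cubemap == true the same chain is replicated once per face, so the total
is multiplied by SVGA3D_MAX_SURFACE_FACES (6): 6 * 344064 = 2064384 bytes.
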
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format, used to look up block layout and size.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: Pixel x coordinate.
+ * @y: Pixel y coordinate.
+ * @z: Pixel z coordinate (slice index for volumes).
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+ u32 width, u32 height,
+ u32 x, u32 y, u32 z)
+{
+ const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+ const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+ const u32 bd = desc->block_size.depth;
+ const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+ const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 offset = (z / bd * imgstride +
+ y / bh * rowstride +
+ x / bw * desc->bytes_per_block);
+ return offset;
+}
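
A worked example for a block-compressed format, again using the BC1 entries
above (4x4 blocks, 8 bytes per block), a 256x256 image and the pixel at
(x, y, z) = (17, 9, 0):

	rowstride = DIV_ROUND_UP(256, 4) * 8   = 512
	imgstride = DIV_ROUND_UP(256, 4) * 512 = 32768
	offset    = (0/1) * 32768 + (9/4) * 512 + (17/4) * 8
	          = 0 + 2 * 512 + 4 * 8
	          = 1056 bytes
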
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+ surf_size_struct baseLevelSize,
+ u32 numMipLevels,
+ u32 face,
+ u32 mip)
+
+{
+ u32 offset;
+ u32 mipChainBytes;
+ u32 mipChainBytesToLevel;
+ u32 i;
+ const struct svga3d_surface_desc *desc;
+ surf_size_struct mipSize;
+ u32 bytes;
+
+ desc = svga3dsurface_get_desc(format);
+
+ mipChainBytes = 0;
+ mipChainBytesToLevel = 0;
+ for (i = 0; i < numMipLevels; i++) {
+ mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+ bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+ mipChainBytes += bytes;
+ if (i < mip)
+ mipChainBytesToLevel += bytes;
+ }
+
+ offset = mipChainBytes * face + mipChainBytesToLevel;
+
+ return offset;
+}
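
Continuing the SVGA3D_R8G8B8A8_UNORM example (three mip levels totalling
344064 bytes per face), the offset of face 2, mip 1 in the serialized layout
works out to:

	mipChainBytes        = 344064  (all three levels of one face)
	mipChainBytesToLevel = 262144  (the levels before mip 1, i.e. mip 0)
	offset = 344064 * 2 + 262144 = 950272 bytes
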
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
*placement = vmw_sys_placement;
}
-/**
- * FIXME: Proper access checks on buffers.
- */
-
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ttm_object_file *tfile =
+ vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+ return vmw_user_dmabuf_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}
-static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int vmw_sync_obj_flush(void *sync_obj)
{
vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
return 0;
}
-static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vmw_sync_obj_signaled(void *sync_obj)
{
- unsigned long flags = (unsigned long) sync_arg;
return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags);
+ DRM_VMW_FENCE_FLAG_EXEC);
}
-static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
- bool lazy, bool interruptible)
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
- unsigned long flags = (unsigned long) sync_arg;
-
return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
- (uint32_t) flags,
+ DRM_VMW_FENCE_FLAG_EXEC,
lazy, interruptible,
VMW_FENCE_WAIT_TIMEOUT);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+ .object_type = VMW_RES_CONTEXT,
+ .base_obj_to_res = vmw_user_context_base_to_res,
+ .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+ &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+ .res_type = vmw_res_context,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "legacy contexts",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd;
+
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, false,
+ res_free, &vmw_legacy_context_func);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a resource id.\n");
+ goto out_early;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+ DRM_ERROR("Out of hw context ids.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+
+out_early:
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ ttm_base_object_kfree(ctx, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of contexts anyway.
+ */
+
+ if (unlikely(vmw_user_context_size == 0))
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_context_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for context"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (unlikely(ctx == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_context_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = ctx->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d1498bfd7873..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
ttm_bo_unreserve(bo);
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_gmr_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto err_unreserve;
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
else
placement = &vmw_vram_placement;
- ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
err_unreserve:
ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
return ret;
if (pin)
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+ vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
- false, false);
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
 /* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(!ttm_bo_is_reserved(bo));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
old_mem_type != VMW_PL_GMR);
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
placement.num_placement = 1;
placement.placement = &pl_flags;
- ret = ttm_bo_validate(bo, &placement, false, true, true);
+ ret = ttm_bo_validate(bo, &placement, false, true);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2dd185e42f21..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
PAGE_SIZE,
ttm_bo_type_device,
&vmw_vram_sys_placement,
- 0, 0, false, NULL,
+ 0, false, NULL,
&dev_priv->dummy_query_bo);
}
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
struct vmw_private *dev_priv;
int ret;
uint32_t svga_id;
+ enum vmw_res_type i;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
- idr_init(&dev_priv->context_idr);
- idr_init(&dev_priv->surface_idr);
- idr_init(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i) {
+ idr_init(&dev_priv->res_idr[i]);
+ INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+ }
+
mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
atomic_set(&dev_priv->fifo_queue_waiters, 0);
- INIT_LIST_HEAD(&dev_priv->surface_lru);
+
dev_priv->used_memory_size = 0;
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
}
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
dev_priv->fman = vmw_fence_manager_init(dev_priv);
if (unlikely(dev_priv->fman == NULL))
goto out_no_fman;
- /* Need to start the fifo to check if we can do screen objects */
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
vmw_kms_save_vga(dev_priv);
/* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_kms;
vmw_overlay_init(dev_priv);
- /* 3D Depends on Screen Objects being used. */
- DRM_INFO("Detected %sdevice 3D availability.\n",
- vmw_fifo_have_3d(dev_priv) ?
- "" : "no ");
-
- /* We might be done with the fifo now */
if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv, true);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
vmw_fb_init(dev_priv);
- } else {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- }
-
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed installing irq: %d\n", ret);
- goto out_no_irq;
- }
}
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
return 0;
-out_no_irq:
- if (dev_priv->enable_fb)
- vmw_fb_close(dev_priv);
+out_no_fifo:
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- /* We still have a 3D resource reference held */
- if (dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
- }
-out_no_fifo:
+ vmw_kms_restore_vga(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -684,9 +674,9 @@ out_err2:
out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
+
kfree(dev_priv);
return ret;
}
@@ -694,13 +684,14 @@ out_err0:
static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
+ if (dev_priv->ctx.res_ht_initialized)
+ drm_ht_remove(&dev_priv->ctx.res_ht);
if (dev_priv->ctx.cmd_bounce)
vfree(dev_priv->ctx.cmd_bounce);
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
if (dev_priv->enable_fb) {
vmw_fb_close(dev_priv);
vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
(void)ttm_bo_device_release(&dev_priv->bdev);
vmw_ttm_global_release(dev_priv);
- idr_destroy(&dev_priv->surface_idr);
- idr_destroy(&dev_priv->context_idr);
- idr_destroy(&dev_priv->stream_idr);
+
+ for (i = vmw_res_context; i < vmw_res_max; ++i)
+ idr_destroy(&dev_priv->res_idr[i]);
kfree(dev_priv);
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
out_no_active_lock:
if (!dev_priv->enable_fb) {
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
return ret;
}
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
vmw_fp->locked_master = drm_master_get(file_priv->master);
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
if (unlikely((ret != 0))) {
DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
if (unlikely(ret != 0))
DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
- vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de9..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
struct vmw_dma_buffer {
struct ttm_buffer_object base;
- struct list_head validate_list;
- bool gmr_bound;
- uint32_t cur_validate_node;
- bool on_validate_list;
+ struct list_head res_list;
};
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also contains driver-private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
- struct idr *idr;
int id;
- enum ttm_object_type res_type;
bool avail;
- void (*remove_from_lists) (struct vmw_resource *res);
- void (*hw_destroy) (struct vmw_resource *res);
+ unsigned long backup_size;
+ bool res_dirty; /* Protected by backup buffer reserved */
+ bool backup_dirty; /* Protected by backup buffer reserved */
+ struct vmw_dma_buffer *backup;
+ unsigned long backup_offset;
+ const struct vmw_res_func *func;
+ struct list_head lru_head; /* Protected by the resource lock */
+ struct list_head mob_head; /* Protected by @backup reserved */
void (*res_free) (struct vmw_resource *res);
- struct list_head validate_head;
- struct list_head query_head; /* Protected by the cmdbuf mutex */
- /* TODO is a generic snooper needed? */
-#if 0
- void (*snoop)(struct vmw_resource *res,
- struct ttm_object_file *tfile,
- SVGA3dCmdHeader *header);
- void *snoop_priv;
-#endif
+ void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+ vmw_res_context,
+ vmw_res_surface,
+ vmw_res_stream,
+ vmw_res_max
};
struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
struct vmw_surface {
struct vmw_resource res;
- struct list_head lru_head; /* Protected by the resource lock */
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size base_size;
struct drm_vmw_size *sizes;
uint32_t num_sizes;
-
bool scanout;
-
 /* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
- struct ttm_buffer_object *backup;
struct vmw_surface_offset *offsets;
- uint32_t backup_size;
+ SVGA3dTextureFilter autogen_filter;
+ uint32_t multisample_count;
};
struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
uint32_t index;
};
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+ bool valid;
+ uint32_t handle;
+ struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+};
+
struct vmw_sw_context{
- struct ida bo_list;
- uint32_t last_cid;
- bool cid_valid;
+ struct drm_open_hash res_ht;
+ bool res_ht_initialized;
 bool kernel; /**< is the call made from the kernel */
- struct vmw_resource *cur_ctx;
- uint32_t last_sid;
- uint32_t sid_translation;
- bool sid_valid;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
- struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+ struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
uint32_t fence_flags;
- struct list_head query_list;
struct ttm_buffer_object *cur_query_bo;
- uint32_t cur_query_cid;
- bool query_cid_valid;
+ struct list_head res_relocations;
+ uint32_t *buf_start;
+ struct vmw_res_cache_entry res_cache[vmw_res_max];
+ struct vmw_resource *last_query_ctx;
+ bool needs_post_query_barrier;
+ struct vmw_resource *error_resource;
};
struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
*/
rwlock_t resource_lock;
- struct idr context_idr;
- struct idr surface_idr;
- struct idr stream_idr;
-
+ struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
*/
@@ -320,6 +347,7 @@ struct vmw_private {
struct ttm_buffer_object *dummy_query_bo;
struct ttm_buffer_object *pinned_bo;
uint32_t query_cid;
+ uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
/*
@@ -329,10 +357,15 @@ struct vmw_private {
* protected by the cmdbuf mutex for simplicity.
*/
- struct list_head surface_lru;
+ struct list_head res_lru[vmw_res_max];
uint32_t used_memory_size;
};
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+ return container_of(res, struct vmw_surface, res);
+}
+
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
* Resource utilities - vmwgfx_resource.c
*/
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+ struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *converter,
+ struct vmw_resource **p_res);
extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
struct vmw_dma_buffer *tmp_buf = *buf;
- struct ttm_buffer_object *bo = &tmp_buf->base;
+
*buf = NULL;
+ if (tmp_buf != NULL) {
+ struct ttm_buffer_object *bo = &tmp_buf->base;
- ttm_bo_unref(&bo);
+ ttm_bo_unref(&bo);
+ }
}
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset, in 4-byte units, into the command buffer where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+ struct list_head head;
+ const struct vmw_resource *res;
+ unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @switch_backup: Boolean whether to switch backup buffer on unreserve.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate buffer backup on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_dma_buffer *new_backup;
+ unsigned long new_backup_offset;
+ bool first_usage;
+ bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: Pointer to the list of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+ bool backoff)
+{
+ struct vmw_resource_val_node *val;
+
+ list_for_each_entry(val, list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_dma_buffer *new_backup =
+ backoff ? NULL : val->new_backup;
+
+ vmw_resource_unreserve(res, new_backup,
+ val->new_backup_offset);
+ vmw_dmabuf_unreference(&val->new_backup);
+ }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_node)
+{
+ struct vmw_resource_val_node *node;
+ struct drm_hash_item *hash;
+ int ret;
+
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+ &hash) == 0)) {
+ node = container_of(hash, struct vmw_resource_val_node, hash);
+ node->first_usage = false;
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+ return 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(node == NULL)) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ kfree(node);
+ return ret;
+ }
+ list_add_tail(&node->head, &sw_context->resource_list);
+ node->res = vmw_resource_reference(res);
+ node->first_usage = true;
+
+ if (unlikely(p_node != NULL))
+ *p_node = node;
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+ const struct vmw_resource *res,
+ unsigned long offset)
+{
+ struct vmw_resource_relocation *rel;
+
+ rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ if (unlikely(rel == NULL)) {
+ DRM_ERROR("Failed to allocate a resource relocation.\n");
+ return -ENOMEM;
+ }
+
+ rel->res = res;
+ rel->offset = offset;
+ list_add_tail(&rel->head, list);
+
+ return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+ struct vmw_resource_relocation *rel, *n;
+
+ list_for_each_entry_safe(rel, n, list, head) {
+ list_del(&rel->head);
+ kfree(rel);
+ }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+ struct list_head *list)
+{
+ struct vmw_resource_relocation *rel;
+
+ list_for_each_entry(rel, list, head)
+ cb[rel->offset] = rel->res->id;
+}
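
The relocation list lets the parser defer writing hardware resource ids into
the command stream until validation has assigned them. A hypothetical
illustration (the command, offsets and ids below are invented for the example):

	/*
	 * If a command's sid field sits at word 12 of the bounce buffer,
	 * the parser records a relocation with offset == 12 while it still
	 * only knows the user-space handle.  Once validation has assigned
	 * res->id == 7, vmw_resource_relocations_apply() simply performs
	 *
	 *	cb[12] = 7;
	 *
	 * overwriting the handle with the device-visible id.
	 */
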
+
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res = *p_res;
-
- if (list_empty(&res->validate_head)) {
- list_add_tail(&res->validate_head, &sw_context->resource_list);
- *p_res = NULL;
- } else
- vmw_resource_unreference(p_res);
-}
-
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
- uint32_t fence_flags,
uint32_t *p_val_node)
{
uint32_t val_node;
+ struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
+ struct drm_hash_item *hash;
+ int ret;
- val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission"
- " exceeded.\n");
- return -EINVAL;
- }
-
- val_buf = &sw_context->val_bufs[val_node];
- if (unlikely(val_node == sw_context->cur_val_buf)) {
- val_buf->new_sync_obj_arg = NULL;
+ if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+ &hash) == 0)) {
+ vval_buf = container_of(hash, struct vmw_validate_buffer,
+ hash);
+ val_buf = &vval_buf->base;
+ val_node = vval_buf - sw_context->val_bufs;
+ } else {
+ val_node = sw_context->cur_val_buf;
+ if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+ DRM_ERROR("Max number of DMA buffers per submission "
+ "exceeded.\n");
+ return -EINVAL;
+ }
+ vval_buf = &sw_context->val_bufs[val_node];
+ vval_buf->hash.key = (unsigned long) bo;
+ ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to initialize a buffer validation "
+ "entry.\n");
+ return ret;
+ }
+ ++sw_context->cur_val_buf;
+ val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(bo);
+ val_buf->reserved = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- ++sw_context->cur_val_buf;
}
- val_buf->new_sync_obj_arg = (void *)
- ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
- sw_context->fence_flags |= fence_flags;
+ sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
if (p_val_node)
*p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
return 0;
}
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission is currently protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at a time will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource *ctx;
-
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- __le32 cid;
- } *cmd;
+ struct vmw_resource_val_node *val;
int ret;
- cmd = container_of(header, struct vmw_cid_cmd, header);
- if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
- return 0;
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
- ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
- &ctx);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use context %u\n",
- (unsigned) cmd->cid);
- return ret;
+ ret = vmw_resource_reserve(res, val->no_buffer_needed);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ret = vmw_bo_to_validate_list
+ (sw_context, bo, NULL);
+
+ if (unlikely(ret != 0))
+ return ret;
+ }
}
+ return 0;
+}
- sw_context->last_cid = cmd->cid;
- sw_context->cid_valid = true;
- sw_context->cur_ctx = ctx;
- vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ ret = vmw_resource_validate(res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+ }
return 0;
}
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t *sid)
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id,
+ struct vmw_resource_val_node **p_val)
{
- struct vmw_surface *srf;
- int ret;
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[res_type];
struct vmw_resource *res;
+ struct vmw_resource_val_node *node;
+ int ret;
- if (*sid == SVGA3D_INVALID_ID)
+ if (*id == SVGA3D_INVALID_ID)
return 0;
- if (likely((sw_context->sid_valid &&
- *sid == sw_context->last_sid))) {
- *sid = sw_context->sid_translation;
- return 0;
- }
+ /*
+ * Fastpath in case of repeated commands referencing the same
+ * resource
+ */
- ret = vmw_user_surface_lookup_handle(dev_priv,
- sw_context->tfile,
- *sid, &srf);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface 0x%08x "
- "address 0x%08lx\n",
- (unsigned int) *sid,
- (unsigned long) sid);
- return ret;
+ if (likely(rcache->valid && *id == rcache->handle)) {
+ const struct vmw_resource *res = rcache->res;
+
+ rcache->node->first_usage = false;
+ if (p_val)
+ *p_val = rcache->node;
+
+ return vmw_resource_relocation_add
+ (&sw_context->res_relocations, res,
+ id - sw_context->buf_start);
}
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_user_resource_lookup_handle(dev_priv,
+ sw_context->tfile,
+ *id,
+ converter,
+ &res);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Could not validate surface.\n");
- vmw_surface_unreference(&srf);
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned) *id);
+ dump_stack();
return ret;
}
- sw_context->last_sid = *sid;
- sw_context->sid_valid = true;
- sw_context->sid_translation = srf->res.id;
- *sid = sw_context->sid_translation;
+ rcache->valid = true;
+ rcache->res = res;
+ rcache->handle = *id;
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
+ rcache->node = node;
+ if (p_val)
+ *p_val = node;
+ vmw_resource_unreference(&res);
return 0;
+
+out_no_reloc:
+ BUG_ON(sw_context->error_resource != NULL);
+ sw_context->error_resource = res;
+
+ return ret;
}
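
The relocation helpers used above, vmw_resource_relocation_add() and vmw_resource_relocations_apply(), are introduced earlier in this patch and are not part of this hunk. As a rough sketch of the idea, with illustrative names and types rather than the driver's actual definitions: at parse time only the offset of the user-space handle within the command buffer is recorded, and the resource's device id is patched in later, after the commands have been copied into the FIFO.

/* Hedged sketch only; names and types below are made up for illustration. */
#include <stddef.h>
#include <stdint.h>

struct fake_resource { int32_t device_id; };

struct fake_res_reloc {
	const struct fake_resource *res;	/* resource whose id gets patched in */
	size_t offset;				/* offset of the handle, in 32-bit words */
};

/* Parse time: remember where in the command stream the handle sits. */
static void fake_reloc_add(struct fake_res_reloc *r,
			   const struct fake_resource *res,
			   const uint32_t *id_loc, const uint32_t *buf_start)
{
	r->res = res;
	r->offset = (size_t)(id_loc - buf_start);
}

/* Commit time: patch the copied command buffer with the real device id. */
static void fake_reloc_apply(uint32_t *cmd_buf, const struct fake_res_reloc *r)
{
	cmd_buf[r->offset] = (uint32_t)r->res->device_id;
}
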
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->cid, NULL);
+}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.target.sid, NULL);
return ret;
}
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return -EPERM;
}
- return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->body.sid,
+ NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- uint32_t cid,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
int ret;
- bool add_cid = false;
- uint32_t cid_to_add;
+
+ BUG_ON(!ctx_entry->valid);
+ sw_context->last_query_ctx = ctx_entry->res;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
- BUG_ON(!sw_context->query_cid_valid);
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
+ sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
NULL);
if (unlikely(ret != 0))
return ret;
}
- if (unlikely(cid != sw_context->cur_query_cid &&
- sw_context->query_cid_valid)) {
- add_cid = true;
- cid_to_add = sw_context->cur_query_cid;
- }
-
- sw_context->cur_query_cid = cid;
- sw_context->query_cid_valid = true;
-
- if (add_cid) {
- struct vmw_resource *ctx = sw_context->cur_ctx;
-
- if (list_empty(&ctx->query_head))
- list_add_tail(&ctx->query_head,
- &sw_context->query_list);
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- DRM_VMW_FENCE_FLAG_EXEC,
- NULL);
- if (unlikely(ret != 0))
- return ret;
- }
return 0;
}
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
- * if no other query waits are issued this command submission batch,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
-
- struct vmw_resource *ctx, *next_ctx;
- int ret;
-
/*
* The validate list should still hold references to all
* contexts here.
*/
- list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
- query_head) {
- list_del_init(&ctx->query_head);
+ if (sw_context->needs_post_query_barrier) {
+ struct vmw_res_cache_entry *ctx_entry =
+ &sw_context->res_cache[vmw_res_context];
+ struct vmw_resource *ctx;
+ int ret;
- BUG_ON(list_empty(&ctx->validate_head));
+ BUG_ON(!ctx_entry->valid);
+ ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ttm_bo_unref(&dev_priv->pinned_bo);
}
- vmw_bo_pin(sw_context->cur_query_bo, true);
+ if (!sw_context->needs_post_query_barrier) {
+ vmw_bo_pin(sw_context->cur_query_bo, true);
- /*
- * We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
- */
+ /*
+ * We pin also the dummy_query_bo buffer so that we
+ * don't need to validate it when emitting
+ * dummy queries in context destroy paths.
+ */
- vmw_bo_pin(dev_priv->dummy_query_bo, true);
- dev_priv->dummy_query_bo_pinned = true;
+ vmw_bo_pin(dev_priv->dummy_query_bo, true);
+ dev_priv->dummy_query_bo_pinned = true;
- dev_priv->query_cid = sw_context->cur_query_cid;
- dev_priv->pinned_bo =
- ttm_bo_reference(sw_context->cur_query_bo);
+ BUG_ON(sw_context->last_query_ctx == NULL);
+ dev_priv->query_cid = sw_context->last_query_ctx->id;
+ dev_priv->query_cid_valid = true;
+ dev_priv->pinned_bo =
+ ttm_bo_reference(sw_context->cur_query_bo);
+ }
}
}
/**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
*
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
*
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
*/
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &sw_context->query_list) {
- list_del_init(list);
- }
-}
-
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
- ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
- &reloc->index);
+ ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -479,6 +731,37 @@ out_no_reloc:
return ret;
}
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_begin_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBeginQuery q;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_begin_query_cmd,
+ header);
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->q.cid,
+ NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
- &vmw_bo->base, sw_context);
+ ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery q;
} *cmd;
int ret;
- struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_query_cmd, header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
vmw_dmabuf_unreference(&vmw_bo);
-
- /*
- * This wait will act as a barrier for previous waits for this
- * context.
- */
-
- ctx = sw_context->cur_ctx;
- if (!list_empty(&ctx->query_head))
- list_del_init(&ctx->query_head);
-
return 0;
}
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_dma_buffer *vmw_bo = NULL;
- struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
- struct vmw_resource *res;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- bo = &vmw_bo->base;
- ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
- if (ret) {
- DRM_ERROR("could not find surface\n");
- goto out_no_reloc;
- }
-
- ret = vmw_surface_validate(dev_priv, srf);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter, &cmd->dma.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Culd not validate surface.\n");
- goto out_no_validate;
+ if (unlikely(ret != -ERESTARTSYS))
+ DRM_ERROR("could not find surface for DMA.\n");
+ goto out_no_surface;
}
- /*
- * Patch command stream with device SID.
- */
- cmd->dma.host.sid = srf->res.id;
- vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
- vmw_dmabuf_unreference(&vmw_bo);
-
- res = &srf->res;
- vmw_resource_to_validate_list(sw_context, &res);
+ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- return 0;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
-out_no_validate:
- vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &decl->array.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &range->indexArray.surfaceId);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;
- ret = vmw_cmd_sid_check(dev_priv, sw_context,
- &cur_state->value);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cur_state->value, NULL);
if (unlikely(ret != 0))
return ret;
}
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return ret;
}
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_cmd,
+ header);
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return 0;
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
- &vmw_cmd_blt_surf_screen_check)
+ &vmw_cmd_blt_surf_screen_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
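
The table above is consumed by vmw_cmd_check(), which is not shown in this hunk. A minimal sketch of how such a per-command dispatch table is typically indexed, with made-up names and an assumed bounds check rather than the driver's exact code:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

typedef int (*cmd_check_fn)(void *dev_priv, void *sw_context, void *header);

static int dispatch_cmd(const cmd_check_fn *funcs, size_t nfuncs,
			uint32_t base_id, uint32_t cmd_id,
			void *dev_priv, void *sw_context, void *header)
{
	uint32_t idx;

	if (cmd_id < base_id)
		return -EINVAL;			/* not a 3D command */
	idx = cmd_id - base_id;
	if (idx >= nfuncs || funcs[idx] == NULL)
		return -EINVAL;			/* unknown or unsupported command */
	return funcs[idx](dev_priv, sw_context, header);	/* per-command validator */
}
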
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int32_t cur_size = size;
int ret;
+ sw_context->buf_start = buf;
+
while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index];
+ validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
- } else
+ break;
+ case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
+ break;
+ default:
+ BUG();
+ }
}
vmw_free_relocations(sw_context);
}
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+ struct vmw_resource_val_node *val, *val_next;
+
+ /*
+ * Drop references to resources held during command submission.
+ */
+
+ list_for_each_entry_safe(val, val_next, list, head) {
+ list_del_init(&val->head);
+ vmw_resource_unreference(&val->res);
+ kfree(val);
+ }
+}
+
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry, *next;
- struct vmw_resource *res, *res_next;
+ struct vmw_validate_buffer *entry, *next;
+ struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- head) {
- list_del(&entry->head);
- vmw_dmabuf_validate_clear(entry->bo);
- ttm_bo_unref(&entry->bo);
+ base.head) {
+ list_del(&entry->base.head);
+ ttm_bo_unref(&entry->base.bo);
+ (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
- /*
- * Drop references to resources held during command submission.
- */
- vmw_resource_unreserve(&sw_context->resource_list);
- list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
- validate_head) {
- list_del_init(&res->validate_head);
- vmw_resource_unreference(&res);
- }
+ list_for_each_entry(val, &sw_context->resource_list, head)
+ (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
*/
DRM_INFO("Falling through to VRAM.\n");
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
- struct ttm_validate_buffer *entry;
+ struct vmw_validate_buffer *entry;
int ret;
- list_for_each_entry(entry, &sw_context->validate_nodes, head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
+ struct vmw_resource *error_resource;
+ struct list_head resource_list;
uint32_t handle;
void *cmd;
int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
- sw_context->cid_valid = false;
- sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
- INIT_LIST_HEAD(&sw_context->query_list);
INIT_LIST_HEAD(&sw_context->resource_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
- sw_context->cur_query_cid = dev_priv->query_cid;
- sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+ sw_context->last_query_ctx = NULL;
+ sw_context->needs_post_query_barrier = false;
+ memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
+ INIT_LIST_HEAD(&sw_context->res_relocations);
+ if (!sw_context->res_ht_initialized) {
+ ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ sw_context->res_ht_initialized = true;
+ }
+ INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err;
+ ret = vmw_resources_reserve(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
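
The res_ht hash table created above is presumably what lets helpers such as vmw_resource_val_add() and vmw_bo_to_validate_list() (defined earlier in the patch) notice that a handle is already on the validate list instead of adding a duplicate node. A hedged sketch of that deduplication pattern; the drm_open_hash calls are the standard <drm/drm_hashtab.h> API of this era, while the node type and field names are purely illustrative:

#include <drm/drm_hashtab.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_val_node {
	struct drm_hash_item hash;	/* key = user-space handle */
	struct list_head head;		/* entry on the sw_context list */
};

static int example_lookup_or_add(struct drm_open_hash *ht,
				 struct list_head *list,
				 unsigned long handle,
				 struct example_val_node **p_node)
{
	struct drm_hash_item *hash;
	struct example_val_node *node;
	int ret;

	if (drm_ht_find_item(ht, handle, &hash) == 0) {
		/* Handle already seen in this submission: reuse the node. */
		*p_node = container_of(hash, struct example_val_node, hash);
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->hash.key = handle;
	ret = drm_ht_insert_item(ht, &node->hash);
	if (ret) {
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, list);
	*p_node = node;
	return 0;
}
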
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
- vmw_apply_relocations(sw_context);
+ ret = vmw_resources_validate(sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
- goto out_throttle;
+ goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
- goto out_throttle;
+ goto out_err;
}
+ vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
+
+ vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+
vmw_fifo_commit(dev_priv, command_size);
vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
+ vmw_resource_list_unreserve(&sw_context->resource_list, false);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *) fence);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
vmw_clear_validations(sw_context);
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
+ list_splice_init(&sw_context->resource_list, &resource_list);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+
return 0;
out_err:
+ vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
-out_throttle:
- vmw_query_switch_backoff(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context);
+ if (unlikely(dev_priv->pinned_bo != NULL &&
+ !dev_priv->query_cid_valid))
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
+ list_splice_init(&sw_context->resource_list, &resource_list);
+ error_resource = sw_context->error_resource;
+ sw_context->error_resource = NULL;
mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * Unreference resources outside of the cmdbuf_mutex to
+ * avoid deadlocks in resource destruction paths.
+ */
+ vmw_resource_list_unreference(&resource_list);
+ if (unlikely(error_resource != NULL))
+ vmw_resource_unreference(&error_resource);
+
return ret;
}
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
* query bo.
*
* @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
*
* This function should be used to unpin the pinned query bo, or
* as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
*
* The function will synchronize on the previous query barrier, and will
* thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
*/
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
- bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+ struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
- struct vmw_fence_obj *fence;
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
+ struct vmw_fence_obj *lfence = NULL;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- if (only_on_cid_match && cid != dev_priv->query_cid)
- goto out_unlock;
-
INIT_LIST_HEAD(&validate_list);
- pinned_val.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
list_add_tail(&pinned_val.head, &validate_list);
- query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list);
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
goto out_no_reserve;
}
- ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
- goto out_no_emit;
+ if (dev_priv->query_cid_valid) {
+ BUG_ON(fence != NULL);
+ ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+ if (unlikely(ret != 0)) {
+ vmw_execbuf_unpin_panic(dev_priv);
+ goto out_no_emit;
+ }
+ dev_priv->query_cid_valid = false;
}
vmw_bo_pin(dev_priv->pinned_bo, false);
vmw_bo_pin(dev_priv->dummy_query_bo, false);
dev_priv->dummy_query_bo_pinned = false;
- (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (fence == NULL) {
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+ NULL);
+ fence = lfence;
+ }
ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+ if (lfence != NULL)
+ vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
out_unlock:
- mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ if (dev_priv->query_cid_valid)
+ __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
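
With the split into __vmw_execbuf_release_pinned_bo() and this locking wrapper, call sites pick the variant that matches their locking context: command-submission paths, which already hold cmdbuf_mutex (see vmw_execbuf_process() above), call the __ variant directly, while everything else uses the wrapper. A hypothetical caller is sketched below; the surrounding function is made up.

static void example_teardown(struct vmw_private *dev_priv)
{
	/* Not holding cmdbuf_mutex here, so let the wrapper take it. */
	vmw_execbuf_release_pinned_bo(dev_priv);
}
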
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
container_of(fence, struct vmw_user_fence, fence);
struct vmw_fence_manager *fman = fence->fman;
- kfree(ufence);
+ ttm_base_object_kfree(ufence, base);
/*
* Free kernel space accounting.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 7290811f89be..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_rect *clips = NULL;
struct drm_mode_object *obj;
struct vmw_framebuffer *vfb;
+ struct vmw_resource *res;
uint32_t num_clips;
int ret;
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_no_ttm_lock;
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
- &surface);
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+ user_surface_converter,
+ &res);
if (ret)
goto out_no_surface;
+ surface = vmw_res_to_srf(res);
ret = vmw_kms_present(dev_priv, file_priv,
vfb, surface, arg->sid,
arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b66377..87e39f68e9d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
#include "svga_escape.h"
#define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
return 0;
}
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+ return (dev_priv->overlay_priv != NULL &&
+ ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+ VMW_OVERLAY_CAP_MASK));
+}
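+
Requiring (capabilities & VMW_OVERLAY_CAP_MASK) == VMW_OVERLAY_CAP_MASK means both SVGA_FIFO_CAP_VIDEO and SVGA_FIFO_CAP_ESCAPE must be present; the init-time check removed further down in this diff only rejected one particular combination. A tiny stand-alone illustration of the mask test, with a made-up helper and values:

#include <stdbool.h>
#include <stdint.h>

static bool caps_ok(uint32_t caps, uint32_t mask)
{
	return (caps & mask) == mask;	/* true only if every bit in mask is set */
}
/*
 * caps_ok(SVGA_FIFO_CAP_VIDEO, VMW_OVERLAY_CAP_MASK)                         -> false
 * caps_ok(SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE, VMW_OVERLAY_CAP_MASK)  -> true
 */
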
+
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
int ret;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return -ENOSYS;
ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
- if (!dev_priv->overlay_priv)
+ if (!vmw_overlay_available(dev_priv))
return 0;
return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
struct vmw_overlay *overlay = dev_priv->overlay_priv;
int i, k;
- if (!overlay)
+ if (!vmw_overlay_available(dev_priv))
return 0;
mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
if (dev_priv->overlay_priv)
return -EINVAL;
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
- (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
- DRM_INFO("hardware doesn't support overlays\n");
- return -ENOSYS;
- }
-
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
-
-struct vmw_user_context {
- struct ttm_base_object base;
- struct vmw_resource res;
-};
-
-struct vmw_user_surface {
- struct ttm_base_object base;
- struct vmw_surface srf;
- uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
struct vmw_user_dma_buffer {
struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
-struct vmw_surface_offset {
- uint32_t face;
- uint32_t mip;
- uint32_t bo_offset;
-};
-
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
+static const struct vmw_res_func vmw_stream_func = {
+ .res_type = vmw_res_stream,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "video streams",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
*
* Release the resource id to the resource id manager and set it to -1
*/
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
write_lock(&dev_priv->resource_lock);
if (res->id != -1)
- idr_remove(res->idr, res->id);
+ idr_remove(idr, res->id);
res->id = -1;
write_unlock(&dev_priv->resource_lock);
}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
- int id = res->id;
- struct idr *idr = res->idr;
+ int id;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
res->avail = false;
- if (res->remove_from_lists != NULL)
- res->remove_from_lists(res);
+ list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ if (!list_empty(&res->mob_head) &&
+ res->func->unbind != NULL) {
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+ res->func->unbind(res, false, &val_buf);
+ }
+ res->backup_dirty = false;
+ list_del_init(&res->mob_head);
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&res->backup);
+ }
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
+ id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
*
- * @dev_priv: Pointer to the device private structure.
* @res: Pointer to the resource.
*
* Allocate the lowest free resource from the resource manager, and set
* @res->id to that id. Returns 0 on success and -ENOMEM on failure.
*/
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
- struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
{
+ struct vmw_private *dev_priv = res->dev_priv;
int ret;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
BUG_ON(res->id != -1);
do {
- if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
return -ENOMEM;
write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(res->idr, res, 1, &res->id);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
write_unlock(&dev_priv->resource_lock);
} while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
return ret;
}
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- bool delay_id,
- void (*res_free) (struct vmw_resource *res),
- void (*remove_from_lists)
- (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @res: The struct vmw_resource to initialize.
+ * @delay_id: Boolean whether to defer device id allocation until
+ * the first validation.
+ * @res_free: Resource destructor.
+ * @func: Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func)
{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->remove_from_lists = remove_from_lists;
- res->res_type = obj_type;
- res->idr = idr;
res->avail = false;
res->dev_priv = dev_priv;
- INIT_LIST_HEAD(&res->query_head);
- INIT_LIST_HEAD(&res->validate_head);
+ res->func = func;
+ INIT_LIST_HEAD(&res->lru_head);
+ INIT_LIST_HEAD(&res->mob_head);
res->id = -1;
+ res->backup = NULL;
+ res->backup_offset = 0;
+ res->backup_dirty = false;
+ res->res_dirty = false;
if (delay_id)
return 0;
else
- return vmw_resource_alloc_id(dev_priv, res);
+ return vmw_resource_alloc_id(res);
}
/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
-
-static void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
}
/**
- * Context management:
- */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyContext body;
- } *cmd;
-
-
- vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineContext body;
- } *cmd;
-
- ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, false, res_free, NULL);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a resource id.\n");
- goto out_early;
- }
-
- if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
- DRM_ERROR("Out of hw context ids.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_context_destroy);
- return 0;
-
-out_early:
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
- return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
-
-static void vmw_user_context_free(struct vmw_resource *res)
-{
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- struct vmw_private *dev_priv = res->dev_priv;
-
- kfree(ctx);
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
-}
-
-/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
- */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_context *ctx =
- container_of(base, struct vmw_user_context, base);
- struct vmw_resource *res = &ctx->res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_user_context *ctx;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_context_free) {
- ret = -EINVAL;
- goto out;
- }
-
- ctx = container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
-
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
- if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_context_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for context"
- " creation.\n");
- goto out_unlock;
- }
-
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_context_size);
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- res = &ctx->res;
- ctx->base.shareable = false;
- ctx->base.tfile = NULL;
-
- /*
- * From here on, the destructor takes over resource freeing.
- */
-
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&ctx->res);
- ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
- }
-
- arg->cid = res->id;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-
-}
-
-int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res)
-{
- struct vmw_resource *res;
- int ret = 0;
-
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->context_idr, id);
- if (res && res->avail) {
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable)
- ret = -EPERM;
- if (p_res)
- *p_res = vmw_resource_reference(res);
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
-
- return ret;
-}
-
-struct vmw_bpp {
- uint8_t bpp;
- uint8_t s_bpp;
-};
-
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
- [SVGA3D_FORMAT_INVALID] = {0, 0},
- [SVGA3D_X8R8G8B8] = {32, 32},
- [SVGA3D_A8R8G8B8] = {32, 32},
- [SVGA3D_R5G6B5] = {16, 16},
- [SVGA3D_X1R5G5B5] = {16, 16},
- [SVGA3D_A1R5G5B5] = {16, 16},
- [SVGA3D_A4R4G4B4] = {16, 16},
- [SVGA3D_Z_D32] = {32, 32},
- [SVGA3D_Z_D16] = {16, 16},
- [SVGA3D_Z_D24S8] = {32, 32},
- [SVGA3D_Z_D15S1] = {16, 16},
- [SVGA3D_LUMINANCE8] = {8, 8},
- [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
- [SVGA3D_LUMINANCE16] = {16, 16},
- [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
- [SVGA3D_DXT1] = {4, 16},
- [SVGA3D_DXT2] = {8, 32},
- [SVGA3D_DXT3] = {8, 32},
- [SVGA3D_DXT4] = {8, 32},
- [SVGA3D_DXT5] = {8, 32},
- [SVGA3D_BUMPU8V8] = {16, 16},
- [SVGA3D_BUMPL6V5U5] = {16, 16},
- [SVGA3D_BUMPX8L8V8U8] = {32, 32},
- [SVGA3D_ARGB_S10E5] = {16, 16},
- [SVGA3D_ARGB_S23E8] = {32, 32},
- [SVGA3D_A2R10G10B10] = {32, 32},
- [SVGA3D_V8U8] = {16, 16},
- [SVGA3D_Q8W8V8U8] = {32, 32},
- [SVGA3D_CxV8U8] = {16, 16},
- [SVGA3D_X8L8V8U8] = {32, 32},
- [SVGA3D_A2W10V10U10] = {32, 32},
- [SVGA3D_ALPHA8] = {8, 8},
- [SVGA3D_R_S10E5] = {16, 16},
- [SVGA3D_R_S23E8] = {32, 32},
- [SVGA3D_RG_S10E5] = {16, 16},
- [SVGA3D_RG_S23E8] = {32, 32},
- [SVGA3D_BUFFER] = {8, 8},
- [SVGA3D_Z_D24X8] = {32, 32},
- [SVGA3D_V16U16] = {32, 32},
- [SVGA3D_G16R16] = {32, 32},
- [SVGA3D_A16B16G16R16] = {64, 64},
- [SVGA3D_UYVY] = {12, 12},
- [SVGA3D_YUY2] = {12, 12},
- [SVGA3D_NV12] = {12, 8},
- [SVGA3D_AYUV] = {32, 32},
- [SVGA3D_BC4_UNORM] = {4, 16},
- [SVGA3D_BC5_UNORM] = {8, 32},
- [SVGA3D_Z_DF16] = {16, 16},
- [SVGA3D_Z_DF24] = {24, 24},
- [SVGA3D_Z_D24S8_INT] = {32, 32}
-};
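
As a worked example of the two formulas in the comment above, take the SVGA3D_NV12 entry {bpp = 12, s_bpp = 8} and a 640x480 surface: the minimum stride is 640 * 8 / 8 = 640 bytes, and the total size is 480 * 640 * 12 / 8 = 460800 bytes. The same arithmetic appears in vmw_surface_dma_encode() further down in this removed block (guest.pitch and maximumOffset). A small stand-alone sketch, not driver code:

#include <stdint.h>

static uint32_t nv12_image_bytes(uint32_t w, uint32_t h)
{
	const uint32_t bpp = 12, s_bpp = 8;
	uint32_t pitch = (w * s_bpp + 7) >> 3;	/* min stride in bytes: 640 for w = 640 */

	return pitch * h * bpp / s_bpp;		/* 640 * 480 * 12 / 8 = 460800 bytes */
}
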
-
-
-/**
- * Surface management.
- */
-
-struct vmw_surface_dma {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA body;
- SVGA3dCopyBox cb;
- SVGA3dCmdSurfaceDMASuffix suffix;
-};
-
-struct vmw_surface_define {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
-};
-
-struct vmw_surface_destroy {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
-};
-
-
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
- return srf->num_sizes * sizeof(struct vmw_surface_dma);
-}
-
-
-/**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
-{
- return sizeof(struct vmw_surface_define) + srf->num_sizes *
- sizeof(SVGA3dSize);
-}
-
-
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
*
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
- return sizeof(struct vmw_surface_destroy);
-}
-
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
- void *cmd_space)
-{
- struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
- cmd_space;
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.sid = id;
-}
-
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
*
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
*/
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
- void *cmd_space)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter,
+ struct vmw_resource **p_res)
{
- struct vmw_surface_define *cmd = (struct vmw_surface_define *)
- cmd_space;
- struct drm_vmw_size *src_size;
- SVGA3dSize *cmd_size;
- uint32_t cmd_len;
- int i;
-
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
- cmd->header.size = cmd_len;
- cmd->body.sid = srf->res.id;
- cmd->body.surfaceFlags = srf->flags;
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = src_size->width;
- cmd_size->height = src_size->height;
- cmd_size->depth = src_size->depth;
- }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
- void *cmd_space,
- const SVGAGuestPtr *ptr,
- bool to_surface)
-{
- uint32_t i;
- uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
- uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
- struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
- for (i = 0; i < srf->num_sizes; ++i) {
- SVGA3dCmdHeader *header = &cmd->header;
- SVGA3dCmdSurfaceDMA *body = &cmd->body;
- SVGA3dCopyBox *cb = &cmd->cb;
- SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
- const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
- const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
- header->id = SVGA_3D_CMD_SURFACE_DMA;
- header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
- body->guest.ptr = *ptr;
- body->guest.ptr.offset += cur_offset->bo_offset;
- body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
- body->host.sid = srf->res.id;
- body->host.face = cur_offset->face;
- body->host.mipmap = cur_offset->mip;
- body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM);
- cb->x = 0;
- cb->y = 0;
- cb->z = 0;
- cb->srcx = 0;
- cb->srcy = 0;
- cb->srcz = 0;
- cb->w = cur_size->width;
- cb->h = cur_size->height;
- cb->d = cur_size->depth;
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = body->guest.pitch*cur_size->height*
- cur_size->depth*bpp / stride_bpp;
- suffix->flags.discard = 0;
- suffix->flags.unsynchronized = 0;
- suffix->flags.reserved = 0;
- ++cmd;
- }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
- struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
- void *cmd;
-
- if (res->id != -1) {
-
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
- /*
- * used_memory_size_atomic, or separate lock
- * to avoid taking dev_priv::cmdbuf_mutex in
- * the destroy path.
- */
-
- mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = container_of(res, struct vmw_surface, res);
- dev_priv->used_memory_size -= srf->backup_size;
- mutex_unlock(&dev_priv->cmdbuf_mutex);
-
- }
- vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
-
- if (likely(res->id != -1))
- return 0;
-
- if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
- dev_priv->memory_size))
- return -EBUSY;
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- if (srf->backup) {
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)((unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC);
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
- }
-
- /*
- * Alloc id for the resource.
- */
-
- ret = vmw_resource_alloc_id(dev_priv, res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a surface id.\n");
- goto out_no_id;
- }
- if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
- ret = -EBUSY;
- goto out_no_fifo;
- }
-
-
- /*
- * Encode surface define- and dma commands.
- */
-
- submit_size = vmw_surface_define_size(srf);
- if (srf->backup)
- submit_size += vmw_surface_dma_size(srf);
-
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "validation.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_surface_define_encode(srf, cmd);
- if (srf->backup) {
- SVGAGuestPtr ptr;
-
- cmd += vmw_surface_define_size(srf);
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, true);
- }
-
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Create a fence object and fence the backup buffer.
- */
-
- if (srf->backup) {
- struct vmw_fence_obj *fence;
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- }
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size += srf->backup_size;
-
- return 0;
-
-out_no_fifo:
- vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- if (srf->backup)
- ttm_bo_unref(&val_buf.bo);
- return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- struct vmw_resource *res = &srf->res;
- struct list_head val_list;
- struct ttm_validate_buffer val_buf;
- uint32_t submit_size;
- uint8_t *cmd;
- int ret;
- struct vmw_fence_obj *fence;
- SVGAGuestPtr ptr;
-
- BUG_ON(res->id == -1);
-
- /*
- * Create a surface backup buffer object.
- */
-
- if (!srf->backup) {
- ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
- ttm_bo_type_device,
- &vmw_srf_placement, 0, 0, true,
- NULL, &srf->backup);
- if (unlikely(ret != 0))
- return ret;
- }
-
- /*
- * Reserve- and validate the backup DMA bo.
- */
-
- INIT_LIST_HEAD(&val_list);
- val_buf.bo = ttm_bo_reference(srf->backup);
- val_buf.new_sync_obj_arg = (void *)(unsigned long)
- DRM_VMW_FENCE_FLAG_EXEC;
- list_add_tail(&val_buf.head, &val_list);
- ret = ttm_eu_reserve_buffers(&val_list);
- if (unlikely(ret != 0))
- goto out_no_reserve;
-
- ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
- true, false, false);
- if (unlikely(ret != 0))
- goto out_no_validate;
-
-
- /*
- * Encode the dma- and surface destroy commands.
- */
-
- submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
- ret = -ENOMEM;
- goto out_no_fifo;
- }
-
- vmw_bo_get_guest_ptr(srf->backup, &ptr);
- vmw_surface_dma_encode(srf, cmd, &ptr, false);
- cmd += vmw_surface_dma_size(srf);
- vmw_surface_destroy_encode(res->id, cmd);
- vmw_fifo_commit(dev_priv, submit_size);
-
- /*
- * Surface memory usage accounting.
- */
-
- dev_priv->used_memory_size -= srf->backup_size;
-
- /*
- * Create a fence object and fence the DMA buffer.
- */
-
- (void) vmw_execbuf_fence_commands(NULL, dev_priv,
- &fence, NULL);
- ttm_eu_fence_buffer_objects(&val_list, fence);
- if (likely(fence != NULL))
- vmw_fence_obj_unreference(&fence);
- ttm_bo_unref(&val_buf.bo);
-
- /*
- * Release the surface ID.
- */
-
- vmw_resource_release_id(res);
-
- return 0;
-
-out_no_fifo:
-out_no_validate:
- if (srf->backup)
- ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
- ttm_bo_unref(&val_buf.bo);
- ttm_bo_unref(&srf->backup);
- return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf)
-{
- int ret;
- struct vmw_surface *evict_srf;
-
- do {
- write_lock(&dev_priv->resource_lock);
- list_del_init(&srf->lru_head);
- write_unlock(&dev_priv->resource_lock);
-
- ret = vmw_surface_do_validate(dev_priv, srf);
- if (likely(ret != -EBUSY))
- break;
-
- write_lock(&dev_priv->resource_lock);
- if (list_empty(&dev_priv->surface_lru)) {
- DRM_ERROR("Out of device memory for surfaces.\n");
- ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
- break;
- }
-
- evict_srf = vmw_surface_reference
- (list_first_entry(&dev_priv->surface_lru,
- struct vmw_surface,
- lru_head));
- list_del_init(&evict_srf->lru_head);
-
- write_unlock(&dev_priv->resource_lock);
- (void) vmw_surface_evict(dev_priv, evict_srf);
-
- vmw_surface_unreference(&evict_srf);
-
- } while (1);
-
- if (unlikely(ret != 0 && srf->res.id != -1)) {
- write_lock(&dev_priv->resource_lock);
- list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
- write_unlock(&dev_priv->resource_lock);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
- struct vmw_resource *res = &srf->res;
-
- BUG_ON(res_free == NULL);
- INIT_LIST_HEAD(&srf->lru_head);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, true, res_free,
- vmw_surface_remove_from_lists);
-
- if (unlikely(ret != 0))
- res_free(res);
-
- /*
- * The surface won't be visible to hardware until a
- * surface validate.
- */
-
- (void) vmw_3d_resource_inc(dev_priv, false);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
- return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *user_srf =
- container_of(srf, struct vmw_user_surface, srf);
- struct vmw_private *dev_priv = srf->res.dev_priv;
- uint32_t size = user_srf->size;
-
- if (srf->backup)
- ttm_bo_unref(&srf->backup);
- kfree(srf->offsets);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(user_srf);
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- rwlock_t *lock = NULL;
-
- list_for_each_entry(res, list, validate_head) {
-
- if (res->res_free != &vmw_surface_res_free &&
- res->res_free != &vmw_user_surface_free)
- continue;
-
- if (unlikely(lock == NULL)) {
- lock = &res->dev_priv->resource_lock;
- write_lock(lock);
- }
-
- srf = container_of(res, struct vmw_surface, res);
- list_del_init(&srf->lru_head);
- list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
- }
-
- if (lock != NULL)
- write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_dma_buffer **out_buf)
-{
- int ret;
-
- BUG_ON(*out_surf || *out_buf);
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
- if (!ret)
- return 0;
-
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
- return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_surface **out)
-{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
+ struct vmw_resource *res;
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
+ if (unlikely(base->object_type != converter->object_type))
goto out_bad_resource;
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
- res = &srf->res;
+ res = converter->base_obj_to_res(base);
read_lock(&dev_priv->resource_lock);
-
- if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ if (!res->avail || res->res_free != converter->res_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);
- *out = srf;
+ *p_res = res;
ret = 0;
out_bad_resource:
@@ -1254,286 +319,32 @@ out_bad_resource:
return ret;
}
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
- struct vmw_resource *res = &user_srf->srf.res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf;
- struct vmw_surface *srf;
struct vmw_resource *res;
- struct vmw_resource *tmp;
- union drm_vmw_surface_create_arg *arg =
- (union drm_vmw_surface_create_arg *)data;
- struct drm_vmw_surface_create_req *req = &arg->req;
- struct drm_vmw_surface_arg *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
- int i, j;
- uint32_t cur_bo_offset;
- struct drm_vmw_size *cur_size;
- struct vmw_surface_offset *cur_offset;
- uint32_t stride_bpp;
- uint32_t bpp;
- uint32_t num_sizes;
- uint32_t size;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- if (unlikely(vmw_user_surface_size == 0))
- vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
-
- num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- num_sizes += req->mip_levels[i];
-
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
- return -EINVAL;
-
- size = vmw_user_surface_size + 128 +
- ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
- ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
- goto out_unlock;
- }
-
- user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
- ret = -ENOMEM;
- goto out_no_user_srf;
- }
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
- srf->backup = NULL;
-
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = num_sizes;
- user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_sizes;
- }
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_no_offsets;
- }
-
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
- cur_bo_offset = 0;
- cur_offset = srf->offsets;
- cur_size = srf->sizes;
-
- bpp = vmw_sf_bpp[srf->format].bpp;
- stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- for (j = 0; j < srf->mip_levels[i]; ++j) {
- uint32_t stride =
- (cur_size->width * stride_bpp + 7) >> 3;
-
- cur_offset->face = i;
- cur_offset->mip = j;
- cur_offset->bo_offset = cur_bo_offset;
- cur_bo_offset += stride * cur_size->height *
- cur_size->depth * bpp / stride_bpp;
- ++cur_offset;
- ++cur_size;
- }
- }
- srf->backup_size = cur_bo_offset;
-
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- /* allocate image area and clear it */
- srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
- }
- srf->snooper.crtc = NULL;
-
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
-
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
-
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- if (unlikely(ret != 0))
- goto out_unlock;
-
- tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- goto out_unlock;
- }
-
- rep->sid = user_srf->base.hash.key;
- if (rep->sid == SVGA3D_INVALID_ID)
- DRM_ERROR("Created bad Surface ID.\n");
-
- vmw_resource_unreference(&res);
-
- ttm_read_unlock(&vmaster->lock);
- return 0;
-out_no_copy:
- kfree(srf->offsets);
-out_no_offsets:
- kfree(srf->sizes);
-out_no_sizes:
- kfree(user_srf);
-out_no_user_srf:
- ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
- return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_vmw_surface_reference_arg *arg =
- (union drm_vmw_surface_reference_arg *)data;
- struct drm_vmw_surface_arg *req = &arg->req;
- struct drm_vmw_surface_create_req *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct drm_vmw_size __user *user_sizes;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
-
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
- rep->flags = srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- rep->size_addr;
+ BUG_ON(*out_surf || *out_buf);
- if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
- ret = -EFAULT;
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+ *out_surf = vmw_res_to_srf(res);
+ return 0;
}
-out_bad_resource:
-out_no_reference:
- ttm_base_object_unref(&base);
-
- return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id)
-{
- struct ttm_base_object *base;
- struct vmw_user_surface *user_srf;
-
- int ret = -EPERM;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_surface;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- *id = user_srf->srf.res.id;
- ret = 0;
-
-out_bad_surface:
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
-
- ttm_base_object_unref(&base);
+ *out_surf = NULL;
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
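A minimal usage sketch for the helper above; the wrapper name is illustrative only. Both output pointers must start out NULL, and whichever lookup succeeds hands back a reference the caller is expected to drop:

static int vmw_lookup_and_release_example(struct vmw_private *dev_priv,
					  struct ttm_object_file *tfile,
					  uint32_t handle)
{
	struct vmw_surface *surf = NULL;	/* must start out NULL */
	struct vmw_dma_buffer *buf = NULL;	/* must start out NULL */
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (unlikely(ret != 0))
		return ret;		/* handle matched neither object type */

	if (surf != NULL)
		vmw_surface_unreference(&surf);	/* it was a surface */
	else
		vmw_dmabuf_unreference(&buf);	/* it was a dma buffer */
	return 0;
}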
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
- INIT_LIST_HEAD(&vmw_bo->validate_list);
+ INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
- 0, 0, interruptible,
+ 0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
}
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- kfree(vmw_user_bo);
+ ttm_base_object_kfree(vmw_user_bo, base);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
ttm_bo_unref(&bo);
}
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(user_bo == NULL)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = ttm_bo_reference(&user_bo->dma.base);
+ ret = ttm_base_object_init(tfile,
+ &user_bo->base,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
+ }
+
+ *p_dma_buf = &user_bo->dma;
+ *handle = user_bo->base.hash.key;
+
+out_no_base_object:
+ return ret;
+}
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+
+ if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+ return -EPERM;
+
+ vmw_user_bo = vmw_user_dma_buffer(bo);
+ return (vmw_user_bo->base.tfile == tfile ||
+ vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
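A hedged sketch of how this check might be wired into TTM's verify_access driver callback; the wrapper name and the vmw_fpriv() lookup are assumptions for illustration, not part of this patch:

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	/* Only user-created buffers owned by or shared with the caller pass. */
	return vmw_user_dmabuf_verify_access(bo, tfile);
}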
+
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
+ struct vmw_dma_buffer *dma_buf;
+ uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (unlikely(vmw_user_bo == NULL))
- return -ENOMEM;
-
ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0)) {
- kfree(vmw_user_bo);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
goto out_no_dmabuf;
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0))
- goto out_no_base_object;
- else {
- rep->handle = vmw_user_bo->base.hash.key;
- rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
- rep->cur_gmr_id = vmw_user_bo->base.hash.key;
- rep->cur_gmr_offset = 0;
- }
+ rep->handle = handle;
+ rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
+
+ vmw_dmabuf_unreference(&dma_buf);
-out_no_base_object:
- ttm_bo_unref(&tmp);
out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- if (likely(vmw_bo->on_validate_list))
- return vmw_bo->cur_validate_node;
-
- vmw_bo->cur_validate_node = cur_validate_node;
- vmw_bo->on_validate_list = true;
-
- return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_bo->on_validate_list = false;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf)
+{
+ struct vmw_user_dma_buffer *user_bo;
+
+ if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+ return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
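As a hedged illustration, a caller that wants to hand an existing buffer back to user-space could add a usage reference first and then report the TTM handle; the helper name below is hypothetical:

static int vmw_dmabuf_export_handle(struct ttm_object_file *tfile,
				    struct vmw_dma_buffer *dma_buf,
				    uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;
	int ret;

	ret = vmw_user_dmabuf_reference(tfile, dma_buf);
	if (unlikely(ret != 0))
		return ret;		/* not a user-created buffer */

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	*handle = user_bo->base.hash.key;
	return 0;
}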
+
/*
* Stream management
*/
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &stream->res;
int ret;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, false, res_free, NULL);
+ ret = vmw_resource_init(dev_priv, res, false, res_free,
+ &vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
return 0;
}
-/**
- * User-space context management:
- */
-
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
- kfree(stream);
+ ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+ res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource *res;
int ret;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+ *inout_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ bool interruptible)
+{
+ unsigned long size =
+ (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ struct vmw_dma_buffer *backup;
+ int ret;
+
+ if (likely(res->backup)) {
+ BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ return 0;
+ }
+
+ backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+ if (unlikely(backup == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ res->func->backup_placement,
+ interruptible,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ res->backup = backup;
+
+out_no_dmabuf:
+ return ret;
+}
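The open-coded rounding above is the standard page-align-up idiom; with the kernel's PAGE_ALIGN() helper the same computation could equivalently be written as:

	unsigned long size = PAGE_ALIGN(res->backup_size);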
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ * @val_buf: Information about a buffer possibly
+ * containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ int ret = 0;
+ const struct vmw_res_func *func = res->func;
+
+ if (unlikely(res->id == -1)) {
+ ret = func->create(res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (func->bind &&
+ ((func->needs_backup && list_empty(&res->mob_head) &&
+ val_buf->bo != NULL) ||
+ (!func->needs_backup && val_buf->bo != NULL))) {
+ ret = func->bind(res, val_buf);
+ if (unlikely(ret != 0))
+ goto out_bind_failed;
+ if (func->needs_backup)
+ list_add_tail(&res->mob_head, &res->backup->res_list);
+ }
+
+ /*
+ * Only do this on write operations, and move to
+ * vmw_resource_unreserve if it can be called after
+ * backup buffers have been unreserved. Otherwise
+ * sort out locking.
+ */
+ res->res_dirty = true;
+
+ return 0;
+
+out_bind_failed:
+ func->destroy(res);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res: Pointer to the struct vmw_resource to unreserve.
+ * @new_backup: Pointer to new backup buffer if command submission
+ * switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (!list_empty(&res->lru_head))
+ return;
+
+ if (new_backup && new_backup != res->backup) {
+
+ if (res->backup) {
+ BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ res->backup = vmw_dmabuf_reference(new_backup);
+ BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ }
+ if (new_backup)
+ res->backup_offset = new_backup_offset;
+
+ if (!res->func->may_evict)
+ return;
+
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&res->lru_head,
+ &res->dev_priv->res_lru[res->func->res_type]);
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ * for a resource and in that case, allocate
+ * one, reserve and validate it.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ * @val_buf: On successful return contains data about the
+ * reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+ bool interruptible,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+ bool backup_dirty = false;
+ int ret;
+
+ if (unlikely(res->backup == NULL)) {
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf->bo = ttm_bo_reference(&res->backup->base);
+ list_add_tail(&val_buf->head, &val_list);
+ ret = ttm_eu_reserve_buffers(&val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ if (res->func->needs_backup && list_empty(&res->mob_head))
+ return 0;
+
+ backup_dirty = res->backup_dirty;
+ ret = ttm_bo_validate(&res->backup->base,
+ res->func->backup_placement,
+ true, false);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf->bo);
+ if (backup_dirty)
+ vmw_dmabuf_unreference(&res->backup);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res: The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (res->func->needs_backup && res->backup == NULL &&
+ !no_backup) {
+ ret = vmw_resource_buf_alloc(res, true);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ * backup buffer
+ *
+ * @val_buf: Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+
+ if (likely(val_buf->bo == NULL))
+ return;
+
+ INIT_LIST_HEAD(&val_list);
+ list_add_tail(&val_buf->head, &val_list);
+ ttm_eu_backoff_reservation(&val_list);
+ ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+ int ret;
+
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+ ret = vmw_resource_check_buffer(res, true, &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(func->unbind != NULL &&
+ (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ ret = func->unbind(res, res->res_dirty, &val_buf);
+ if (unlikely(ret != 0))
+ goto out_no_unbind;
+ list_del_init(&res->mob_head);
+ }
+ ret = func->destroy(res);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+out_no_unbind:
+ vmw_resource_backoff_reservation(&val_buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+ int ret;
+ struct vmw_resource *evict_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+
+ val_buf.bo = NULL;
+ if (res->backup)
+ val_buf.bo = &res->backup->base;
+ do {
+ ret = vmw_resource_do_validate(res, &val_buf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device id entries "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_res = vmw_resource_reference
+ (list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ else if (!res->func->needs_backup && res->backup) {
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ return 0;
+
+out_no_validate:
+ return ret;
+}
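A simplified sketch of how a submission path is expected to pair the reserve, validate and unreserve calls above; backup-buffer reservation and fencing are omitted here and are handled by the execbuf code in practice, and the function name is illustrative only:

static int vmw_resource_submit_sketch(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_reserve(res, false);	/* off the LRU, backup allocated */
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_validate(res);	/* create/bind, evicting on -EBUSY */
	if (unlikely(ret != 0))
		goto out_unreserve;

	/* ... emit FIFO commands that reference res->id ... */

out_unreserve:
	vmw_resource_unreserve(res, NULL, 0);	/* back on the LRU if evictable */
	return ret;
}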
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct vmw_fence_obj *old_fence_obj;
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL)
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ driver->sync_obj_ref(fence);
+
+ spin_lock(&bdev->fence_lock);
+
+ old_fence_obj = bo->sync_obj;
+ bo->sync_obj = fence;
+
+ spin_unlock(&bdev->fence_lock);
+
+ if (old_fence_obj)
+ vmw_fence_obj_unreference(&old_fence_obj);
+}
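Because the helper deliberately leaves the buffer reserved, a typical usage pattern after queuing commands is sketched below; the wrapper name is an assumption, and the bo is assumed to have been reserved by the caller:

static void vmw_dma_commit_and_fence(struct vmw_private *dev_priv,
				     struct ttm_buffer_object *bo,
				     uint32_t submit_size)
{
	struct vmw_fence_obj *fence;

	vmw_fifo_commit(dev_priv, submit_size);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);	/* fence, but keep the reservation */
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
	ttm_bo_unreserve(bo);		/* unreserving is the caller's job */
}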
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res: The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+ return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @type: The resource type to evict
+ *
+ * To avoid starvation due to thrashing, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+{
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+
+ if (list_empty(lru_list))
+ goto out_unlock;
+
+ evict_res = vmw_resource_reference(
+ list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+ vmw_resource_do_evict(evict_res);
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+out_unlock:
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv: Pointer to a device private struct
+ *
+ * To avoid starvation due to thrashing, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+ enum vmw_res_type type;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ for (type = 0; type < vmw_res_max; ++type)
+ vmw_resource_evict_type(dev_priv, type);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
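One plausible caller, sketched under the assumption that the driver flushes device state before hibernation; the hook name is illustrative only:

static void vmw_hibernation_flush(struct vmw_private *dev_priv)
{
	/*
	 * Push every evictable resource out to its backup buffer so that
	 * device-side state can be discarded while the device is down.
	 */
	vmw_resource_evict_all(dev_priv);
}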
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+ enum ttm_object_type object_type;
+ struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+ void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type: Enum that identifies the lru list to use for eviction.
+ * @needs_backup: Whether the resource is guest-backed and needs
+ * persistent buffer storage.
+ * @type_name: String that identifies the resource type.
+ * @backup_placement: TTM placement for backup buffers.
+ * @may_evict: Whether the resource may be evicted.
+ * @create: Create a hardware resource.
+ * @destroy: Destroy a hardware resource.
+ * @bind: Bind a hardware resource to persistent buffer storage.
+ * @unbind: Unbind a hardware resource from persistent
+ * buffer storage.
+ */
+
+struct vmw_res_func {
+ enum vmw_res_type res_type;
+ bool needs_backup;
+ const char *type_name;
+ struct ttm_placement *backup_placement;
+ bool may_evict;
+
+ int (*create) (struct vmw_resource *res);
+ int (*destroy) (struct vmw_resource *res);
+ int (*bind) (struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+ int (*unbind) (struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *));
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab1..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
return -EINVAL;
}
- if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+ if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base: The TTM base object handling user-space visibility.
+ * @srf: The surface metadata.
+ * @size: TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+ uint32_t size;
+ uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face: Surface face.
+ * @mip: Mip level.
+ * @bo_offset: Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+ uint32_t face;
+ uint32_t mip;
+ uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+ .object_type = VMW_RES_SURFACE,
+ .base_obj_to_res = vmw_user_surface_base_to_res,
+ .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+ &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+ .res_type = vmw_res_surface,
+ .needs_backup = false,
+ .may_evict = true,
+ .type_name = "legacy surfaces",
+ .backup_placement = &vmw_srf_placement,
+ .create = &vmw_legacy_srf_create,
+ .destroy = &vmw_legacy_srf_destroy,
+ .bind = &vmw_legacy_srf_bind,
+ .unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA body;
+ SVGA3dCopyBox cb;
+ SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+ return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+ return sizeof(struct vmw_surface_define) + srf->num_sizes *
+ sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+ return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+ void *cmd_space)
+{
+ struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+ cmd_space;
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+ void *cmd_space)
+{
+ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+ cmd_space;
+ struct drm_vmw_size *src_size;
+ SVGA3dSize *cmd_size;
+ uint32_t cmd_len;
+ int i;
+
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+ cmd->header.size = cmd_len;
+ cmd->body.sid = srf->res.id;
+ cmd->body.surfaceFlags = srf->flags;
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = src_size->width;
+ cmd_size->height = src_size->height;
+ cmd_size->depth = src_size->depth;
+ }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+ void *cmd_space,
+ const SVGAGuestPtr *ptr,
+ bool to_surface)
+{
+ uint32_t i;
+ struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+ const struct svga3d_surface_desc *desc =
+ svga3dsurface_get_desc(srf->format);
+
+ for (i = 0; i < srf->num_sizes; ++i) {
+ SVGA3dCmdHeader *header = &cmd->header;
+ SVGA3dCmdSurfaceDMA *body = &cmd->body;
+ SVGA3dCopyBox *cb = &cmd->cb;
+ SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+ const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+ const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+ header->id = SVGA_3D_CMD_SURFACE_DMA;
+ header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+ body->guest.ptr = *ptr;
+ body->guest.ptr.offset += cur_offset->bo_offset;
+ body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+ cur_size);
+ body->host.sid = srf->res.id;
+ body->host.face = cur_offset->face;
+ body->host.mipmap = cur_offset->mip;
+ body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+ SVGA3D_READ_HOST_VRAM);
+ cb->x = 0;
+ cb->y = 0;
+ cb->z = 0;
+ cb->srcx = 0;
+ cb->srcy = 0;
+ cb->srcz = 0;
+ cb->w = cur_size->width;
+ cb->h = cur_size->height;
+ cb->d = cur_size->depth;
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset =
+ svga3dsurface_get_image_buffer_size(desc, cur_size,
+ body->guest.pitch);
+ suffix->flags.discard = 0;
+ suffix->flags.unsynchronized = 0;
+ suffix->flags.reserved = 0;
+ ++cmd;
+ }
+};
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface, if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ void *cmd;
+
+ if (res->id != -1) {
+
+ cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+ /*
+ * used_memory_size_atomic, or separate lock
+ * to avoid taking dev_priv::cmdbuf_mutex in
+ * the destroy path.
+ */
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+ srf = vmw_res_to_srf(res);
+ dev_priv->used_memory_size -= res->backup_size;
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ }
+ vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, allocate one.
+ *
+ * Returns -EBUSY if there wasn't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_surface *srf;
+ uint32_t submit_size;
+ uint8_t *cmd;
+ int ret;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ srf = vmw_res_to_srf(res);
+ if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ dev_priv->memory_size))
+ return -EBUSY;
+
+ /*
+ * Alloc id for the resource.
+ */
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a surface id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ /*
+ * Encode surface define- commands.
+ */
+
+ submit_size = vmw_surface_define_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ vmw_surface_define_encode(srf, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size += res->backup_size;
+ return 0;
+
+out_no_fifo:
+ vmw_resource_release_id(res);
+out_no_id:
+ return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ * @bind: Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf,
+ bool bind)
+{
+ SVGAGuestPtr ptr;
+ struct vmw_fence_obj *fence;
+ uint32_t submit_size;
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ uint8_t *cmd;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ BUG_ON(val_buf->bo == NULL);
+
+ submit_size = vmw_surface_dma_size(srf);
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "DMA.\n");
+ return -ENOMEM;
+ }
+ vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+ vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(val_buf->bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ * surface validation process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (!res->backup_dirty)
+ return 0;
+
+ return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ * surface eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ * @val_buf: Pointer to a struct ttm_validate_buffer containing
+ * information about the backup buffer.
+ *
+ * This function will copy backup data from the surface.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+{
+ if (unlikely(readback))
+ return vmw_legacy_srf_dma(res, val_buf, false);
+ return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ * resource eviction process.
+ *
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+ BUG_ON(res->id == -1);
+
+ /*
+ * Encode the surface destroy command.
+ */
+
+ submit_size = vmw_surface_destroy_size();
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "eviction.\n");
+ return -ENOMEM;
+ }
+
+ vmw_surface_destroy_encode(res->id, cmd);
+ vmw_fifo_commit(dev_priv, submit_size);
+
+ /*
+ * Surface memory usage accounting.
+ */
+
+ dev_priv->used_memory_size -= res->backup_size;
+
+ /*
+ * Release the surface ID.
+ */
+
+ vmw_resource_release_id(res);
+
+ return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @srf: Pointer to the struct vmw_surface to initialize.
+ * @res_free: Pointer to a resource destructor used to free
+ * the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct vmw_resource *res = &srf->res;
+
+ BUG_ON(res_free == NULL);
+ (void) vmw_3d_resource_inc(dev_priv, false);
+ ret = vmw_resource_init(dev_priv, res, true, res_free,
+ &vmw_legacy_surface_func);
+
+ if (unlikely(ret != 0)) {
+ vmw_3d_resource_dec(dev_priv, false);
+ res_free(res);
+ return ret;
+ }
+
+ /*
+ * The surface won't be visible to hardware until a
+ * surface validate.
+ */
+
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ * user visible surfaces
+ *
+ * @base: Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+ return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res: A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_private *dev_priv = srf->res.dev_priv;
+ uint32_t size = user_srf->size;
+
+ kfree(srf->offsets);
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ ttm_base_object_kfree(user_srf, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base: Pointer to a pointer to a TTM base object
+ * embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and sets the
+ * pointer pointed to by @p_base to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ * the user surface destroy functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf;
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i, j;
+ uint32_t cur_bo_offset;
+ struct drm_vmw_size *cur_size;
+ struct vmw_surface_offset *cur_offset;
+ uint32_t num_sizes;
+ uint32_t size;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ const struct svga3d_surface_desc *desc;
+
+ if (unlikely(vmw_user_surface_size == 0))
+ vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+ 128;
+
+ num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ num_sizes += req->mip_levels[i];
+
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
+ size = vmw_user_surface_size + 128 +
+ ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+ ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+ desc = svga3dsurface_get_desc(req->format);
+ if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+ DRM_ERROR("Invalid surface format for surface creation.\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ size, false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for surface"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+ user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+ if (unlikely(user_srf == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_user_srf;
+ }
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = num_sizes;
+ user_srf->size = size;
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_sizes;
+ }
+ srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(srf->offsets == NULL)) {
+ ret = -ENOMEM;
+ goto out_no_offsets;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_no_copy;
+ }
+
+ srf->base_size = *srf->sizes;
+ srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ srf->multisample_count = 1;
+
+ cur_bo_offset = 0;
+ cur_offset = srf->offsets;
+ cur_size = srf->sizes;
+
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ for (j = 0; j < srf->mip_levels[i]; ++j) {
+ uint32_t stride = svga3dsurface_calculate_pitch
+ (desc, cur_size);
+
+ cur_offset->face = i;
+ cur_offset->mip = j;
+ cur_offset->bo_offset = cur_bo_offset;
+ cur_bo_offset += svga3dsurface_get_image_buffer_size
+ (desc, cur_size, stride);
+ ++cur_offset;
+ ++cur_size;
+ }
+ }
+ res->backup_size = cur_bo_offset;
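+ /* A 64x64 A8R8G8B8 scanout surface is assumed to be a cursor;
+ * keep a CPU copy of the image so cursor updates can be snooped
+ * from surface DMA. */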
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_no_copy;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /*
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ vmw_resource_unreference(&res);
+
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_no_copy:
+ kfree(srf->offsets);
+out_no_offsets:
+ kfree(srf->sizes);
+out_no_sizes:
+ ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index f34838839b08..29437eabe095 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,7 +1,7 @@
config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
- depends on PCI
+ depends on (PCI && !S390)
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab2..fa60add0ff63 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
*/
#include <linux/module.h>
-#include <linux/dmi.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
- const char *pdev_name;
int ret;
bool delay = false, can_switch;
bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
if (can_switch) {
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
struct vga_switcheroo_client *client;
- const char *pdev_name;
int ret;
int err = -EINVAL;
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
if (!client || !check_can_switch())
goto err;
- pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage2(client);
if (ret)
printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1630150ad2b1..e7d6a13ec6a6 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -265,6 +265,15 @@ config HID_GYRATION
---help---
Support for Gyration remote control.
+config HID_ICADE
+ tristate "ION iCade arcade controller"
+ depends on BT_HIDP
+ ---help---
+ Support for the ION iCade arcade controller, exposed to the system as a joystick.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-icade.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on USB_HID
@@ -728,4 +737,6 @@ endif # HID
source "drivers/hid/usbhid/Kconfig"
+source "drivers/hid/i2c-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index cef68ca859d3..b62215716b2f 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
+obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -93,8 +94,8 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_PS3REMOTE) += hid-ps3remote.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
- hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \
- hid-roccat-savu.o
+ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-lua.o \
+ hid-roccat-pyra.o hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
@@ -118,3 +119,4 @@ obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
+obj-$(CONFIG_I2C_HID) += i2c-hid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fd7722aecf77..d0f7662aacca 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -439,7 +439,8 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO),
- .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
APPLE_RDESC_JIS },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f4109fd657ff..eb2ee11b6412 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -92,6 +92,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
{
struct hid_field *field;
+ int i;
if (report->maxfield == HID_MAX_FIELDS) {
hid_err(report->device, "too many fields in report\n");
@@ -110,6 +111,9 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field->value = (s32 *)(field->usage + usages);
field->report = report;
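+ /* Record each usage's index within its field so drivers can tell
+ * repeated usages of the same kind apart. */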
+ for (i = 0; i < usages; i++)
+ field->usage[i].usage_index = i;
+
return field;
}
@@ -315,6 +319,7 @@ static s32 item_sdata(struct hid_item *item)
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
+ __u32 raw_value;
switch (item->tag) {
case HID_GLOBAL_ITEM_TAG_PUSH:
@@ -365,7 +370,14 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
- parser->global.unit_exponent = item_sdata(item);
+ /* Negative unit-exponent values are encoded as a
+ * two's complement nibble.
+ * See "6.2.2.7 Global Items" for more information. */
+ raw_value = item_udata(item);
+ if (!(raw_value & 0xfffffff0))
+ parser->global.unit_exponent = hid_snto32(raw_value, 4);
+ else
+ parser->global.unit_exponent = raw_value;
return 0;
case HID_GLOBAL_ITEM_TAG_UNIT:
@@ -713,7 +725,12 @@ static int hid_scan_report(struct hid_device *hid)
hid_scan_usage(hid, u);
break;
}
- }
+ } else if (page == HID_UP_SENSOR &&
+ item.type == HID_ITEM_TYPE_MAIN &&
+ item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
+ (item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL &&
+ hid->bus == BUS_USB)
+ hid->group = HID_GROUP_SENSOR_HUB;
}
return 0;
@@ -865,6 +882,12 @@ static s32 snto32(__u32 value, unsigned n)
return value & (1 << (n - 1)) ? value | (-1 << n) : value;
}
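+/**
+ * hid_snto32 - sign-extend an n-bit two's complement value to s32
+ *
+ * Exported wrapper around snto32() so that other HID drivers can reuse
+ * the report-field sign extension helper.
+ */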
+s32 hid_snto32(__u32 value, unsigned n)
+{
+ return snto32(value, n);
+}
+EXPORT_SYMBOL_GPL(hid_snto32);
+
/*
* Convert a signed 32-bit integer to a signed n-bit integer.
*/
@@ -1465,6 +1488,10 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
* there is a proper autodetection and autoloading in place (based on presence
* of HID_DG_CONTACTID), so those devices don't need to be added to this list,
* as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
*/
static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
@@ -1538,6 +1565,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
@@ -1571,10 +1599,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086, USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086, USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087, USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087, USB_DEVICE_ID_SENSOR_HUB_09FA) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
@@ -1658,6 +1683,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
@@ -1672,7 +1698,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_SENSOR_HUB_7014) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2150,8 +2175,13 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ }
};
-static bool hid_ignore(struct hid_device *hdev)
+bool hid_ignore(struct hid_device *hdev)
{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
switch (hdev->vendor) {
case USB_VENDOR_ID_CODEMERCS:
/* ignore all Code Mercenaries IOWarrior devices */
@@ -2188,7 +2218,16 @@ static bool hid_ignore(struct hid_device *hdev)
if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
hdev->type == HID_TYPE_USBNONE)
return true;
- break;
+ break;
+ case USB_VENDOR_ID_DWAV:
+ /* These are handled by usbtouchscreen. hdev->type is probably
+ * HID_TYPE_USBNONE, but we say !HID_TYPE_USBMOUSE to match
+ * usbtouchscreen. */
+ if ((hdev->product == USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER ||
+ hdev->product == USB_DEVICE_ID_DWAV_TOUCHCONTROLLER) &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
@@ -2197,6 +2236,7 @@ static bool hid_ignore(struct hid_device *hdev)
return !!hid_match_id(hdev, hid_ignore_list);
}
+EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -2208,8 +2248,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (!(hdev->quirks & HID_QUIRK_NO_IGNORE)
- && (hid_ignore(hdev) || (hdev->quirks & HID_QUIRK_IGNORE)))
+ if (hid_ignore(hdev))
return -ENODEV;
/*
diff --git a/drivers/hid/hid-icade.c b/drivers/hid/hid-icade.c
new file mode 100644
index 000000000000..1d6565e37ba3
--- /dev/null
+++ b/drivers/hid/hid-icade.c
@@ -0,0 +1,259 @@
+/*
+ * ION iCade input driver
+ *
+ * Copyright (c) 2012 Bastien Nocera <hadess@hadess.net>
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+/*
+ * ↑ A C Y L
+ * ← →
+ * ↓ B X Z R
+ *
+ *
+ * UP ON,OFF = w,e
+ * RT ON,OFF = d,c
+ * DN ON,OFF = x,z
+ * LT ON,OFF = a,q
+ * A ON,OFF = y,t
+ * B ON,OFF = h,r
+ * C ON,OFF = u,f
+ * X ON,OFF = j,n
+ * Y ON,OFF = i,m
+ * Z ON,OFF = k,p
+ * L ON,OFF = o,g
+ * R ON,OFF = l,v
+ */
+
+/* The translation code uses HID usages instead of input layer
+ * keycodes. The program below generates a lookup table that makes
+ * the translation quick.
+ *
+ * #include <linux/input.h>
+ * #include <stdio.h>
+ * #include <assert.h>
+ *
+ * #define unk KEY_UNKNOWN
+ *
+ * < copy of hid_keyboard[] from hid-input.c >
+ *
+ * struct icade_key_translation {
+ * int from;
+ * const char *to;
+ * int press;
+ * };
+ *
+ * static const struct icade_key_translation icade_keys[] = {
+ * { KEY_W, "KEY_UP", 1 },
+ * { KEY_E, "KEY_UP", 0 },
+ * { KEY_D, "KEY_RIGHT", 1 },
+ * { KEY_C, "KEY_RIGHT", 0 },
+ * { KEY_X, "KEY_DOWN", 1 },
+ * { KEY_Z, "KEY_DOWN", 0 },
+ * { KEY_A, "KEY_LEFT", 1 },
+ * { KEY_Q, "KEY_LEFT", 0 },
+ * { KEY_Y, "BTN_A", 1 },
+ * { KEY_T, "BTN_A", 0 },
+ * { KEY_H, "BTN_B", 1 },
+ * { KEY_R, "BTN_B", 0 },
+ * { KEY_U, "BTN_C", 1 },
+ * { KEY_F, "BTN_C", 0 },
+ * { KEY_J, "BTN_X", 1 },
+ * { KEY_N, "BTN_X", 0 },
+ * { KEY_I, "BTN_Y", 1 },
+ * { KEY_M, "BTN_Y", 0 },
+ * { KEY_K, "BTN_Z", 1 },
+ * { KEY_P, "BTN_Z", 0 },
+ * { KEY_O, "BTN_THUMBL", 1 },
+ * { KEY_G, "BTN_THUMBL", 0 },
+ * { KEY_L, "BTN_THUMBR", 1 },
+ * { KEY_V, "BTN_THUMBR", 0 },
+ *
+ * { }
+ * };
+ *
+ * static int
+ * usage_for_key (int key)
+ * {
+ * int i;
+ * for (i = 0; i < 256; i++) {
+ * if (hid_keyboard[i] == key)
+ * return i;
+ * }
+ * assert(0);
+ * }
+ *
+ * int main (int argc, char **argv)
+ * {
+ * const struct icade_key_translation *trans;
+ * int max_usage = 0;
+ *
+ * for (trans = icade_keys; trans->from; trans++) {
+ * int usage = usage_for_key (trans->from);
+ * max_usage = usage > max_usage ? usage : max_usage;
+ * }
+ *
+ * printf ("#define ICADE_MAX_USAGE %d\n\n", max_usage);
+ * printf ("struct icade_key {\n");
+ * printf ("\tu16 to;\n");
+ * printf ("\tu8 press:1;\n");
+ * printf ("};\n\n");
+ * printf ("static const struct icade_key "
+ * "icade_usage_table[%d] = {\n", max_usage + 1);
+ * for (trans = icade_keys; trans->from; trans++) {
+ * printf ("\t[%d] = { %s, %d },\n",
+ * usage_for_key (trans->from), trans->to, trans->press);
+ * }
+ * printf ("};\n");
+ *
+ * return 0;
+ * }
+ */
+
+#define ICADE_MAX_USAGE 29
+
+struct icade_key {
+ u16 to;
+ u8 press:1;
+};
+
+static const struct icade_key icade_usage_table[30] = {
+ [26] = { KEY_UP, 1 },
+ [8] = { KEY_UP, 0 },
+ [7] = { KEY_RIGHT, 1 },
+ [6] = { KEY_RIGHT, 0 },
+ [27] = { KEY_DOWN, 1 },
+ [29] = { KEY_DOWN, 0 },
+ [4] = { KEY_LEFT, 1 },
+ [20] = { KEY_LEFT, 0 },
+ [28] = { BTN_A, 1 },
+ [23] = { BTN_A, 0 },
+ [11] = { BTN_B, 1 },
+ [21] = { BTN_B, 0 },
+ [24] = { BTN_C, 1 },
+ [9] = { BTN_C, 0 },
+ [13] = { BTN_X, 1 },
+ [17] = { BTN_X, 0 },
+ [12] = { BTN_Y, 1 },
+ [16] = { BTN_Y, 0 },
+ [14] = { BTN_Z, 1 },
+ [19] = { BTN_Z, 0 },
+ [18] = { BTN_THUMBL, 1 },
+ [10] = { BTN_THUMBL, 0 },
+ [15] = { BTN_THUMBR, 1 },
+ [25] = { BTN_THUMBR, 0 },
+};
+
+static const struct icade_key *icade_find_translation(u16 from)
+{
+ if (from > ICADE_MAX_USAGE)
+ return NULL;
+ return &icade_usage_table[from];
+}
+
+static int icade_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ const struct icade_key *trans;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
+ !usage->type)
+ return 0;
+
+ /* We ignore the fake key up, and act only on key down */
+ if (!value)
+ return 1;
+
+ trans = icade_find_translation(usage->hid & HID_USAGE);
+
+ if (!trans)
+ return 1;
+
+ input_event(field->hidinput->input, usage->type,
+ trans->to, trans->press);
+
+ return 1;
+}
+
+static int icade_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ const struct icade_key *trans;
+
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_KEYBOARD) {
+ trans = icade_find_translation(usage->hid & HID_USAGE);
+
+ if (!trans)
+ return -1;
+
+ hid_map_usage(hi, usage, bit, max, EV_KEY, trans->to);
+ set_bit(trans->to, hi->input->keybit);
+
+ return 1;
+ }
+
+ /* ignore others */
+ return -1;
+
+}
+
+static int icade_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY)
+ set_bit(usage->type, hi->input->evbit);
+
+ return -1;
+}
+
+static const struct hid_device_id icade_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+
+ { }
+};
+MODULE_DEVICE_TABLE(hid, icade_devices);
+
+static struct hid_driver icade_driver = {
+ .name = "icade",
+ .id_table = icade_devices,
+ .event = icade_event,
+ .input_mapped = icade_input_mapped,
+ .input_mapping = icade_input_mapping,
+};
+
+static int __init icade_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&icade_driver);
+ if (ret)
+ pr_err("can't register icade driver\n");
+
+ return ret;
+}
+
+static void __exit icade_exit(void)
+{
+ hid_unregister_driver(&icade_driver);
+}
+
+module_init(icade_init);
+module_exit(icade_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("ION iCade input driver");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9d7a42857ea1..34e25471aeaa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -257,6 +257,7 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_TOUCHCONTROLLER 0x0002
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7207 0x7207
@@ -305,6 +306,9 @@
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
+#define USB_VENDOR_ID_FORMOSA 0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
+
#define USB_VENDOR_ID_FREESCALE 0x15A2
#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
@@ -423,6 +427,9 @@
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
+#define USB_VENDOR_ID_ION 0x15e4
+#define USB_DEVICE_ID_ICADE 0x0132
+
#define USB_VENDOR_ID_HOLTEK 0x1241
#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
@@ -432,11 +439,6 @@
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
-#define USB_VENDOR_ID_INTEL_8086 0x8086
-#define USB_VENDOR_ID_INTEL_8087 0x8087
-#define USB_DEVICE_ID_SENSOR_HUB_1020 0x1020
-#define USB_DEVICE_ID_SENSOR_HUB_09FA 0x09FA
-
#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
@@ -603,6 +605,7 @@
#define USB_VENDOR_ID_NOVATEK 0x0603
#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
+#define USB_DEVICE_ID_NOVATEK_MOUSE 0x1602
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
@@ -677,7 +680,9 @@
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
+#define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
+#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
@@ -696,6 +701,9 @@
#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
+#define USB_VENDOR_ID_SIGMATEL 0x066F
+#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
@@ -714,7 +722,6 @@
#define USB_VENDOR_ID_STANTUM_STM 0x0483
#define USB_DEVICE_ID_MTP_STM 0x3261
-#define USB_DEVICE_ID_SENSOR_HUB_7014 0x7014
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
@@ -762,6 +769,9 @@
#define USB_VENDOR_ID_TOUCHPACK 0x1bfd
#define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
+#define USB_VENDOR_ID_TPV 0x25aa
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d917c0d53685..21b196c394b1 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,7 +192,6 @@ static int hidinput_setkeycode(struct input_dev *dev,
return -EINVAL;
}
-
/**
* hidinput_calc_abs_res - calculate an absolute axis resolution
* @field: the HID report field to calculate resolution for
@@ -208,7 +207,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
* Only exponent 1 length units are processed. Centimeters and inches are
* converted to millimeters. Degrees are converted to radians.
*/
-static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
+__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
{
__s32 unit_exponent = field->unit_exponent;
__s32 logical_extents = field->logical_maximum -
@@ -229,17 +228,29 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_X:
case ABS_Y:
case ABS_Z:
- if (field->unit == 0x11) { /* If centimeters */
+ case ABS_MT_POSITION_X:
+ case ABS_MT_POSITION_Y:
+ case ABS_MT_TOOL_X:
+ case ABS_MT_TOOL_Y:
+ case ABS_MT_TOUCH_MAJOR:
+ case ABS_MT_TOUCH_MINOR:
+ if (field->unit & 0xffffff00) /* Not a length */
+ return 0;
+ unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
+ switch (field->unit & 0xf) {
+ case 0x1: /* If centimeters */
/* Convert to millimeters */
unit_exponent += 1;
- } else if (field->unit == 0x13) { /* If inches */
+ break;
+ case 0x3: /* If inches */
/* Convert to millimeters */
prev = physical_extents;
physical_extents *= 254;
if (physical_extents < prev)
return 0;
unit_exponent -= 1;
- } else {
+ break;
+ default:
return 0;
}
break;
@@ -281,8 +292,9 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
}
/* Calculate resolution */
- return logical_extents / physical_extents;
+ return DIV_ROUND_CLOSEST(logical_extents, physical_extents);
}
+EXPORT_SYMBOL_GPL(hidinput_calc_abs_res);
#ifdef CONFIG_HID_BATTERY_STRENGTH
static enum power_supply_property hidinput_battery_props[] = {
@@ -299,6 +311,9 @@ static enum power_supply_property hidinput_battery_props[] = {
static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
@@ -502,9 +517,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (code <= 0xf)
code += BTN_JOYSTICK;
else
- code += BTN_TRIGGER_HAPPY;
+ code += BTN_TRIGGER_HAPPY - 0x10;
+ break;
+ case HID_GD_GAMEPAD:
+ if (code <= 0xf)
+ code += BTN_GAMEPAD;
+ else
+ code += BTN_TRIGGER_HAPPY - 0x10;
break;
- case HID_GD_GAMEPAD: code += BTN_GAMEPAD; break;
default:
switch (field->physical) {
case HID_GD_MOUSE:
@@ -1146,6 +1166,38 @@ static void report_features(struct hid_device *hid)
}
}
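+/*
+ * Allocate a struct hid_input and its input device, wire up the input
+ * core callbacks and identity fields from @hid, and add the pair to
+ * hid->inputs. Returns NULL on allocation failure.
+ */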
+static struct hid_input *hidinput_allocate(struct hid_device *hid)
+{
+ struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
+ struct input_dev *input_dev = input_allocate_device();
+ if (!hidinput || !input_dev) {
+ kfree(hidinput);
+ input_free_device(input_dev);
+ hid_err(hid, "Out of memory during hid input probe\n");
+ return NULL;
+ }
+
+ input_set_drvdata(input_dev, hid);
+ input_dev->event = hid->ll_driver->hidinput_input_event;
+ input_dev->open = hidinput_open;
+ input_dev->close = hidinput_close;
+ input_dev->setkeycode = hidinput_setkeycode;
+ input_dev->getkeycode = hidinput_getkeycode;
+
+ input_dev->name = hid->name;
+ input_dev->phys = hid->phys;
+ input_dev->uniq = hid->uniq;
+ input_dev->id.bustype = hid->bus;
+ input_dev->id.vendor = hid->vendor;
+ input_dev->id.product = hid->product;
+ input_dev->id.version = hid->version;
+ input_dev->dev.parent = hid->dev.parent;
+ hidinput->input = input_dev;
+ list_add_tail(&hidinput->list, &hid->inputs);
+
+ return hidinput;
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1157,7 +1209,6 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
struct hid_driver *drv = hid->driver;
struct hid_report *report;
struct hid_input *hidinput = NULL;
- struct input_dev *input_dev;
int i, j, k;
INIT_LIST_HEAD(&hid->inputs);
@@ -1188,33 +1239,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
continue;
if (!hidinput) {
- hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!hidinput || !input_dev) {
- kfree(hidinput);
- input_free_device(input_dev);
- hid_err(hid, "Out of memory during hid input probe\n");
+ hidinput = hidinput_allocate(hid);
+ if (!hidinput)
goto out_unwind;
- }
-
- input_set_drvdata(input_dev, hid);
- input_dev->event =
- hid->ll_driver->hidinput_input_event;
- input_dev->open = hidinput_open;
- input_dev->close = hidinput_close;
- input_dev->setkeycode = hidinput_setkeycode;
- input_dev->getkeycode = hidinput_getkeycode;
-
- input_dev->name = hid->name;
- input_dev->phys = hid->phys;
- input_dev->uniq = hid->uniq;
- input_dev->id.bustype = hid->bus;
- input_dev->id.vendor = hid->vendor;
- input_dev->id.product = hid->product;
- input_dev->id.version = hid->version;
- input_dev->dev.parent = hid->dev.parent;
- hidinput->input = input_dev;
- list_add_tail(&hidinput->list, &hid->inputs);
}
for (i = 0; i < report->maxfield; i++)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7867d69f0efe..61543c02ea0b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -52,11 +52,14 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
#define MT_QUIRK_NO_AREA (1 << 9)
+#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
+#define MT_QUIRK_HOVERING (1 << 11)
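+/* IGNORE_DUPLICATES drops reports for a slot that is already active and
+ * already handled in this frame; HOVERING reports in-range (hover) state
+ * through ABS_MT_DISTANCE. */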
struct mt_slot {
- __s32 x, y, p, w, h;
+ __s32 x, y, cx, cy, p, w, h;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
+ bool inrange_state; /* is the finger in proximity of the sensor? */
};
struct mt_class {
@@ -121,6 +124,7 @@ struct mt_device {
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
#define MT_DEFAULT_MAXCONTACT 10
+#define MT_MAX_MAXCONTACT 250
#define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
#define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
@@ -282,11 +286,26 @@ static void mt_feature_mapping(struct hid_device *hdev,
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
td->maxcontacts = field->value[0];
+ if (!td->maxcontacts &&
+ field->logical_maximum <= MT_MAX_MAXCONTACT)
+ td->maxcontacts = field->logical_maximum;
if (td->mtclass.maxcontacts)
/* check if the maxcontacts is given by the class */
td->maxcontacts = td->mtclass.maxcontacts;
break;
+ case 0xff0000c5:
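+ /* Vendor-defined usage 0xff0000c5 carries the 256 byte
+ * certification blob that Win 8 touch devices expose. */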
+ if (field->report_count == 256 && field->report_size == 8) {
+ /* Win 8 devices need special quirks */
+ __s32 *quirks = &td->mtclass.quirks;
+ *quirks |= MT_QUIRK_ALWAYS_VALID;
+ *quirks |= MT_QUIRK_IGNORE_DUPLICATES;
+ *quirks |= MT_QUIRK_HOVERING;
+ *quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
+ *quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
+ *quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
+ }
+ break;
}
}
@@ -297,6 +316,7 @@ static void set_abs(struct input_dev *input, unsigned int code,
int fmax = field->logical_maximum;
int fuzz = snratio ? (fmax - fmin) / snratio : 0;
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+ input_abs_set_res(input, code, hidinput_calc_abs_res(field, code));
}
static void mt_store_field(struct hid_usage *usage, struct mt_device *td,
@@ -317,6 +337,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
int code;
+ struct hid_usage *prev_usage = NULL;
/* Only map fields from TouchScreen or TouchPad collections.
* We need to ignore fields that belong to other collections
@@ -339,23 +360,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->physical == HID_DG_STYLUS)
return -1;
+ if (usage->usage_index)
+ prev_usage = &field->usage[usage->usage_index - 1];
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- hid_map_usage(hi, usage, bit, max,
+ if (prev_usage && (prev_usage->hid == usage->hid)) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOOL_X);
+ set_abs(hi->input, ABS_MT_TOOL_X, field,
+ cls->sn_move);
+ } else {
+ hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
- set_abs(hi->input, ABS_MT_POSITION_X, field,
- cls->sn_move);
+ set_abs(hi->input, ABS_MT_POSITION_X, field,
+ cls->sn_move);
+ }
+
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
case HID_GD_Y:
- hid_map_usage(hi, usage, bit, max,
+ if (prev_usage && (prev_usage->hid == usage->hid)) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOOL_Y);
+ set_abs(hi->input, ABS_MT_TOOL_Y, field,
+ cls->sn_move);
+ } else {
+ hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
- set_abs(hi->input, ABS_MT_POSITION_Y, field,
- cls->sn_move);
+ set_abs(hi->input, ABS_MT_POSITION_Y, field,
+ cls->sn_move);
+ }
+
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
@@ -365,6 +405,12 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_DIGITIZER:
switch (usage->hid) {
case HID_DG_INRANGE:
+ if (cls->quirks & MT_QUIRK_HOVERING) {
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_DISTANCE);
+ input_set_abs_params(hi->input,
+ ABS_MT_DISTANCE, 0, 1, 0, 0);
+ }
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
return 1;
@@ -477,18 +523,26 @@ static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
{
- if (td->curvalid) {
+ if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
+ struct input_mt *mt = input->mt;
if (slotnum < 0 || slotnum >= td->maxcontacts)
return;
+ if ((td->mtclass.quirks & MT_QUIRK_IGNORE_DUPLICATES) && mt) {
+ struct input_mt_slot *slot = &mt->slots[slotnum];
+ if (input_mt_is_active(slot) &&
+ input_mt_is_used(mt, slot))
+ return;
+ }
+
input_mt_slot(input, slotnum);
input_mt_report_slot_state(input, MT_TOOL_FINGER,
- s->touch_state);
- if (s->touch_state) {
- /* this finger is on the screen */
+ s->touch_state || s->inrange_state);
+ if (s->touch_state || s->inrange_state) {
+ /* this finger is in proximity of the sensor */
int wide = (s->w > s->h);
/* divided by two to match visual scale of touch */
int major = max(s->w, s->h) >> 1;
@@ -496,6 +550,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+ input_event(input, EV_ABS, ABS_MT_TOOL_X, s->cx);
+ input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
+ input_event(input, EV_ABS, ABS_MT_DISTANCE,
+ !s->touch_state);
input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
@@ -526,10 +584,10 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT) {
switch (usage->hid) {
case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_ALWAYS_VALID)
- td->curvalid = true;
- else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ if (quirks & MT_QUIRK_VALID_IS_INRANGE)
td->curvalid = value;
+ if (quirks & MT_QUIRK_HOVERING)
+ td->curdata.inrange_state = value;
break;
case HID_DG_TIPSWITCH:
if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
@@ -547,10 +605,16 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
td->curdata.p = value;
break;
case HID_GD_X:
- td->curdata.x = value;
+ if (usage->code == ABS_MT_TOOL_X)
+ td->curdata.cx = value;
+ else
+ td->curdata.x = value;
break;
case HID_GD_Y:
- td->curdata.y = value;
+ if (usage->code == ABS_MT_TOOL_Y)
+ td->curdata.cy = value;
+ else
+ td->curdata.y = value;
break;
case HID_DG_WIDTH:
td->curdata.w = value;
@@ -575,12 +639,15 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
- if (usage->hid == td->last_slot_field)
- mt_complete_slot(td, field->hidinput->input);
+ if (usage->usage_index + 1 == field->report_count) {
+ /* we only take into account the last report. */
+ if (usage->hid == td->last_slot_field)
+ mt_complete_slot(td, field->hidinput->input);
- if (field->index == td->last_field_index
- && td->num_received >= td->num_expected)
- mt_sync_frame(td, field->hidinput->input);
+ if (field->index == td->last_field_index
+ && td->num_received >= td->num_expected)
+ mt_sync_frame(td, field->hidinput->input);
+ }
}
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 13ca9191b630..a79e95bb9fb6 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -116,7 +116,7 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
rdev->priv = data;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
rdev->input_name = data->hdev->name;
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 5669916c2943..1219998a02d6 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -167,7 +167,7 @@ static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj
loff_t off, size_t count) \
{ \
return isku_sysfs_write(fp, kobj, buf, off, count, \
- sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+ ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}
#define ISKU_SYSFS_R(thingy, THINGY) \
@@ -176,32 +176,32 @@ static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj,
loff_t off, size_t count) \
{ \
return isku_sysfs_read(fp, kobj, buf, off, count, \
- sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+ ISKU_SIZE_ ## THINGY, ISKU_COMMAND_ ## THINGY); \
}
#define ISKU_SYSFS_RW(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY)
-#define ISKU_BIN_ATTR_RW(thingy) \
+#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0660 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
.write = isku_sysfs_write_ ## thingy \
}
-#define ISKU_BIN_ATTR_R(thingy) \
+#define ISKU_BIN_ATTR_R(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0440 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
}
-#define ISKU_BIN_ATTR_W(thingy) \
+#define ISKU_BIN_ATTR_W(thingy, THINGY) \
{ \
.attr = { .name = #thingy, .mode = 0220 }, \
- .size = sizeof(struct isku_ ## thingy), \
+ .size = ISKU_SIZE_ ## THINGY, \
.write = isku_sysfs_write_ ## thingy \
}
@@ -218,21 +218,23 @@ ISKU_SYSFS_RW(last_set, LAST_SET)
ISKU_SYSFS_W(talk, TALK)
ISKU_SYSFS_R(info, INFO)
ISKU_SYSFS_W(control, CONTROL)
+ISKU_SYSFS_W(reset, RESET)
static struct bin_attribute isku_bin_attributes[] = {
- ISKU_BIN_ATTR_RW(macro),
- ISKU_BIN_ATTR_RW(keys_function),
- ISKU_BIN_ATTR_RW(keys_easyzone),
- ISKU_BIN_ATTR_RW(keys_media),
- ISKU_BIN_ATTR_RW(keys_thumbster),
- ISKU_BIN_ATTR_RW(keys_macro),
- ISKU_BIN_ATTR_RW(keys_capslock),
- ISKU_BIN_ATTR_RW(light),
- ISKU_BIN_ATTR_RW(key_mask),
- ISKU_BIN_ATTR_RW(last_set),
- ISKU_BIN_ATTR_W(talk),
- ISKU_BIN_ATTR_R(info),
- ISKU_BIN_ATTR_W(control),
+ ISKU_BIN_ATTR_RW(macro, MACRO),
+ ISKU_BIN_ATTR_RW(keys_function, KEYS_FUNCTION),
+ ISKU_BIN_ATTR_RW(keys_easyzone, KEYS_EASYZONE),
+ ISKU_BIN_ATTR_RW(keys_media, KEYS_MEDIA),
+ ISKU_BIN_ATTR_RW(keys_thumbster, KEYS_THUMBSTER),
+ ISKU_BIN_ATTR_RW(keys_macro, KEYS_MACRO),
+ ISKU_BIN_ATTR_RW(keys_capslock, KEYS_CAPSLOCK),
+ ISKU_BIN_ATTR_RW(light, LIGHT),
+ ISKU_BIN_ATTR_RW(key_mask, KEY_MASK),
+ ISKU_BIN_ATTR_RW(last_set, LAST_SET),
+ ISKU_BIN_ATTR_W(talk, TALK),
+ ISKU_BIN_ATTR_R(info, INFO),
+ ISKU_BIN_ATTR_W(control, CONTROL),
+ ISKU_BIN_ATTR_W(reset, RESET),
__ATTR_NULL
};
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
index 605b3ce21638..cf6896c83867 100644
--- a/drivers/hid/hid-roccat-isku.h
+++ b/drivers/hid/hid-roccat-isku.h
@@ -15,76 +15,33 @@
#include <linux/types.h>
enum {
+ ISKU_SIZE_CONTROL = 0x03,
+ ISKU_SIZE_INFO = 0x06,
+ ISKU_SIZE_KEY_MASK = 0x06,
+ ISKU_SIZE_KEYS_FUNCTION = 0x29,
+ ISKU_SIZE_KEYS_EASYZONE = 0x41,
+ ISKU_SIZE_KEYS_MEDIA = 0x1d,
+ ISKU_SIZE_KEYS_THUMBSTER = 0x17,
+ ISKU_SIZE_KEYS_MACRO = 0x23,
+ ISKU_SIZE_KEYS_CAPSLOCK = 0x06,
+ ISKU_SIZE_LAST_SET = 0x14,
+ ISKU_SIZE_LIGHT = 0x0a,
+ ISKU_SIZE_MACRO = 0x823,
+ ISKU_SIZE_RESET = 0x03,
+ ISKU_SIZE_TALK = 0x10,
+};
+
+enum {
ISKU_PROFILE_NUM = 5,
ISKU_USB_INTERFACE_PROTOCOL = 0,
};
-struct isku_control {
- uint8_t command; /* ISKU_COMMAND_CONTROL */
- uint8_t value;
- uint8_t request;
-} __packed;
-
struct isku_actual_profile {
uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
uint8_t actual_profile;
} __packed;
-struct isku_key_mask {
- uint8_t command; /* ISKU_COMMAND_KEY_MASK */
- uint8_t size; /* 6 */
- uint8_t profile_number; /* 0-4 */
- uint8_t mask;
- uint16_t checksum;
-} __packed;
-
-struct isku_keys_function {
- uint8_t data[0x29];
-} __packed;
-
-struct isku_keys_easyzone {
- uint8_t data[0x41];
-} __packed;
-
-struct isku_keys_media {
- uint8_t data[0x1d];
-} __packed;
-
-struct isku_keys_thumbster {
- uint8_t data[0x17];
-} __packed;
-
-struct isku_keys_macro {
- uint8_t data[0x23];
-} __packed;
-
-struct isku_keys_capslock {
- uint8_t data[0x6];
-} __packed;
-
-struct isku_macro {
- uint8_t data[0x823];
-} __packed;
-
-struct isku_light {
- uint8_t data[0xa];
-} __packed;
-
-struct isku_info {
- uint8_t data[2];
- uint8_t firmware_version;
- uint8_t unknown[3];
-} __packed;
-
-struct isku_talk {
- uint8_t data[0x10];
-} __packed;
-
-struct isku_last_set {
- uint8_t data[0x14];
-} __packed;
-
enum isku_commands {
ISKU_COMMAND_CONTROL = 0x4,
ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
@@ -97,6 +54,7 @@ enum isku_commands {
ISKU_COMMAND_MACRO = 0xe,
ISKU_COMMAND_INFO = 0xf,
ISKU_COMMAND_LIGHT = 0x10,
+ ISKU_COMMAND_RESET = 0x11,
ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
ISKU_COMMAND_LAST_SET = 0x14,
ISKU_COMMAND_15 = 0x15,
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index f5602fec4865..6a48fa3c7da9 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -14,6 +14,7 @@
/*
* Roccat Kone[+] is an updated/improved version of the Kone with more memory
* and functionality and without the non-standard behaviours the Kone had.
+ * The KoneXTD has the same capabilities but an updated sensor.
*/
#include <linux/device.h>
@@ -55,56 +56,6 @@ static int koneplus_send_control(struct usb_device *usb_dev, uint value,
&control, sizeof(struct roccat_common2_control));
}
-static int koneplus_get_info(struct usb_device *usb_dev,
- struct koneplus_info *buf)
-{
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
- buf, sizeof(struct koneplus_info));
-}
-
-static int koneplus_get_profile_settings(struct usb_device *usb_dev,
- struct koneplus_profile_settings *buf, uint number)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number,
- KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
- if (retval)
- return retval;
-
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct koneplus_profile_settings));
-}
-
-static int koneplus_set_profile_settings(struct usb_device *usb_dev,
- struct koneplus_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- KONEPLUS_COMMAND_PROFILE_SETTINGS,
- settings, sizeof(struct koneplus_profile_settings));
-}
-
-static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
- struct koneplus_profile_buttons *buf, int number)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number,
- KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
- if (retval)
- return retval;
-
- return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct koneplus_profile_buttons));
-}
-
-static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
- struct koneplus_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- KONEPLUS_COMMAND_PROFILE_BUTTONS,
- buttons, sizeof(struct koneplus_profile_buttons));
-}
/* retval is 0-4 on success, < 0 on error */
static int koneplus_get_actual_profile(struct usb_device *usb_dev)
@@ -113,7 +64,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
int retval;
retval = roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
- &buf, sizeof(struct koneplus_actual_profile));
+ &buf, KONEPLUS_SIZE_ACTUAL_PROFILE);
return retval ? retval : buf.actual_profile;
}
@@ -124,12 +75,12 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
struct koneplus_actual_profile buf;
buf.command = KONEPLUS_COMMAND_ACTUAL_PROFILE;
- buf.size = sizeof(struct koneplus_actual_profile);
+ buf.size = KONEPLUS_SIZE_ACTUAL_PROFILE;
buf.actual_profile = new_profile;
return roccat_common2_send_with_status(usb_dev,
KONEPLUS_COMMAND_ACTUAL_PROFILE,
- &buf, sizeof(struct koneplus_actual_profile));
+ &buf, KONEPLUS_SIZE_ACTUAL_PROFILE);
}
static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
@@ -182,111 +133,77 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return real_size;
}
-static ssize_t koneplus_sysfs_write_talk(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_talk), KONEPLUS_COMMAND_TALK);
+#define KONEPLUS_SYSFS_W(thingy, THINGY) \
+static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return koneplus_sysfs_write(fp, kobj, buf, off, count, \
+ KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
}
-static ssize_t koneplus_sysfs_write_macro(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_macro), KONEPLUS_COMMAND_MACRO);
+#define KONEPLUS_SYSFS_R(thingy, THINGY) \
+static ssize_t koneplus_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return koneplus_sysfs_read(fp, kobj, buf, off, count, \
+ KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
}
-static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
-}
+#define KONEPLUS_SYSFS_RW(thingy, THINGY) \
+KONEPLUS_SYSFS_W(thingy, THINGY) \
+KONEPLUS_SYSFS_R(thingy, THINGY)
-static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
+#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .read = koneplus_sysfs_read_ ## thingy, \
+ .write = koneplus_sysfs_write_ ## thingy \
}
-static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu), KONEPLUS_COMMAND_TCU);
+#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .read = koneplus_sysfs_read_ ## thingy, \
}
-static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu_image), KONEPLUS_COMMAND_TCU);
+#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = KONEPLUS_SIZE_ ## THINGY, \
+ .write = koneplus_sysfs_write_ ## thingy \
}
-static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct koneplus_profile_settings))
- return 0;
+KONEPLUS_SYSFS_W(control, CONTROL)
+KONEPLUS_SYSFS_RW(info, INFO)
+KONEPLUS_SYSFS_W(talk, TALK)
+KONEPLUS_SYSFS_W(macro, MACRO)
+KONEPLUS_SYSFS_RW(sensor, SENSOR)
+KONEPLUS_SYSFS_RW(tcu, TCU)
+KONEPLUS_SYSFS_R(tcu_image, TCU_IMAGE)
+KONEPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+KONEPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
- if (off + count > sizeof(struct koneplus_profile_settings))
- count = sizeof(struct koneplus_profile_settings) - off;
-
- mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&koneplus->koneplus_lock);
-
- return count;
-}
-
-static ssize_t koneplus_sysfs_write_profile_settings(struct file *fp,
+static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct koneplus_profile_settings *profile_settings;
-
- if (off != 0 || count != sizeof(struct koneplus_profile_settings))
- return -EINVAL;
-
- profile_number = ((struct koneplus_profile_settings const *)buf)->number;
- profile_settings = &koneplus->profile_settings[profile_number];
-
- mutex_lock(&koneplus->koneplus_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct koneplus_profile_settings));
- if (difference) {
- retval = koneplus_set_profile_settings(usb_dev,
- (struct koneplus_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct koneplus_profile_settings));
- }
- mutex_unlock(&koneplus->koneplus_lock);
+ ssize_t retval;
+ retval = koneplus_send_control(usb_dev, *(uint *)(attr->private),
+ KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return sizeof(struct koneplus_profile_settings);
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ KONEPLUS_SIZE_PROFILE_SETTINGS,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS);
}
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
@@ -295,57 +212,17 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct koneplus_profile_buttons))
- return 0;
-
- if (off + count > sizeof(struct koneplus_profile_buttons))
- count = sizeof(struct koneplus_profile_buttons) - off;
-
- mutex_lock(&koneplus->koneplus_lock);
- memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&koneplus->koneplus_lock);
-
- return count;
-}
-
-static ssize_t koneplus_sysfs_write_profile_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- uint profile_number;
- struct koneplus_profile_buttons *profile_buttons;
-
- if (off != 0 || count != sizeof(struct koneplus_profile_buttons))
- return -EINVAL;
-
- profile_number = ((struct koneplus_profile_buttons const *)buf)->number;
- profile_buttons = &koneplus->profile_buttons[profile_number];
-
- mutex_lock(&koneplus->koneplus_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct koneplus_profile_buttons));
- if (difference) {
- retval = koneplus_set_profile_buttons(usb_dev,
- (struct koneplus_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct koneplus_profile_buttons));
- }
- mutex_unlock(&koneplus->koneplus_lock);
+ ssize_t retval;
+ retval = koneplus_send_control(usb_dev, *(uint *)(attr->private),
+ KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return sizeof(struct koneplus_profile_buttons);
+ return koneplus_sysfs_read(fp, kobj, buf, off, count,
+ KONEPLUS_SIZE_PROFILE_BUTTONS,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS);
}
static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
@@ -401,9 +278,20 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct koneplus_device *koneplus =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->info.firmware_version);
+ struct koneplus_device *koneplus;
+ struct usb_device *usb_dev;
+ struct koneplus_info info;
+
+ dev = dev->parent->parent;
+ koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&koneplus->koneplus_lock);
+ roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
+ &info, KONEPLUS_SIZE_INFO);
+ mutex_unlock(&koneplus->koneplus_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute koneplus_attributes[] = {
@@ -419,132 +307,85 @@ static struct device_attribute koneplus_attributes[] = {
};
static struct bin_attribute koneplus_bin_attributes[] = {
- {
- .attr = { .name = "sensor", .mode = 0660 },
- .size = sizeof(struct koneplus_sensor),
- .read = koneplus_sysfs_read_sensor,
- .write = koneplus_sysfs_write_sensor
- },
- {
- .attr = { .name = "tcu", .mode = 0220 },
- .size = sizeof(struct koneplus_tcu),
- .write = koneplus_sysfs_write_tcu
- },
- {
- .attr = { .name = "tcu_image", .mode = 0440 },
- .size = sizeof(struct koneplus_tcu_image),
- .read = koneplus_sysfs_read_tcu_image
- },
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct koneplus_profile_settings),
- .write = koneplus_sysfs_write_profile_settings
- },
+ KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
+ KONEPLUS_BIN_ATTRIBUTE_RW(info, INFO),
+ KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK),
+ KONEPLUS_BIN_ATTRIBUTE_W(macro, MACRO),
+ KONEPLUS_BIN_ATTRIBUTE_RW(sensor, SENSOR),
+ KONEPLUS_BIN_ATTRIBUTE_RW(tcu, TCU),
+ KONEPLUS_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE),
+ KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_settings),
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
.read = koneplus_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct koneplus_profile_buttons),
- .write = koneplus_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct koneplus_profile_buttons),
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
.read = koneplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
- {
- .attr = { .name = "macro", .mode = 0220 },
- .size = sizeof(struct koneplus_macro),
- .write = koneplus_sysfs_write_macro
- },
- {
- .attr = { .name = "talk", .mode = 0220 },
- .size = sizeof(struct koneplus_talk),
- .write = koneplus_sysfs_write_talk
- },
__ATTR_NULL
};
static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
struct koneplus_device *koneplus)
{
- int retval, i;
- static uint wait = 200;
+ int retval;
mutex_init(&koneplus->koneplus_lock);
- retval = koneplus_get_info(usb_dev, &koneplus->info);
- if (retval)
- return retval;
-
- for (i = 0; i < 5; ++i) {
- msleep(wait);
- retval = koneplus_get_profile_settings(usb_dev,
- &koneplus->profile_settings[i], i);
- if (retval)
- return retval;
-
- msleep(wait);
- retval = koneplus_get_profile_buttons(usb_dev,
- &koneplus->profile_buttons[i], i);
- if (retval)
- return retval;
- }
-
- msleep(wait);
retval = koneplus_get_actual_profile(usb_dev);
if (retval < 0)
return retval;
@@ -709,6 +550,7 @@ static int koneplus_raw_event(struct hid_device *hdev,
static const struct hid_device_id koneplus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
{ }
};
@@ -749,5 +591,5 @@ module_init(koneplus_init);
module_exit(koneplus_exit);
MODULE_AUTHOR("Stefan Achatz");
-MODULE_DESCRIPTION("USB Roccat Kone[+] driver");
+MODULE_DESCRIPTION("USB Roccat Kone[+]/XTD driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index 7074b2a4b94b..af7f57e8cf3b 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -14,11 +14,19 @@
#include <linux/types.h>
-struct koneplus_talk {
- uint8_t command; /* KONEPLUS_COMMAND_TALK */
- uint8_t size; /* always 0x10 */
- uint8_t data[14];
-} __packed;
+enum {
+ KONEPLUS_SIZE_ACTUAL_PROFILE = 0x03,
+ KONEPLUS_SIZE_CONTROL = 0x03,
+ KONEPLUS_SIZE_FIRMWARE_WRITE = 0x0402,
+ KONEPLUS_SIZE_INFO = 0x06,
+ KONEPLUS_SIZE_MACRO = 0x0822,
+ KONEPLUS_SIZE_PROFILE_SETTINGS = 0x2b,
+ KONEPLUS_SIZE_PROFILE_BUTTONS = 0x4d,
+ KONEPLUS_SIZE_SENSOR = 0x06,
+ KONEPLUS_SIZE_TALK = 0x10,
+ KONEPLUS_SIZE_TCU = 0x04,
+ KONEPLUS_SIZE_TCU_IMAGE = 0x0404,
+};
enum koneplus_control_requests {
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
@@ -31,45 +39,6 @@ struct koneplus_actual_profile {
uint8_t actual_profile; /* Range 0-4! */
} __attribute__ ((__packed__));
-struct koneplus_profile_settings {
- uint8_t command; /* KONEPLUS_COMMAND_PROFILE_SETTINGS */
- uint8_t size; /* always 43 */
- uint8_t number; /* range 0-4 */
- uint8_t advanced_sensitivity;
- uint8_t sensitivity_x;
- uint8_t sensitivity_y;
- uint8_t cpi_levels_enabled;
- uint8_t cpi_levels_x[5];
- uint8_t cpi_startup_level; /* range 0-4 */
- uint8_t cpi_levels_y[5]; /* range 1-60 means 100-6000 cpi */
- uint8_t unknown1;
- uint8_t polling_rate;
- uint8_t lights_enabled;
- uint8_t light_effect_mode;
- uint8_t color_flow_effect;
- uint8_t light_effect_type;
- uint8_t light_effect_speed;
- uint8_t lights[16];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
-struct koneplus_profile_buttons {
- uint8_t command; /* KONEPLUS_COMMAND_PROFILE_BUTTONS */
- uint8_t size; /* always 77 */
- uint8_t number; /* range 0-4 */
- uint8_t data[72];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
-struct koneplus_macro {
- uint8_t command; /* KONEPLUS_COMMAND_MACRO */
- uint16_t size; /* always 0x822 little endian */
- uint8_t profile; /* range 0-4 */
- uint8_t button; /* range 0-23 */
- uint8_t data[2075];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
struct koneplus_info {
uint8_t command; /* KONEPLUS_COMMAND_INFO */
uint8_t size; /* always 6 */
@@ -77,51 +46,15 @@ struct koneplus_info {
uint8_t unknown[3];
} __attribute__ ((__packed__));
-struct koneplus_e {
- uint8_t command; /* KONEPLUS_COMMAND_E */
- uint8_t size; /* always 3 */
- uint8_t unknown; /* TODO 1; 0 before firmware update */
-} __attribute__ ((__packed__));
-
-struct koneplus_sensor {
- uint8_t command; /* KONEPLUS_COMMAND_SENSOR */
- uint8_t size; /* always 6 */
- uint8_t data[4];
-} __attribute__ ((__packed__));
-
-struct koneplus_firmware_write {
- uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE */
- uint8_t unknown[1025];
-} __attribute__ ((__packed__));
-
-struct koneplus_firmware_write_control {
- uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL */
- /*
- * value is 1 on success
- * 3 means "not finished yet"
- */
- uint8_t value;
- uint8_t unknown; /* always 0x75 */
-} __attribute__ ((__packed__));
-
-struct koneplus_tcu {
- uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
- uint8_t data[2];
-} __attribute__ ((__packed__));
-
-struct koneplus_tcu_image {
- uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
- uint8_t data[1024];
- uint16_t checksum;
-} __attribute__ ((__packed__));
-
enum koneplus_commands {
KONEPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KONEPLUS_COMMAND_CONTROL = 0x4,
KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KONEPLUS_COMMAND_MACRO = 0x8,
KONEPLUS_COMMAND_INFO = 0x9,
KONEPLUS_COMMAND_TCU = 0xc,
+ KONEPLUS_COMMAND_TCU_IMAGE = 0xc,
KONEPLUS_COMMAND_E = 0xe,
KONEPLUS_COMMAND_SENSOR = 0xf,
KONEPLUS_COMMAND_TALK = 0x10,
@@ -187,10 +120,6 @@ struct koneplus_device {
int chrdev_minor;
struct mutex koneplus_lock;
-
- struct koneplus_info info;
- struct koneplus_profile_settings profile_settings[5];
- struct koneplus_profile_buttons profile_buttons[5];
};
#endif
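
For readers following the koneplus refactor above, a rough sketch of what one pair of macro invocations expands to after preprocessing. This is illustrative only and is not part of the patch; it is reconstructed from the KONEPLUS_SYSFS_RW and KONEPLUS_BIN_ATTRIBUTE_RW macro bodies in the hunks above (the KOVAPLUS_* macros further down follow the same pattern).

/* Approximate expansion of KONEPLUS_SYSFS_RW(sensor, SENSOR): */
static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_write(fp, kobj, buf, off, count,
			KONEPLUS_SIZE_SENSOR, KONEPLUS_COMMAND_SENSOR);
}

static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_read(fp, kobj, buf, off, count,
			KONEPLUS_SIZE_SENSOR, KONEPLUS_COMMAND_SENSOR);
}

/* Approximate expansion of KONEPLUS_BIN_ATTRIBUTE_RW(sensor, SENSOR),
 * as one entry of the koneplus_bin_attributes[] initializer: */
{
	.attr = { .name = "sensor", .mode = 0660 },
	.size = KONEPLUS_SIZE_SENSOR,
	.read = koneplus_sysfs_read_sensor,
	.write = koneplus_sysfs_write_sensor
},

The net effect is that the per-command boilerplate previously written out by hand is generated from a short list of (name, SIZE, COMMAND) pairs.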
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index ca6527ac655d..b8b37789b864 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -70,13 +70,6 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
return kovaplus_send_control(usb_dev, number, request);
}
-static int kovaplus_get_info(struct usb_device *usb_dev,
- struct kovaplus_info *buf)
-{
- return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
- buf, sizeof(struct kovaplus_info));
-}
-
static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings *buf, uint number)
{
@@ -88,15 +81,7 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
return retval;
return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct kovaplus_profile_settings));
-}
-
-static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
- struct kovaplus_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- KOVAPLUS_COMMAND_PROFILE_SETTINGS,
- settings, sizeof(struct kovaplus_profile_settings));
+ buf, KOVAPLUS_SIZE_PROFILE_SETTINGS);
}
static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
@@ -110,15 +95,7 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
return retval;
return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct kovaplus_profile_buttons));
-}
-
-static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
- struct kovaplus_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- KOVAPLUS_COMMAND_PROFILE_BUTTONS,
- buttons, sizeof(struct kovaplus_profile_buttons));
+ buf, KOVAPLUS_SIZE_PROFILE_BUTTONS);
}
/* retval is 0-4 on success, < 0 on error */
@@ -147,122 +124,141 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
&buf, sizeof(struct kovaplus_actual_profile));
}
-static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t kovaplus_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct kovaplus_profile_settings))
+ if (off >= real_size)
return 0;
- if (off + count > sizeof(struct kovaplus_profile_settings))
- count = sizeof(struct kovaplus_profile_settings) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&kovaplus->kovaplus_lock);
- memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&kovaplus->kovaplus_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_index;
- struct kovaplus_profile_settings *profile_settings;
+ int retval;
- if (off != 0 || count != sizeof(struct kovaplus_profile_settings))
+ if (off != 0 || count != real_size)
return -EINVAL;
- profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index;
- profile_settings = &kovaplus->profile_settings[profile_index];
-
mutex_lock(&kovaplus->kovaplus_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct kovaplus_profile_settings));
- if (difference) {
- retval = kovaplus_set_profile_settings(usb_dev,
- (struct kovaplus_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct kovaplus_profile_settings));
- }
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ buf, real_size);
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
- return sizeof(struct kovaplus_profile_settings);
+ return real_size;
}
-static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+#define KOVAPLUS_SYSFS_W(thingy, THINGY) \
+static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return kovaplus_sysfs_write(fp, kobj, buf, off, count, \
+ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
+}
- if (off >= sizeof(struct kovaplus_profile_buttons))
- return 0;
+#define KOVAPLUS_SYSFS_R(thingy, THINGY) \
+static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count, \
+ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
+}
- if (off + count > sizeof(struct kovaplus_profile_buttons))
- count = sizeof(struct kovaplus_profile_buttons) - off;
+#define KOVAPLUS_SYSFS_RW(thingy, THINGY) \
+KOVAPLUS_SYSFS_W(thingy, THINGY) \
+KOVAPLUS_SYSFS_R(thingy, THINGY)
- mutex_lock(&kovaplus->kovaplus_lock);
- memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
- mutex_unlock(&kovaplus->kovaplus_lock);
+#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .read = kovaplus_sysfs_read_ ## thingy, \
+ .write = kovaplus_sysfs_write_ ## thingy \
+}
+
+#define KOVAPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .read = kovaplus_sysfs_read_ ## thingy, \
+}
- return count;
+#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = KOVAPLUS_SIZE_ ## THINGY, \
+ .write = kovaplus_sysfs_write_ ## thingy \
}
-static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp,
+KOVAPLUS_SYSFS_W(control, CONTROL)
+KOVAPLUS_SYSFS_RW(info, INFO)
+KOVAPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+KOVAPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
+
+static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- uint profile_index;
- struct kovaplus_profile_buttons *profile_buttons;
+ ssize_t retval;
- if (off != 0 || count != sizeof(struct kovaplus_profile_buttons))
- return -EINVAL;
+ retval = kovaplus_select_profile(usb_dev, *(uint *)(attr->private),
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+ if (retval)
+ return retval;
- profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index;
- profile_buttons = &kovaplus->profile_buttons[profile_index];
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count,
+ KOVAPLUS_SIZE_PROFILE_SETTINGS,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS);
+}
- mutex_lock(&kovaplus->kovaplus_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct kovaplus_profile_buttons));
- if (difference) {
- retval = kovaplus_set_profile_buttons(usb_dev,
- (struct kovaplus_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct kovaplus_profile_buttons));
- }
- mutex_unlock(&kovaplus->kovaplus_lock);
+static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ ssize_t retval;
+ retval = kovaplus_select_profile(usb_dev, *(uint *)(attr->private),
+ KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return sizeof(struct kovaplus_profile_buttons);
+ return kovaplus_sysfs_read(fp, kobj, buf, off, count,
+ KOVAPLUS_SIZE_PROFILE_BUTTONS,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS);
}
static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
@@ -342,9 +338,20 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct kovaplus_device *kovaplus =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version);
+ struct kovaplus_device *kovaplus;
+ struct usb_device *usb_dev;
+ struct kovaplus_info info;
+
+ dev = dev->parent->parent;
+ kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&kovaplus->kovaplus_lock);
+ roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
+ &info, KOVAPLUS_SIZE_INFO);
+ mutex_unlock(&kovaplus->kovaplus_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute kovaplus_attributes[] = {
@@ -363,73 +370,67 @@ static struct device_attribute kovaplus_attributes[] = {
};
static struct bin_attribute kovaplus_bin_attributes[] = {
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct kovaplus_profile_settings),
- .write = kovaplus_sysfs_write_profile_settings
- },
+ KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_settings),
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
.read = kovaplus_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct kovaplus_profile_buttons),
- .write = kovaplus_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct kovaplus_profile_buttons),
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
.read = kovaplus_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
@@ -444,10 +445,6 @@ static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
mutex_init(&kovaplus->kovaplus_lock);
- retval = kovaplus_get_info(usb_dev, &kovaplus->info);
- if (retval)
- return retval;
-
for (i = 0; i < 5; ++i) {
msleep(wait);
retval = kovaplus_get_profile_settings(usb_dev,
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index f82daa1cdcb9..fbb7a16a7e54 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -14,6 +14,13 @@
#include <linux/types.h>
+enum {
+ KOVAPLUS_SIZE_CONTROL = 0x03,
+ KOVAPLUS_SIZE_INFO = 0x06,
+ KOVAPLUS_SIZE_PROFILE_SETTINGS = 0x10,
+ KOVAPLUS_SIZE_PROFILE_BUTTONS = 0x17,
+};
+
enum kovaplus_control_requests {
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
@@ -53,15 +60,9 @@ struct kovaplus_info {
uint8_t unknown[3];
} __packed;
-/* writes 1 on plugin */
-struct kovaplus_a {
- uint8_t command; /* KOVAPLUS_COMMAND_A */
- uint8_t size; /* 3 */
- uint8_t unknown;
-} __packed;
-
enum kovaplus_commands {
KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
+ KOVAPLUS_COMMAND_CONTROL = 0x4,
KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KOVAPLUS_COMMAND_INFO = 0x9,
@@ -125,7 +126,6 @@ struct kovaplus_device {
int roccat_claimed;
int chrdev_minor;
struct mutex kovaplus_lock;
- struct kovaplus_info info;
struct kovaplus_profile_settings profile_settings[5];
struct kovaplus_profile_buttons profile_buttons[5];
};
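
One behavioural consequence of routing kovaplus_sysfs_read()/kovaplus_sysfs_write() straight to the hardware is that the binary attributes no longer accept partial transfers: any access with off != 0 or count != real_size returns -EINVAL. A minimal userspace sketch follows; the sysfs path is a hypothetical example (the real location depends on where the roccat device is bound), and it assumes the driver signals success by returning the full size.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[0x10];	/* KOVAPLUS_SIZE_PROFILE_SETTINGS */
	ssize_t n;
	/* hypothetical path; look the real one up under /sys */
	int fd = open("/sys/devices/.../kovaplus/profile1_settings", O_RDONLY);

	if (fd < 0)
		return 1;

	/* one full-sized read at offset 0; partial reads get -EINVAL */
	n = read(fd, buf, sizeof(buf));
	printf("read %zd of %zu bytes\n", n, sizeof(buf));

	close(fd);
	return 0;
}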
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
new file mode 100644
index 000000000000..5084fb4b7e91
--- /dev/null
+++ b/drivers/hid/hid-roccat-lua.c
@@ -0,0 +1,227 @@
+/*
+ * Roccat Lua driver for Linux
+ *
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Lua is a gamer mouse whose cpi, button and light settings can be
+ * configured.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-lua.h"
+
+static ssize_t lua_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&lua->lua_lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&lua->lua_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&lua->lua_lock);
+ retval = roccat_common2_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&lua->lua_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define LUA_SYSFS_W(thingy, THINGY) \
+static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ return lua_sysfs_write(fp, kobj, buf, off, count, \
+ LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
+}
+
+#define LUA_SYSFS_R(thingy, THINGY) \
+static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
+{ \
+ return lua_sysfs_read(fp, kobj, buf, off, count, \
+ LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
+}
+
+#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+LUA_SYSFS_W(thingy, THINGY) \
+LUA_SYSFS_R(thingy, THINGY) \
+static struct bin_attribute lua_ ## thingy ## _attr = { \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = LUA_SIZE_ ## THINGY, \
+ .read = lua_sysfs_read_ ## thingy, \
+ .write = lua_sysfs_write_ ## thingy \
+};
+
+LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
+
+static int lua_create_sysfs_attributes(struct usb_interface *intf)
+{
+ return sysfs_create_bin_file(&intf->dev.kobj, &lua_control_attr);
+}
+
+static void lua_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &lua_control_attr);
+}
+
+static int lua_init_lua_device_struct(struct usb_device *usb_dev,
+ struct lua_device *lua)
+{
+ mutex_init(&lua->lua_lock);
+
+ return 0;
+}
+
+static int lua_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct lua_device *lua;
+ int retval;
+
+ lua = kzalloc(sizeof(*lua), GFP_KERNEL);
+ if (!lua) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, lua);
+
+ retval = lua_init_lua_device_struct(usb_dev, lua);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct lua_device\n");
+ goto exit;
+ }
+
+ retval = lua_create_sysfs_attributes(intf);
+ if (retval) {
+ hid_err(hdev, "cannot create sysfs files\n");
+ goto exit;
+ }
+
+ return 0;
+exit:
+ kfree(lua);
+ return retval;
+}
+
+static void lua_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct lua_device *lua;
+
+ lua_remove_sysfs_attributes(intf);
+
+ lua = hid_get_drvdata(hdev);
+ kfree(lua);
+}
+
+static int lua_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = lua_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void lua_remove(struct hid_device *hdev)
+{
+ lua_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id lua_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, lua_devices);
+
+static struct hid_driver lua_driver = {
+ .name = "lua",
+ .id_table = lua_devices,
+ .probe = lua_probe,
+ .remove = lua_remove
+};
+
+static int __init lua_init(void)
+{
+ return hid_register_driver(&lua_driver);
+}
+
+static void __exit lua_exit(void)
+{
+ hid_unregister_driver(&lua_driver);
+}
+
+module_init(lua_init);
+module_exit(lua_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Lua driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-lua.h b/drivers/hid/hid-roccat-lua.h
new file mode 100644
index 000000000000..547d77a375d1
--- /dev/null
+++ b/drivers/hid/hid-roccat-lua.h
@@ -0,0 +1,29 @@
+#ifndef __HID_ROCCAT_LUA_H
+#define __HID_ROCCAT_LUA_H
+
+/*
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ LUA_SIZE_CONTROL = 8,
+};
+
+enum lua_commands {
+ LUA_COMMAND_CONTROL = 3,
+};
+
+struct lua_device {
+ struct mutex lua_lock;
+};
+
+#endif
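
The new hid-roccat-lua driver uses the same macro idiom, but produces a single named struct bin_attribute and registers it with sysfs_create_bin_file() directly on the USB interface's kobject, which is why its read/write helpers use a plain container_of() instead of the ->parent->parent walk seen in the other roccat drivers. As an illustrative sketch (not patch content), LUA_BIN_ATTRIBUTE_RW(control, CONTROL) expands approximately to:

static ssize_t lua_sysfs_write_control(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	return lua_sysfs_write(fp, kobj, buf, off, count,
			LUA_SIZE_CONTROL, LUA_COMMAND_CONTROL);
}

static ssize_t lua_sysfs_read_control(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	return lua_sysfs_read(fp, kobj, buf, off, count,
			LUA_SIZE_CONTROL, LUA_COMMAND_CONTROL);
}

static struct bin_attribute lua_control_attr = {
	.attr = { .name = "control", .mode = 0660 },
	.size = LUA_SIZE_CONTROL,
	.read = lua_sysfs_read_control,
	.write = lua_sysfs_write_control
};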
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1317c177a3e2..d4f1e3bee590 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -66,48 +66,14 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
- buf, sizeof(struct pyra_profile_settings));
-}
-
-static int pyra_get_profile_buttons(struct usb_device *usb_dev,
- struct pyra_profile_buttons *buf, int number)
-{
- int retval;
- retval = pyra_send_control(usb_dev, number,
- PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
- if (retval)
- return retval;
- return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
- buf, sizeof(struct pyra_profile_buttons));
+ buf, PYRA_SIZE_PROFILE_SETTINGS);
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
- buf, sizeof(struct pyra_settings));
-}
-
-static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
-{
- return roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
- buf, sizeof(struct pyra_info));
-}
-
-static int pyra_set_profile_settings(struct usb_device *usb_dev,
- struct pyra_profile_settings const *settings)
-{
- return roccat_common2_send_with_status(usb_dev,
- PYRA_COMMAND_PROFILE_SETTINGS, settings,
- sizeof(struct pyra_profile_settings));
-}
-
-static int pyra_set_profile_buttons(struct usb_device *usb_dev,
- struct pyra_profile_buttons const *buttons)
-{
- return roccat_common2_send_with_status(usb_dev,
- PYRA_COMMAND_PROFILE_BUTTONS, buttons,
- sizeof(struct pyra_profile_buttons));
+ buf, PYRA_SIZE_SETTINGS);
}
static int pyra_set_settings(struct usb_device *usb_dev,
@@ -115,146 +81,144 @@ static int pyra_set_settings(struct usb_device *usb_dev,
{
return roccat_common2_send_with_status(usb_dev,
PYRA_COMMAND_SETTINGS, settings,
- sizeof(struct pyra_settings));
+ PYRA_SIZE_SETTINGS);
}
-static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pyra_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct pyra_profile_settings))
+ if (off >= real_size)
return 0;
- if (off + count > sizeof(struct pyra_profile_settings))
- count = sizeof(struct pyra_profile_settings) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_settings[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&pyra->pyra_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
- if (off >= sizeof(struct pyra_profile_buttons))
- return 0;
-
- if (off + count > sizeof(struct pyra_profile_buttons))
- count = sizeof(struct pyra_profile_buttons) - off;
+ if (off != 0 || count != real_size)
+ return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->profile_buttons[*(uint *)(attr->private)]) + off,
- count);
+ retval = roccat_common2_send_with_status(usb_dev, command, (void *)buf, real_size);
mutex_unlock(&pyra->pyra_lock);
- return count;
+ if (retval)
+ return retval;
+
+ return real_size;
}
-static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *dev =
- container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
- struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct pyra_profile_settings *profile_settings;
+#define PYRA_SYSFS_W(thingy, THINGY) \
+static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return pyra_sysfs_write(fp, kobj, buf, off, count, \
+ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
+}
- if (off != 0 || count != sizeof(struct pyra_profile_settings))
- return -EINVAL;
+#define PYRA_SYSFS_R(thingy, THINGY) \
+static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return pyra_sysfs_read(fp, kobj, buf, off, count, \
+ PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
+}
- profile_number = ((struct pyra_profile_settings const *)buf)->number;
- profile_settings = &pyra->profile_settings[profile_number];
+#define PYRA_SYSFS_RW(thingy, THINGY) \
+PYRA_SYSFS_W(thingy, THINGY) \
+PYRA_SYSFS_R(thingy, THINGY)
- mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, profile_settings,
- sizeof(struct pyra_profile_settings));
- if (difference) {
- retval = pyra_set_profile_settings(usb_dev,
- (struct pyra_profile_settings const *)buf);
- if (!retval)
- memcpy(profile_settings, buf,
- sizeof(struct pyra_profile_settings));
- }
- mutex_unlock(&pyra->pyra_lock);
+#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .read = pyra_sysfs_read_ ## thingy, \
+ .write = pyra_sysfs_write_ ## thingy \
+}
- if (retval)
- return retval;
+#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .read = pyra_sysfs_read_ ## thingy, \
+}
- return sizeof(struct pyra_profile_settings);
+#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = PYRA_SIZE_ ## THINGY, \
+ .write = pyra_sysfs_write_ ## thingy \
}
-static ssize_t pyra_sysfs_write_profile_buttons(struct file *fp,
+PYRA_SYSFS_W(control, CONTROL)
+PYRA_SYSFS_RW(info, INFO)
+PYRA_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
+PYRA_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
+PYRA_SYSFS_R(settings, SETTINGS)
+
+static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0;
- int difference;
- int profile_number;
- struct pyra_profile_buttons *profile_buttons;
-
- if (off != 0 || count != sizeof(struct pyra_profile_buttons))
- return -EINVAL;
-
- profile_number = ((struct pyra_profile_buttons const *)buf)->number;
- profile_buttons = &pyra->profile_buttons[profile_number];
-
- mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, profile_buttons,
- sizeof(struct pyra_profile_buttons));
- if (difference) {
- retval = pyra_set_profile_buttons(usb_dev,
- (struct pyra_profile_buttons const *)buf);
- if (!retval)
- memcpy(profile_buttons, buf,
- sizeof(struct pyra_profile_buttons));
- }
- mutex_unlock(&pyra->pyra_lock);
+ ssize_t retval;
+ retval = pyra_send_control(usb_dev, *(uint *)(attr->private),
+ PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return sizeof(struct pyra_profile_buttons);
+ return pyra_sysfs_read(fp, kobj, buf, off, count,
+ PYRA_SIZE_PROFILE_SETTINGS,
+ PYRA_COMMAND_PROFILE_SETTINGS);
}
-static ssize_t pyra_sysfs_read_settings(struct file *fp,
+static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev =
container_of(kobj, struct device, kobj)->parent->parent;
- struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
-
- if (off >= sizeof(struct pyra_settings))
- return 0;
-
- if (off + count > sizeof(struct pyra_settings))
- count = sizeof(struct pyra_settings) - off;
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ ssize_t retval;
- mutex_lock(&pyra->pyra_lock);
- memcpy(buf, ((char const *)&pyra->settings) + off, count);
- mutex_unlock(&pyra->pyra_lock);
+ retval = pyra_send_control(usb_dev, *(uint *)(attr->private),
+ PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
+ if (retval)
+ return retval;
- return count;
+ return pyra_sysfs_read(fp, kobj, buf, off, count,
+ PYRA_SIZE_PROFILE_BUTTONS,
+ PYRA_COMMAND_PROFILE_BUTTONS);
}
static ssize_t pyra_sysfs_write_settings(struct file *fp,
@@ -266,35 +230,32 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
- int difference;
struct pyra_roccat_report roccat_report;
+ struct pyra_settings const *settings;
- if (off != 0 || count != sizeof(struct pyra_settings))
+ if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
mutex_lock(&pyra->pyra_lock);
- difference = memcmp(buf, &pyra->settings, sizeof(struct pyra_settings));
- if (difference) {
- retval = pyra_set_settings(usb_dev,
- (struct pyra_settings const *)buf);
- if (retval) {
- mutex_unlock(&pyra->pyra_lock);
- return retval;
- }
-
- memcpy(&pyra->settings, buf,
- sizeof(struct pyra_settings));
- profile_activated(pyra, pyra->settings.startup_profile);
+ settings = (struct pyra_settings const *)buf;
- roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
- roccat_report.value = pyra->settings.startup_profile + 1;
- roccat_report.key = 0;
- roccat_report_event(pyra->chrdev_minor,
- (uint8_t const *)&roccat_report);
+ retval = pyra_set_settings(usb_dev, settings);
+ if (retval) {
+ mutex_unlock(&pyra->pyra_lock);
+ return retval;
}
+
+ profile_activated(pyra, settings->startup_profile);
+
+ roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
+ roccat_report.value = settings->startup_profile + 1;
+ roccat_report.key = 0;
+ roccat_report_event(pyra->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+
mutex_unlock(&pyra->pyra_lock);
- return sizeof(struct pyra_settings);
+ return PYRA_SIZE_SETTINGS;
}
@@ -311,23 +272,34 @@ static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
{
struct pyra_device *pyra =
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct pyra_settings settings;
+
+ mutex_lock(&pyra->pyra_lock);
+ roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
+ &settings, PYRA_SIZE_SETTINGS);
+ mutex_unlock(&pyra->pyra_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", settings.startup_profile);
}
static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct pyra_device *pyra =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
-}
+ struct pyra_device *pyra;
+ struct usb_device *usb_dev;
+ struct pyra_info info;
-static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pyra_device *pyra =
- hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
- return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
+ dev = dev->parent->parent;
+ pyra = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ mutex_lock(&pyra->pyra_lock);
+ roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
+ &info, PYRA_SIZE_INFO);
+ mutex_unlock(&pyra->pyra_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
static struct device_attribute pyra_attributes[] = {
@@ -336,105 +308,88 @@ static struct device_attribute pyra_attributes[] = {
__ATTR(firmware_version, 0440,
pyra_sysfs_show_firmware_version, NULL),
__ATTR(startup_profile, 0440,
- pyra_sysfs_show_startup_profile, NULL),
+ pyra_sysfs_show_actual_profile, NULL),
__ATTR_NULL
};
static struct bin_attribute pyra_bin_attributes[] = {
- {
- .attr = { .name = "profile_settings", .mode = 0220 },
- .size = sizeof(struct pyra_profile_settings),
- .write = pyra_sysfs_write_profile_settings
- },
+ PYRA_BIN_ATTRIBUTE_W(control, CONTROL),
+ PYRA_BIN_ATTRIBUTE_RW(info, INFO),
+ PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
+ PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
+ PYRA_BIN_ATTRIBUTE_RW(settings, SETTINGS),
{
.attr = { .name = "profile1_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_settings", .mode = 0440 },
- .size = sizeof(struct pyra_profile_settings),
+ .size = PYRA_SIZE_PROFILE_SETTINGS,
.read = pyra_sysfs_read_profilex_settings,
.private = &profile_numbers[4]
},
{
- .attr = { .name = "profile_buttons", .mode = 0220 },
- .size = sizeof(struct pyra_profile_buttons),
- .write = pyra_sysfs_write_profile_buttons
- },
- {
.attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[0]
},
{
.attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[1]
},
{
.attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[2]
},
{
.attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[3]
},
{
.attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = sizeof(struct pyra_profile_buttons),
+ .size = PYRA_SIZE_PROFILE_BUTTONS,
.read = pyra_sysfs_read_profilex_buttons,
.private = &profile_numbers[4]
},
- {
- .attr = { .name = "settings", .mode = 0660 },
- .size = sizeof(struct pyra_settings),
- .read = pyra_sysfs_read_settings,
- .write = pyra_sysfs_write_settings
- },
__ATTR_NULL
};
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
- struct pyra_info info;
+ struct pyra_settings settings;
int retval, i;
mutex_init(&pyra->pyra_lock);
- retval = pyra_get_info(usb_dev, &info);
- if (retval)
- return retval;
-
- pyra->firmware_version = info.firmware_version;
-
- retval = pyra_get_settings(usb_dev, &pyra->settings);
+ retval = pyra_get_settings(usb_dev, &settings);
if (retval)
return retval;
@@ -443,14 +398,9 @@ static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
&pyra->profile_settings[i], i);
if (retval)
return retval;
-
- retval = pyra_get_profile_buttons(usb_dev,
- &pyra->profile_buttons[i], i);
- if (retval)
- return retval;
}
- profile_activated(pyra, pyra->settings.startup_profile);
+ profile_activated(pyra, settings.startup_profile);
return 0;
}
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index eada7830fa99..beedcf001ceb 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -14,11 +14,13 @@
#include <linux/types.h>
-struct pyra_b {
- uint8_t command; /* PYRA_COMMAND_B */
- uint8_t size; /* always 3 */
- uint8_t unknown; /* 1 */
-} __attribute__ ((__packed__));
+enum {
+ PYRA_SIZE_CONTROL = 0x03,
+ PYRA_SIZE_INFO = 0x06,
+ PYRA_SIZE_PROFILE_SETTINGS = 0x0d,
+ PYRA_SIZE_PROFILE_BUTTONS = 0x13,
+ PYRA_SIZE_SETTINGS = 0x03,
+};
enum pyra_control_requests {
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
@@ -46,14 +48,6 @@ struct pyra_profile_settings {
uint16_t checksum; /* byte sum */
} __attribute__ ((__packed__));
-struct pyra_profile_buttons {
- uint8_t command; /* PYRA_COMMAND_PROFILE_BUTTONS */
- uint8_t size; /* always 0x13 */
- uint8_t number; /* Range 0-4 */
- uint8_t buttons[14];
- uint16_t checksum; /* byte sum */
-} __attribute__ ((__packed__));
-
struct pyra_info {
uint8_t command; /* PYRA_COMMAND_INFO */
uint8_t size; /* always 6 */
@@ -64,6 +58,7 @@ struct pyra_info {
} __attribute__ ((__packed__));
enum pyra_commands {
+ PYRA_COMMAND_CONTROL = 0x4,
PYRA_COMMAND_SETTINGS = 0x5,
PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
@@ -148,13 +143,10 @@ struct pyra_roccat_report {
struct pyra_device {
int actual_profile;
int actual_cpi;
- int firmware_version;
int roccat_claimed;
int chrdev_minor;
struct mutex pyra_lock;
- struct pyra_settings settings;
struct pyra_profile_settings profile_settings[5];
- struct pyra_profile_buttons profile_buttons[5];
};
#endif
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 014afba407e0..31747a29c093 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -120,7 +120,7 @@ SAVU_SYSFS_RW(profile, PROFILE)
SAVU_SYSFS_RW(general, GENERAL)
SAVU_SYSFS_RW(buttons, BUTTONS)
SAVU_SYSFS_RW(macro, MACRO)
-SAVU_SYSFS_R(info, INFO)
+SAVU_SYSFS_RW(info, INFO)
SAVU_SYSFS_RW(sensor, SENSOR)
static struct bin_attribute savu_bin_attributes[] = {
@@ -129,7 +129,7 @@ static struct bin_attribute savu_bin_attributes[] = {
SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
- SAVU_BIN_ATTRIBUTE_R(info, INFO),
+ SAVU_BIN_ATTRIBUTE_RW(info, INFO),
SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
__ATTR_NULL
};
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index d9d73e9163eb..0bc58bd8d4f5 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -82,23 +82,6 @@ struct hid_sensor_hub_callbacks_list {
void *priv;
};
-static int sensor_hub_check_for_sensor_page(struct hid_device *hdev)
-{
- int i;
- int ret = -EINVAL;
-
- for (i = 0; i < hdev->maxcollection; i++) {
- struct hid_collection *col = &hdev->collection[i];
- if (col->type == HID_COLLECTION_PHYSICAL &&
- (col->usage & HID_USAGE_PAGE) == HID_UP_SENSOR) {
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
static struct hid_report *sensor_hub_report(int id, struct hid_device *hdev,
int dir)
{
@@ -437,9 +420,6 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
ptr = raw_data;
ptr++; /*Skip report id*/
- if (!report)
- goto err_report;
-
spin_lock_irqsave(&pdata->lock, flags);
for (i = 0; i < report->maxfield; ++i) {
@@ -485,7 +465,6 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
callback->pdev);
spin_unlock_irqrestore(&pdata->lock, flags);
-err_report:
return 1;
}
@@ -524,10 +503,6 @@ static int sensor_hub_probe(struct hid_device *hdev,
hid_err(hdev, "parse failed\n");
goto err_free;
}
- if (sensor_hub_check_for_sensor_page(hdev) < 0) {
- hid_err(hdev, "sensor page not found\n");
- goto err_free;
- }
INIT_LIST_HEAD(&hdev->inputs);
ret = hid_hw_start(hdev, 0);
@@ -630,16 +605,7 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086,
- USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087,
- USB_DEVICE_ID_SENSOR_HUB_1020) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8086,
- USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_INTEL_8087,
- USB_DEVICE_ID_SENSOR_HUB_09FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
- USB_DEVICE_ID_SENSOR_HUB_7014) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, sensor_hub_devices);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 7c47fc3f7b2b..413a73187d33 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -57,10 +57,6 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
set_current_state(TASK_INTERRUPTIBLE);
while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -69,6 +65,10 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
ret = -EIO;
break;
}
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
/* allow O_NONBLOCK to work well from other threads */
mutex_unlock(&list->read_mutex);
@@ -295,6 +295,13 @@ out:
}
+static int hidraw_fasync(int fd, struct file *file, int on)
+{
+ struct hidraw_list *list = file->private_data;
+
+ return fasync_helper(fd, file, on, &list->fasync);
+}
+
static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
@@ -438,6 +445,7 @@ static const struct file_operations hidraw_ops = {
.open = hidraw_open,
.release = hidraw_release,
.unlocked_ioctl = hidraw_ioctl,
+ .fasync = hidraw_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = hidraw_ioctl,
#endif
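
The hidraw hunks above reorder the O_NONBLOCK check and add a .fasync handler, so userspace can request SIGIO delivery on a hidraw fd instead of blocking or busy-polling. A hedged userspace sketch, assuming the driver signals its fasync queue when a report arrives; the /dev/hidraw0 path and report size are example values only:

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t data_ready;

static void sigio_handler(int sig)
{
	data_ready = 1;
}

int main(void)
{
	int fd = open("/dev/hidraw0", O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());			/* deliver SIGIO here */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enable async notification */

	pause();					/* wait for SIGIO */
	if (data_ready) {
		unsigned char report[64];
		ssize_t n = read(fd, report, sizeof(report));
		printf("got %zd bytes\n", n);
	}

	close(fd);
	return 0;
}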
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
new file mode 100644
index 000000000000..b66617a020bd
--- /dev/null
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -0,0 +1,18 @@
+menu "I2C HID support"
+ depends on I2C
+
+config I2C_HID
+ tristate "HID over I2C transport layer"
+ default n
+ depends on I2C && INPUT
+ select HID
+ ---help---
+ Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
+	  other HID based device that is connected to your computer via I2C.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-hid.
+
+endmenu
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
new file mode 100644
index 000000000000..832d8f9aaba2
--- /dev/null
+++ b/drivers/hid/i2c-hid/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the I2C input drivers
+#
+
+obj-$(CONFIG_I2C_HID) += i2c-hid.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
new file mode 100644
index 000000000000..e766b5614ef5
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -0,0 +1,990 @@
+/*
+ * HID over I2C protocol implementation
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ * Copyright (c) 2012 Red Hat, Inc
+ *
+ * This code is partly based on "USB HID support for Linux":
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2007-2008 Oliver Neukum
+ * Copyright (c) 2006-2010 Jiri Kosina
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/hid.h>
+#include <linux/mutex.h>
+
+#include <linux/i2c/i2c-hid.h>
+
+/* flags */
+#define I2C_HID_STARTED (1 << 0)
+#define I2C_HID_RESET_PENDING (1 << 1)
+#define I2C_HID_READ_PENDING (1 << 2)
+
+#define I2C_HID_PWR_ON 0x00
+#define I2C_HID_PWR_SLEEP 0x01
+
+/* debug option */
+static bool debug;
+module_param(debug, bool, 0444);
+MODULE_PARM_DESC(debug, "print a lot of debug information");
+
+#define i2c_hid_dbg(ihid, fmt, arg...) \
+do { \
+ if (debug) \
+ dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
+} while (0)
+
+struct i2c_hid_desc {
+ __le16 wHIDDescLength;
+ __le16 bcdVersion;
+ __le16 wReportDescLength;
+ __le16 wReportDescRegister;
+ __le16 wInputRegister;
+ __le16 wMaxInputLength;
+ __le16 wOutputRegister;
+ __le16 wMaxOutputLength;
+ __le16 wCommandRegister;
+ __le16 wDataRegister;
+ __le16 wVendorID;
+ __le16 wProductID;
+ __le16 wVersionID;
+ __le32 reserved;
+} __packed;
+
+struct i2c_hid_cmd {
+ unsigned int registerIndex;
+ __u8 opcode;
+ unsigned int length;
+ bool wait;
+};
+
+union command {
+ u8 data[0];
+ struct cmd {
+ __le16 reg;
+ __u8 reportTypeID;
+ __u8 opcode;
+ } __packed c;
+};
+
+#define I2C_HID_CMD(opcode_) \
+ .opcode = opcode_, .length = 4, \
+ .registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
+
+/* fetch HID descriptor */
+static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
+/* fetch report descriptors */
+static const struct i2c_hid_cmd hid_report_descr_cmd = {
+ .registerIndex = offsetof(struct i2c_hid_desc,
+ wReportDescRegister),
+ .opcode = 0x00,
+ .length = 2 };
+/* commands */
+static const struct i2c_hid_cmd hid_reset_cmd = { I2C_HID_CMD(0x01),
+ .wait = true };
+static const struct i2c_hid_cmd hid_get_report_cmd = { I2C_HID_CMD(0x02) };
+static const struct i2c_hid_cmd hid_set_report_cmd = { I2C_HID_CMD(0x03) };
+static const struct i2c_hid_cmd hid_set_power_cmd = { I2C_HID_CMD(0x08) };
+
+/*
+ * These definitions are not used here, but are defined by the spec.
+ * Keeping them here for documentation purposes.
+ *
+ * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
+ * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
+ * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
+ * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
+ */
+
+static DEFINE_MUTEX(i2c_hid_open_mut);
+
+/* The main device structure */
+struct i2c_hid {
+ struct i2c_client *client; /* i2c client */
+ struct hid_device *hid; /* pointer to corresponding HID dev */
+ union {
+ __u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
+ struct i2c_hid_desc hdesc; /* the HID Descriptor */
+ };
+ __le16 wHIDDescRegister; /* location of the i2c
+ * register of the HID
+ * descriptor. */
+ unsigned int bufsize; /* i2c buffer size */
+ char *inbuf; /* Input buffer */
+ char *cmdbuf; /* Command buffer */
+ char *argsbuf; /* Command arguments buffer */
+
+ unsigned long flags; /* device flags */
+
+ wait_queue_head_t wait; /* For waiting the interrupt */
+};
+
+static int __i2c_hid_command(struct i2c_client *client,
+ const struct i2c_hid_cmd *command, u8 reportID,
+ u8 reportType, u8 *args, int args_len,
+ unsigned char *buf_recv, int data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ union command *cmd = (union command *)ihid->cmdbuf;
+ int ret;
+ struct i2c_msg msg[2];
+ int msg_num = 1;
+
+ int length = command->length;
+ bool wait = command->wait;
+ unsigned int registerIndex = command->registerIndex;
+
+ /* special case for hid_descr_cmd */
+ if (command == &hid_descr_cmd) {
+ cmd->c.reg = ihid->wHIDDescRegister;
+ } else {
+ cmd->data[0] = ihid->hdesc_buffer[registerIndex];
+ cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
+ }
+
+ if (length > 2) {
+ cmd->c.opcode = command->opcode;
+ cmd->c.reportTypeID = reportID | reportType << 4;
+ }
+
+ memcpy(cmd->data + length, args, args_len);
+ length += args_len;
+
+ i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags & I2C_M_TEN;
+ msg[0].len = length;
+ msg[0].buf = cmd->data;
+ if (data_len > 0) {
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags & I2C_M_TEN;
+ msg[1].flags |= I2C_M_RD;
+ msg[1].len = data_len;
+ msg[1].buf = buf_recv;
+ msg_num = 2;
+ set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+ }
+
+ if (wait)
+ set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+
+ ret = i2c_transfer(client->adapter, msg, msg_num);
+
+ if (data_len > 0)
+ clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+
+ if (ret != msg_num)
+ return ret < 0 ? ret : -EIO;
+
+ ret = 0;
+
+ if (wait) {
+ i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
+ if (!wait_event_timeout(ihid->wait,
+ !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
+ msecs_to_jiffies(5000)))
+ ret = -ENODATA;
+ i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
+ }
+
+ return ret;
+}
+
+static int i2c_hid_command(struct i2c_client *client,
+ const struct i2c_hid_cmd *command,
+ unsigned char *buf_recv, int data_len)
+{
+ return __i2c_hid_command(client, command, 0, 0, NULL, 0,
+ buf_recv, data_len);
+}
+
+static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
+ u8 reportID, unsigned char *buf_recv, int data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 args[3];
+ int ret;
+ int args_len = 0;
+ u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (reportID >= 0x0F) {
+ args[args_len++] = reportID;
+ reportID = 0x0F;
+ }
+
+ args[args_len++] = readRegister & 0xFF;
+ args[args_len++] = readRegister >> 8;
+
+ ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
+ reportType, args, args_len, buf_recv, data_len);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to retrieve report from device.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
+ u8 reportID, unsigned char *buf, size_t data_len)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 *args = ihid->argsbuf;
+ int ret;
+ u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+
+ /* hidraw already checked that data_len < HID_MAX_BUFFER_SIZE */
+ u16 size = 2 /* size */ +
+ (reportID ? 1 : 0) /* reportID */ +
+ data_len /* buf */;
+ int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ 2 /* dataRegister */ +
+ size /* args */;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (reportID >= 0x0F) {
+ args[index++] = reportID;
+ reportID = 0x0F;
+ }
+
+ args[index++] = dataRegister & 0xFF;
+ args[index++] = dataRegister >> 8;
+
+ args[index++] = size & 0xFF;
+ args[index++] = size >> 8;
+
+ if (reportID)
+ args[index++] = reportID;
+
+ memcpy(&args[index], buf, data_len);
+
+ ret = __i2c_hid_command(client, &hid_set_report_cmd, reportID,
+ reportType, args, args_len, NULL, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to set a report to device.\n");
+ return ret;
+ }
+
+ return data_len;
+}
+
+static int i2c_hid_set_power(struct i2c_client *client, int power_state)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
+ 0, NULL, 0, NULL, 0);
+ if (ret)
+ dev_err(&client->dev, "failed to change power setting.\n");
+
+ return ret;
+}
+
+static int i2c_hid_hwreset(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ret)
+ return ret;
+
+ i2c_hid_dbg(ihid, "resetting...\n");
+
+ ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to reset device.\n");
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i2c_hid_get_input(struct i2c_hid *ihid)
+{
+ int ret, ret_size;
+ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+ ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ if (ret != size) {
+ if (ret < 0)
+ return;
+
+ dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
+ __func__, ret, size);
+ return;
+ }
+
+ ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
+
+ if (!ret_size) {
+ /* host or device initiated RESET completed */
+ if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
+ wake_up(&ihid->wait);
+ return;
+ }
+
+ if (ret_size > size) {
+ dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ __func__, size, ret_size);
+ return;
+ }
+
+ i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+
+ if (test_bit(I2C_HID_STARTED, &ihid->flags))
+ hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
+ ret_size - 2, 1);
+
+ return;
+}
+
+static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+{
+ struct i2c_hid *ihid = dev_id;
+
+ if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+ return IRQ_HANDLED;
+
+ i2c_hid_get_input(ihid);
+
+ return IRQ_HANDLED;
+}
+
+static int i2c_hid_get_report_length(struct hid_report *report)
+{
+ return ((report->size - 1) >> 3) + 1 +
+ report->device->report_enum[report->type].numbered + 2;
+}
+
+static void i2c_hid_init_report(struct hid_report *report, u8 *buffer,
+ size_t bufsize)
+{
+ struct hid_device *hid = report->device;
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ unsigned int size, ret_size;
+
+ size = i2c_hid_get_report_length(report);
+ if (i2c_hid_get_report(client,
+ report->type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+ report->id, buffer, size))
+ return;
+
+ i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, ihid->inbuf);
+
+ ret_size = buffer[0] | (buffer[1] << 8);
+
+ if (ret_size != size) {
+ dev_err(&client->dev, "error in %s size:%d / ret_size:%d\n",
+ __func__, size, ret_size);
+ return;
+ }
+
+ /* hid->driver_lock is held as we are in probe function,
+ * we just need to setup the input fields, so using
+ * hid_report_raw_event is safe. */
+ hid_report_raw_event(hid, report->type, buffer + 2, size - 2, 1);
+}
+
+/*
+ * Initialize all reports
+ */
+static void i2c_hid_init_reports(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ u8 *inbuf = kzalloc(ihid->bufsize, GFP_KERNEL);
+
+ if (!inbuf) {
+ dev_err(&client->dev, "can not retrieve initial reports\n");
+ return;
+ }
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_INPUT_REPORT].report_list, list)
+ i2c_hid_init_report(report, inbuf, ihid->bufsize);
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
+ i2c_hid_init_report(report, inbuf, ihid->bufsize);
+
+ kfree(inbuf);
+}
+
+/*
+ * Traverse the supplied list of reports and find the longest
+ */
+static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
+ unsigned int *max)
+{
+ struct hid_report *report;
+ unsigned int size;
+
+ /* We should not rely on wMaxInputLength, as some devices may set it to
+ * a wrong length. */
+ list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+ size = i2c_hid_get_report_length(report);
+ if (*max < size)
+ *max = size;
+ }
+}
+
+static void i2c_hid_free_buffers(struct i2c_hid *ihid)
+{
+ kfree(ihid->inbuf);
+ kfree(ihid->argsbuf);
+ kfree(ihid->cmdbuf);
+ ihid->inbuf = NULL;
+ ihid->cmdbuf = NULL;
+ ihid->argsbuf = NULL;
+ ihid->bufsize = 0;
+}
+
+static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+{
+ /* the worst case is computed from the set_report command with a
+ * reportID > 15 and the maximum report length */
+ int args_len = sizeof(__u8) + /* optional ReportID byte */
+ sizeof(__u16) + /* data register */
+ sizeof(__u16) + /* size of the report */
+ report_size; /* report */
+
+ ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+ ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
+ ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
+
+ if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+ i2c_hid_free_buffers(ihid);
+ return -ENOMEM;
+ }
+
+ ihid->bufsize = report_size;
+
+ return 0;
+}
+
+static int i2c_hid_get_raw_report(struct hid_device *hid,
+ unsigned char report_number, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ size_t ret_count, ask_count;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ /* +2 bytes to include the size of the reply in the query buffer */
+ ask_count = min(count + 2, (size_t)ihid->bufsize);
+
+ ret = i2c_hid_get_report(client,
+ report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+ report_number, ihid->inbuf, ask_count);
+
+ if (ret < 0)
+ return ret;
+
+ ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
+
+ if (ret_count <= 2)
+ return 0;
+
+ ret_count = min(ret_count, ask_count);
+
+ /* The query buffer contains the size, dropping it in the reply */
+ count = min(count, ret_count - 2);
+ memcpy(buf, ihid->inbuf + 2, count);
+
+ return count;
+}
+
+static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+ size_t count, unsigned char report_type)
+{
+ struct i2c_client *client = hid->driver_data;
+ int report_id = buf[0];
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT)
+ return -EINVAL;
+
+ if (report_id) {
+ buf++;
+ count--;
+ }
+
+ ret = i2c_hid_set_report(client,
+ report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
+ report_id, buf, count);
+
+ if (report_id && ret >= 0)
+		ret++; /* add report_id to the number of transferred bytes */
+
+ return ret;
+}
+
+static int i2c_hid_parse(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct i2c_hid_desc *hdesc = &ihid->hdesc;
+ unsigned int rsize;
+ char *rdesc;
+ int ret;
+ int tries = 3;
+
+ i2c_hid_dbg(ihid, "entering %s\n", __func__);
+
+ rsize = le16_to_cpu(hdesc->wReportDescLength);
+ if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+ dbg_hid("weird size of report descriptor (%u)\n", rsize);
+ return -EINVAL;
+ }
+
+ do {
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ msleep(1000);
+ } while (tries-- > 0 && ret);
+
+ if (ret)
+ return ret;
+
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
+
+ i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
+
+ ret = hid_parse_report(hid, rdesc, rsize);
+ kfree(rdesc);
+ if (ret) {
+ dbg_hid("parsing report descriptor failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_hid_start(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+ unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+
+ i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+ i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+ i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+
+ if (bufsize > ihid->bufsize) {
+ i2c_hid_free_buffers(ihid);
+
+ ret = i2c_hid_alloc_buffers(ihid, bufsize);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
+ i2c_hid_init_reports(hid);
+
+ return 0;
+}
+
+static void i2c_hid_stop(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ hid->claimed = 0;
+
+ i2c_hid_free_buffers(ihid);
+}
+
+static int i2c_hid_open(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret = 0;
+
+ mutex_lock(&i2c_hid_open_mut);
+ if (!hid->open++) {
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ret) {
+ hid->open--;
+ goto done;
+ }
+ set_bit(I2C_HID_STARTED, &ihid->flags);
+ }
+done:
+ mutex_unlock(&i2c_hid_open_mut);
+ return ret;
+}
+
+static void i2c_hid_close(struct hid_device *hid)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ /* protecting hid->open to make sure we don't restart
+	 * data acquisition due to a resumption we no longer
+ * care about
+ */
+ mutex_lock(&i2c_hid_open_mut);
+ if (!--hid->open) {
+ clear_bit(I2C_HID_STARTED, &ihid->flags);
+
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ }
+ mutex_unlock(&i2c_hid_open_mut);
+}
+
+static int i2c_hid_power(struct hid_device *hid, int lvl)
+{
+ struct i2c_client *client = hid->driver_data;
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret = 0;
+
+ i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
+
+ switch (lvl) {
+ case PM_HINT_FULLON:
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ break;
+ case PM_HINT_NORMAL:
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ break;
+ }
+ return ret;
+}
+
+static int i2c_hid_hidinput_input_event(struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct hid_field *field;
+ int offset;
+
+ if (type == EV_FF)
+ return input_ff_event(dev, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(hid, type, code, &field);
+
+ if (offset == -1) {
+ hid_warn(dev, "event field not found\n");
+ return -1;
+ }
+
+ return hid_set_field(field, offset, value);
+}
+
+static struct hid_ll_driver i2c_hid_ll_driver = {
+ .parse = i2c_hid_parse,
+ .start = i2c_hid_start,
+ .stop = i2c_hid_stop,
+ .open = i2c_hid_open,
+ .close = i2c_hid_close,
+ .power = i2c_hid_power,
+ .hidinput_input_event = i2c_hid_hidinput_input_event,
+};
+
+static int i2c_hid_init_irq(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ int ret;
+
+ dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
+
+ ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, ihid);
+ if (ret < 0) {
+ dev_warn(&client->dev,
+ "Could not register for %s interrupt, irq = %d,"
+ " ret = %d\n",
+ client->name, client->irq, ret);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
+{
+ struct i2c_client *client = ihid->client;
+ struct i2c_hid_desc *hdesc = &ihid->hdesc;
+ unsigned int dsize;
+ int ret;
+
+	/* Fetch the length of the HID descriptor by retrieving the first 4 bytes:
+ * bytes 0-1 -> length
+ * bytes 2-3 -> bcdVersion (has to be 1.00) */
+ ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer, 4);
+
+ i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %*ph\n",
+ __func__, 4, ihid->hdesc_buffer);
+
+ if (ret) {
+ dev_err(&client->dev,
+ "unable to fetch the size of HID descriptor (ret=%d)\n",
+ ret);
+ return -ENODEV;
+ }
+
+ dsize = le16_to_cpu(hdesc->wHIDDescLength);
+ /*
+	 * the HID descriptor should at least contain its own size field and
+	 * the bcdVersion (4 bytes), and should not be greater
+ * than sizeof(struct i2c_hid_desc) as we directly fill this struct
+ * through i2c_hid_command.
+ */
+ if (dsize < 4 || dsize > sizeof(struct i2c_hid_desc)) {
+ dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
+ dsize);
+ return -ENODEV;
+ }
+
+ /* check bcdVersion == 1.0 */
+ if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
+ dev_err(&client->dev,
+ "unexpected HID descriptor bcdVersion (0x%04hx)\n",
+ le16_to_cpu(hdesc->bcdVersion));
+ return -ENODEV;
+ }
+
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
+ dsize);
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd Fail\n");
+ return -ENODEV;
+ }
+
+ i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
+
+ return 0;
+}
+
+static int i2c_hid_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int ret;
+ struct i2c_hid *ihid;
+ struct hid_device *hid;
+ __u16 hidRegister;
+ struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
+
+ dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+
+ if (!platform_data) {
+ dev_err(&client->dev, "HID register address not provided\n");
+ return -EINVAL;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev,
+ "HID over i2c has not been provided an Int IRQ\n");
+ return -EINVAL;
+ }
+
+ ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
+ if (!ihid)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, ihid);
+
+ ihid->client = client;
+
+ hidRegister = platform_data->hid_descriptor_address;
+ ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
+
+ init_waitqueue_head(&ihid->wait);
+
+ /* we need to allocate the command buffer without knowing the maximum
+ * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
+ * real computation later. */
+ ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
+ if (ret < 0)
+ goto err;
+
+ ret = i2c_hid_fetch_hid_descriptor(ihid);
+ if (ret < 0)
+ goto err;
+
+ ret = i2c_hid_init_irq(client);
+ if (ret < 0)
+ goto err;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_irq;
+ }
+
+ ihid->hid = hid;
+
+ hid->driver_data = client;
+ hid->ll_driver = &i2c_hid_ll_driver;
+ hid->hid_get_raw_report = i2c_hid_get_raw_report;
+ hid->hid_output_raw_report = i2c_hid_output_raw_report;
+ hid->dev.parent = &client->dev;
+ hid->bus = BUS_I2C;
+ hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
+ hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+
+ snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+ client->name, hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ if (ret != -ENODEV)
+ hid_err(client, "can't add hid device: %d\n", ret);
+ goto err_mem_free;
+ }
+
+ return 0;
+
+err_mem_free:
+ hid_destroy_device(hid);
+
+err_irq:
+ free_irq(client->irq, ihid);
+
+err:
+ i2c_hid_free_buffers(ihid);
+ kfree(ihid);
+ return ret;
+}
+
+static int i2c_hid_remove(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct hid_device *hid;
+
+ hid = ihid->hid;
+ hid_destroy_device(hid);
+
+ free_irq(client->irq, ihid);
+
+ if (ihid->bufsize)
+ i2c_hid_free_buffers(ihid);
+
+ kfree(ihid);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int i2c_hid_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+
+ return 0;
+}
+
+static int i2c_hid_resume(struct device *dev)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(i2c_hid_pm, i2c_hid_suspend, i2c_hid_resume);
+
+static const struct i2c_device_id i2c_hid_id_table[] = {
+ { "hid", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
+
+
+static struct i2c_driver i2c_hid_driver = {
+ .driver = {
+ .name = "i2c_hid",
+ .owner = THIS_MODULE,
+ .pm = &i2c_hid_pm,
+ },
+
+ .probe = i2c_hid_probe,
+ .remove = i2c_hid_remove,
+
+ .id_table = i2c_hid_id_table,
+};
+
+module_i2c_driver(i2c_hid_driver);
+
+MODULE_DESCRIPTION("HID over I2C core driver");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_LICENSE("GPL");
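
As a usage note (not part of the patch), the probe path above requires platform data carrying the HID descriptor register address and a populated client->irq. A minimal board-file sketch follows; the bus number, slave address, register address and IRQ line are hypothetical, and the "hid" id string is the one exported in i2c_hid_id_table.

/* Hypothetical board setup; values are illustrative only. */
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/i2c/i2c-hid.h>

static struct i2c_hid_platform_data example_hid_pdata = {
	.hid_descriptor_address = 0x0001,	/* register holding the HID descriptor */
};

static struct i2c_board_info example_hid_board_info __initdata = {
	I2C_BOARD_INFO("hid", 0x2c),		/* "hid" matches i2c_hid_id_table */
	.platform_data	= &example_hid_pdata,
	.irq		= 42,			/* interrupt line wired to the device */
};

static int __init example_register_hid(void)
{
	/* Normally called from the machine/board init code. */
	return i2c_register_board_info(1, &example_hid_board_info, 1);
}
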
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 11c7932dc7e6..e0e6abf1cd3b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,8 +70,10 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
@@ -79,9 +81,11 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 14599e256791..87bd64959a91 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -361,10 +361,6 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
prepare_to_wait(&list->hiddev->wait, &wait, TASK_INTERRUPTIBLE);
while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- break;
- }
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
@@ -373,6 +369,10 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
retval = -EIO;
break;
}
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
/* let O_NONBLOCK tasks run */
mutex_unlock(&list->thread_lock);
@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case HIDIOCAPPLICATION:
- if (arg < 0 || arg >= hid->maxapplication)
+ if (arg >= hid->maxapplication)
break;
for (i = 0; i < hid->maxcollection; i++)
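
As background for the HIDIOCAPPLICATION change above (removal of the redundant signed comparison), a small userspace sketch follows; the device node is hypothetical, and an out-of-range index simply makes the ioctl fail.

/* Hypothetical example; build as a normal userspace program. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hiddev.h>

int main(void)
{
	int fd = open("/dev/usb/hiddev0", O_RDONLY);	/* assumed device node */
	int usage;

	if (fd < 0)
		return 1;

	/* argument is an application collection index; indices beyond
	 * maxapplication make the ioctl return -1 */
	usage = ioctl(fd, HIDIOCAPPLICATION, 0);
	if (usage >= 0)
		printf("first application usage: 0x%08x\n", usage);

	close(fd);
	return 0;
}
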
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 3ad91f6447d8..e61e5f991aa5 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -675,7 +675,7 @@ static const struct file_operations hsc_fops = {
.release = hsc_release,
};
-static void __devinit hsc_channel_init(struct hsc_channel *channel)
+static void hsc_channel_init(struct hsc_channel *channel)
{
init_waitqueue_head(&channel->rx_wait);
init_waitqueue_head(&channel->tx_wait);
@@ -685,7 +685,7 @@ static void __devinit hsc_channel_init(struct hsc_channel *channel)
INIT_LIST_HEAD(&channel->tx_msgs_queue);
}
-static int __devinit hsc_probe(struct device *dev)
+static int hsc_probe(struct device *dev)
{
const char devname[] = "hsi_char";
struct hsc_client_data *cl_data;
@@ -744,7 +744,7 @@ out1:
return ret;
}
-static int __devexit hsc_remove(struct device *dev)
+static int hsc_remove(struct device *dev)
{
struct hsi_client *cl = to_hsi_client(dev);
struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
@@ -763,7 +763,7 @@ static struct hsi_client_driver hsc_driver = {
.name = "hsi_char",
.owner = THIS_MODULE,
.probe = hsc_probe,
- .remove = __devexit_p(hsc_remove),
+ .remove = hsc_remove,
},
};
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 70f5dde1cc52..b38ef6d8d049 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -13,4 +13,10 @@ config HYPERV_UTILS
help
Select this option to enable the Hyper-V Utilities.
+config HYPERV_BALLOON
+ tristate "Microsoft Hyper-V Balloon driver"
+ depends on HYPERV
+ help
+	  Select this option to enable the Hyper-V Balloon driver.
+
endmenu
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index a23938b991c9..e6abfa02d8b7 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
+obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index f4c3d28cd1fc..773a2f25a8f0 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -33,14 +33,6 @@
#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
-/* Internal routines */
-static int create_gpadl_header(
- void *kbuffer, /* must be phys and virt contiguous */
- u32 size, /* page-size multiple */
- struct vmbus_channel_msginfo **msginfo,
- u32 *messagecount);
-static void vmbus_setevent(struct vmbus_channel *channel);
-
/*
* vmbus_setevent- Trigger an event notification on the specified
* channel.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2b8b8d4558d2..2f84c5cff8d4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -265,14 +265,9 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_offer_channel *offer;
struct vmbus_channel *newchannel;
- uuid_le *guidtype;
- uuid_le *guidinstance;
offer = (struct vmbus_channel_offer_channel *)hdr;
- guidtype = &offer->offer.if_type;
- guidinstance = &offer->offer.if_instance;
-
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -470,7 +465,6 @@ static void vmbus_onversion_response(
{
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
- struct vmbus_channel_initiate_contact *initiate;
struct vmbus_channel_version_response *version_response;
unsigned long flags;
@@ -484,8 +478,6 @@ static void vmbus_onversion_response(
if (requestheader->msgtype ==
CHANNELMSG_INITIATE_CONTACT) {
- initiate =
- (struct vmbus_channel_initiate_contact *)requestheader;
memcpy(&msginfo->response.version_response,
version_response,
sizeof(struct vmbus_channel_version_response));
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
new file mode 100644
index 000000000000..dd289fd179ca
--- /dev/null
+++ b/drivers/hv/hv_balloon.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (c) 2012, Microsoft Corporation.
+ *
+ * Author:
+ * K. Y. Srinivasan <kys@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mman.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/memory_hotplug.h>
+#include <linux/memory.h>
+#include <linux/notifier.h>
+#include <linux/mman.h>
+#include <linux/percpu_counter.h>
+
+#include <linux/hyperv.h>
+
+/*
+ * We begin with definitions supporting the Dynamic Memory protocol
+ * with the host.
+ *
+ * Begin protocol definitions.
+ */
+
+
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the major
+ * version.
+ *
+ * History:
+ * Initial version 1.0
+ * Changed to 0.1 on 2009/03/25
+ * Changed to 0.2 on 2009/05/14
+ * Changed to 0.3 on 2009/12/03
+ * Changed to 1.0 on 2011/04/05
+ */
+
+#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
+#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
+#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
+
+enum {
+ DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
+ DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+
+ DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
+ DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+
+ DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
+};
+
+
+
+/*
+ * Message Types
+ */
+
+enum dm_message_type {
+ /*
+ * Version 0.3
+ */
+ DM_ERROR = 0,
+ DM_VERSION_REQUEST = 1,
+ DM_VERSION_RESPONSE = 2,
+ DM_CAPABILITIES_REPORT = 3,
+ DM_CAPABILITIES_RESPONSE = 4,
+ DM_STATUS_REPORT = 5,
+ DM_BALLOON_REQUEST = 6,
+ DM_BALLOON_RESPONSE = 7,
+ DM_UNBALLOON_REQUEST = 8,
+ DM_UNBALLOON_RESPONSE = 9,
+ DM_MEM_HOT_ADD_REQUEST = 10,
+ DM_MEM_HOT_ADD_RESPONSE = 11,
+ DM_VERSION_03_MAX = 11,
+ /*
+ * Version 1.0.
+ */
+ DM_INFO_MESSAGE = 12,
+ DM_VERSION_1_MAX = 12
+};
+
+
+/*
+ * Structures defining the dynamic memory management
+ * protocol.
+ */
+
+union dm_version {
+ struct {
+ __u16 minor_version;
+ __u16 major_version;
+ };
+ __u32 version;
+} __packed;
+
+
+union dm_caps {
+ struct {
+ __u64 balloon:1;
+ __u64 hot_add:1;
+ __u64 reservedz:62;
+ } cap_bits;
+ __u64 caps;
+} __packed;
+
+union dm_mem_page_range {
+ struct {
+ /*
+ * The PFN number of the first page in the range.
+ * 40 bits is the architectural limit of a PFN
+ * number for AMD64.
+ */
+ __u64 start_page:40;
+ /*
+ * The number of pages in the range.
+ */
+ __u64 page_cnt:24;
+ } finfo;
+ __u64 page_range;
+} __packed;
+
+
+
+/*
+ * The header for all dynamic memory messages:
+ *
+ * type: Type of the message.
+ * size: Size of the message in bytes; including the header.
+ * trans_id: The guest is responsible for manufacturing this ID.
+ */
+
+struct dm_header {
+ __u16 type;
+ __u16 size;
+ __u32 trans_id;
+} __packed;
+
+/*
+ * A generic message format for dynamic memory.
+ * Specific message formats are defined later in the file.
+ */
+
+struct dm_message {
+ struct dm_header hdr;
+ __u8 data[]; /* enclosed message */
+} __packed;
+
+
+/*
+ * Specific message types supporting the dynamic memory protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * dm_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version the guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_request {
+ struct dm_header hdr;
+ union dm_version version;
+ __u32 is_last_attempt:1;
+ __u32 reservedz:31;
+} __packed;
+
+/*
+ * Version response message; sent from the host to the guest and indicates
+ * whether the host has accepted the version sent by the guest.
+ *
+ * is_accepted: If TRUE, host has accepted the version and the guest
+ * should proceed to the next stage of the protocol. FALSE indicates that
+ * guest should re-try with a different version.
+ *
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct dm_version_response {
+ struct dm_header hdr;
+ __u64 is_accepted:1;
+ __u64 reservedz:63;
+} __packed;
+
+/*
+ * Message reporting capabilities. This is sent from the guest to the
+ * host.
+ */
+
+struct dm_capabilities {
+ struct dm_header hdr;
+ union dm_caps caps;
+ __u64 min_page_cnt;
+ __u64 max_page_number;
+} __packed;
+
+/*
+ * Response to the capabilities message. This is sent from the host to the
+ * guest. This message indicates whether the host has accepted the guest's
+ * capabilities. If the host has not accepted them, the guest must shut down
+ * the service.
+ *
+ * is_accepted: Indicates if the host has accepted guest's capabilities.
+ * reservedz: Must be 0.
+ */
+
+struct dm_capabilities_resp_msg {
+ struct dm_header hdr;
+ __u64 is_accepted:1;
+ __u64 reservedz:63;
+} __packed;
+
+/*
+ * This message is used to report memory pressure from the guest.
+ * This message is not part of any transaction and there is no
+ * response to this message.
+ *
+ * num_avail: Available memory in pages.
+ * num_committed: Committed memory in pages.
+ * page_file_size: The accumulated size of all page files
+ * in the system in pages.
+ * zero_free: The number of zero and free pages.
+ * page_file_writes: The writes to the page file in pages.
+ * io_diff: An indicator of file cache efficiency or page file activity,
+ * calculated as File Cache Page Fault Count - Page Read Count.
+ * This value is in pages.
+ *
+ * Some of these metrics are Windows specific and fortunately
+ * the algorithm on the host side that computes the guest memory
+ * pressure only uses num_committed value.
+ */
+
+struct dm_status {
+ struct dm_header hdr;
+ __u64 num_avail;
+ __u64 num_committed;
+ __u64 page_file_size;
+ __u64 zero_free;
+ __u32 page_file_writes;
+ __u32 io_diff;
+} __packed;
+
+
+/*
+ * Message to ask the guest to allocate memory - balloon up message.
+ * This message is sent from the host to the guest. The guest may not be
+ * able to allocate as much memory as requested.
+ *
+ * num_pages: number of pages to allocate.
+ */
+
+struct dm_balloon {
+ struct dm_header hdr;
+ __u32 num_pages;
+ __u32 reservedz;
+} __packed;
+
+
+/*
+ * Balloon response message; this message is sent from the guest
+ * to the host in response to the balloon message.
+ *
+ * reservedz: Reserved; must be set to zero.
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * If TRUE, there will be at least one more message from the guest.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_balloon_response {
+ struct dm_header hdr;
+ __u32 reservedz;
+ __u32 more_pages:1;
+ __u32 range_count:31;
+ union dm_mem_page_range range_array[];
+} __packed;
+
+/*
+ * Un-balloon message; this message is sent from the host
+ * to the guest to give guest more memory.
+ *
+ * more_pages: If FALSE, this is the last message of the transaction.
+ * If TRUE, there will be at least one more message from the guest.
+ *
+ * reservedz: Reserved; must be set to zero.
+ *
+ * range_count: The number of ranges in the range array.
+ *
+ * range_array: An array of page ranges returned to the host.
+ *
+ */
+
+struct dm_unballoon_request {
+ struct dm_header hdr;
+ __u32 more_pages:1;
+ __u32 reservedz:31;
+ __u32 range_count;
+ union dm_mem_page_range range_array[];
+} __packed;
+
+/*
+ * Un-balloon response message; this message is sent from the guest
+ * to the host in response to an unballoon request.
+ *
+ */
+
+struct dm_unballoon_response {
+ struct dm_header hdr;
+} __packed;
+
+
+/*
+ * Hot add request message. Message sent from the host to the guest.
+ *
+ * mem_range: Memory range to hot add.
+ *
+ * On Linux we currently don't support this since we cannot hot add
+ * memory at arbitrary granularity.
+ */
+
+struct dm_hot_add {
+ struct dm_header hdr;
+ union dm_mem_page_range range;
+} __packed;
+
+/*
+ * Hot add response message.
+ * This message is sent by the guest to report the status of a hot add request.
+ * If page_count is less than the requested page count, then the host should
+ * assume all further hot add requests will fail, since this indicates that
+ * the guest has hit an upper physical memory barrier.
+ *
+ * Hot adds may also fail due to low resources; in this case, the guest must
+ * not complete this message until the hot add can succeed, and the host must
+ * not send a new hot add request until the response is sent.
+ * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
+ * times it fails the request.
+ *
+ *
+ * page_count: number of pages that were successfully hot added.
+ *
+ * result: result of the operation 1: success, 0: failure.
+ *
+ */
+
+struct dm_hot_add_response {
+ struct dm_header hdr;
+ __u32 page_count;
+ __u32 result;
+} __packed;
+
+/*
+ * Types of information sent from host to the guest.
+ */
+
+enum dm_info_type {
+ INFO_TYPE_MAX_PAGE_CNT = 0,
+ MAX_INFO_TYPE
+};
+
+
+/*
+ * Header for the information message.
+ */
+
+struct dm_info_header {
+ enum dm_info_type type;
+ __u32 data_size;
+} __packed;
+
+/*
+ * This message is sent from the host to the guest to pass
+ * some relevant information (win8 addition).
+ *
+ * reserved: not used.
+ * info_size: size of the information blob.
+ * info: information blob.
+ */
+
+struct dm_info_msg {
+ struct dm_header hdr;
+ __u32 reserved;
+ __u32 info_size;
+ __u8 info[];
+};
+
+/*
+ * End protocol definitions.
+ */
+
+static bool hot_add;
+static bool do_hot_add;
+
+module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+
+static atomic_t trans_id = ATOMIC_INIT(0);
+
+static int dm_ring_size = (5 * PAGE_SIZE);
+
+/*
+ * Driver specific state.
+ */
+
+enum hv_dm_state {
+ DM_INITIALIZING = 0,
+ DM_INITIALIZED,
+ DM_BALLOON_UP,
+ DM_BALLOON_DOWN,
+ DM_HOT_ADD,
+ DM_INIT_ERROR
+};
+
+
+static __u8 recv_buffer[PAGE_SIZE];
+static __u8 *send_buffer;
+#define PAGES_IN_2M 512
+
+struct hv_dynmem_device {
+ struct hv_device *dev;
+ enum hv_dm_state state;
+ struct completion host_event;
+ struct completion config_event;
+
+ /*
+ * Number of pages we have currently ballooned out.
+ */
+ unsigned int num_pages_ballooned;
+
+ /*
+ * This thread handles both balloon/hot-add
+ * requests from the host as well as notifying
+ * the host with regards to memory pressure in
+ * the guest.
+ */
+ struct task_struct *thread;
+
+ /*
+ * We start with the highest version we can support
+ * and downgrade based on the host; we save here the
+ * next version to try.
+ */
+ __u32 next_version;
+};
+
+static struct hv_dynmem_device dm_device;
+
+static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
+{
+
+ struct dm_hot_add_response resp;
+
+ if (do_hot_add) {
+
+ pr_info("Memory hot add not supported\n");
+
+ /*
+ * Currently we do not support hot add.
+ * Just fail the request.
+ */
+ }
+
+ memset(&resp, 0, sizeof(struct dm_hot_add_response));
+ resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
+ resp.hdr.size = sizeof(struct dm_hot_add_response);
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
+
+ resp.page_count = 0;
+ resp.result = 0;
+
+ dm->state = DM_INITIALIZED;
+ vmbus_sendpacket(dm->dev->channel, &resp,
+ sizeof(struct dm_hot_add_response),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+}
+
+static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
+{
+ struct dm_info_header *info_hdr;
+
+ info_hdr = (struct dm_info_header *)msg->info;
+
+ switch (info_hdr->type) {
+ case INFO_TYPE_MAX_PAGE_CNT:
+ pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
+ pr_info("Data Size is %d\n", info_hdr->data_size);
+ break;
+ default:
+ pr_info("Received Unknown type: %d\n", info_hdr->type);
+ }
+}
+
+/*
+ * Post our memory pressure status to the host. The host
+ * expects the guest to post this status periodically at
+ * 1 second intervals.
+ *
+ * The metrics specified in this protocol are very Windows
+ * specific and so we cook up numbers here to convey our memory
+ * pressure.
+ */
+
+static void post_status(struct hv_dynmem_device *dm)
+{
+ struct dm_status status;
+
+
+ memset(&status, 0, sizeof(struct dm_status));
+ status.hdr.type = DM_STATUS_REPORT;
+ status.hdr.size = sizeof(struct dm_status);
+ status.hdr.trans_id = atomic_inc_return(&trans_id);
+
+
+ status.num_committed = vm_memory_committed();
+
+ vmbus_sendpacket(dm->dev->channel, &status,
+ sizeof(struct dm_status),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+}
+
+
+
+static void free_balloon_pages(struct hv_dynmem_device *dm,
+ union dm_mem_page_range *range_array)
+{
+ int num_pages = range_array->finfo.page_cnt;
+ __u64 start_frame = range_array->finfo.start_page;
+ struct page *pg;
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ pg = pfn_to_page(i + start_frame);
+ __free_page(pg);
+ dm->num_pages_ballooned--;
+ }
+}
+
+
+
+static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
+ struct dm_balloon_response *bl_resp, int alloc_unit,
+ bool *alloc_error)
+{
+ int i = 0;
+ struct page *pg;
+
+ if (num_pages < alloc_unit)
+ return 0;
+
+ for (i = 0; (i * alloc_unit) < num_pages; i++) {
+ if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
+ PAGE_SIZE)
+ return i * alloc_unit;
+
+ /*
+ * We execute this code in a thread context. Furthermore,
+ * we don't want the kernel to try too hard.
+ */
+ pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN,
+ get_order(alloc_unit << PAGE_SHIFT));
+
+ if (!pg) {
+ *alloc_error = true;
+ return i * alloc_unit;
+ }
+
+
+ dm->num_pages_ballooned += alloc_unit;
+
+ bl_resp->range_count++;
+ bl_resp->range_array[i].finfo.start_page =
+ page_to_pfn(pg);
+ bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
+ bl_resp->hdr.size += sizeof(union dm_mem_page_range);
+
+ }
+
+ return num_pages;
+}
+
+
+
+static void balloon_up(struct hv_dynmem_device *dm, struct dm_balloon *req)
+{
+ int num_pages = req->num_pages;
+ int num_ballooned = 0;
+ struct dm_balloon_response *bl_resp;
+ int alloc_unit;
+ int ret;
+ bool alloc_error = false;
+ bool done = false;
+ int i;
+
+
+ /*
+ * Currently, we only support 4k allocations.
+ */
+ alloc_unit = 1;
+
+ while (!done) {
+ bl_resp = (struct dm_balloon_response *)send_buffer;
+ memset(send_buffer, 0, PAGE_SIZE);
+ bl_resp->hdr.type = DM_BALLOON_RESPONSE;
+ bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
+ bl_resp->hdr.size = sizeof(struct dm_balloon_response);
+ bl_resp->more_pages = 1;
+
+
+ num_pages -= num_ballooned;
+ num_ballooned = alloc_balloon_pages(dm, num_pages,
+ bl_resp, alloc_unit,
+ &alloc_error);
+
+ if ((alloc_error) || (num_ballooned == num_pages)) {
+ bl_resp->more_pages = 0;
+ done = true;
+ dm->state = DM_INITIALIZED;
+ }
+
+ /*
+ * We are pushing a lot of data through the channel;
+ * deal with transient failures caused because of the
+ * lack of space in the ring buffer.
+ */
+
+ do {
+ ret = vmbus_sendpacket(dm_device.dev->channel,
+ bl_resp,
+ bl_resp->hdr.size,
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret == -EAGAIN)
+ msleep(20);
+
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ /*
+			 * Free up the memory we allocated.
+ */
+ pr_info("Balloon response failed\n");
+
+ for (i = 0; i < bl_resp->range_count; i++)
+ free_balloon_pages(dm,
+ &bl_resp->range_array[i]);
+
+ done = true;
+ }
+ }
+
+}
+
+static void balloon_down(struct hv_dynmem_device *dm,
+ struct dm_unballoon_request *req)
+{
+ union dm_mem_page_range *range_array = req->range_array;
+ int range_count = req->range_count;
+ struct dm_unballoon_response resp;
+ int i;
+
+ for (i = 0; i < range_count; i++)
+ free_balloon_pages(dm, &range_array[i]);
+
+ if (req->more_pages == 1)
+ return;
+
+ memset(&resp, 0, sizeof(struct dm_unballoon_response));
+ resp.hdr.type = DM_UNBALLOON_RESPONSE;
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
+ resp.hdr.size = sizeof(struct dm_unballoon_response);
+
+ vmbus_sendpacket(dm_device.dev->channel, &resp,
+ sizeof(struct dm_unballoon_response),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ dm->state = DM_INITIALIZED;
+}
+
+static void balloon_onchannelcallback(void *context);
+
+static int dm_thread_func(void *dm_dev)
+{
+ struct hv_dynmem_device *dm = dm_dev;
+ int t;
+ unsigned long scan_start;
+
+ while (!kthread_should_stop()) {
+ t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ);
+ /*
+ * The host expects us to post information on the memory
+ * pressure every second.
+ */
+
+ if (t == 0)
+ post_status(dm);
+
+ scan_start = jiffies;
+ switch (dm->state) {
+ case DM_BALLOON_UP:
+ balloon_up(dm, (struct dm_balloon *)recv_buffer);
+ break;
+
+ case DM_HOT_ADD:
+ hot_add_req(dm, (struct dm_hot_add *)recv_buffer);
+ break;
+ default:
+ break;
+ }
+
+ if (!time_in_range(jiffies, scan_start, scan_start + HZ))
+ post_status(dm);
+
+ }
+
+ return 0;
+}
+
+
+static void version_resp(struct hv_dynmem_device *dm,
+ struct dm_version_response *vresp)
+{
+ struct dm_version_request version_req;
+ int ret;
+
+ if (vresp->is_accepted) {
+ /*
+ * We are done; wakeup the
+ * context waiting for version
+ * negotiation.
+ */
+ complete(&dm->host_event);
+ return;
+ }
+ /*
+ * If there are more versions to try, continue
+ * with negotiations; if not
+ * shutdown the service since we are not able
+ * to negotiate a suitable version number
+ * with the host.
+ */
+ if (dm->next_version == 0)
+ goto version_error;
+
+ dm->next_version = 0;
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+ version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ version_req.is_last_attempt = 1;
+
+ ret = vmbus_sendpacket(dm->dev->channel, &version_req,
+ sizeof(struct dm_version_request),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND, 0);
+
+ if (ret)
+ goto version_error;
+
+ return;
+
+version_error:
+ dm->state = DM_INIT_ERROR;
+ complete(&dm->host_event);
+}
+
+static void cap_resp(struct hv_dynmem_device *dm,
+ struct dm_capabilities_resp_msg *cap_resp)
+{
+ if (!cap_resp->is_accepted) {
+ pr_info("Capabilities not accepted by host\n");
+ dm->state = DM_INIT_ERROR;
+ }
+ complete(&dm->host_event);
+}
+
+static void balloon_onchannelcallback(void *context)
+{
+ struct hv_device *dev = context;
+ u32 recvlen;
+ u64 requestid;
+ struct dm_message *dm_msg;
+ struct dm_header *dm_hdr;
+ struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+
+ memset(recv_buffer, 0, sizeof(recv_buffer));
+ vmbus_recvpacket(dev->channel, recv_buffer,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ dm_msg = (struct dm_message *)recv_buffer;
+ dm_hdr = &dm_msg->hdr;
+
+ switch (dm_hdr->type) {
+ case DM_VERSION_RESPONSE:
+ version_resp(dm,
+ (struct dm_version_response *)dm_msg);
+ break;
+
+ case DM_CAPABILITIES_RESPONSE:
+ cap_resp(dm,
+ (struct dm_capabilities_resp_msg *)dm_msg);
+ break;
+
+ case DM_BALLOON_REQUEST:
+ dm->state = DM_BALLOON_UP;
+ complete(&dm->config_event);
+ break;
+
+ case DM_UNBALLOON_REQUEST:
+ dm->state = DM_BALLOON_DOWN;
+ balloon_down(dm,
+ (struct dm_unballoon_request *)recv_buffer);
+ break;
+
+ case DM_MEM_HOT_ADD_REQUEST:
+ dm->state = DM_HOT_ADD;
+ complete(&dm->config_event);
+ break;
+
+ case DM_INFO_MESSAGE:
+ process_info(dm, (struct dm_info_msg *)dm_msg);
+ break;
+
+ default:
+ pr_err("Unhandled message: type: %d\n", dm_hdr->type);
+
+ }
+ }
+
+}
+
+static int balloon_probe(struct hv_device *dev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ int ret, t;
+ struct dm_version_request version_req;
+ struct dm_capabilities cap_msg;
+
+ do_hot_add = hot_add;
+
+ /*
+ * First allocate a send buffer.
+ */
+
+ send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!send_buffer)
+ return -ENOMEM;
+
+ ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
+ balloon_onchannelcallback, dev);
+
+ if (ret)
+ goto probe_error0;
+
+ dm_device.dev = dev;
+ dm_device.state = DM_INITIALIZING;
+ dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+ init_completion(&dm_device.host_event);
+ init_completion(&dm_device.config_event);
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ goto probe_error1;
+ }
+
+ hv_set_drvdata(dev, &dm_device);
+ /*
+	 * Initiate the handshake with the host and negotiate
+ * a version that the host can support. We start with the
+ * highest version number and go down if the host cannot
+ * support it.
+ */
+ memset(&version_req, 0, sizeof(struct dm_version_request));
+ version_req.hdr.type = DM_VERSION_REQUEST;
+ version_req.hdr.size = sizeof(struct dm_version_request);
+ version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+ version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
+ version_req.is_last_attempt = 0;
+
+ ret = vmbus_sendpacket(dev->channel, &version_req,
+ sizeof(struct dm_version_request),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto probe_error2;
+
+ t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto probe_error2;
+ }
+
+ /*
+ * If we could not negotiate a compatible version with the host
+ * fail the probe function.
+ */
+ if (dm_device.state == DM_INIT_ERROR) {
+ ret = -ETIMEDOUT;
+ goto probe_error2;
+ }
+ /*
+ * Now submit our capabilities to the host.
+ */
+ memset(&cap_msg, 0, sizeof(struct dm_capabilities));
+ cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
+ cap_msg.hdr.size = sizeof(struct dm_capabilities);
+ cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+
+ cap_msg.caps.cap_bits.balloon = 1;
+ /*
+ * While we currently don't support hot-add,
+ * we still advertise this capability since the
+	 * host requires that guests participating in the
+ * dynamic memory protocol support hot add.
+ */
+ cap_msg.caps.cap_bits.hot_add = 1;
+
+ /*
+ * Currently the host does not use these
+ * values and we set them to what is done in the
+ * Windows driver.
+ */
+ cap_msg.min_page_cnt = 0;
+ cap_msg.max_page_number = -1;
+
+ ret = vmbus_sendpacket(dev->channel, &cap_msg,
+ sizeof(struct dm_capabilities),
+ (unsigned long)NULL,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto probe_error2;
+
+ t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto probe_error2;
+ }
+
+ /*
+ * If the host does not like our capabilities,
+ * fail the probe function.
+ */
+ if (dm_device.state == DM_INIT_ERROR) {
+ ret = -ETIMEDOUT;
+ goto probe_error2;
+ }
+
+ dm_device.state = DM_INITIALIZED;
+
+ return 0;
+
+probe_error2:
+ kthread_stop(dm_device.thread);
+
+probe_error1:
+ vmbus_close(dev->channel);
+probe_error0:
+ kfree(send_buffer);
+ return ret;
+}
+
+static int balloon_remove(struct hv_device *dev)
+{
+ struct hv_dynmem_device *dm = hv_get_drvdata(dev);
+
+ if (dm->num_pages_ballooned != 0)
+ pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
+
+ vmbus_close(dev->channel);
+ kthread_stop(dm->thread);
+ kfree(send_buffer);
+
+ return 0;
+}
+
+static const struct hv_vmbus_device_id id_table[] = {
+ /* Dynamic Memory Class ID */
+ /* 525074DC-8985-46e2-8057-A307DC18A502 */
+ { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
+ 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, id_table);
+
+static struct hv_driver balloon_drv = {
+ .name = "hv_balloon",
+ .id_table = id_table,
+ .probe = balloon_probe,
+ .remove = balloon_remove,
+};
+
+static int __init init_balloon_drv(void)
+{
+
+ return vmbus_driver_register(&balloon_drv);
+}
+
+static void exit_balloon_drv(void)
+{
+
+ vmbus_driver_unregister(&balloon_drv);
+}
+
+module_init(init_balloon_drv);
+module_exit(exit_balloon_drv);
+
+MODULE_DESCRIPTION("Hyper-V Balloon");
+MODULE_VERSION(HV_DRV_VERSION);
+MODULE_LICENSE("GPL");
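The probe path above uses the same request/response pattern twice: post an in-band message on the VMBus channel, block on a completion with a five second timeout, and then check the state recorded by the channel callback. A minimal sketch of that pattern, using only the calls visible in the patch (the dm_send_and_wait() helper itself is hypothetical, not part of the driver):

static int dm_send_and_wait(struct hv_device *dev, struct hv_dynmem_device *dm,
			    void *msg, size_t len)
{
	unsigned long t;
	int ret;

	ret = vmbus_sendpacket(dev->channel, msg, len,
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	/* The host's reply is signalled from the channel callback. */
	t = wait_for_completion_timeout(&dm->host_event, 5*HZ);
	if (t == 0)
		return -ETIMEDOUT;

	/* The callback records the negotiation outcome in dm->state. */
	return dm->state == DM_INIT_ERROR ? -ETIMEDOUT : 0;
}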
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4800d4c2a7b7..32f238f3caea 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1208,6 +1208,14 @@ config SENSORS_TWL4030_MADC
This driver can also be built as a module. If so it will be called
twl4030-madc-hwmon.
+config SENSORS_VEXPRESS
+ tristate "Versatile Express"
+ depends on VEXPRESS_CONFIG
+ help
+ This driver provides support for hardware sensors available on
+ ARM Ltd's Versatile Express platform. It can provide a wide
+ range of information such as temperature, power and energy.
+
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a930f0997d25..5da287443f6c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 78b81793ddd9..6119ff8e8c87 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -478,7 +478,7 @@ static int abituguru_write(struct abituguru_data *data,
* alarm for sensor type X and then enabling the sensor as sensor type
* X, if we then get an alarm it is a sensor of type X.
*/
-static int __devinit
+static int
abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
u8 sensor_addr)
{
@@ -635,7 +635,7 @@ abituguru_detect_bank1_sensor_type_exit:
* read/write test would be feasible because of the reaction above, I've
* however opted to stay on the safe side.
*/
-static void __devinit
+static void
abituguru_detect_no_bank2_sensors(struct abituguru_data *data)
{
int i;
@@ -691,7 +691,7 @@ abituguru_detect_no_bank2_sensors(struct abituguru_data *data)
(int)data->bank2_sensors);
}
-static void __devinit
+static void
abituguru_detect_no_pwms(struct abituguru_data *data)
{
int i, j;
@@ -1264,7 +1264,7 @@ static struct sensor_device_attribute_2 abituguru_sysfs_attr[] = {
SENSOR_ATTR_2(name, 0444, show_name, NULL, 0, 0),
};
-static int __devinit abituguru_probe(struct platform_device *pdev)
+static int abituguru_probe(struct platform_device *pdev)
{
struct abituguru_data *data;
int i, j, used, sysfs_names_free, sysfs_attr_i, res = -ENODEV;
@@ -1434,7 +1434,7 @@ abituguru_probe_error:
return res;
}
-static int __devexit abituguru_remove(struct platform_device *pdev)
+static int abituguru_remove(struct platform_device *pdev)
{
int i;
struct abituguru_data *data = platform_get_drvdata(pdev);
@@ -1545,7 +1545,7 @@ static struct platform_driver abituguru_driver = {
.pm = ABIT_UGURU_PM,
},
.probe = abituguru_probe,
- .remove = __devexit_p(abituguru_remove),
+ .remove = abituguru_remove,
};
static int __init abituguru_detect(void)
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index b174b8b2b4df..205327d33c4d 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -966,7 +966,7 @@ static struct sensor_device_attribute_2 abituguru3_sysfs_attr[] = {
SENSOR_ATTR_2(name, 0444, show_name, NULL, 0, 0),
};
-static int __devinit abituguru3_probe(struct platform_device *pdev)
+static int abituguru3_probe(struct platform_device *pdev)
{
const int no_sysfs_attr[3] = { 10, 8, 7 };
int sensor_index[3] = { 0, 1, 1 };
@@ -1072,7 +1072,7 @@ abituguru3_probe_error:
return res;
}
-static int __devexit abituguru3_remove(struct platform_device *pdev)
+static int abituguru3_remove(struct platform_device *pdev)
{
int i;
struct abituguru3_data *data = platform_get_drvdata(pdev);
@@ -1171,7 +1171,7 @@ static struct platform_driver abituguru3_driver = {
.pm = ABIT_UGURU3_PM
},
.probe = abituguru3_probe,
- .remove = __devexit_p(abituguru3_remove),
+ .remove = abituguru3_remove,
};
static int __init abituguru3_dmi_detect(void)
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 37c01e72d699..a57584d28a40 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -107,7 +107,7 @@ static const struct attribute_group ad7314_group = {
.attrs = ad7314_attributes,
};
-static int __devinit ad7314_probe(struct spi_device *spi_dev)
+static int ad7314_probe(struct spi_device *spi_dev)
{
int ret;
struct ad7314_data *chip;
@@ -135,7 +135,7 @@ error_remove_group:
return ret;
}
-static int __devexit ad7314_remove(struct spi_device *spi_dev)
+static int ad7314_remove(struct spi_device *spi_dev)
{
struct ad7314_data *chip = dev_get_drvdata(&spi_dev->dev);
@@ -159,7 +159,7 @@ static struct spi_driver ad7314_driver = {
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
- .remove = __devexit_p(ad7314_remove),
+ .remove = ad7314_remove,
.id_table = ad7314_id,
};
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index b420fb3f3a71..f3a5d4764eb9 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -226,7 +226,7 @@ exit_remove:
return err;
}
-static int __devexit ad7414_remove(struct i2c_client *client)
+static int ad7414_remove(struct i2c_client *client)
{
struct ad7414_data *data = i2c_get_clientdata(client);
@@ -246,7 +246,7 @@ static struct i2c_driver ad7414_driver = {
.name = "ad7414",
},
.probe = ad7414_probe,
- .remove = __devexit_p(ad7414_remove),
+ .remove = ad7414_remove,
.id_table = ad7414_id,
};
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index f4c5867170d6..751b1f0264a4 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -161,7 +161,7 @@ static struct sensor_device_attribute ad_input[] = {
/*----------------------------------------------------------------------*/
-static int __devinit adcxx_probe(struct spi_device *spi)
+static int adcxx_probe(struct spi_device *spi)
{
int channels = spi_get_device_id(spi)->driver_data;
struct adcxx *adc;
@@ -208,7 +208,7 @@ out_err:
return status;
}
-static int __devexit adcxx_remove(struct spi_device *spi)
+static int adcxx_remove(struct spi_device *spi)
{
struct adcxx *adc = spi_get_drvdata(spi);
int i;
@@ -240,7 +240,7 @@ static struct spi_driver adcxx_driver = {
},
.id_table = adcxx_ids,
.probe = adcxx_probe,
- .remove = __devexit_p(adcxx_remove),
+ .remove = adcxx_remove,
};
module_spi_driver(adcxx_driver);
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 1b53aa42b6db..a79875986f91 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -173,7 +173,7 @@ static const struct attribute_group ads7871_group = {
.attrs = ads7871_attributes,
};
-static int __devinit ads7871_probe(struct spi_device *spi)
+static int ads7871_probe(struct spi_device *spi)
{
int ret, err;
uint8_t val;
@@ -225,7 +225,7 @@ error_remove:
return err;
}
-static int __devexit ads7871_remove(struct spi_device *spi)
+static int ads7871_remove(struct spi_device *spi)
{
struct ads7871_data *pdata = spi_get_drvdata(spi);
@@ -241,7 +241,7 @@ static struct spi_driver ads7871_driver = {
},
.probe = ads7871_probe,
- .remove = __devexit_p(ads7871_remove),
+ .remove = ads7871_remove,
};
module_spi_driver(ads7871_driver);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 517f1856c706..34ff03abb50b 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -276,7 +276,7 @@ static int adt7411_detect(struct i2c_client *client,
return 0;
}
-static int __devinit adt7411_probe(struct i2c_client *client,
+static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adt7411_data *data;
@@ -317,7 +317,7 @@ static int __devinit adt7411_probe(struct i2c_client *client,
return ret;
}
-static int __devexit adt7411_remove(struct i2c_client *client)
+static int adt7411_remove(struct i2c_client *client)
{
struct adt7411_data *data = i2c_get_clientdata(client);
@@ -337,7 +337,7 @@ static struct i2c_driver adt7411_driver = {
.name = "adt7411",
},
.probe = adt7411_probe,
- .remove = __devexit_p(adt7411_remove),
+ .remove = adt7411_remove,
.id_table = adt7411_id,
.detect = adt7411_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 24426a785ad5..d64923d63537 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -356,7 +356,7 @@ static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
return adjust_tjmax(c, id, dev);
}
-static int __devinit create_name_attr(struct platform_data *pdata,
+static int create_name_attr(struct platform_data *pdata,
struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
@@ -551,7 +551,7 @@ static void coretemp_remove_core(struct platform_data *pdata,
pdata->core_data[indx] = NULL;
}
-static int __devinit coretemp_probe(struct platform_device *pdev)
+static int coretemp_probe(struct platform_device *pdev)
{
struct platform_data *pdata;
int err;
@@ -584,7 +584,7 @@ exit_free:
return err;
}
-static int __devexit coretemp_remove(struct platform_device *pdev)
+static int coretemp_remove(struct platform_device *pdev)
{
struct platform_data *pdata = platform_get_drvdata(pdev);
int i;
@@ -606,7 +606,7 @@ static struct platform_driver coretemp_driver = {
.name = DRVNAME,
},
.probe = coretemp_probe,
- .remove = __devexit_p(coretemp_remove),
+ .remove = coretemp_remove,
};
static int __cpuinit coretemp_device_add(unsigned int cpu)
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 19704948801c..ab4452c5a98c 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -270,7 +270,7 @@ static struct attribute *da9052_attr[] = {
static const struct attribute_group da9052_attr_group = {.attrs = da9052_attr};
-static int __devinit da9052_hwmon_probe(struct platform_device *pdev)
+static int da9052_hwmon_probe(struct platform_device *pdev)
{
struct da9052_hwmon *hwmon;
int ret;
@@ -303,7 +303,7 @@ err_mem:
return ret;
}
-static int __devexit da9052_hwmon_remove(struct platform_device *pdev)
+static int da9052_hwmon_remove(struct platform_device *pdev)
{
struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
@@ -315,7 +315,7 @@ static int __devexit da9052_hwmon_remove(struct platform_device *pdev)
static struct platform_driver da9052_hwmon_driver = {
.probe = da9052_hwmon_probe,
- .remove = __devexit_p(da9052_hwmon_remove),
+ .remove = da9052_hwmon_remove,
.driver = {
.name = "da9052-hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index fe0eeec0b750..7430f70ae452 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2630,7 +2630,7 @@ exit:
return err;
}
-static int __devinit dme1737_isa_probe(struct platform_device *pdev)
+static int dme1737_isa_probe(struct platform_device *pdev)
{
u8 company, device;
struct resource *res;
@@ -2718,7 +2718,7 @@ exit_remove_files:
return err;
}
-static int __devexit dme1737_isa_remove(struct platform_device *pdev)
+static int dme1737_isa_remove(struct platform_device *pdev)
{
struct dme1737_data *data = platform_get_drvdata(pdev);
@@ -2734,7 +2734,7 @@ static struct platform_driver dme1737_isa_driver = {
.name = "dme1737",
},
.probe = dme1737_isa_probe,
- .remove = __devexit_p(dme1737_isa_remove),
+ .remove = dme1737_isa_remove,
};
/* ---------------------------------------------------------------------
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index a98c917b5888..789bd4fb329b 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -187,7 +187,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
* Sysfs callback functions
*/
-static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
+static const s16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
char *buf)
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 4dd7723d257f..a9816979c5de 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1343,7 +1343,7 @@ static struct attribute *f71805f_attr_pwm[] = {
* Device registration and initialization
*/
-static void __devinit f71805f_init_device(struct f71805f_data *data)
+static void f71805f_init_device(struct f71805f_data *data)
{
u8 reg;
int i;
@@ -1374,7 +1374,7 @@ static void __devinit f71805f_init_device(struct f71805f_data *data)
}
}
-static int __devinit f71805f_probe(struct platform_device *pdev)
+static int f71805f_probe(struct platform_device *pdev)
{
struct f71805f_sio_data *sio_data = pdev->dev.platform_data;
struct f71805f_data *data;
@@ -1490,7 +1490,7 @@ exit_remove_files:
return err;
}
-static int __devexit f71805f_remove(struct platform_device *pdev)
+static int f71805f_remove(struct platform_device *pdev)
{
struct f71805f_data *data = platform_get_drvdata(pdev);
int i;
@@ -1510,7 +1510,7 @@ static struct platform_driver f71805f_driver = {
.name = DRVNAME,
},
.probe = f71805f_probe,
- .remove = __devexit_p(f71805f_remove),
+ .remove = f71805f_remove,
};
static int __init f71805f_device_add(unsigned short address,
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 50e4ce2d22d8..bb7275cc47f3 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -364,7 +364,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
char *buf);
-static int __devinit f71882fg_probe(struct platform_device *pdev);
+static int f71882fg_probe(struct platform_device *pdev);
static int f71882fg_remove(struct platform_device *pdev);
static struct platform_driver f71882fg_driver = {
@@ -2145,7 +2145,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%s\n", f71882fg_names[data->type]);
}
-static int __devinit f71882fg_create_sysfs_files(struct platform_device *pdev,
+static int f71882fg_create_sysfs_files(struct platform_device *pdev,
struct sensor_device_attribute_2 *attr, int count)
{
int err, i;
@@ -2167,7 +2167,7 @@ static void f71882fg_remove_sysfs_files(struct platform_device *pdev,
device_remove_file(&pdev->dev, &attr[i].dev_attr);
}
-static int __devinit f71882fg_create_fan_sysfs_files(
+static int f71882fg_create_fan_sysfs_files(
struct platform_device *pdev, int idx)
{
struct f71882fg_data *data = platform_get_drvdata(pdev);
@@ -2265,7 +2265,7 @@ static int __devinit f71882fg_create_fan_sysfs_files(
return err;
}
-static int __devinit f71882fg_probe(struct platform_device *pdev)
+static int f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 34ab2a8f9654..b757088aeddb 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -114,7 +114,7 @@ static const struct attribute_group fam15h_power_attr_group = {
.attrs = fam15h_power_attrs,
};
-static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
+static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
{
u32 val;
@@ -171,7 +171,7 @@ static int fam15h_power_resume(struct pci_dev *pdev)
#define fam15h_power_resume NULL
#endif
-static void __devinit fam15h_power_init_data(struct pci_dev *f4,
+static void fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
u32 val;
@@ -197,7 +197,7 @@ static void __devinit fam15h_power_init_data(struct pci_dev *f4,
data->processor_pwr_watts = (tmp * 15625) >> 10;
}
-static int __devinit fam15h_power_probe(struct pci_dev *pdev,
+static int fam15h_power_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct fam15h_power_data *data;
@@ -238,7 +238,7 @@ exit_remove_group:
return err;
}
-static void __devexit fam15h_power_remove(struct pci_dev *pdev)
+static void fam15h_power_remove(struct pci_dev *pdev)
{
struct device *dev;
struct fam15h_power_data *data;
@@ -260,7 +260,7 @@ static struct pci_driver fam15h_power_driver = {
.name = "fam15h_power",
.id_table = fam15h_power_id_table,
.probe = fam15h_power_probe,
- .remove = __devexit_p(fam15h_power_remove),
+ .remove = fam15h_power_remove,
.resume = fam15h_power_resume,
};
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 1381a2e3bbd4..4e04c1228e51 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -499,13 +499,13 @@ static int gpio_fan_get_of_pdata(struct device *dev,
return 0;
}
-static struct of_device_id of_gpio_fan_match[] __devinitdata = {
+static struct of_device_id of_gpio_fan_match[] = {
{ .compatible = "gpio-fan", },
{},
};
#endif /* CONFIG_OF_GPIO */
-static int __devinit gpio_fan_probe(struct platform_device *pdev)
+static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
@@ -581,7 +581,7 @@ err_free_alarm:
return err;
}
-static int __devexit gpio_fan_remove(struct platform_device *pdev)
+static int gpio_fan_remove(struct platform_device *pdev)
{
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
@@ -626,7 +626,7 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
- .remove = __devexit_p(gpio_fan_remove),
+ .remove = gpio_fan_remove,
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 9a675efaa78d..2dc37c7c6947 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -220,7 +220,7 @@ static const struct attribute_group hih6130_attr_group = {
* device's name.
* Returns 0 on success.
*/
-static int __devinit hih6130_probe(struct i2c_client *client,
+static int hih6130_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct hih6130 *hih6130;
@@ -263,7 +263,7 @@ fail_remove_sysfs:
* hih6130_remove() - remove device
* @client: I2C client device
*/
-static int __devexit hih6130_remove(struct i2c_client *client)
+static int hih6130_remove(struct i2c_client *client)
{
struct hih6130 *hih6130 = i2c_get_clientdata(client);
@@ -283,7 +283,7 @@ MODULE_DEVICE_TABLE(i2c, hih6130_id);
static struct i2c_driver hih6130_driver = {
.driver.name = "hih6130",
.probe = hih6130_probe,
- .remove = __devexit_p(hih6130_remove),
+ .remove = hih6130_remove,
.id_table = hih6130_id,
};
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 9f26400713f0..89cfd64b3373 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -115,6 +115,12 @@ int vid_from_reg(int val, u8 vrm)
return (val < 32) ? 1550 - 25 * val
: 775 - (25 * (val - 31)) / 2;
+ case 26: /* AMD family 10h to 15h, serial VID */
+ val &= 0x7f;
+ if (val >= 0x7c)
+ return 0;
+ return DIV_ROUND_CLOSEST(15500 - 125 * val, 10);
+
case 91: /* VRM 9.1 */
case 90: /* VRM 9.0 */
val &= 0x1f;
@@ -195,6 +201,10 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0xF, 0x40, 0x7F, ANY, 24}, /* NPT family 0Fh */
{X86_VENDOR_AMD, 0xF, 0x80, ANY, ANY, 25}, /* future fam. 0Fh */
{X86_VENDOR_AMD, 0x10, 0x0, ANY, ANY, 25}, /* NPT family 10h */
+ {X86_VENDOR_AMD, 0x11, 0x0, ANY, ANY, 26}, /* family 11h */
+ {X86_VENDOR_AMD, 0x12, 0x0, ANY, ANY, 26}, /* family 12h */
+ {X86_VENDOR_AMD, 0x14, 0x0, ANY, ANY, 26}, /* family 14h */
+ {X86_VENDOR_AMD, 0x15, 0x0, ANY, ANY, 26}, /* family 15h */
{X86_VENDOR_INTEL, 0x6, 0x0, 0x6, ANY, 82}, /* Pentium Pro,
* Pentium II, Xeon,
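The new VRM code 26 decodes AMD's 7-bit serial VID: codes 0x7c and above mean the output is off, and each step below that is worth 12.5 mV down from 1.55 V. A stand-alone check of the formula (user-space, illustrative only; the kernel path uses DIV_ROUND_CLOSEST() as shown above):

#include <stdio.h>

/* Same arithmetic as the vrm == 26 case above, in plain C. */
static int svid_to_mv(int val)
{
	val &= 0x7f;
	if (val >= 0x7c)		/* 0x7c-0x7f encode "off" */
		return 0;
	return (15500 - 125 * val + 5) / 10;	/* tenths of mV -> mV, rounded */
}

int main(void)
{
	printf("%d mV\n", svid_to_mv(0x00));	/* 1550 mV */
	printf("%d mV\n", svid_to_mv(0x20));	/* 1150 mV */
	printf("%d mV\n", svid_to_mv(0x7f));	/* 0 mV (off) */
	return 0;
}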
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c3c471ca202f..646314f7c839 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -84,19 +84,21 @@ static void __init hwmon_pci_quirks(void)
/* Open access to 0x295-0x296 on MSI MS-7031 */
sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL);
- if (sb &&
- (sb->subsystem_vendor == 0x1462 && /* MSI */
- sb->subsystem_device == 0x0031)) { /* MS-7031 */
-
- pci_read_config_byte(sb, 0x48, &enable);
- pci_read_config_word(sb, 0x64, &base);
-
- if (base == 0 && !(enable & BIT(2))) {
- dev_info(&sb->dev,
- "Opening wide generic port at 0x295\n");
- pci_write_config_word(sb, 0x64, 0x295);
- pci_write_config_byte(sb, 0x48, enable | BIT(2));
+ if (sb) {
+ if (sb->subsystem_vendor == 0x1462 && /* MSI */
+ sb->subsystem_device == 0x0031) { /* MS-7031 */
+ pci_read_config_byte(sb, 0x48, &enable);
+ pci_read_config_word(sb, 0x64, &base);
+
+ if (base == 0 && !(enable & BIT(2))) {
+ dev_info(&sb->dev,
+ "Opening wide generic port at 0x295\n");
+ pci_write_config_word(sb, 0x64, 0x295);
+ pci_write_config_byte(sb, 0x48,
+ enable | BIT(2));
+ }
}
+ pci_dev_put(sb);
}
#endif
}
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 46141abaafba..b87c2ccee06b 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -260,7 +260,7 @@ static ssize_t show_label(struct device *dev,
attr->index & DIMM_MASK);
}
-static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
+static int i5k_amb_hwmon_init(struct platform_device *pdev)
{
int i, j, k, d = 0;
u16 c;
@@ -406,7 +406,7 @@ exit_remove:
return res;
}
-static int __devinit i5k_amb_add(void)
+static int i5k_amb_add(void)
{
int res = -ENODEV;
@@ -425,7 +425,7 @@ err:
return res;
}
-static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
+static int i5k_find_amb_registers(struct i5k_amb_data *data,
unsigned long devid)
{
struct pci_dev *pcidev;
@@ -459,7 +459,7 @@ out:
return res;
}
-static int __devinit i5k_channel_probe(u16 *amb_present, unsigned long dev_id)
+static int i5k_channel_probe(u16 *amb_present, unsigned long dev_id)
{
struct pci_dev *pcidev;
u16 val16;
@@ -488,14 +488,14 @@ out:
static struct {
unsigned long err;
unsigned long fbd0;
-} chipset_ids[] __devinitdata = {
+} chipset_ids[] = {
{ PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
{ PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
{ 0, 0 }
};
#ifdef MODULE
-static struct pci_device_id i5k_amb_ids[] __devinitdata = {
+static struct pci_device_id i5k_amb_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
{ 0, }
@@ -503,7 +503,7 @@ static struct pci_device_id i5k_amb_ids[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, i5k_amb_ids);
#endif
-static int __devinit i5k_amb_probe(struct platform_device *pdev)
+static int i5k_amb_probe(struct platform_device *pdev)
{
struct i5k_amb_data *data;
struct resource *reso;
@@ -564,7 +564,7 @@ err:
return res;
}
-static int __devexit i5k_amb_remove(struct platform_device *pdev)
+static int i5k_amb_remove(struct platform_device *pdev)
{
int i;
struct i5k_amb_data *data = platform_get_drvdata(pdev);
@@ -587,7 +587,7 @@ static struct platform_driver i5k_amb_driver = {
.name = DRVNAME,
},
.probe = i5k_amb_probe,
- .remove = __devexit_p(i5k_amb_remove),
+ .remove = i5k_amb_remove,
};
static int __init i5k_amb_init(void)
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index f1de3979181f..117d66fcded6 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -203,6 +203,8 @@ static const u8 IT87_REG_FAN[] = { 0x0d, 0x0e, 0x0f, 0x80, 0x82 };
static const u8 IT87_REG_FAN_MIN[] = { 0x10, 0x11, 0x12, 0x84, 0x86 };
static const u8 IT87_REG_FANX[] = { 0x18, 0x19, 0x1a, 0x81, 0x83 };
static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
+static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
+
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
#define IT87_REG_PWM(nr) (0x15 + (nr))
@@ -226,6 +228,83 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+struct it87_devices {
+ const char *name;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
+};
+
+#define FEAT_12MV_ADC (1 << 0)
+#define FEAT_NEWER_AUTOPWM (1 << 1)
+#define FEAT_OLD_AUTOPWM (1 << 2)
+#define FEAT_16BIT_FANS (1 << 3)
+#define FEAT_TEMP_OFFSET (1 << 4)
+#define FEAT_TEMP_PECI (1 << 5)
+#define FEAT_TEMP_OLD_PECI (1 << 6)
+
+static const struct it87_devices it87_devices[] = {
+ [it87] = {
+ .name = "it87",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8712] = {
+ .name = "it8712",
+ .features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
+ },
+ [it8716] = {
+ .name = "it8716",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET,
+ },
+ [it8718] = {
+ .name = "it8718",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8720] = {
+ .name = "it8720",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8721] = {
+ .name = "it8721",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI,
+ .peci_mask = 0x05,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
+ [it8728] = {
+ .name = "it8728",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ .peci_mask = 0x07,
+ },
+ [it8782] = {
+ .name = "it8782",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+ [it8783] = {
+ .name = "it8783",
+ .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
+ | FEAT_TEMP_OLD_PECI,
+ .old_peci_mask = 0x4,
+ },
+};
+
+#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
+#define has_12mv_adc(data) ((data)->features & FEAT_12MV_ADC)
+#define has_newer_autopwm(data) ((data)->features & FEAT_NEWER_AUTOPWM)
+#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
+#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
+#define has_temp_peci(data, nr) (((data)->features & FEAT_TEMP_PECI) && \
+ ((data)->peci_mask & (1 << nr)))
+#define has_temp_old_peci(data, nr) \
+ (((data)->features & FEAT_TEMP_OLD_PECI) && \
+ ((data)->old_peci_mask & (1 << nr)))
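/*
 * Illustrative sketch, not part of the patch: with the feature table and
 * the has_*() macros above, chip-specific behaviour becomes a table lookup
 * at probe time plus simple bit tests, replacing the long type/revision
 * comparisons removed further down. A hypothetical probe fragment:
 *
 *	data->features = it87_devices[sio_data->type].features;
 *	data->peci_mask = it87_devices[sio_data->type].peci_mask;
 *	data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
 *
 *	if (has_temp_offset(data))
 *		// create the tempX_offset attributes
 *	if (has_temp_peci(data, nr))
 *		// report sensor type 6 (Intel PECI) for channel nr
 */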
struct it87_sio_data {
enum chips type;
@@ -249,7 +328,9 @@ struct it87_sio_data {
struct it87_data {
struct device *hwmon_dev;
enum chips type;
- u8 revision;
+ u16 features;
+ u8 peci_mask;
+ u8 old_peci_mask;
unsigned short addr;
const char *name;
@@ -258,17 +339,13 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[9]; /* Register value */
- u8 in_max[8]; /* Register value */
- u8 in_min[8]; /* Register value */
+ u8 in[9][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
- u16 fan[5]; /* Register values, possibly combined */
- u16 fan_min[5]; /* Register values, possibly combined */
+ u16 fan[5][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
- s8 temp[3]; /* Register value */
- s8 temp_high[3]; /* Register value */
- s8 temp_low[3]; /* Register value */
- u8 sensor; /* Register value */
+ s8 temp[3][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+ u8 sensor; /* Register value (IT87_REG_TEMP_ENABLE) */
+ u8 extra; /* Register value (IT87_REG_TEMP_EXTRA) */
u8 fan_div[3]; /* Register encoding, shifted right */
u8 vid; /* Register encoding, combined */
u8 vrm;
@@ -296,26 +373,6 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
-static inline int has_12mv_adc(const struct it87_data *data)
-{
- /*
- * IT8721F and later have a 12 mV ADC, also with internal scaling
- * on selected inputs.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
-static inline int has_newer_autopwm(const struct it87_data *data)
-{
- /*
- * IT8721F and later have separate registers for the temperature
- * mapping and the manual duty cycle.
- */
- return data->type == it8721
- || data->type == it8728;
-}
-
static int adc_lsb(const struct it87_data *data, int nr)
{
int lsb = has_12mv_adc(data) ? 12 : 16;
@@ -398,37 +455,8 @@ static const unsigned int pwm_freq[8] = {
750000 / 128,
};
-static inline int has_16bit_fans(const struct it87_data *data)
-{
- /*
- * IT8705F Datasheet 0.4.1, 3h == Version G.
- * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
- * These are the first revisions with 16-bit tachometer support.
- */
- return (data->type == it87 && data->revision >= 0x03)
- || (data->type == it8712 && data->revision >= 0x08)
- || data->type == it8716
- || data->type == it8718
- || data->type == it8720
- || data->type == it8721
- || data->type == it8728
- || data->type == it8782
- || data->type == it8783;
-}
-
-static inline int has_old_autopwm(const struct it87_data *data)
-{
- /*
- * The old automatic fan speed control interface is implemented
- * by IT8705F chips up to revision F and IT8712F chips up to
- * revision G.
- */
- return (data->type == it87 && data->revision < 0x03)
- || (data->type == it8712 && data->revision < 0x08);
-}
-
static int it87_probe(struct platform_device *pdev);
-static int __devexit it87_remove(struct platform_device *pdev);
+static int it87_remove(struct platform_device *pdev);
static int it87_read_value(struct it87_data *data, u8 reg);
static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
@@ -443,63 +471,26 @@ static struct platform_driver it87_driver = {
.name = DRVNAME,
},
.probe = it87_probe,
- .remove = __devexit_p(it87_remove),
+ .remove = it87_remove,
};
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr]));
-}
-
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_min[nr]));
+ return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in_max[nr]));
-}
-
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
- struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val;
-
- if (kstrtoul(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->in_min[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MIN(nr),
- data->in_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -508,140 +499,167 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->in_max[nr] = in_to_reg(data, nr, val);
- it87_write_value(data, IT87_REG_VIN_MAX(nr),
- data->in_max[nr]);
+ data->in[nr][index] = in_to_reg(data, nr, val);
+ it87_write_value(data,
+ index == 1 ? IT87_REG_VIN_MIN(nr)
+ : IT87_REG_VIN_MAX(nr),
+ data->in[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset);
-
-#define limit_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-limit_in_offset(0);
-show_in_offset(1);
-limit_in_offset(1);
-show_in_offset(2);
-limit_in_offset(2);
-show_in_offset(3);
-limit_in_offset(3);
-show_in_offset(4);
-limit_in_offset(4);
-show_in_offset(5);
-limit_in_offset(5);
-show_in_offset(6);
-limit_in_offset(6);
-show_in_offset(7);
-limit_in_offset(7);
-show_in_offset(8);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 2, 2);
+
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 1);
+static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 3, 2);
+
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 1);
+static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 4, 2);
+
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 1);
+static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 5, 2);
+
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 1);
+static SENSOR_DEVICE_ATTR_2(in6_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 6, 2);
+
+static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 7, 0);
+static SENSOR_DEVICE_ATTR_2(in7_min, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 1);
+static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
+ 7, 2);
+
+static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
/* 3 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
-}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_high[nr]));
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_low[nr]));
-}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
+ u8 reg, regval;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->temp_high[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
+ switch (index) {
+ default:
+ case 1:
+ reg = IT87_REG_TEMP_LOW(nr);
+ break;
+ case 2:
+ reg = IT87_REG_TEMP_HIGH(nr);
+ break;
+ case 3:
+ regval = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ if (!(regval & 0x80)) {
+ regval |= 0x80;
+ it87_write_value(data, IT87_REG_BEEP_ENABLE, regval);
+ }
+ data->valid = 0;
+ reg = IT87_REG_TEMP_OFFSET[nr];
+ break;
+ }
- mutex_lock(&data->update_lock);
- data->temp_low[nr] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
+ data->temp[nr][index] = TEMP_TO_REG(val);
+ it87_write_value(data, reg, data->temp[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1);
-
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
-
-static ssize_t show_sensor(struct device *dev, struct device_attribute *attr,
- char *buf)
+
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, 2);
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
+ set_temp, 2, 3);
+
+static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
u8 reg = data->sensor; /* In case value is updated while used */
+ u8 extra = data->extra;
+ if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
+ || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+ return sprintf(buf, "6\n"); /* Intel PECI */
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
if (reg & (8 << nr))
return sprintf(buf, "4\n"); /* thermistor */
return sprintf(buf, "0\n"); /* disabled */
}
-static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
- u8 reg;
+ u8 reg, extra;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
@@ -649,33 +667,45 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
reg &= ~(1 << nr);
reg &= ~(8 << nr);
+ if (has_temp_peci(data, nr) && (reg >> 6 == nr + 1 || val == 6))
+ reg &= 0x3f;
+ extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+ if (has_temp_old_peci(data, nr) && ((extra & 0x80) || val == 6))
+ extra &= 0x7f;
if (val == 2) { /* backwards compatibility */
- dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
- "instead\n");
+ dev_warn(dev,
+ "Sensor type 2 is deprecated, please use 4 instead\n");
val = 4;
}
- /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
+ /* 3 = thermal diode; 4 = thermistor; 6 = Intel PECI; 0 = disabled */
if (val == 3)
reg |= 1 << nr;
else if (val == 4)
reg |= 8 << nr;
+ else if (has_temp_peci(data, nr) && val == 6)
+ reg |= (nr + 1) << 6;
+ else if (has_temp_old_peci(data, nr) && val == 6)
+ extra |= 0x80;
else if (val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
data->sensor = reg;
+ data->extra = extra;
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
+ if (has_temp_old_peci(data, nr))
+ it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
-#define show_sensor_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_sensor, set_sensor, offset - 1);
-show_sensor_offset(1);
-show_sensor_offset(2);
-show_sensor_offset(3);
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 0);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 1);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
+ set_temp_type, 2);
/* 3 Fans */
@@ -692,25 +722,21 @@ static int pwm_mode(const struct it87_data *data, int nr)
}
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
+ int speed;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
- DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
- DIV_FROM_REG(data->fan_div[nr])));
+ speed = has_16bit_fans(data) ?
+ FAN16_FROM_REG(data->fan[nr][index]) :
+ FAN_FROM_REG(data->fan[nr][index],
+ DIV_FROM_REG(data->fan_div[nr]));
+ return sprintf(buf, "%d\n", speed);
}
+
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -747,11 +773,13 @@ static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", pwm_freq[index]);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ int nr = sattr->nr;
+ int index = sattr->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
@@ -761,24 +789,36 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = it87_read_value(data, IT87_REG_FAN_DIV);
- switch (nr) {
- case 0:
- data->fan_div[nr] = reg & 0x07;
- break;
- case 1:
- data->fan_div[nr] = (reg >> 3) & 0x07;
- break;
- case 2:
- data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
- break;
+
+ if (has_16bit_fans(data)) {
+ data->fan[nr][index] = FAN16_TO_REG(val);
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index] & 0xff);
+ it87_write_value(data, IT87_REG_FANX_MIN[nr],
+ data->fan[nr][index] >> 8);
+ } else {
+ reg = it87_read_value(data, IT87_REG_FAN_DIV);
+ switch (nr) {
+ case 0:
+ data->fan_div[nr] = reg & 0x07;
+ break;
+ case 1:
+ data->fan_div[nr] = (reg >> 3) & 0x07;
+ break;
+ case 2:
+ data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
+ break;
+ }
+ data->fan[nr][index] =
+ FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr],
+ data->fan[nr][index]);
}
- data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -797,7 +837,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
old = it87_read_value(data, IT87_REG_FAN_DIV);
/* Save fan min limit */
- min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
+ min = FAN_FROM_REG(data->fan[nr][1], DIV_FROM_REG(data->fan_div[nr]));
switch (nr) {
case 0:
@@ -818,8 +858,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_FAN_DIV, val);
/* Restore fan min limit */
- data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan_min[nr]);
+ data->fan[nr][1] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
+ it87_write_value(data, IT87_REG_FAN_MIN[nr], data->fan[nr][1]);
mutex_unlock(&data->update_lock);
return count;
@@ -843,8 +883,8 @@ static int check_trip_points(struct device *dev, int nr)
}
if (err) {
- dev_err(dev, "Inconsistent trip points, not switching to "
- "automatic mode\n");
+ dev_err(dev,
+ "Inconsistent trip points, not switching to automatic mode\n");
dev_err(dev, "Adjust the trip points and try again\n");
}
return err;
@@ -1092,118 +1132,106 @@ static ssize_t set_auto_temp(struct device *dev,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-
-#define show_pwm_offset(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static DEVICE_ATTR(pwm##offset##_freq, \
- (offset == 1 ? S_IRUGO | S_IWUSR : S_IRUGO), \
- show_pwm_freq, (offset == 1 ? set_pwm_freq : NULL)); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels_temp, \
- S_IRUGO | S_IWUSR, show_pwm_temp_map, set_pwm_temp_map, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_pwm, \
- S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_pwm, \
- S_IRUGO, show_auto_pwm, NULL, offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 1); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp_hyst, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 0); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 2); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 3); \
-static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_temp, \
- S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
- offset - 1, 4);
-
-show_pwm_offset(1);
-show_pwm_offset(2);
-show_pwm_offset(3);
-
-/* A different set of callbacks for 16-bit fans */
-static ssize_t show_fan16(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan[nr]));
-}
-
-static ssize_t show_fan16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", FAN16_FROM_REG(data->fan_min[nr]));
-}
-
-static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
- struct it87_data *data = dev_get_drvdata(dev);
- long val;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->fan_min[nr] = FAN16_TO_REG(val);
- it87_write_value(data, IT87_REG_FAN_MIN[nr],
- data->fan_min[nr] & 0xff);
- it87_write_value(data, IT87_REG_FANX_MIN[nr],
- data->fan_min[nr] >> 8);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-/*
- * We want to use the same sysfs file names as 8-bit fans, but we need
- * different variable names, so we have to use SENSOR_ATTR instead of
- * SENSOR_DEVICE_ATTR.
- */
-#define show_fan16_offset(offset) \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_input16 \
- = SENSOR_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan16, NULL, offset - 1); \
-static struct sensor_device_attribute sensor_dev_attr_fan##offset##_min16 \
- = SENSOR_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan16_min, set_fan16_min, offset - 1)
-
-show_fan16_offset(1);
-show_fan16_offset(2);
-show_fan16_offset(3);
-show_fan16_offset(4);
-show_fan16_offset(5);
+static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0);
+static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 0, 1);
+static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 0);
+
+static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 1, 1);
+static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 2, 0);
+static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 2, 1);
+static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO | S_IWUSR, show_fan_div,
+ set_fan_div, 2);
+
+static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 3, 1);
+
+static SENSOR_DEVICE_ATTR_2(fan5_input, S_IRUGO, show_fan, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(fan5_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
+ 4, 1);
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
+static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 0, 4);
+
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
+static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 3);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 1, 4);
+
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
+static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+ show_pwm_temp_map, set_pwm_temp_map, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO,
+ show_auto_pwm, NULL, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 4);
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
@@ -1471,6 +1499,12 @@ static const struct attribute_group it87_group_temp[3] = {
{ .attrs = it87_attributes_temp[2] },
};
+static struct attribute *it87_attributes_temp_offset[] = {
+ &sensor_dev_attr_temp1_offset.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
+};
+
static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
@@ -1500,73 +1534,47 @@ static struct attribute *it87_attributes_temp_beep[] = {
&sensor_dev_attr_temp3_beep.dev_attr.attr,
};
-static struct attribute *it87_attributes_fan16[5][3+1] = { {
- &sensor_dev_attr_fan1_input16.dev_attr.attr,
- &sensor_dev_attr_fan1_min16.dev_attr.attr,
+static struct attribute *it87_attributes_fan[5][3+1] = { {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan2_input16.dev_attr.attr,
- &sensor_dev_attr_fan2_min16.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan3_input16.dev_attr.attr,
- &sensor_dev_attr_fan3_min16.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan4_input16.dev_attr.attr,
- &sensor_dev_attr_fan4_min16.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
NULL
}, {
- &sensor_dev_attr_fan5_input16.dev_attr.attr,
- &sensor_dev_attr_fan5_min16.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
NULL
} };
-static const struct attribute_group it87_group_fan16[5] = {
- { .attrs = it87_attributes_fan16[0] },
- { .attrs = it87_attributes_fan16[1] },
- { .attrs = it87_attributes_fan16[2] },
- { .attrs = it87_attributes_fan16[3] },
- { .attrs = it87_attributes_fan16[4] },
+static const struct attribute_group it87_group_fan[5] = {
+ { .attrs = it87_attributes_fan[0] },
+ { .attrs = it87_attributes_fan[1] },
+ { .attrs = it87_attributes_fan[2] },
+ { .attrs = it87_attributes_fan[3] },
+ { .attrs = it87_attributes_fan[4] },
};
-static struct attribute *it87_attributes_fan[3][4+1] = { {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_min.dev_attr.attr,
+static const struct attribute *it87_attributes_fan_div[] = {
&sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
- &sensor_dev_attr_fan3_alarm.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_fan[3] = {
- { .attrs = it87_attributes_fan[0] },
- { .attrs = it87_attributes_fan[1] },
- { .attrs = it87_attributes_fan[2] },
};
-static const struct attribute_group *
-it87_get_fan_group(const struct it87_data *data)
-{
- return has_16bit_fans(data) ? it87_group_fan16 : it87_group_fan;
-}
-
static struct attribute *it87_attributes_pwm[3][4+1] = { {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
@@ -1925,7 +1933,6 @@ static void it87_remove_files(struct device *dev)
{
struct it87_data *data = platform_get_drvdata(pdev);
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group = it87_get_fan_group(data);
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
@@ -1941,6 +1948,9 @@ static void it87_remove_files(struct device *dev)
if (!(data->has_temp & (1 << i)))
continue;
sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
+ if (has_temp_offset(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -1948,10 +1958,13 @@ static void it87_remove_files(struct device *dev)
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- sysfs_remove_group(&dev->kobj, &fan_group[i]);
+ sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
if (sio_data->beep_pin)
sysfs_remove_file(&dev->kobj,
it87_attributes_fan_beep[i]);
+ if (i < 3 && !has_16bit_fans(data))
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
}
for (i = 0; i < 3; i++) {
if (sio_data->skip_pwm & (1 << 0))
@@ -1966,27 +1979,15 @@ static void it87_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &it87_group_label);
}
-static int __devinit it87_probe(struct platform_device *pdev)
+static int it87_probe(struct platform_device *pdev)
{
struct it87_data *data;
struct resource *res;
struct device *dev = &pdev->dev;
struct it87_sio_data *sio_data = dev->platform_data;
- const struct attribute_group *fan_group;
int err = 0, i;
int enable_pwm_interface;
int fan_beep_need_rw;
- static const char * const names[] = {
- "it87",
- "it8712",
- "it8716",
- "it8718",
- "it8720",
- "it8721",
- "it8728",
- "it8782",
- "it8783",
- };
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
@@ -2003,8 +2004,31 @@ static int __devinit it87_probe(struct platform_device *pdev)
data->addr = res->start;
data->type = sio_data->type;
- data->revision = sio_data->revision;
- data->name = names[sio_data->type];
+ data->features = it87_devices[sio_data->type].features;
+ data->peci_mask = it87_devices[sio_data->type].peci_mask;
+ data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+ data->name = it87_devices[sio_data->type].name;
+ /*
+ * IT8705F Datasheet 0.4.1, 3h == Version G.
+ * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+ * These are the first revisions with 16-bit tachometer support.
+ */
+ switch (data->type) {
+ case it87:
+ if (sio_data->revision >= 0x03) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ case it8712:
+ if (sio_data->revision >= 0x08) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_16BIT_FANS;
+ }
+ break;
+ default:
+ break;
+ }
/* Now, we do the remaining detection. */
if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
@@ -2068,6 +2092,12 @@ static int __devinit it87_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
if (err)
goto error;
+ if (has_temp_offset(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_temp_offset[i]);
+ if (err)
+ goto error;
+ }
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_temp_beep[i]);
@@ -2077,15 +2107,21 @@ static int __devinit it87_probe(struct platform_device *pdev)
}
/* Do not create fan files for disabled fans */
- fan_group = it87_get_fan_group(data);
fan_beep_need_rw = 1;
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- err = sysfs_create_group(&dev->kobj, &fan_group[i]);
+ err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
if (err)
goto error;
+ if (i < 3 && !has_16bit_fans(data)) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_fan_div[i]);
+ if (err)
+ goto error;
+ }
+
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_fan_beep[i]);
@@ -2158,7 +2194,7 @@ error:
return err;
}
-static int __devexit it87_remove(struct platform_device *pdev)
+static int it87_remove(struct platform_device *pdev)
{
struct it87_data *data = platform_get_drvdata(pdev);
@@ -2191,7 +2227,7 @@ static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
}
/* Return 1 if and only if the PWM interface is safe to use */
-static int __devinit it87_check_pwm(struct device *dev)
+static int it87_check_pwm(struct device *dev)
{
struct it87_data *data = dev_get_drvdata(dev);
/*
@@ -2221,8 +2257,8 @@ static int __devinit it87_check_pwm(struct device *dev)
* PWM interface).
*/
if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(dev, "Reconfiguring PWM to "
- "active high polarity\n");
+ dev_info(dev,
+ "Reconfiguring PWM to active high polarity\n");
it87_write_value(data, IT87_REG_FAN_CTL,
tmp | 0x87);
for (i = 0; i < 3; i++)
@@ -2232,23 +2268,23 @@ static int __devinit it87_check_pwm(struct device *dev)
return 1;
}
- dev_info(dev, "PWM configuration is "
- "too broken to be fixed\n");
+ dev_info(dev,
+ "PWM configuration is too broken to be fixed\n");
}
- dev_info(dev, "Detected broken BIOS "
- "defaults, disabling PWM interface\n");
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
return 0;
} else if (fix_pwm_polarity) {
- dev_info(dev, "PWM configuration looks "
- "sane, won't touch\n");
+ dev_info(dev,
+ "PWM configuration looks sane, won't touch\n");
}
return 1;
}
/* Called when we have found a new IT87. */
-static void __devinit it87_init_device(struct platform_device *pdev)
+static void it87_init_device(struct platform_device *pdev)
{
struct it87_sio_data *sio_data = pdev->dev.platform_data;
struct it87_data *data = platform_get_drvdata(pdev);
@@ -2389,42 +2425,46 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_read_value(data, IT87_REG_CONFIG) | 0x40);
}
for (i = 0; i <= 7; i++) {
- data->in[i] =
+ data->in[i][0] =
it87_read_value(data, IT87_REG_VIN(i));
- data->in_min[i] =
+ data->in[i][1] =
it87_read_value(data, IT87_REG_VIN_MIN(i));
- data->in_max[i] =
+ data->in[i][2] =
it87_read_value(data, IT87_REG_VIN_MAX(i));
}
/* in8 (battery) has no limit registers */
- data->in[8] = it87_read_value(data, IT87_REG_VIN(8));
+ data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
if (!(data->has_fan & (1 << i)))
continue;
- data->fan_min[i] =
+ data->fan[i][1] =
it87_read_value(data, IT87_REG_FAN_MIN[i]);
- data->fan[i] = it87_read_value(data,
+ data->fan[i][0] = it87_read_value(data,
IT87_REG_FAN[i]);
/* Add high byte if in 16-bit mode */
if (has_16bit_fans(data)) {
- data->fan[i] |= it87_read_value(data,
+ data->fan[i][0] |= it87_read_value(data,
IT87_REG_FANX[i]) << 8;
- data->fan_min[i] |= it87_read_value(data,
+ data->fan[i][1] |= it87_read_value(data,
IT87_REG_FANX_MIN[i]) << 8;
}
}
for (i = 0; i < 3; i++) {
if (!(data->has_temp & (1 << i)))
continue;
- data->temp[i] =
+ data->temp[i][0] =
it87_read_value(data, IT87_REG_TEMP(i));
- data->temp_high[i] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- data->temp_low[i] =
+ data->temp[i][1] =
it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ data->temp[i][2] =
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ if (has_temp_offset(data))
+ data->temp[i][3] =
+ it87_read_value(data,
+ IT87_REG_TEMP_OFFSET[i]);
}
/* Newer chips don't have clock dividers */
@@ -2448,6 +2488,7 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_update_pwm_ctrl(data, i);
data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
/*
* The IT8705F does not have VID capability.
* The IT8718F and later don't use IT87_REG_VID for the
@@ -2549,8 +2590,7 @@ static void __exit sm_it87_exit(void)
}
-MODULE_AUTHOR("Chris Gauthron, "
- "Jean Delvare <khali@linux-fr.org>");
+MODULE_AUTHOR("Chris Gauthron, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
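Note on the it87 hunks above: the per-revision special cases and the duplicate 16-bit fan attribute arrays are replaced by a single feature bitmask that is filled in once at probe time and queried through helpers such as has_16bit_fans() and has_temp_offset(). A minimal sketch of that pattern follows; the structure name, flag bits and revision threshold are illustrative only, not the driver's actual definitions, and the fragment assumes <linux/types.h> for u8/u32.

/* illustrative bit assignments, not the driver's */
#define FEAT_16BIT_FANS         (1 << 0)
#define FEAT_TEMP_OFFSET        (1 << 1)
#define FEAT_OLD_AUTOPWM        (1 << 2)

struct chip_data {
        u32 features;
};

static inline int has_16bit_fans(const struct chip_data *data)
{
        return data->features & FEAT_16BIT_FANS;
}

static inline int has_temp_offset(const struct chip_data *data)
{
        return data->features & FEAT_TEMP_OFFSET;
}

static void apply_revision_quirks(struct chip_data *data, u8 revision)
{
        /* e.g. later revisions gain 16-bit tachometers (threshold illustrative) */
        if (revision >= 0x03) {
                data->features &= ~FEAT_OLD_AUTOPWM;
                data->features |= FEAT_16BIT_FANS;
        }
}

With the flags set once in probe, the sysfs create/remove paths only test the helpers, which is why the separate it87_group_fan16 array and it87_get_fan_group() disappear above.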
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index dee9eec2036e..e0d66b9590ab 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -102,7 +102,7 @@ static const struct attribute_group jz4740_hwmon_attr_group = {
.attrs = jz4740_hwmon_attributes,
};
-static int __devinit jz4740_hwmon_probe(struct platform_device *pdev)
+static int jz4740_hwmon_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_hwmon *hwmon;
@@ -172,7 +172,7 @@ err_remove_file:
return ret;
}
-static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
+static int jz4740_hwmon_remove(struct platform_device *pdev)
{
struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);
@@ -184,7 +184,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
- .remove = __devexit_p(jz4740_hwmon_remove),
+ .remove = jz4740_hwmon_remove,
.driver = {
.name = "jz4740-hwmon",
.owner = THIS_MODULE,
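Most of the remaining hunks in this series are the same mechanical conversion seen in the jz4740 hunks above: the __devinit/__devexit/__devinitdata section annotations are dropped from the probe/remove paths and the __devexit_p() wrapper disappears from the .remove pointer. With CONFIG_HOTPLUG on its way out these annotations no longer discard any code, so the change is purely mechanical. A before/after sketch with a hypothetical foo driver (not one of the drivers in this patch); the two variants are alternatives, not one translation unit:

/* Before: remove() wrapped so it could be discarded when CONFIG_HOTPLUG=n */
static int __devinit foo_probe(struct platform_device *pdev);
static int __devexit foo_remove(struct platform_device *pdev);

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = __devexit_p(foo_remove),
};

/* After: plain functions and a plain function pointer */
static int foo_probe(struct platform_device *pdev);
static int foo_remove(struct platform_device *pdev);

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
};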
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index f2fe8078633b..e3b037c73a7e 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -95,7 +95,7 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static bool __devinit has_erratum_319(struct pci_dev *pdev)
+static bool has_erratum_319(struct pci_dev *pdev)
{
u32 pkg_type, reg_dram_cfg;
@@ -129,7 +129,7 @@ static bool __devinit has_erratum_319(struct pci_dev *pdev)
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
}
-static int __devinit k10temp_probe(struct pci_dev *pdev,
+static int k10temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct device *hwmon_dev;
@@ -192,7 +192,7 @@ exit:
return err;
}
-static void __devexit k10temp_remove(struct pci_dev *pdev)
+static void k10temp_remove(struct pci_dev *pdev)
{
hwmon_device_unregister(pci_get_drvdata(pdev));
device_remove_file(&pdev->dev, &dev_attr_name);
@@ -219,7 +219,7 @@ static struct pci_driver k10temp_driver = {
.name = "k10temp",
.id_table = k10temp_id_table,
.probe = k10temp_probe,
- .remove = __devexit_p(k10temp_remove),
+ .remove = k10temp_remove,
};
module_pci_driver(k10temp_driver);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index e8c7fb0bbf95..9f3c0aeacdb9 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -142,7 +142,7 @@ static DEFINE_PCI_DEVICE_TABLE(k8temp_ids) = {
MODULE_DEVICE_TABLE(pci, k8temp_ids);
-static int __devinit is_rev_g_desktop(u8 model)
+static int is_rev_g_desktop(u8 model)
{
u32 brandidx;
@@ -173,7 +173,7 @@ static int __devinit is_rev_g_desktop(u8 model)
return 1;
}
-static int __devinit k8temp_probe(struct pci_dev *pdev,
+static int k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int err;
@@ -304,7 +304,7 @@ exit_remove:
return err;
}
-static void __devexit k8temp_remove(struct pci_dev *pdev)
+static void k8temp_remove(struct pci_dev *pdev)
{
struct k8temp_data *data = pci_get_drvdata(pdev);
@@ -324,7 +324,7 @@ static struct pci_driver k8temp_driver = {
.name = "k8temp",
.id_table = k8temp_ids,
.probe = k8temp_probe,
- .remove = __devexit_p(k8temp_remove),
+ .remove = k8temp_remove,
};
module_pci_driver(k8temp_driver);
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 2d1777a03edb..016efa26ba7c 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -131,7 +131,7 @@ static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL);
/*----------------------------------------------------------------------*/
-static int __devinit lm70_probe(struct spi_device *spi)
+static int lm70_probe(struct spi_device *spi)
{
int chip = spi_get_device_id(spi)->driver_data;
struct lm70 *p_lm70;
@@ -178,7 +178,7 @@ out_dev_create_temp_file_failed:
return status;
}
-static int __devexit lm70_remove(struct spi_device *spi)
+static int lm70_remove(struct spi_device *spi)
{
struct lm70 *p_lm70 = spi_get_drvdata(spi);
@@ -207,7 +207,7 @@ static struct spi_driver lm70_driver = {
},
.id_table = lm70_ids,
.probe = lm70_probe,
- .remove = __devexit_p(lm70_remove),
+ .remove = lm70_remove,
};
module_spi_driver(lm70_driver);
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 8fa2632cbbaf..7272176a9ec7 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long temp;
short value;
+ s32 err;
int status = kstrtol(buf, 10, &temp);
if (status < 0)
@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
/* Write value */
value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
(LM73_TEMP_MAX*4)) << 5;
- i2c_smbus_write_word_swapped(client, attr->index, value);
- return count;
+ err = i2c_smbus_write_word_swapped(client, attr->index, value);
+ return (err < 0) ? err : count;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct i2c_client *client = to_i2c_client(dev);
+ int temp;
+
+ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+ if (err < 0)
+ return err;
+
/* use integer division instead of equivalent right shift to
guarantee arithmetic shift and preserve the sign */
- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
- attr->index))*250) / 32;
- return sprintf(buf, "%d\n", temp);
+ temp = (((s16) err) * 250) / 32;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
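The lm73 hunks above change both accessors to check the signed return value of the SMBus helpers instead of silently formatting an error code as sensor data, and to propagate write failures back to the caller. A minimal sketch of the read-side pattern, assuming an ordinary i2c_client; the register number and scale factor are illustrative, and the fragment assumes <linux/i2c.h> and <linux/kernel.h>:

static ssize_t show_temp_checked(struct device *dev,
                                 struct device_attribute *da, char *buf)
{
        struct i2c_client *client = to_i2c_client(dev);
        s32 raw;

        raw = i2c_smbus_read_word_swapped(client, 0x00); /* hypothetical reg */
        if (raw < 0)
                return raw;     /* propagate -errno to the sysfs reader */

        /* signed 16-bit value, scaled to millidegrees (illustrative factor) */
        return scnprintf(buf, PAGE_SIZE, "%d\n", ((s16)raw * 250) / 32);
}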
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index c6ffafe600ad..53d6ee8ffa33 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -833,7 +833,7 @@ static struct lm78_data *lm78_update_device(struct device *dev)
}
#ifdef CONFIG_ISA
-static int __devinit lm78_isa_probe(struct platform_device *pdev)
+static int lm78_isa_probe(struct platform_device *pdev)
{
int err;
struct lm78_data *data;
@@ -886,7 +886,7 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev)
return err;
}
-static int __devexit lm78_isa_remove(struct platform_device *pdev)
+static int lm78_isa_remove(struct platform_device *pdev)
{
struct lm78_data *data = platform_get_drvdata(pdev);
@@ -903,7 +903,7 @@ static struct platform_driver lm78_isa_driver = {
.name = "lm78",
},
.probe = lm78_isa_probe,
- .remove = __devexit_p(lm78_isa_remove),
+ .remove = lm78_isa_remove,
};
/* return 1 if a supported chip is found, 0 otherwise */
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index b4eb0889c465..eda077de8a9f 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -157,7 +157,7 @@ static const struct attribute_group max1110_attr_group = {
.attrs = max1110_attributes,
};
-static int __devinit setup_transfer(struct max1111_data *data)
+static int setup_transfer(struct max1111_data *data)
{
struct spi_message *m;
struct spi_transfer *x;
@@ -179,7 +179,7 @@ static int __devinit setup_transfer(struct max1111_data *data)
return 0;
}
-static int __devinit max1111_probe(struct spi_device *spi)
+static int max1111_probe(struct spi_device *spi)
{
enum chips chip = spi_get_device_id(spi)->driver_data;
struct max1111_data *data;
@@ -256,7 +256,7 @@ err_remove:
return err;
}
-static int __devexit max1111_remove(struct spi_device *spi)
+static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
@@ -283,7 +283,7 @@ static struct spi_driver max1111_driver = {
},
.id_table = max1111_ids,
.probe = max1111_probe,
- .remove = __devexit_p(max1111_remove),
+ .remove = max1111_remove,
};
module_spi_driver(max1111_driver);
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 6304f2616fa7..b5ebb9198c75 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -257,7 +257,7 @@ static const struct attribute_group max197_sysfs_group = {
},
};
-static int __devinit max197_probe(struct platform_device *pdev)
+static int max197_probe(struct platform_device *pdev)
{
int ch, ret;
struct max197_data *data;
@@ -316,7 +316,7 @@ error:
return ret;
}
-static int __devexit max197_remove(struct platform_device *pdev)
+static int max197_remove(struct platform_device *pdev)
{
struct max197_data *data = platform_get_drvdata(pdev);
@@ -339,7 +339,7 @@ static struct platform_driver max197_driver = {
.owner = THIS_MODULE,
},
.probe = max197_probe,
- .remove = __devexit_p(max197_remove),
+ .remove = max197_remove,
.id_table = max197_device_ids,
};
module_platform_driver(max197_driver);
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index cf47a59657a9..2a7f331cd3c0 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -233,7 +233,7 @@ out_err_create_16chans:
return ret;
}
-static int __devexit mc13783_adc_remove(struct platform_device *pdev)
+static int mc13783_adc_remove(struct platform_device *pdev)
{
struct mc13783_adc_priv *priv = platform_get_drvdata(pdev);
kernel_ulong_t driver_data = platform_get_device_id(pdev)->driver_data;
@@ -265,7 +265,7 @@ static const struct platform_device_id mc13783_adc_idtable[] = {
MODULE_DEVICE_TABLE(platform, mc13783_adc_idtable);
static struct platform_driver mc13783_adc_driver = {
- .remove = __devexit_p(mc13783_adc_remove),
+ .remove = mc13783_adc_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 74a6c58d0218..a87eb8986e36 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -309,7 +309,7 @@ static const struct attribute_group ntc_attr_group = {
.attrs = ntc_attributes,
};
-static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
+static int ntc_thermistor_probe(struct platform_device *pdev)
{
struct ntc_data *data;
struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data;
@@ -393,7 +393,7 @@ err_after_sysfs:
return ret;
}
-static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
+static int ntc_thermistor_remove(struct platform_device *pdev)
{
struct ntc_data *data = platform_get_drvdata(pdev);
@@ -419,7 +419,7 @@ static struct platform_driver ntc_thermistor_driver = {
.owner = THIS_MODULE,
},
.probe = ntc_thermistor_probe,
- .remove = __devexit_p(ntc_thermistor_remove),
+ .remove = ntc_thermistor_remove,
.id_table = ntc_thermistor_id,
};
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 91d5b2a21dd9..e35856bb79b4 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -228,7 +228,7 @@ struct pc87360_data {
*/
static int pc87360_probe(struct platform_device *pdev);
-static int __devexit pc87360_remove(struct platform_device *pdev);
+static int pc87360_remove(struct platform_device *pdev);
static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
u8 reg);
@@ -248,7 +248,7 @@ static struct platform_driver pc87360_driver = {
.name = "pc87360",
},
.probe = pc87360_probe,
- .remove = __devexit_p(pc87360_remove),
+ .remove = pc87360_remove,
};
/*
@@ -1221,7 +1221,7 @@ static void pc87360_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &pc8736x_vin_group);
}
-static int __devinit pc87360_probe(struct platform_device *pdev)
+static int pc87360_probe(struct platform_device *pdev)
{
int i;
struct pc87360_data *data;
@@ -1375,7 +1375,7 @@ error:
return err;
}
-static int __devexit pc87360_remove(struct platform_device *pdev)
+static int pc87360_remove(struct platform_device *pdev)
{
struct pc87360_data *data = platform_get_drvdata(pdev);
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index f185b1fa53e5..6086ad039d7d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -956,7 +956,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
* Device detection, attach and detach
*/
-static int __devinit pc87427_request_regions(struct platform_device *pdev,
+static int pc87427_request_regions(struct platform_device *pdev,
int count)
{
struct resource *res;
@@ -980,7 +980,7 @@ static int __devinit pc87427_request_regions(struct platform_device *pdev,
return 0;
}
-static void __devinit pc87427_init_device(struct device *dev)
+static void pc87427_init_device(struct device *dev)
{
struct pc87427_sio_data *sio_data = dev->platform_data;
struct pc87427_data *data = dev_get_drvdata(dev);
@@ -1072,7 +1072,7 @@ static void pc87427_remove_files(struct device *dev)
}
}
-static int __devinit pc87427_probe(struct platform_device *pdev)
+static int pc87427_probe(struct platform_device *pdev)
{
struct pc87427_sio_data *sio_data = pdev->dev.platform_data;
struct pc87427_data *data;
@@ -1141,7 +1141,7 @@ exit_remove_files:
return err;
}
-static int __devexit pc87427_remove(struct platform_device *pdev)
+static int pc87427_remove(struct platform_device *pdev)
{
struct pc87427_data *data = platform_get_drvdata(pdev);
@@ -1158,7 +1158,7 @@ static struct platform_driver pc87427_driver = {
.name = DRVNAME,
},
.probe = pc87427_probe,
- .remove = __devexit_p(pc87427_remove),
+ .remove = pc87427_remove,
};
static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data)
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index bcecd025fcc4..ff2ae0252a48 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -275,7 +275,7 @@ static void s3c_hwmon_remove_attr(struct device *dev,
* s3c_hwmon_probe - device probe entry.
* @dev: The device being probed.
*/
-static int __devinit s3c_hwmon_probe(struct platform_device *dev)
+static int s3c_hwmon_probe(struct platform_device *dev)
{
struct s3c_hwmon_pdata *pdata = dev->dev.platform_data;
struct s3c_hwmon *hwmon;
@@ -364,7 +364,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
return ret;
}
-static int __devexit s3c_hwmon_remove(struct platform_device *dev)
+static int s3c_hwmon_remove(struct platform_device *dev)
{
struct s3c_hwmon *hwmon = platform_get_drvdata(dev);
int i;
@@ -386,7 +386,7 @@ static struct platform_driver s3c_hwmon_driver = {
.owner = THIS_MODULE,
},
.probe = s3c_hwmon_probe,
- .remove = __devexit_p(s3c_hwmon_remove),
+ .remove = s3c_hwmon_remove,
};
module_platform_driver(s3c_hwmon_driver);
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 49f6230bdcf1..0cc99fd83e8e 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -153,7 +153,7 @@ abort:
return ret;
}
-static int __devinit sch5627_read_limits(struct sch5627_data *data)
+static int sch5627_read_limits(struct sch5627_data *data)
{
int i, val;
@@ -465,7 +465,7 @@ static int sch5627_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit sch5627_probe(struct platform_device *pdev)
+static int sch5627_probe(struct platform_device *pdev)
{
struct sch5627_data *data;
int err, build_code, build_id, hwmon_rev, val;
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 517118016192..547b5c952eff 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -405,7 +405,7 @@ static int sch5636_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit sch5636_probe(struct platform_device *pdev)
+static int sch5636_probe(struct platform_device *pdev)
{
struct sch5636_data *data;
int i, err, val, revision[2];
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 07a0c1a0b84d..1c85d39df171 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -884,7 +884,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int __devinit sht15_probe(struct platform_device *pdev)
+static int sht15_probe(struct platform_device *pdev)
{
int ret;
struct sht15_data *data;
@@ -1002,7 +1002,7 @@ err_release_reg:
return ret;
}
-static int __devexit sht15_remove(struct platform_device *pdev)
+static int sht15_remove(struct platform_device *pdev)
{
struct sht15_data *data = platform_get_drvdata(pdev);
@@ -1043,7 +1043,7 @@ static struct platform_driver sht15_driver = {
.owner = THIS_MODULE,
},
.probe = sht15_probe,
- .remove = __devexit_p(sht15_remove),
+ .remove = sht15_remove,
.id_table = sht15_device_ids,
};
module_platform_driver(sht15_driver);
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 5f67546950b1..2e9f9570b6f8 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -187,7 +187,7 @@ static const struct attribute_group sht21_attr_group = {
* device's name.
* Returns 0 on success.
*/
-static int __devinit sht21_probe(struct i2c_client *client,
+static int sht21_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct sht21 *sht21;
@@ -233,7 +233,7 @@ fail_remove_sysfs:
* sht21_remove() - remove device
* @client: I2C client device
*/
-static int __devexit sht21_remove(struct i2c_client *client)
+static int sht21_remove(struct i2c_client *client)
{
struct sht21 *sht21 = i2c_get_clientdata(client);
@@ -253,7 +253,7 @@ MODULE_DEVICE_TABLE(i2c, sht21_id);
static struct i2c_driver sht21_driver = {
.driver.name = "sht21",
.probe = sht21_probe,
- .remove = __devexit_p(sht21_remove),
+ .remove = sht21_remove,
.id_table = sht21_id,
};
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 8275f0e14eb7..06ce3c911db9 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -204,7 +204,7 @@ struct sis5595_data {
static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */
static int sis5595_probe(struct platform_device *pdev);
-static int __devexit sis5595_remove(struct platform_device *pdev);
+static int sis5595_remove(struct platform_device *pdev);
static int sis5595_read_value(struct sis5595_data *data, u8 reg);
static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value);
@@ -217,7 +217,7 @@ static struct platform_driver sis5595_driver = {
.name = "sis5595",
},
.probe = sis5595_probe,
- .remove = __devexit_p(sis5595_remove),
+ .remove = sis5595_remove,
};
/* 4 Voltages */
@@ -583,7 +583,7 @@ static const struct attribute_group sis5595_group_temp1 = {
};
/* This is called when the module is loaded */
-static int __devinit sis5595_probe(struct platform_device *pdev)
+static int sis5595_probe(struct platform_device *pdev)
{
int err = 0;
int i;
@@ -659,7 +659,7 @@ exit_remove_files:
return err;
}
-static int __devexit sis5595_remove(struct platform_device *pdev)
+static int sis5595_remove(struct platform_device *pdev)
{
struct sis5595_data *data = platform_get_drvdata(pdev);
@@ -693,7 +693,7 @@ static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
}
/* Called when we have found a new SIS5595. */
-static void __devinit sis5595_init_device(struct sis5595_data *data)
+static void sis5595_init_device(struct sis5595_data *data)
{
u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
if (!(config & 0x01))
@@ -758,7 +758,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis5595_pci_ids) = {
MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
-static int blacklist[] __devinitdata = {
+static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
PCI_DEVICE_ID_SI_550,
PCI_DEVICE_ID_SI_630,
@@ -774,7 +774,7 @@ static int blacklist[] __devinitdata = {
PCI_DEVICE_ID_SI_5598,
0 };
-static int __devinit sis5595_device_add(unsigned short address)
+static int sis5595_device_add(unsigned short address)
{
struct resource res = {
.start = address,
@@ -815,7 +815,7 @@ exit:
return err;
}
-static int __devinit sis5595_pci_probe(struct pci_dev *dev,
+static int sis5595_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 address;
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 65b07de11a0f..81348fadf3b6 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -228,7 +228,7 @@ static const struct attribute_group smsc47b397_group = {
.attrs = smsc47b397_attributes,
};
-static int __devexit smsc47b397_remove(struct platform_device *pdev)
+static int smsc47b397_remove(struct platform_device *pdev)
{
struct smsc47b397_data *data = platform_get_drvdata(pdev);
@@ -246,10 +246,10 @@ static struct platform_driver smsc47b397_driver = {
.name = DRVNAME,
},
.probe = smsc47b397_probe,
- .remove = __devexit_p(smsc47b397_remove),
+ .remove = smsc47b397_remove,
};
-static int __devinit smsc47b397_probe(struct platform_device *pdev)
+static int smsc47b397_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct smsc47b397_data *data;
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index b8777e54190a..b10c3d36ccbc 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -147,7 +147,7 @@ static const struct attribute_group tmp102_attr_group = {
#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
-static int __devinit tmp102_probe(struct i2c_client *client,
+static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tmp102 *tmp102;
@@ -216,7 +216,7 @@ fail_restore_config:
return status;
}
-static int __devexit tmp102_remove(struct i2c_client *client)
+static int tmp102_remove(struct i2c_client *client)
{
struct tmp102 *tmp102 = i2c_get_clientdata(client);
@@ -283,7 +283,7 @@ static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.pm = TMP102_DEV_PM_OPS,
.probe = tmp102_probe,
- .remove = __devexit_p(tmp102_remove),
+ .remove = tmp102_remove,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 1a174f0a3cde..6c6d440bb2dd 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -96,7 +96,7 @@ static const struct attribute_group twl4030_madc_group = {
.attrs = twl4030_madc_attributes,
};
-static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
+static int twl4030_madc_hwmon_probe(struct platform_device *pdev)
{
int ret;
struct device *hwmon;
@@ -120,7 +120,7 @@ err_sysfs:
return ret;
}
-static int __devexit twl4030_madc_hwmon_remove(struct platform_device *pdev)
+static int twl4030_madc_hwmon_remove(struct platform_device *pdev)
{
hwmon_device_unregister(&pdev->dev);
sysfs_remove_group(&pdev->dev.kobj, &twl4030_madc_group);
@@ -130,7 +130,7 @@ static int __devexit twl4030_madc_hwmon_remove(struct platform_device *pdev)
static struct platform_driver twl4030_madc_hwmon_driver = {
.probe = twl4030_madc_hwmon_probe,
- .remove = __exit_p(twl4030_madc_hwmon_remove),
+ .remove = twl4030_madc_hwmon_remove,
.driver = {
.name = "twl4030_madc_hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 44136bb6d045..fb3e69341c1b 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -250,7 +250,7 @@ static const struct attribute_group env_group = {
.attrs = env_attributes,
};
-static int __devinit env_probe(struct platform_device *op)
+static int env_probe(struct platform_device *op)
{
struct env *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
@@ -291,7 +291,7 @@ out_free:
goto out;
}
-static int __devexit env_remove(struct platform_device *op)
+static int env_remove(struct platform_device *op)
{
struct env *p = platform_get_drvdata(op);
@@ -321,7 +321,7 @@ static struct platform_driver env_driver = {
.of_match_table = env_match,
},
.probe = env_probe,
- .remove = __devexit_p(env_remove),
+ .remove = env_remove,
};
module_platform_driver(env_driver);
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
new file mode 100644
index 000000000000..d867e6bb2be1
--- /dev/null
+++ b/drivers/hwmon/vexpress.c
@@ -0,0 +1,230 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#define DRVNAME "vexpress-hwmon"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/vexpress.h>
+
+struct vexpress_hwmon_data {
+ struct device *hwmon_dev;
+ struct vexpress_config_func *func;
+};
+
+static ssize_t vexpress_hwmon_name_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ const char *compatible = of_get_property(dev->of_node, "compatible",
+ NULL);
+
+ return sprintf(buffer, "%s\n", compatible);
+}
+
+static ssize_t vexpress_hwmon_label_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ const char *label = of_get_property(dev->of_node, "label", NULL);
+
+ if (!label)
+ return -ENOENT;
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", label);
+}
+
+static ssize_t vexpress_hwmon_u32_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
+ int err;
+ u32 value;
+
+ err = vexpress_config_read(data->func, 0, &value);
+ if (err)
+ return err;
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", value /
+ to_sensor_dev_attr(dev_attr)->index);
+}
+
+static ssize_t vexpress_hwmon_u64_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buffer)
+{
+ struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
+ int err;
+ u32 value_hi, value_lo;
+
+ err = vexpress_config_read(data->func, 0, &value_lo);
+ if (err)
+ return err;
+
+ err = vexpress_config_read(data->func, 1, &value_hi);
+ if (err)
+ return err;
+
+ return snprintf(buffer, PAGE_SIZE, "%llu\n",
+ div_u64(((u64)value_hi << 32) | value_lo,
+ to_sensor_dev_attr(dev_attr)->index));
+}
+
+static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
+
+#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
+struct attribute *vexpress_hwmon_attrs_##_name[] = { \
+ &dev_attr_name.attr, \
+ &dev_attr_##_label_attr.attr, \
+ &sensor_dev_attr_##_input_attr.dev_attr.attr, \
+ NULL \
+}
+
+#if !defined(CONFIG_REGULATOR_VEXPRESS)
+static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
+static struct attribute_group vexpress_hwmon_group_volt = {
+ .attrs = vexpress_hwmon_attrs_volt,
+};
+#endif
+
+static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
+static struct attribute_group vexpress_hwmon_group_amp = {
+ .attrs = vexpress_hwmon_attrs_amp,
+};
+
+static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1000);
+static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
+static struct attribute_group vexpress_hwmon_group_temp = {
+ .attrs = vexpress_hwmon_attrs_temp,
+};
+
+static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
+ NULL, 1);
+static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
+static struct attribute_group vexpress_hwmon_group_power = {
+ .attrs = vexpress_hwmon_attrs_power,
+};
+
+static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
+static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
+ NULL, 1);
+static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
+static struct attribute_group vexpress_hwmon_group_energy = {
+ .attrs = vexpress_hwmon_attrs_energy,
+};
+
+static struct of_device_id vexpress_hwmon_of_match[] = {
+#if !defined(CONFIG_REGULATOR_VEXPRESS)
+ {
+ .compatible = "arm,vexpress-volt",
+ .data = &vexpress_hwmon_group_volt,
+ },
+#endif
+ {
+ .compatible = "arm,vexpress-amp",
+ .data = &vexpress_hwmon_group_amp,
+ }, {
+ .compatible = "arm,vexpress-temp",
+ .data = &vexpress_hwmon_group_temp,
+ }, {
+ .compatible = "arm,vexpress-power",
+ .data = &vexpress_hwmon_group_power,
+ }, {
+ .compatible = "arm,vexpress-energy",
+ .data = &vexpress_hwmon_group_energy,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match);
+
+static int vexpress_hwmon_probe(struct platform_device *pdev)
+{
+ int err;
+ const struct of_device_id *match;
+ struct vexpress_hwmon_data *data;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, data);
+
+ match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ data->func = vexpress_config_func_get_by_dev(&pdev->dev);
+ if (!data->func)
+ return -ENODEV;
+
+ err = sysfs_create_group(&pdev->dev.kobj, match->data);
+ if (err)
+ goto error;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ sysfs_remove_group(&pdev->dev.kobj, match->data);
+ vexpress_config_func_put(data->func);
+ return err;
+}
+
+static int vexpress_hwmon_remove(struct platform_device *pdev)
+{
+ struct vexpress_hwmon_data *data = platform_get_drvdata(pdev);
+ const struct of_device_id *match;
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
+ sysfs_remove_group(&pdev->dev.kobj, match->data);
+
+ vexpress_config_func_put(data->func);
+
+ return 0;
+}
+
+static struct platform_driver vexpress_hwmon_driver = {
+ .probe = vexpress_hwmon_probe,
+ .remove = vexpress_hwmon_remove,
+ .driver = {
+ .name = DRVNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = vexpress_hwmon_of_match,
+ },
+};
+
+module_platform_driver(vexpress_hwmon_driver);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Versatile Express hwmon sensors driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:vexpress-hwmon");
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 4cddee04f2e6..76f157b568ed 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -121,7 +121,7 @@ static const struct attribute_group via_cputemp_group = {
/* Optional attributes */
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_cpu_vid, NULL);
-static int __devinit via_cputemp_probe(struct platform_device *pdev)
+static int via_cputemp_probe(struct platform_device *pdev)
{
struct via_cputemp_data *data;
struct cpuinfo_x86 *c = &cpu_data(pdev->id);
@@ -192,7 +192,7 @@ exit_remove:
return err;
}
-static int __devexit via_cputemp_remove(struct platform_device *pdev)
+static int via_cputemp_remove(struct platform_device *pdev)
{
struct via_cputemp_data *data = platform_get_drvdata(pdev);
@@ -209,7 +209,7 @@ static struct platform_driver via_cputemp_driver = {
.name = DRVNAME,
},
.probe = via_cputemp_probe,
- .remove = __devexit_p(via_cputemp_remove),
+ .remove = via_cputemp_remove,
};
struct pdev_entry {
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 299399aa30fe..e0e14a9f1658 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -339,7 +339,7 @@ struct via686a_data {
static struct pci_dev *s_bridge; /* pointer to the (only) via686a */
static int via686a_probe(struct platform_device *pdev);
-static int __devexit via686a_remove(struct platform_device *pdev);
+static int via686a_remove(struct platform_device *pdev);
static inline int via686a_read_value(struct via686a_data *data, u8 reg)
{
@@ -677,12 +677,12 @@ static struct platform_driver via686a_driver = {
.name = "via686a",
},
.probe = via686a_probe,
- .remove = __devexit_p(via686a_remove),
+ .remove = via686a_remove,
};
/* This is called when the module is loaded */
-static int __devinit via686a_probe(struct platform_device *pdev)
+static int via686a_probe(struct platform_device *pdev)
{
struct via686a_data *data;
struct resource *res;
@@ -728,7 +728,7 @@ exit_remove_files:
return err;
}
-static int __devexit via686a_remove(struct platform_device *pdev)
+static int via686a_remove(struct platform_device *pdev)
{
struct via686a_data *data = platform_get_drvdata(pdev);
@@ -745,7 +745,7 @@ static void via686a_update_fan_div(struct via686a_data *data)
data->fan_div[1] = reg >> 6;
}
-static void __devinit via686a_init_device(struct via686a_data *data)
+static void via686a_init_device(struct via686a_data *data)
{
u8 reg;
@@ -833,7 +833,7 @@ static DEFINE_PCI_DEVICE_TABLE(via686a_pci_ids) = {
};
MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
-static int __devinit via686a_device_add(unsigned short address)
+static int via686a_device_add(unsigned short address)
{
struct resource res = {
.start = address,
@@ -874,7 +874,7 @@ exit:
return err;
}
-static int __devinit via686a_pci_probe(struct pci_dev *dev,
+static int via686a_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 address, val;
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index f2c61153dba9..751703059fae 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -1086,7 +1086,7 @@ static struct device_attribute vt1211_sysfs_misc[] = {
* Device registration and initialization
* --------------------------------------------------------------------- */
-static void __devinit vt1211_init_device(struct vt1211_data *data)
+static void vt1211_init_device(struct vt1211_data *data)
{
/* set VRM */
data->vrm = vid_which_vrm();
@@ -1141,7 +1141,7 @@ static void vt1211_remove_sysfs(struct platform_device *pdev)
device_remove_file(dev, &vt1211_sysfs_misc[i]);
}
-static int __devinit vt1211_probe(struct platform_device *pdev)
+static int vt1211_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vt1211_data *data;
@@ -1217,7 +1217,7 @@ EXIT_DEV_REMOVE_SILENT:
return err;
}
-static int __devexit vt1211_remove(struct platform_device *pdev)
+static int vt1211_remove(struct platform_device *pdev)
{
struct vt1211_data *data = platform_get_drvdata(pdev);
@@ -1233,7 +1233,7 @@ static struct platform_driver vt1211_driver = {
.name = DRVNAME,
},
.probe = vt1211_probe,
- .remove = __devexit_p(vt1211_remove),
+ .remove = vt1211_remove,
};
static int __init vt1211_device_add(unsigned short address)
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 84e3dc5e3a83..a56355cef184 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -176,7 +176,7 @@ struct vt8231_data {
static struct pci_dev *s_bridge;
static int vt8231_probe(struct platform_device *pdev);
-static int __devexit vt8231_remove(struct platform_device *pdev);
+static int vt8231_remove(struct platform_device *pdev);
static struct vt8231_data *vt8231_update_device(struct device *dev);
static void vt8231_init_device(struct vt8231_data *data);
@@ -762,7 +762,7 @@ static struct platform_driver vt8231_driver = {
.name = "vt8231",
},
.probe = vt8231_probe,
- .remove = __devexit_p(vt8231_remove),
+ .remove = vt8231_remove,
};
static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = {
@@ -772,7 +772,7 @@ static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = {
MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
-static int __devinit vt8231_pci_probe(struct pci_dev *dev,
+static int vt8231_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id);
static struct pci_driver vt8231_pci_driver = {
@@ -851,7 +851,7 @@ exit_remove_files:
return err;
}
-static int __devexit vt8231_remove(struct platform_device *pdev)
+static int vt8231_remove(struct platform_device *pdev)
{
struct vt8231_data *data = platform_get_drvdata(pdev);
int i;
@@ -943,7 +943,7 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
return data;
}
-static int __devinit vt8231_device_add(unsigned short address)
+static int vt8231_device_add(unsigned short address)
{
struct resource res = {
.start = address,
@@ -984,7 +984,7 @@ exit:
return err;
}
-static int __devinit vt8231_pci_probe(struct pci_dev *dev,
+static int vt8231_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 address, val;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index de3c7e04c3b5..0e8ffd6059a0 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,7 +1,7 @@
/*
* w83627ehf - Driver for the hardware monitoring functionality of
* the Winbond W83627EHF Super-I/O chip
- * Copyright (C) 2005-2011 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2005-2012 Jean Delvare <khali@linux-fr.org>
* Copyright (C) 2006 Yuan Mu (Winbond),
* Rudolf Marek <r.marek@assembler.cz>
* David Hubbard <david.c.hubbard@gmail.com>
@@ -502,6 +502,13 @@ struct w83627ehf_data {
u16 have_temp_offset;
u8 in6_skip:1;
u8 temp3_val_only:1;
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 vbat;
+ u8 fandiv1;
+ u8 fandiv2;
+#endif
};
struct w83627ehf_sio_data {
@@ -898,6 +905,8 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->temp_max_hyst[i]
= w83627ehf_read_temp(data,
data->reg_temp_hyst[i]);
+ if (i > 2)
+ continue;
if (data->have_temp_offset & (1 << i))
data->temp_offset[i]
= w83627ehf_read_value(data,
@@ -1866,7 +1875,7 @@ static void w83627ehf_device_remove_files(struct device *dev)
}
/* Get the monitoring functions started */
-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
+static inline void w83627ehf_init_device(struct w83627ehf_data *data,
enum kinds kind)
{
int i;
@@ -1952,7 +1961,7 @@ static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
data->reg_temp_config[r2] = tmp;
}
-static void __devinit
+static void
w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
{
int i;
@@ -1965,7 +1974,7 @@ w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp)
}
}
-static void __devinit
+static void
w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
struct w83627ehf_data *data)
{
@@ -2054,7 +2063,7 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
}
}
-static int __devinit w83627ehf_probe(struct platform_device *pdev)
+static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627ehf_sio_data *sio_data = dev->platform_data;
@@ -2596,7 +2605,7 @@ exit:
return err;
}
-static int __devexit w83627ehf_remove(struct platform_device *pdev)
+static int w83627ehf_remove(struct platform_device *pdev)
{
struct w83627ehf_data *data = platform_get_drvdata(pdev);
@@ -2608,13 +2617,101 @@ static int __devexit w83627ehf_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int w83627ehf_suspend(struct device *dev)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ mutex_lock(&data->update_lock);
+ data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
+ if (sio_data->kind == nct6775) {
+ data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627ehf_resume(struct device *dev)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ int i;
+
+ mutex_lock(&data->update_lock);
+ data->bank = 0xff; /* Force initial bank selection */
+
+ /* Restore limits */
+ for (i = 0; i < data->in_num; i++) {
+ if ((i == 6) && data->in6_skip)
+ continue;
+
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MIN(i),
+ data->in_min[i]);
+ w83627ehf_write_value(data, W83627EHF_REG_IN_MAX(i),
+ data->in_max[i]);
+ }
+
+ for (i = 0; i < 5; i++) {
+ if (!(data->has_fan_min & (1 << i)))
+ continue;
+
+ w83627ehf_write_value(data, data->REG_FAN_MIN[i],
+ data->fan_min[i]);
+ }
+
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+
+ if (data->reg_temp_over[i])
+ w83627ehf_write_temp(data, data->reg_temp_over[i],
+ data->temp_max[i]);
+ if (data->reg_temp_hyst[i])
+ w83627ehf_write_temp(data, data->reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ if (i > 2)
+ continue;
+ if (data->have_temp_offset & (1 << i))
+ w83627ehf_write_value(data,
+ W83627EHF_REG_TEMP_OFFSET[i],
+ data->temp_offset[i]);
+ }
+
+ /* Restore other settings */
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
+ if (sio_data->kind == nct6775) {
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
+ }
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
+ .suspend = w83627ehf_suspend,
+ .resume = w83627ehf_resume,
+};
+
+#define W83627EHF_DEV_PM_OPS (&w83627ehf_dev_pm_ops)
+#else
+#define W83627EHF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627ehf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627EHF_DEV_PM_OPS,
},
.probe = w83627ehf_probe,
- .remove = __devexit_p(w83627ehf_remove),
+ .remove = w83627ehf_remove,
};
/* w83627ehf_find() looks for a '627 in the Super-I/O config space */
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index af1589908709..81f486520cea 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -5,7 +5,7 @@
* Philip Edelbrock <phil@netroedge.com>,
* and Mark Studebaker <mdsxyz123@yahoo.com>
* Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org>
- * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org>
+ * Copyright (c) 2007 - 2012 Jean Delvare <khali@linux-fr.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -389,11 +389,17 @@ struct w83627hf_data {
*/
u8 vrm;
u8 vrm_ovt; /* Register value, 627THF/637HF/687THF only */
+
+#ifdef CONFIG_PM
+ /* Remember extra register values over suspend/resume */
+ u8 scfg1;
+ u8 scfg2;
+#endif
};
static int w83627hf_probe(struct platform_device *pdev);
-static int __devexit w83627hf_remove(struct platform_device *pdev);
+static int w83627hf_remove(struct platform_device *pdev);
static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value);
@@ -401,13 +407,80 @@ static void w83627hf_update_fan_div(struct w83627hf_data *data);
static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_device(struct platform_device *pdev);
+#ifdef CONFIG_PM
+static int w83627hf_suspend(struct device *dev)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+
+ mutex_lock(&data->update_lock);
+ data->scfg1 = w83627hf_read_value(data, W83781D_REG_SCFG1);
+ data->scfg2 = w83627hf_read_value(data, W83781D_REG_SCFG2);
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static int w83627hf_resume(struct device *dev)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ int i, num_temps = (data->type == w83697hf) ? 2 : 3;
+
+ /* Restore limits */
+ mutex_lock(&data->update_lock);
+ for (i = 0; i <= 8; i++) {
+ /* skip missing sensors */
+ if (((data->type == w83697hf) && (i == 1)) ||
+ ((data->type != w83627hf && data->type != w83697hf)
+ && (i == 5 || i == 6)))
+ continue;
+ w83627hf_write_value(data, W83781D_REG_IN_MAX(i),
+ data->in_max[i]);
+ w83627hf_write_value(data, W83781D_REG_IN_MIN(i),
+ data->in_min[i]);
+ }
+ for (i = 0; i <= 2; i++)
+ w83627hf_write_value(data, W83627HF_REG_FAN_MIN(i),
+ data->fan_min[i]);
+ for (i = 0; i < num_temps; i++) {
+ w83627hf_write_value(data, w83627hf_reg_temp_over[i],
+ data->temp_max[i]);
+ w83627hf_write_value(data, w83627hf_reg_temp_hyst[i],
+ data->temp_max_hyst[i]);
+ }
+
+ /* Fixup BIOS bugs */
+ if (data->type == w83627thf || data->type == w83637hf ||
+ data->type == w83687thf)
+ w83627hf_write_value(data, W83627THF_REG_VRM_OVT_CFG,
+ data->vrm_ovt);
+ w83627hf_write_value(data, W83781D_REG_SCFG1, data->scfg1);
+ w83627hf_write_value(data, W83781D_REG_SCFG2, data->scfg2);
+
+ /* Force re-reading all values */
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops w83627hf_dev_pm_ops = {
+ .suspend = w83627hf_suspend,
+ .resume = w83627hf_resume,
+};
+
+#define W83627HF_DEV_PM_OPS (&w83627hf_dev_pm_ops)
+#else
+#define W83627HF_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static struct platform_driver w83627hf_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
+ .pm = W83627HF_DEV_PM_OPS,
},
.probe = w83627hf_probe,
- .remove = __devexit_p(w83627hf_remove),
+ .remove = w83627hf_remove,
};
static ssize_t
@@ -1342,7 +1415,7 @@ static const struct attribute_group w83627hf_group_opt = {
.attrs = w83627hf_attributes_opt,
};
-static int __devinit w83627hf_probe(struct platform_device *pdev)
+static int w83627hf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627hf_sio_data *sio_data = dev->platform_data;
@@ -1508,7 +1581,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
return err;
}
-static int __devexit w83627hf_remove(struct platform_device *pdev)
+static int w83627hf_remove(struct platform_device *pdev)
{
struct w83627hf_data *data = platform_get_drvdata(pdev);
@@ -1564,7 +1637,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
return res;
}
-static int __devinit w83627thf_read_gpio5(struct platform_device *pdev)
+static int w83627thf_read_gpio5(struct platform_device *pdev)
{
struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
int res = 0xff, sel;
@@ -1597,7 +1670,7 @@ exit:
return res;
}
-static int __devinit w83687thf_read_vid(struct platform_device *pdev)
+static int w83687thf_read_vid(struct platform_device *pdev)
{
struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
int res = 0xff;
@@ -1649,7 +1722,7 @@ static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
return 0;
}
-static void __devinit w83627hf_init_device(struct platform_device *pdev)
+static void w83627hf_init_device(struct platform_device *pdev)
{
struct w83627hf_data *data = platform_get_drvdata(pdev);
int i;
@@ -1659,8 +1732,10 @@ static void __devinit w83627hf_init_device(struct platform_device *pdev)
/* Minimize conflicts with other winbond i2c-only clients... */
/* disable i2c subclients... how to disable main i2c client?? */
/* force i2c address to relatively uncommon address */
- w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
- w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ if (type == w83627hf) {
+ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
+ w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ }
/* Read VID only once */
if (type == w83627hf || type == w83637hf) {
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 20f11d31da40..93bd28639595 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1764,7 +1764,7 @@ w83781d_write_value(struct w83781d_data *data, u16 reg, u16 value)
return 0;
}
-static int __devinit
+static int
w83781d_isa_probe(struct platform_device *pdev)
{
int err, reg;
@@ -1824,7 +1824,7 @@ w83781d_isa_probe(struct platform_device *pdev)
return err;
}
-static int __devexit
+static int
w83781d_isa_remove(struct platform_device *pdev)
{
struct w83781d_data *data = platform_get_drvdata(pdev);
@@ -1842,7 +1842,7 @@ static struct platform_driver w83781d_isa_driver = {
.name = "w83781d",
},
.probe = w83781d_isa_probe,
- .remove = __devexit_p(w83781d_isa_remove),
+ .remove = w83781d_isa_remove,
};
/* return 1 if a supported chip is found, 0 otherwise */
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index d0db1f2738fb..df6ceaf8d58a 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -157,7 +157,7 @@ static const struct attribute_group wm831x_attr_group = {
.attrs = wm831x_attributes,
};
-static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
+static int wm831x_hwmon_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_hwmon *hwmon;
@@ -189,7 +189,7 @@ err_sysfs:
return ret;
}
-static int __devexit wm831x_hwmon_remove(struct platform_device *pdev)
+static int wm831x_hwmon_remove(struct platform_device *pdev)
{
struct wm831x_hwmon *hwmon = platform_get_drvdata(pdev);
@@ -201,7 +201,7 @@ static int __devexit wm831x_hwmon_remove(struct platform_device *pdev)
static struct platform_driver wm831x_hwmon_driver = {
.probe = wm831x_hwmon_probe,
- .remove = __devexit_p(wm831x_hwmon_remove),
+ .remove = wm831x_hwmon_remove,
.driver = {
.name = "wm831x-hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index b955756bdb42..64bf75c9442b 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -91,7 +91,7 @@ static const struct attribute_group wm8350_attr_group = {
.attrs = wm8350_attributes,
};
-static int __devinit wm8350_hwmon_probe(struct platform_device *pdev)
+static int wm8350_hwmon_probe(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
int ret;
@@ -114,7 +114,7 @@ err:
return ret;
}
-static int __devexit wm8350_hwmon_remove(struct platform_device *pdev)
+static int wm8350_hwmon_remove(struct platform_device *pdev)
{
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
@@ -126,7 +126,7 @@ static int __devexit wm8350_hwmon_remove(struct platform_device *pdev)
static struct platform_driver wm8350_hwmon_driver = {
.probe = wm8350_hwmon_probe,
- .remove = __devexit_p(wm8350_hwmon_remove),
+ .remove = wm8350_hwmon_remove,
.driver = {
.name = "wm8350-hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index 887d34effb3a..292869cc9034 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -78,7 +78,7 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
.relax = omap_hwspinlock_relax,
};
-static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
+static int omap_hwspinlock_probe(struct platform_device *pdev)
{
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
@@ -142,7 +142,7 @@ iounmap_base:
return ret;
}
-static int __devexit omap_hwspinlock_remove(struct platform_device *pdev)
+static int omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
@@ -163,7 +163,7 @@ static int __devexit omap_hwspinlock_remove(struct platform_device *pdev)
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove = __devexit_p(omap_hwspinlock_remove),
+ .remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.owner = THIS_MODULE,
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 86980fe04117..401c33bcdb45 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -91,7 +91,7 @@ static const struct hwspinlock_ops u8500_hwspinlock_ops = {
.relax = u8500_hsem_relax,
};
-static int __devinit u8500_hsem_probe(struct platform_device *pdev)
+static int u8500_hsem_probe(struct platform_device *pdev)
{
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
@@ -148,7 +148,7 @@ iounmap_base:
return ret;
}
-static int __devexit u8500_hsem_remove(struct platform_device *pdev)
+static int u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
@@ -172,7 +172,7 @@ static int __devexit u8500_hsem_remove(struct platform_device *pdev)
static struct platform_driver u8500_hsem_driver = {
.probe = u8500_hsem_probe,
- .remove = __devexit_p(u8500_hsem_remove),
+ .remove = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e9df4612b7eb..bdca5111eb9d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -337,6 +337,16 @@ config I2C_BLACKFIN_TWI_CLK_KHZ
help
The unit of the TWI clock is kHz.
+config I2C_CBUS_GPIO
+ tristate "CBUS I2C driver"
+ depends on GENERIC_GPIO
+ help
+	  Support for CBUS access using the I2C API. Mostly relevant for Nokia
+ Internet Tablets (770, N800 and N810).
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-cbus-gpio.
+
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
depends on (CPM1 || CPM2) && OF_I2C
@@ -818,6 +828,16 @@ config I2C_TINY_USB
This driver can also be built as a module. If so, the module
will be called i2c-tiny-usb.
+config I2C_VIPERBOARD
+ tristate "Viperboard I2C master support"
+ depends on MFD_VIPERBOARD && USB
+ help
+	  Say yes here to access the I2C part of the Nano River
+	  Technologies Viperboard as an I2C master.
+	  See the Viperboard API specification and Nano
+	  River Tech's viperboard.h for the detailed meaning
+	  of the module parameters.
+
comment "Other I2C/SMBus bus drivers"
config I2C_ACORN
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 395b516ffa08..6181f3ff263f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
@@ -79,6 +80,7 @@ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
+obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o
# Other I2C/SMBus bus drivers
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 125cd8e0ad25..3f491815e2c4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -139,7 +139,7 @@ static unsigned short ali1535_offset;
Note the differences between kernels with the old PCI BIOS interface and
newer kernels with the real PCI interface. In compat.h some things are
defined to make the transition easier. */
-static int __devinit ali1535_setup(struct pci_dev *dev)
+static int ali1535_setup(struct pci_dev *dev)
{
int retval;
unsigned char temp;
@@ -502,7 +502,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = {
MODULE_DEVICE_TABLE(pci, ali1535_ids);
-static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
@@ -518,7 +518,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali1535_adapter);
}
-static void __devexit ali1535_remove(struct pci_dev *dev)
+static void ali1535_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1535_adapter);
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
@@ -528,7 +528,7 @@ static struct pci_driver ali1535_driver = {
.name = "ali1535_smbus",
.id_table = ali1535_ids,
.probe = ali1535_probe,
- .remove = __devexit_p(ali1535_remove),
+ .remove = ali1535_remove,
};
module_pci_driver(ali1535_driver);
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index e02d9f86c6a0..84ccd9496a5e 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -326,7 +326,7 @@ static u32 ali1563_func(struct i2c_adapter * a)
}
-static int __devinit ali1563_setup(struct pci_dev * dev)
+static int ali1563_setup(struct pci_dev *dev)
{
u16 ctrl;
@@ -390,8 +390,8 @@ static struct i2c_adapter ali1563_adapter = {
.algo = &ali1563_algorithm,
};
-static int __devinit ali1563_probe(struct pci_dev * dev,
- const struct pci_device_id * id_table)
+static int ali1563_probe(struct pci_dev *dev,
+ const struct pci_device_id *id_table)
{
int error;
@@ -411,7 +411,7 @@ exit:
return error;
}
-static void __devexit ali1563_remove(struct pci_dev * dev)
+static void ali1563_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali1563_adapter);
ali1563_shutdown(dev);
@@ -428,7 +428,7 @@ static struct pci_driver ali1563_pci_driver = {
.name = "ali1563_smbus",
.id_table = ali1563_id_table,
.probe = ali1563_probe,
- .remove = __devexit_p(ali1563_remove),
+ .remove = ali1563_remove,
};
module_pci_driver(ali1563_pci_driver);
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index ce8d26d053a5..26bcc6127cee 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -131,7 +131,7 @@ MODULE_PARM_DESC(force_addr,
static struct pci_driver ali15x3_driver;
static unsigned short ali15x3_smba;
-static int __devinit ali15x3_setup(struct pci_dev *ALI15X3_dev)
+static int ali15x3_setup(struct pci_dev *ALI15X3_dev)
{
u16 a;
unsigned char temp;
@@ -484,7 +484,7 @@ static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
MODULE_DEVICE_TABLE (pci, ali15x3_ids);
-static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
@@ -500,7 +500,7 @@ static int __devinit ali15x3_probe(struct pci_dev *dev, const struct pci_device_
return i2c_add_adapter(&ali15x3_adapter);
}
-static void __devexit ali15x3_remove(struct pci_dev *dev)
+static void ali15x3_remove(struct pci_dev *dev)
{
i2c_del_adapter(&ali15x3_adapter);
release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
@@ -510,7 +510,7 @@ static struct pci_driver ali15x3_driver = {
.name = "ali15x3_smbus",
.id_table = ali15x3_ids,
.probe = ali15x3_probe,
- .remove = __devexit_p(ali15x3_remove),
+ .remove = ali15x3_remove,
};
module_pci_driver(ali15x3_driver);
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 304aa03b57b2..e13e2aa2d05d 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -324,8 +324,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
MODULE_DEVICE_TABLE (pci, amd756_ids);
-static int __devinit amd756_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int nforce = (id->driver_data == NFORCE);
int error;
@@ -397,7 +396,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
return error;
}
-static void __devexit amd756_remove(struct pci_dev *dev)
+static void amd756_remove(struct pci_dev *dev)
{
i2c_del_adapter(&amd756_smbus);
release_region(amd756_ioport, SMB_IOSIZE);
@@ -407,7 +406,7 @@ static struct pci_driver amd756_driver = {
.name = "amd756_smbus",
.id_table = amd756_ids,
.probe = amd756_probe,
- .remove = __devexit_p(amd756_remove),
+ .remove = amd756_remove,
};
module_pci_driver(amd756_driver);
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 0919ac1d99aa..a44e6e77c5a1 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -422,8 +422,7 @@ static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
MODULE_DEVICE_TABLE (pci, amd8111_ids);
-static int __devinit amd8111_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct amd_smbus *smbus;
int error;
@@ -475,7 +474,7 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
return error;
}
-static void __devexit amd8111_remove(struct pci_dev *dev)
+static void amd8111_remove(struct pci_dev *dev)
{
struct amd_smbus *smbus = pci_get_drvdata(dev);
@@ -488,7 +487,7 @@ static struct pci_driver amd8111_driver = {
.name = "amd8111_smbus2",
.id_table = amd8111_ids,
.probe = amd8111_probe,
- .remove = __devexit_p(amd8111_remove),
+ .remove = amd8111_remove,
};
module_pci_driver(amd8111_driver);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index c02bf208084f..2bfc04d0a1b1 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -19,6 +19,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -29,9 +31,11 @@
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/platform_data/dma-atmel.h>
#define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
+#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -66,24 +70,39 @@
#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
struct at91_twi_pdata {
- unsigned clk_max_div;
- unsigned clk_offset;
- bool has_unre_flag;
+ unsigned clk_max_div;
+ unsigned clk_offset;
+ bool has_unre_flag;
+ bool has_dma_support;
+ struct at_dma_slave dma_slave;
+};
+
+struct at91_twi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sg;
+ struct dma_async_tx_descriptor *data_desc;
+ enum dma_data_direction direction;
+ bool buf_mapped;
+ bool xfer_in_progress;
};
struct at91_twi_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct clk *clk;
- u8 *buf;
- size_t buf_len;
- struct i2c_msg *msg;
- int irq;
- unsigned transfer_status;
- struct i2c_adapter adapter;
- unsigned twi_cwgr_reg;
- struct at91_twi_pdata *pdata;
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct clk *clk;
+ u8 *buf;
+ size_t buf_len;
+ struct i2c_msg *msg;
+ int irq;
+ unsigned imr;
+ unsigned transfer_status;
+ struct i2c_adapter adapter;
+ unsigned twi_cwgr_reg;
+ struct at91_twi_pdata *pdata;
+ bool use_dma;
+ struct at91_twi_dma dma;
};
static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
@@ -102,6 +121,17 @@ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
}
+static void at91_twi_irq_save(struct at91_twi_dev *dev)
+{
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
+ at91_disable_twi_interrupts(dev);
+}
+
+static void at91_twi_irq_restore(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IER, dev->imr);
+}
+
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
at91_disable_twi_interrupts(dev);
@@ -115,7 +145,7 @@ static void at91_init_twi_bus(struct at91_twi_dev *dev)
* Calculate symmetric clock as stated in datasheet:
* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
*/
-static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
+static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
int ckdiv, cdiv, div;
struct at91_twi_pdata *pdata = dev->pdata;
@@ -138,6 +168,28 @@ static void __devinit at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
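The divider formula quoted in the comment above can be checked with a small stand-alone program. The 133 MHz peripheral clock is only an assumed example value, offset = 4 and clk_max_div = 7 match the at91sam9x5 configuration further down, and the search loop is a simple illustration rather than the driver's exact algorithm (whose body is not shown in this hunk):

#include <stdio.h>

int main(void)
{
	unsigned long f_main = 133000000;	/* assumed peripheral clock */
	unsigned long target = 100000;		/* TWI_CLK_HZ */
	unsigned int offset = 4, clk_max_div = 7;	/* at91sam9x5 values */
	unsigned long div = f_main / (2 * target) - offset;
	unsigned int ckdiv = 0;

	/* find the smallest ckdiv that keeps cdiv within 8 bits */
	while ((div >> ckdiv) > 255 && ckdiv < clk_max_div)
		ckdiv++;

	/* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset)) */
	printf("cdiv=%lu ckdiv=%u -> twi_clk=%lu Hz\n", div >> ckdiv, ckdiv,
	       f_main / (2 * (((div >> ckdiv) << ckdiv) + offset)));
	return 0;
}

With these numbers the result is cdiv=165, ckdiv=2, giving roughly 100.15 kHz.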
+static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
+{
+ struct at91_twi_dma *dma = &dev->dma;
+
+ at91_twi_irq_save(dev);
+
+ if (dma->xfer_in_progress) {
+ if (dma->direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_all(dma->chan_rx);
+ else
+ dmaengine_terminate_all(dma->chan_tx);
+ dma->xfer_in_progress = false;
+ }
+ if (dma->buf_mapped) {
+ dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
+ dev->buf_len, dma->direction);
+ dma->buf_mapped = false;
+ }
+
+ at91_twi_irq_restore(dev);
+}
+
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -154,6 +206,60 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_write_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_MEM_TO_DEV);
+
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+}
+
+static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *txdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_tx = dma->chan_tx;
+
+ if (dev->buf_len <= 0)
+ return;
+
+ dma->direction = DMA_TO_DEVICE;
+
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ sg_dma_len(&dma->sg) = dev->buf_len;
+ sg_dma_address(&dma->sg) = dma_addr;
+
+ txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ txdesc->callback = at91_twi_write_data_dma_callback;
+ txdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(chan_tx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
+
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
if (dev->buf_len <= 0)
@@ -179,6 +285,61 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
++dev->buf;
}
+static void at91_twi_read_data_dma_callback(void *data)
+{
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ dev->buf_len, DMA_DEV_TO_MEM);
+
+ /* The last two bytes have to be read without using dma */
+ dev->buf += dev->buf_len - 2;
+ dev->buf_len = 2;
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
+}
+
+static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
+{
+ dma_addr_t dma_addr;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct at91_twi_dma *dma = &dev->dma;
+ struct dma_chan *chan_rx = dma->chan_rx;
+
+ dma->direction = DMA_FROM_DEVICE;
+
+ /* Keep in mind that we won't use dma to read the last two bytes */
+ at91_twi_irq_save(dev);
+ dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev->dev, dma_addr)) {
+ dev_err(dev->dev, "dma map failed\n");
+ return;
+ }
+ dma->buf_mapped = true;
+ at91_twi_irq_restore(dev);
+ dma->sg.dma_address = dma_addr;
+ sg_dma_len(&dma->sg) = dev->buf_len - 2;
+
+ rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc) {
+ dev_err(dev->dev, "dma prep slave sg failed\n");
+ goto error;
+ }
+
+ rxdesc->callback = at91_twi_read_data_dma_callback;
+ rxdesc->callback_param = dev;
+
+ dma->xfer_in_progress = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dma->chan_rx);
+
+ return;
+
+error:
+ at91_twi_dma_cleanup(dev);
+}
+
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
struct at91_twi_dev *dev = dev_id;
@@ -229,12 +390,36 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
- at91_twi_write(dev, AT91_TWI_IER,
+ /*
+	 * When using DMA, the last byte has to be read manually in
+	 * order not to send the STOP command too late, which would
+	 * cause extra data to be received. In practice, reading n-1
+	 * bytes with DMA still shows latency issues, so reading n-2
+	 * bytes with DMA and the last two manually seems to be the
+	 * best solution.
+ */
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_read_data_dma(dev);
+ /*
+ * It is important to enable TXCOMP irq here because
+ * doing it only when transferring the last two bytes
+ * will mask NACK errors since TXCOMP is set when a
+ * NACK occurs.
+ */
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP);
+ } else
+ at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
} else {
- at91_twi_write_next_byte(dev);
- at91_twi_write(dev, AT91_TWI_IER,
- AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+ at91_twi_write_data_dma(dev);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else {
+ at91_twi_write_next_byte(dev);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+ }
}
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
@@ -242,23 +427,31 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
if (ret == 0) {
dev_err(dev->dev, "controller timed out\n");
at91_init_twi_bus(dev);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_NACK) {
dev_dbg(dev->dev, "received nack\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto error;
}
if (dev->transfer_status & AT91_TWI_OVRE) {
dev_err(dev->dev, "overrun while reading\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
dev_err(dev->dev, "underrun while writing\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
dev_dbg(dev->dev, "transfer complete\n");
return 0;
+
+error:
+ at91_twi_dma_cleanup(dev);
+ return ret;
}
static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
@@ -329,36 +522,42 @@ static struct at91_twi_pdata at91rm9200_config = {
.clk_max_div = 5,
.clk_offset = 3,
.has_unre_flag = true,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9261_config = {
.clk_max_div = 5,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9260_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = false,
};
static struct at91_twi_pdata at91sam9x5_config = {
.clk_max_div = 7,
.clk_offset = 4,
.has_unre_flag = false,
+ .has_dma_support = true,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -405,7 +604,91 @@ MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#define atmel_twi_dt_ids NULL
#endif
-static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
+{
+ int ret = 0;
+ struct at_dma_slave *sdata;
+ struct dma_slave_config slave_config;
+ struct at91_twi_dma *dma = &dev->dma;
+
+ sdata = &dev->pdata->dma_slave;
+
+ memset(&slave_config, 0, sizeof(slave_config));
+ slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ if (sdata && sdata->dma_dev) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma->chan_tx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_tx) {
+ dev_err(dev->dev, "no DMA channel available for tx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ dma->chan_rx = dma_request_channel(mask, filter, sdata);
+ if (!dma->chan_rx) {
+ dev_err(dev->dev, "no DMA channel available for rx\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure tx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
+ dev_err(dev->dev, "failed to configure rx channel\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ sg_init_table(&dma->sg, 1);
+ dma->buf_mapped = false;
+ dma->xfer_in_progress = false;
+
+ dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+ return ret;
+
+error:
+ dev_info(dev->dev, "can't use DMA\n");
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+ return ret;
+}
+
+static struct at91_twi_pdata *at91_twi_get_driver_data(
struct platform_device *pdev)
{
if (pdev->dev.of_node) {
@@ -413,16 +696,17 @@ static struct at91_twi_pdata * __devinit at91_twi_get_driver_data(
match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
if (!match)
return NULL;
- return match->data;
+ return (struct at91_twi_pdata *)match->data;
}
return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
}
-static int __devinit at91_twi_probe(struct platform_device *pdev)
+static int at91_twi_probe(struct platform_device *pdev)
{
struct at91_twi_dev *dev;
struct resource *mem;
int rc;
+ u32 phy_addr;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -433,6 +717,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
return -ENODEV;
+ phy_addr = mem->start;
dev->pdata = at91_twi_get_driver_data(pdev);
if (!dev->pdata)
@@ -462,6 +747,11 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
}
clk_prepare_enable(dev->clk);
+ if (dev->pdata->has_dma_support) {
+ if (at91_twi_configure_dma(dev, phy_addr) == 0)
+ dev->use_dma = true;
+ }
+
at91_calc_twi_clock(dev, TWI_CLK_HZ);
at91_init_twi_bus(dev);
@@ -489,7 +779,7 @@ static int __devinit at91_twi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit at91_twi_remove(struct platform_device *pdev)
+static int at91_twi_remove(struct platform_device *pdev)
{
struct at91_twi_dev *dev = platform_get_drvdata(pdev);
int rc;
@@ -530,7 +820,7 @@ static const struct dev_pm_ops at91_twi_pm = {
static struct platform_driver at91_twi_driver = {
.probe = at91_twi_probe,
- .remove = __devexit_p(at91_twi_remove),
+ .remove = at91_twi_remove,
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 582d616db346..b278298787d7 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -313,7 +313,7 @@ static void i2c_au1550_disable(struct i2c_au1550_data *priv)
* Prior to calling us, the 50MHz clock frequency and routing
* must have been set up for the PSC indicated by the adapter.
*/
-static int __devinit
+static int
i2c_au1550_probe(struct platform_device *pdev)
{
struct i2c_au1550_data *priv;
@@ -372,7 +372,7 @@ out:
return ret;
}
-static int __devexit i2c_au1550_remove(struct platform_device *pdev)
+static int i2c_au1550_remove(struct platform_device *pdev)
{
struct i2c_au1550_data *priv = platform_get_drvdata(pdev);
@@ -423,7 +423,7 @@ static struct platform_driver au1xpsc_smbus_driver = {
.pm = AU1XPSC_SMBUS_PMOPS,
},
.probe = i2c_au1550_probe,
- .remove = __devexit_p(i2c_au1550_remove),
+ .remove = i2c_au1550_remove,
};
module_platform_driver(au1xpsc_smbus_driver);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
new file mode 100644
index 000000000000..98386d659318
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -0,0 +1,300 @@
+/*
+ * CBUS I2C driver for Nokia Internet Tablets.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and
+ * Felipe Balbi. Converted to I2C driver by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/i2c-cbus-gpio.h>
+
+/*
+ * Bit counts are derived from the Nokia implementation. They should be
+ * re-checked if other CBUS implementations appear.
+ */
+#define CBUS_ADDR_BITS 3
+#define CBUS_REG_BITS 5
+
+struct cbus_host {
+ spinlock_t lock; /* host lock */
+ struct device *dev;
+ int clk_gpio;
+ int dat_gpio;
+ int sel_gpio;
+};
+
+/**
+ * cbus_send_bit - sends one bit over the bus
+ * @host: the host we're using
+ * @bit: one bit of information to send
+ */
+static void cbus_send_bit(struct cbus_host *host, unsigned bit)
+{
+ gpio_set_value(host->dat_gpio, bit ? 1 : 0);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+}
+
+/**
+ * cbus_send_data - sends the @len lowest bits of @data over the bus, MSB first
+ * @host: the host we're using
+ * @data: the data to send
+ * @len: number of bits to send
+ */
+static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len)
+{
+ int i;
+
+ for (i = len; i > 0; i--)
+ cbus_send_bit(host, data & (1 << (i - 1)));
+}
+
+/**
+ * cbus_receive_bit - receives one bit from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_bit(struct cbus_host *host)
+{
+ int ret;
+
+ gpio_set_value(host->clk_gpio, 1);
+ ret = gpio_get_value(host->dat_gpio);
+ gpio_set_value(host->clk_gpio, 0);
+ return ret;
+}
+
+/**
+ * cbus_receive_word - receives 16-bit word from the bus
+ * @host: the host we're using
+ */
+static int cbus_receive_word(struct cbus_host *host)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 16; i > 0; i--) {
+ int bit = cbus_receive_bit(host);
+
+ if (bit < 0)
+ return bit;
+
+ if (bit)
+ ret |= 1 << (i - 1);
+ }
+ return ret;
+}
+
+/**
+ * cbus_transfer - transfers data over the bus
+ * @host: the host we're using
+ * @rw: read/write flag
+ * @dev: device address
+ * @reg: register address
+ * @data: if @rw == I2C_SMBUS_WRITE, the 16-bit word to send; otherwise 0
+ */
+static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev,
+ unsigned reg, unsigned data)
+{
+ unsigned long flags;
+ int ret;
+
+ /* We don't want interrupts disturbing our transfer */
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Reset state and start of transfer, SEL stays down during transfer */
+ gpio_set_value(host->sel_gpio, 0);
+
+ /* Set the DAT pin to output */
+ gpio_direction_output(host->dat_gpio, 1);
+
+ /* Send the device address */
+ cbus_send_data(host, dev, CBUS_ADDR_BITS);
+
+ /* Send the rw flag */
+ cbus_send_bit(host, rw == I2C_SMBUS_READ);
+
+ /* Send the register address */
+ cbus_send_data(host, reg, CBUS_REG_BITS);
+
+ if (rw == I2C_SMBUS_WRITE) {
+ cbus_send_data(host, data, 16);
+ ret = 0;
+ } else {
+ ret = gpio_direction_input(host->dat_gpio);
+ if (ret) {
+ dev_dbg(host->dev, "failed setting direction\n");
+ goto out;
+ }
+ gpio_set_value(host->clk_gpio, 1);
+
+ ret = cbus_receive_word(host);
+ if (ret < 0) {
+ dev_dbg(host->dev, "failed receiving data\n");
+ goto out;
+ }
+ }
+
+ /* Indicate end of transfer, SEL goes up until next transfer */
+ gpio_set_value(host->sel_gpio, 1);
+ gpio_set_value(host->clk_gpio, 1);
+ gpio_set_value(host->clk_gpio, 0);
+
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
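For illustration only (a stand-alone sketch, not part of the driver): the frame a write transfer clocks out is 3 address bits, the rw bit, 5 register bits and the 16-bit data word, each field MSB first. The device, register and data values below are placeholders:

#include <stdio.h>

#define CBUS_ADDR_BITS	3
#define CBUS_REG_BITS	5

/* print the lowest @len bits of @value, MSB first, as cbus_send_data() does */
static void print_bits(unsigned int value, unsigned int len)
{
	while (len--)
		putchar(value & (1u << len) ? '1' : '0');
}

int main(void)
{
	unsigned int dev = 0x5, reg = 0x12, data = 0xBEEF;	/* placeholders */

	print_bits(dev, CBUS_ADDR_BITS);	/* device address */
	putchar(' ');
	print_bits(0, 1);			/* rw bit: 0 for a write */
	putchar(' ');
	print_bits(reg, CBUS_REG_BITS);		/* register address */
	putchar(' ');
	print_bits(data, 16);			/* 16-bit data word */
	putchar('\n');
	return 0;
}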
+
+static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter,
+ u16 addr,
+ unsigned short flags,
+ char read_write,
+ u8 command,
+ int size,
+ union i2c_smbus_data *data)
+{
+ struct cbus_host *chost = i2c_get_adapdata(adapter);
+ int ret;
+
+ if (size != I2C_SMBUS_WORD_DATA)
+ return -EINVAL;
+
+ ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr,
+ command, data->word);
+ if (ret < 0)
+ return ret;
+
+ if (read_write == I2C_SMBUS_READ)
+ data->word = ret;
+
+ return 0;
+}
+
+static u32 cbus_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+}
+
+static const struct i2c_algorithm cbus_i2c_algo = {
+ .smbus_xfer = cbus_i2c_smbus_xfer,
+ .functionality = cbus_i2c_func,
+};
+
+static int cbus_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+ return i2c_del_adapter(adapter);
+}
+
+static int cbus_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct cbus_host *chost;
+ int ret;
+
+ adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter),
+ GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL);
+ if (!chost)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ struct device_node *dnode = pdev->dev.of_node;
+ if (of_gpio_count(dnode) != 3)
+ return -ENODEV;
+ chost->clk_gpio = of_get_gpio(dnode, 0);
+ chost->dat_gpio = of_get_gpio(dnode, 1);
+ chost->sel_gpio = of_get_gpio(dnode, 2);
+ } else if (pdev->dev.platform_data) {
+ struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
+ chost->clk_gpio = pdata->clk_gpio;
+ chost->dat_gpio = pdata->dat_gpio;
+ chost->sel_gpio = pdata->sel_gpio;
+ } else {
+ return -ENODEV;
+ }
+
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+ adapter->nr = pdev->id;
+ adapter->timeout = HZ;
+ adapter->algo = &cbus_i2c_algo;
+ strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name));
+
+ spin_lock_init(&chost->lock);
+ chost->dev = &pdev->dev;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->clk_gpio,
+ GPIOF_OUT_INIT_LOW, "CBUS clk");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->dat_gpio, GPIOF_IN,
+ "CBUS data");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, chost->sel_gpio,
+ GPIOF_OUT_INIT_HIGH, "CBUS sel");
+ if (ret)
+ return ret;
+
+ i2c_set_adapdata(adapter, chost);
+ platform_set_drvdata(pdev, adapter);
+
+ return i2c_add_numbered_adapter(adapter);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id i2c_cbus_dt_ids[] = {
+ { .compatible = "i2c-cbus-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
+#endif
+
+static struct platform_driver cbus_i2c_driver = {
+ .probe = cbus_i2c_probe,
+ .remove = cbus_i2c_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "i2c-cbus-gpio",
+ },
+};
+module_platform_driver(cbus_i2c_driver);
+
+MODULE_ALIAS("platform:i2c-cbus-gpio");
+MODULE_DESCRIPTION("CBUS I2C driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
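As a usage sketch (not part of the patch), a board without device tree support could hand the driver its three GPIO lines through the platform_data path handled in cbus_i2c_probe(); the GPIO numbers and bus id below are placeholders:

#include <linux/platform_device.h>
#include <linux/platform_data/i2c-cbus-gpio.h>

static struct i2c_cbus_platform_data example_cbus_pdata = {
	.clk_gpio	= 66,	/* placeholder pin numbers */
	.dat_gpio	= 65,
	.sel_gpio	= 64,
};

static struct platform_device example_cbus_device = {
	.name	= "i2c-cbus-gpio",
	.id	= 3,	/* becomes adapter->nr, i.e. the I2C bus number */
	.dev	= {
		.platform_data	= &example_cbus_pdata,
	},
};

Registering this with platform_device_register() during board init exposes the bus; since cbus_i2c_func() only advertises word-data transfers, clients access CBUS registers through i2c_smbus_read_word_data() and i2c_smbus_write_word_data().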
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index c1e1096ba069..2e79c1024191 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -426,7 +426,7 @@ static const struct i2c_adapter cpm_ops = {
.algo = &cpm_i2c_algo,
};
-static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
+static int cpm_i2c_setup(struct cpm_i2c *cpm)
{
struct platform_device *ofdev = cpm->ofdev;
const u32 *data;
@@ -634,7 +634,7 @@ static void cpm_i2c_shutdown(struct cpm_i2c *cpm)
cpm_muram_free(cpm->i2c_addr);
}
-static int __devinit cpm_i2c_probe(struct platform_device *ofdev)
+static int cpm_i2c_probe(struct platform_device *ofdev)
{
int result, len;
struct cpm_i2c *cpm;
@@ -688,7 +688,7 @@ out_free:
return result;
}
-static int __devexit cpm_i2c_remove(struct platform_device *ofdev)
+static int cpm_i2c_remove(struct platform_device *ofdev)
{
struct cpm_i2c *cpm = dev_get_drvdata(&ofdev->dev);
@@ -716,7 +716,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
- .remove = __devexit_p(cpm_i2c_remove),
+ .remove = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index cbba7db9ad59..f5258c205de5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "i2c-designware-core.h"
/*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
return dw_readl(dev, DW_IC_COMP_PARAM_1);
}
EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 92a1e2c15baa..6add851e9dee 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -207,7 +207,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return dev->controller->clk_khz;
}
-static int __devinit i2c_dw_pci_probe(struct pci_dev *pdev,
+static int i2c_dw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct dw_i2c_dev *dev;
@@ -328,7 +328,7 @@ exit:
return r;
}
-static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
+static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
@@ -368,7 +368,7 @@ static struct pci_driver dw_i2c_driver = {
.name = DRIVER_NAME,
.id_table = i2_designware_pci_ids,
.probe = i2c_dw_pci_probe,
- .remove = __devexit_p(i2c_dw_pci_remove),
+ .remove = i2c_dw_pci_remove,
.driver = {
.pm = &i2c_dw_pm_ops,
},
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0506fef8dc00..343357a2b5b4 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -50,7 +50,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
return clk_get_rate(dev->clk)/1000;
}
-static int __devinit dw_i2c_probe(struct platform_device *pdev)
+static int dw_i2c_probe(struct platform_device *pdev)
{
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -169,7 +169,7 @@ err_release_region:
return r;
}
-static int __devexit dw_i2c_remove(struct platform_device *pdev)
+static int dw_i2c_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
@@ -228,7 +228,7 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
MODULE_ALIAS("platform:i2c_designware");
static struct platform_driver dw_i2c_driver = {
- .remove = __devexit_p(dw_i2c_remove),
+ .remove = dw_i2c_remove,
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 259f7697bf25..5e7886e7136e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -758,7 +758,7 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
}
-static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+static int pch_i2c_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *base_addr;
@@ -851,7 +851,7 @@ err_pci_enable:
return ret;
}
-static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+static void pch_i2c_remove(struct pci_dev *pdev)
{
int i;
struct adapter_info *adap_info = pci_get_drvdata(pdev);
@@ -948,7 +948,7 @@ static struct pci_driver pch_pcidriver = {
.name = KBUILD_MODNAME,
.id_table = pch_pcidev_id,
.probe = pch_i2c_probe,
- .remove = __devexit_p(pch_i2c_remove),
+ .remove = pch_i2c_remove,
.suspend = pch_i2c_suspend,
.resume = pch_i2c_resume
};
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 37e2e82a9c88..485497066ed7 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -205,7 +205,7 @@ static struct i2c_adapter pcf_isa_ops = {
.name = "i2c-elektor",
};
-static int __devinit elektor_match(struct device *dev, unsigned int id)
+static int elektor_match(struct device *dev, unsigned int id)
{
#ifdef __alpha__
/* check to see we have memory mapped PCF8584 connected to the
@@ -264,7 +264,7 @@ static int __devinit elektor_match(struct device *dev, unsigned int id)
return 1;
}
-static int __devinit elektor_probe(struct device *dev, unsigned int id)
+static int elektor_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pcf_wait);
if (pcf_isa_init())
@@ -293,7 +293,7 @@ static int __devinit elektor_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit elektor_remove(struct device *dev, unsigned int id)
+static int elektor_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pcf_isa_ops);
@@ -316,7 +316,7 @@ static int __devexit elektor_remove(struct device *dev, unsigned int id)
static struct isa_driver i2c_elektor_driver = {
.match = elektor_match,
.probe = elektor_probe,
- .remove = __devexit_p(elektor_remove),
+ .remove = elektor_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-elektor",
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e62d2d938628..f3fa4332bbdf 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -85,7 +85,7 @@ static int i2c_gpio_getscl(void *data)
return gpio_get_value(pdata->scl_pin);
}
-static int __devinit of_i2c_gpio_probe(struct device_node *np,
+static int of_i2c_gpio_probe(struct device_node *np,
struct i2c_gpio_platform_data *pdata)
{
u32 reg;
@@ -117,7 +117,7 @@ static int __devinit of_i2c_gpio_probe(struct device_node *np,
return 0;
}
-static int __devinit i2c_gpio_probe(struct platform_device *pdev)
+static int i2c_gpio_probe(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -184,7 +184,11 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
bit_data->data = pdata;
adap->owner = THIS_MODULE;
- snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+ if (pdev->dev.of_node)
+ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ else
+ snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
+
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->dev.parent = &pdev->dev;
@@ -214,7 +218,7 @@ err_request_sda:
return ret;
}
-static int __devexit i2c_gpio_remove(struct platform_device *pdev)
+static int i2c_gpio_remove(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
@@ -247,7 +251,7 @@ static struct platform_driver i2c_gpio_driver = {
.of_match_table = of_match_ptr(i2c_gpio_dt_ids),
},
.probe = i2c_gpio_probe,
- .remove = __devexit_p(i2c_gpio_remove),
+ .remove = i2c_gpio_remove,
};
static int __init i2c_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 19515df61021..3351cc7ed11f 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -356,7 +356,7 @@ static const struct i2c_algorithm highlander_i2c_algo = {
.functionality = highlander_i2c_func,
};
-static int __devinit highlander_i2c_probe(struct platform_device *pdev)
+static int highlander_i2c_probe(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -441,7 +441,7 @@ err:
return ret;
}
-static int __devexit highlander_i2c_remove(struct platform_device *pdev)
+static int highlander_i2c_remove(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev = platform_get_drvdata(pdev);
@@ -465,7 +465,7 @@ static struct platform_driver highlander_i2c_driver = {
},
.probe = highlander_i2c_probe,
- .remove = __devexit_p(highlander_i2c_remove),
+ .remove = highlander_i2c_remove,
};
module_platform_driver(highlander_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c9f95e1666a8..79c3d9069a48 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -112,7 +112,7 @@ static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
MODULE_DEVICE_TABLE (pci, hydra_ids);
-static int __devinit hydra_probe(struct pci_dev *dev,
+static int hydra_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned long base = pci_resource_start(dev, 0);
@@ -139,7 +139,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit hydra_remove(struct pci_dev *dev)
+static void hydra_remove(struct pci_dev *dev)
{
pdregw(hydra_bit_data.data, 0); /* clear SCLK_OE and SDAT_OE */
i2c_del_adapter(&hydra_adap);
@@ -153,7 +153,7 @@ static struct pci_driver hydra_driver = {
.name = "hydra_smbus",
.id_table = hydra_ids,
.probe = hydra_probe,
- .remove = __devexit_p(hydra_remove),
+ .remove = hydra_remove,
};
module_pci_driver(hydra_driver);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6abc00d59881..3092387f6ef4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -81,6 +81,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/err.h>
+#include <linux/of_i2c.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
@@ -840,14 +841,14 @@ struct dmi_onboard_device_info {
const char *i2c_type;
};
-static struct dmi_onboard_device_info __devinitdata dmi_devices[] = {
+static const struct dmi_onboard_device_info dmi_devices[] = {
{ "Syleus", DMI_DEV_TYPE_OTHER, 0x73, "fscsyl" },
{ "Hermes", DMI_DEV_TYPE_OTHER, 0x73, "fscher" },
{ "Hades", DMI_DEV_TYPE_OTHER, 0x73, "fschds" },
};
-static void __devinit dmi_check_onboard_device(u8 type, const char *name,
- struct i2c_adapter *adap)
+static void dmi_check_onboard_device(u8 type, const char *name,
+ struct i2c_adapter *adap)
{
int i;
struct i2c_board_info info;
@@ -870,8 +871,7 @@ static void __devinit dmi_check_onboard_device(u8 type, const char *name,
/* We use our own function to check for onboard devices instead of
dmi_find_device() as some buggy BIOS's have the devices we are interested
in marked as disabled */
-static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
- void *adap)
+static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
{
int i, count;
@@ -900,7 +900,7 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
/* Register optional slaves */
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
+static void i801_probe_optional_slaves(struct i801_priv *priv)
{
/* Only register slaves on main SMBus channel */
if (priv->features & FEATURE_IDF)
@@ -920,7 +920,7 @@ static void __devinit i801_probe_optional_slaves(struct i801_priv *priv)
}
#else
static void __init input_apanel_init(void) {}
-static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
+static void i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
@@ -943,7 +943,7 @@ static struct i801_mux_config i801_mux_config_asus_z8_d18 = {
.n_gpios = 2,
};
-static struct dmi_system_id __devinitdata mux_dmi_table[] = {
+static const struct dmi_system_id mux_dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
@@ -1011,7 +1011,7 @@ static struct dmi_system_id __devinitdata mux_dmi_table[] = {
};
/* Setup multiplexing if needed */
-static int __devinit i801_add_mux(struct i801_priv *priv)
+static int i801_add_mux(struct i801_priv *priv)
{
struct device *dev = &priv->adapter.dev;
const struct i801_mux_config *mux_config;
@@ -1047,13 +1047,13 @@ static int __devinit i801_add_mux(struct i801_priv *priv)
return 0;
}
-static void __devexit i801_del_mux(struct i801_priv *priv)
+static void i801_del_mux(struct i801_priv *priv)
{
if (priv->mux_pdev)
platform_device_unregister(priv->mux_pdev);
}
-static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv)
+static unsigned int i801_get_adapter_class(struct i801_priv *priv)
{
const struct dmi_system_id *id;
const struct i801_mux_config *mux_config;
@@ -1083,8 +1083,7 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
int err, i;
@@ -1108,6 +1107,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
/* fall through */
default:
priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
priv->features |= FEATURE_SMBUS_PEC;
@@ -1120,16 +1120,6 @@ static int __devinit i801_probe(struct pci_dev *dev,
break;
}
- /* IRQ processing tested on CougarPoint PCH, ICH5, ICH7-M and ICH10 */
- if (dev->device == PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS ||
- dev->device == PCI_DEVICE_ID_INTEL_82801EB_3 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH7_17 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH8_5 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH9_6 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_4 ||
- dev->device == PCI_DEVICE_ID_INTEL_ICH10_5)
- priv->features |= FEATURE_IRQ;
-
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
if (priv->features & disable_features & (1 << i))
@@ -1215,6 +1205,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
goto exit_free_irq;
}
+ of_i2c_register_devices(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
@@ -1233,7 +1224,7 @@ exit:
return err;
}
-static void __devexit i801_remove(struct pci_dev *dev)
+static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
@@ -1279,7 +1270,7 @@ static struct pci_driver i801_driver = {
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
- .remove = __devexit_p(i801_remove),
+ .remove = i801_remove,
.suspend = i801_suspend,
.resume = i801_resume,
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 806e225f3de7..33a2abb6c063 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -660,7 +660,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
return (u8)((opb + 9) / 10 - 1);
}
-static int __devinit iic_request_irq(struct platform_device *ofdev,
+static int iic_request_irq(struct platform_device *ofdev,
struct ibm_iic_private *dev)
{
struct device_node *np = ofdev->dev.of_node;
@@ -691,7 +691,7 @@ static int __devinit iic_request_irq(struct platform_device *ofdev,
/*
* Register single IIC interface
*/
-static int __devinit iic_probe(struct platform_device *ofdev)
+static int iic_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
@@ -781,7 +781,7 @@ error_cleanup:
/*
* Cleanup initialized IIC interface
*/
-static int __devexit iic_remove(struct platform_device *ofdev)
+static int iic_remove(struct platform_device *ofdev)
{
struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev);
@@ -812,7 +812,7 @@ static struct platform_driver ibm_iic_driver = {
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
- .remove = __devexit_p(iic_remove),
+ .remove = iic_remove,
};
module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 2ef162d148cb..b9734747d610 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -52,8 +52,6 @@
#include <linux/of_device.h>
#include <linux/of_i2c.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/hardware.h>
#include <linux/platform_data/i2c-imx.h>
/** Defines ********************************************************************
@@ -115,6 +113,11 @@ static u16 __initdata i2c_clk_div[50][2] = {
{ 3072, 0x1E }, { 3840, 0x1F }
};
+enum imx_i2c_type {
+ IMX1_I2C,
+ IMX21_I2C,
+};
+
struct imx_i2c_struct {
struct i2c_adapter adapter;
struct clk *clk;
@@ -124,13 +127,33 @@ struct imx_i2c_struct {
unsigned int disable_delay;
int stopped;
unsigned int ifdr; /* IMX_I2C_IFDR */
+ enum imx_i2c_type devtype;
+};
+
+static struct platform_device_id imx_i2c_devtype[] = {
+ {
+ .name = "imx1-i2c",
+ .driver_data = IMX1_I2C,
+ }, {
+ .name = "imx21-i2c",
+ .driver_data = IMX21_I2C,
+ }, {
+ /* sentinel */
+ }
};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
static const struct of_device_id i2c_imx_dt_ids[] = {
- { .compatible = "fsl,imx1-i2c", },
+ { .compatible = "fsl,imx1-i2c", .data = &imx_i2c_devtype[IMX1_I2C], },
+ { .compatible = "fsl,imx21-i2c", .data = &imx_i2c_devtype[IMX21_I2C], },
{ /* sentinel */ }
};
+static inline int is_imx1_i2c(struct imx_i2c_struct *i2c_imx)
+{
+ return i2c_imx->devtype == IMX1_I2C;
+}
+
/** Functions for IMX I2C adapter driver ***************************************
*******************************************************************************/
@@ -223,7 +246,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
temp &= ~(I2CR_MSTA | I2CR_MTX);
writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
}
- if (cpu_is_mx1()) {
+ if (is_imx1_i2c(i2c_imx)) {
/*
* This delay caused by an i.MXL hardware bug.
* If no (or too short) delay, no "STOP" bit will be generated.
@@ -465,6 +488,8 @@ static struct i2c_algorithm i2c_imx_algo = {
static int __init i2c_imx_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
+ &pdev->dev);
struct imx_i2c_struct *i2c_imx;
struct resource *res;
struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
@@ -497,6 +522,10 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ i2c_imx->devtype = pdev->id_entry->driver_data;
+
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
i2c_imx->adapter.owner = THIS_MODULE;
@@ -593,7 +622,8 @@ static struct platform_driver i2c_imx_driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = i2c_imx_dt_ids,
- }
+ },
+ .id_table = imx_i2c_devtype,
};
static int __init i2c_adap_imx_init(void)
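In the i2c-imx hunks above, the global cpu_is_mx1() test is replaced by per-device type data: the platform_device_id table carries an enum in driver_data, each of_device_id entry points back into that table through .data, and probe() stores the resolved entry in pdev->id_entry so is_imx1_i2c() becomes a plain compare. A stand-alone model of that lookup follows; the table contents mirror the patch, everything else is illustrative:

#include <stdio.h>
#include <string.h>

enum imx_i2c_type { IMX1_I2C, IMX21_I2C };

struct devtype_entry {
	const char *name;
	enum imx_i2c_type driver_data;
};

static const struct devtype_entry devtypes[] = {
	{ "imx1-i2c",  IMX1_I2C  },
	{ "imx21-i2c", IMX21_I2C },
};

struct of_match {
	const char *compatible;
	const struct devtype_entry *data;
};

static const struct of_match of_table[] = {
	{ "fsl,imx1-i2c",  &devtypes[IMX1_I2C]  },
	{ "fsl,imx21-i2c", &devtypes[IMX21_I2C] },
	{ NULL, NULL },	/* sentinel */
};

/* what of_match_device() does conceptually: walk the table for a match */
static const struct devtype_entry *lookup(const char *compatible)
{
	const struct of_match *m;

	for (m = of_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct devtype_entry *e = lookup("fsl,imx1-i2c");

	/* probe() stores e->driver_data; is_imx1_i2c() is then a compare */
	if (e)
		printf("%s -> %s\n", e->name,
		       e->driver_data == IMX1_I2C ? "needs i.MX1 STOP delay"
						  : "no delay needed");
	return 0;
}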
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index 7c28f10f95ca..de3736bf6465 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -947,7 +947,7 @@ static const struct dev_pm_ops intel_mid_i2c_pm_ops = {
* 5. Call intel_mid_i2c_hwinit() for hardware initialization
* 6. Register I2C adapter in i2c-core
*/
-static int __devinit intel_mid_i2c_probe(struct pci_dev *dev,
+static int intel_mid_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct intel_mid_i2c_private *mrst;
@@ -1079,7 +1079,7 @@ exit:
return err;
}
-static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
+static void intel_mid_i2c_remove(struct pci_dev *dev)
{
struct intel_mid_i2c_private *mrst = pci_get_drvdata(dev);
intel_mid_i2c_disable(&mrst->adap);
@@ -1113,7 +1113,7 @@ static struct pci_driver intel_mid_i2c_driver = {
.name = DRIVER_NAME,
.id_table = intel_mid_i2c_ids,
.probe = intel_mid_i2c_probe,
- .remove = __devexit_p(intel_mid_i2c_remove),
+ .remove = intel_mid_i2c_remove,
};
module_pci_driver(intel_mid_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index f90a6057508d..4099f79c2280 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -249,7 +249,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit smbus_sch_probe(struct platform_device *dev)
+static int smbus_sch_probe(struct platform_device *dev)
{
struct resource *res;
int retval;
@@ -284,7 +284,7 @@ static int __devinit smbus_sch_probe(struct platform_device *dev)
return retval;
}
-static int __devexit smbus_sch_remove(struct platform_device *pdev)
+static int smbus_sch_remove(struct platform_device *pdev)
{
struct resource *res;
if (sch_smba) {
@@ -303,7 +303,7 @@ static struct platform_driver smbus_sch_driver = {
.owner = THIS_MODULE,
},
.probe = smbus_sch_probe,
- .remove = __devexit_p(smbus_sch_remove),
+ .remove = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index ca86430cb4a2..a69459e5c3f3 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -175,7 +175,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
}
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
-static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
{28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02},
{36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28},
@@ -196,7 +196,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = {
{10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f}
};
-static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
int prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -230,7 +230,7 @@ static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
return (int)div->fdr;
}
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -252,7 +252,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
fdr);
}
#else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */
-static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
+static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -260,7 +260,7 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node,
#endif /* CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x */
#ifdef CONFIG_PPC_MPC512x
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -288,7 +288,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
mpc_i2c_setup_52xx(node, i2c, clock, prescaler);
}
#else /* CONFIG_PPC_MPC512x */
-static void __devinit mpc_i2c_setup_512x(struct device_node *node,
+static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -296,7 +296,7 @@ static void __devinit mpc_i2c_setup_512x(struct device_node *node,
#endif /* CONFIG_PPC_MPC512x */
#ifdef CONFIG_FSL_SOC
-static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
+static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
{160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123},
{288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102},
{416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127},
@@ -316,7 +316,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] __devinitconst = {
{49152, 0x011e}, {61440, 0x011f}
};
-static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
+static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
struct device_node *node = NULL;
u32 __iomem *reg;
@@ -345,7 +345,7 @@ static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void)
return val;
}
-static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
+static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
u32 prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
@@ -383,7 +383,7 @@ static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
return div ? (int)div->fdr : -EINVAL;
}
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -408,7 +408,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
}
#else /* !CONFIG_FSL_SOC */
-static void __devinit mpc_i2c_setup_8xxx(struct device_node *node,
+static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
@@ -615,7 +615,7 @@ static struct i2c_adapter mpc_ops = {
};
static const struct of_device_id mpc_i2c_of_match[];
-static int __devinit fsl_i2c_probe(struct platform_device *op)
+static int fsl_i2c_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct mpc_i2c *i2c;
@@ -706,7 +706,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
return result;
};
-static int __devexit fsl_i2c_remove(struct platform_device *op)
+static int fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
@@ -746,24 +746,24 @@ static int mpc_i2c_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
#endif
-static const struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
};
-static const struct mpc_i2c_data mpc_i2c_data_52xx __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_52xx = {
.setup = mpc_i2c_setup_52xx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8313 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8313 = {
.setup = mpc_i2c_setup_8xxx,
};
-static const struct mpc_i2c_data mpc_i2c_data_8543 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8543 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 2,
};
-static const struct mpc_i2c_data mpc_i2c_data_8544 __devinitdata = {
+static const struct mpc_i2c_data mpc_i2c_data_8544 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 3,
};
@@ -785,7 +785,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
- .remove = __devexit_p(fsl_i2c_remove),
+ .remove = fsl_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 2e9d56719e99..8b20ef8524ac 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -495,7 +495,7 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
*
*****************************************************************************
*/
-static int __devinit
+static int
mv64xxx_i2c_map_regs(struct platform_device *pd,
struct mv64xxx_i2c_data *drv_data)
{
@@ -530,13 +530,13 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
}
#ifdef CONFIG_OF
-static int __devinit
+static int
mv64xxx_calc_freq(const int tclk, const int n, const int m)
{
return tclk / (10 * (m + 1) * (2 << n));
}
-static bool __devinit
+static bool
mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
u32 *best_m)
{
@@ -560,7 +560,7 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
return true;
}
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -597,7 +597,7 @@ out:
#endif
}
#else /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device_node *np)
{
@@ -605,7 +605,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
}
#endif /* CONFIG_OF */
-static int __devinit
+static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
@@ -697,7 +697,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
return rc;
}
-static int __devexit
+static int
mv64xxx_i2c_remove(struct platform_device *dev)
{
struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
@@ -718,7 +718,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
return rc;
}
-static const struct of_device_id mv64xxx_i2c_of_match_table[] __devinitdata = {
+static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "marvell,mv64xxx-i2c", },
{}
};
@@ -726,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
- .remove = __devexit_p(mv64xxx_i2c_remove),
+ .remove = mv64xxx_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
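The new mv64xxx_calc_freq()/mv64xxx_find_baud_factors() pair above derives the bus clock from the two divider fields as tclk / (10 * (m + 1) * (2 << n)) and keeps the (n, m) pair that gives the highest frequency not above the requested one. A stand-alone check of that search is sketched below; the 3-bit/4-bit field widths and the 250 MHz / 100 kHz figures are example assumptions, not values taken from the patch:

#include <stdio.h>
#include <stdbool.h>

static int calc_freq(const int tclk, const int n, const int m)
{
	return tclk / (10 * (m + 1) * (2 << n));
}

static bool find_baud_factors(unsigned int req, unsigned int tclk,
			      unsigned int *best_n, unsigned int *best_m)
{
	int freq, best = 0;
	unsigned int n, m;

	for (n = 0; n <= 7; n++) {		/* assumed 3-bit field */
		for (m = 0; m <= 15; m++) {	/* assumed 4-bit field */
			freq = calc_freq(tclk, n, m);
			if (freq <= (int)req && freq > best) {
				best = freq;
				*best_n = n;
				*best_m = m;
			}
		}
	}
	return best > 0;
}

int main(void)
{
	unsigned int n = 0, m = 0;

	/* e.g. 250 MHz tclk, 100 kHz requested bus clock */
	if (find_baud_factors(100000, 250000000, &n, &m))
		printf("n=%u m=%u -> %d Hz\n", n, m,
		       calc_freq(250000000, n, m));
	return 0;
}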
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 0670da79ee5e..d6abaf2cf2e3 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
struct device *dev;
void __iomem *regs;
struct completion cmd_complete;
- u32 cmd_err;
+ int cmd_err;
struct i2c_adapter adapter;
const struct mxs_i2c_speed_config *speed;
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg->len == 0)
return -EINVAL;
- init_completion(&i2c->cmd_complete);
+ INIT_COMPLETION(i2c->cmd_complete);
i2c->cmd_err = 0;
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -359,7 +359,7 @@ static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 mxs_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
@@ -432,7 +432,7 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
return 0;
}
-static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+static int mxs_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxs_i2c_dev *i2c;
@@ -473,6 +473,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
i2c->dev = dev;
i2c->speed = &mxs_i2c_95kHz_config;
+ init_completion(&i2c->cmd_complete);
+
if (dev->of_node) {
err = mxs_i2c_get_ofdata(i2c);
if (err)
@@ -515,7 +517,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+static int mxs_i2c_remove(struct platform_device *pdev)
{
struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
int ret;
@@ -546,7 +548,7 @@ static struct platform_driver mxs_i2c_driver = {
.owner = THIS_MODULE,
.of_match_table = mxs_i2c_dt_ids,
},
- .remove = __devexit_p(mxs_i2c_remove),
+ .remove = mxs_i2c_remove,
};
static int __init mxs_i2c_init(void)
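The i2c-mxs hunks above move init_completion() to probe time and switch the per-transfer path to INIT_COMPLETION(), which only resets the done counter. Re-initializing the whole completion, including its wait queue, on every transfer is unsafe if an interrupt from a previous, timed-out transfer can still fire. A kernel-context sketch of the resulting pattern, with illustrative struct and function names rather than the driver's own:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_i2c {
	struct completion cmd_complete;
	int cmd_err;
};

/* once, when the device is set up */
static void my_setup(struct my_i2c *i2c)
{
	init_completion(&i2c->cmd_complete);
}

/* per transfer: only reset the counter, keep the wait queue intact */
static int my_xfer(struct my_i2c *i2c)
{
	INIT_COMPLETION(i2c->cmd_complete);
	i2c->cmd_err = 0;

	/* ... kick off the transfer, then wait for the ISR to complete() ... */
	if (!wait_for_completion_timeout(&i2c->cmd_complete,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return i2c->cmd_err;
}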
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 392303b4be07..adac8542771d 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -117,7 +117,7 @@ struct nforce2_smbus {
#define MAX_TIMEOUT 100
/* We disable the second SMBus channel on these boards */
-static struct dmi_system_id __devinitdata nforce2_dmi_blacklist2[] = {
+static const struct dmi_system_id nforce2_dmi_blacklist2[] = {
{
.ident = "DFI Lanparty NF4 Expert",
.matches = {
@@ -330,8 +330,8 @@ static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
MODULE_DEVICE_TABLE (pci, nforce2_ids);
-static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
- int alt_reg, struct nforce2_smbus *smbus, const char *name)
+static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
+ struct nforce2_smbus *smbus, const char *name)
{
int error;
@@ -382,7 +382,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
}
-static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct nforce2_smbus *smbuses;
int res1, res2;
@@ -430,7 +430,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
}
-static void __devexit nforce2_remove(struct pci_dev *dev)
+static void nforce2_remove(struct pci_dev *dev)
{
struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
@@ -450,7 +450,7 @@ static struct pci_driver nforce2_driver = {
.name = "nForce2_smbus",
.id_table = nforce2_ids,
.probe = nforce2_probe,
- .remove = __devexit_p(nforce2_remove),
+ .remove = nforce2_remove,
};
module_pci_driver(nforce2_driver);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 02c3115a2dfa..8b2ffcf45322 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -435,13 +435,6 @@ static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
@@ -523,13 +516,6 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
- if (timeout < 0) {
- dev_err(&dev->adev->dev,
- "wait_for_completion_timeout "
- "returned %d waiting for event\n", timeout);
- status = timeout;
- }
-
if (timeout == 0) {
/* Controller timed out */
dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index f41502ef3f55..865ee350adb3 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -304,7 +304,7 @@ retry_write:
case STATE_READ:
/* we have a byte of data in the data register, do
- * something with it, and then work out wether we are
+ * something with it, and then work out whether we are
* going to do any more read/write
*/
@@ -518,7 +518,7 @@ static const struct i2c_algorithm nuc900_i2c_algorithm = {
* called by the bus driver when a suitable device is found
*/
-static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
+static int nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
@@ -663,7 +663,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
* called when device is removed from the bus
*/
-static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
+static int nuc900_i2c_remove(struct platform_device *pdev)
{
struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
@@ -684,7 +684,7 @@ static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
static struct platform_driver nuc900_i2c_driver = {
.probe = nuc900_i2c_probe,
- .remove = __devexit_p(nuc900_i2c_remove),
+ .remove = nuc900_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "nuc900-i2c0",
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index bffd5501ac2d..a873d0ad1acb 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -4,15 +4,14 @@
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
+ * Support for the GRLIB port of the controller by
+ * Andreas Larsson <andreas@gaisler.com>
+ *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
-/*
- * This driver can be used from the device tree, see
- * Documentation/devicetree/bindings/i2c/ocore-i2c.txt
- */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -38,6 +37,8 @@ struct ocores_i2c {
int nmsgs;
int state; /* see STATE_ */
int clock_khz;
+ void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
+ u8 (*getreg)(struct ocores_i2c *i2c, int reg);
};
/* registers */
@@ -71,24 +72,47 @@ struct ocores_i2c {
#define STATE_READ 3
#define STATE_ERROR 4
+#define TYPE_OCORES 0
+#define TYPE_GRLIB 1
+
+static void oc_setreg_8(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_16(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite16(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static void oc_setreg_32(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ iowrite32(value, i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_8(struct ocores_i2c *i2c, int reg)
+{
+ return ioread8(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_16(struct ocores_i2c *i2c, int reg)
+{
+ return ioread16(i2c->base + (reg << i2c->reg_shift));
+}
+
+static inline u8 oc_getreg_32(struct ocores_i2c *i2c, int reg)
+{
+ return ioread32(i2c->base + (reg << i2c->reg_shift));
+}
+
static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
{
- if (i2c->reg_io_width == 4)
- iowrite32(value, i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- iowrite16(value, i2c->base + (reg << i2c->reg_shift));
- else
- iowrite8(value, i2c->base + (reg << i2c->reg_shift));
+ i2c->setreg(i2c, reg, value);
}
static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg)
{
- if (i2c->reg_io_width == 4)
- return ioread32(i2c->base + (reg << i2c->reg_shift));
- else if (i2c->reg_io_width == 2)
- return ioread16(i2c->base + (reg << i2c->reg_shift));
- else
- return ioread8(i2c->base + (reg << i2c->reg_shift));
+ return i2c->getreg(i2c, reg);
}
static void ocores_process(struct ocores_i2c *i2c)
@@ -227,11 +251,59 @@ static struct i2c_adapter ocores_adapter = {
.algo = &ocores_algorithm,
};
+static struct of_device_id ocores_i2c_match[] = {
+ {
+ .compatible = "opencores,i2c-ocores",
+ .data = (void *)TYPE_OCORES,
+ },
+ {
+ .compatible = "aeroflexgaisler,i2cmst",
+ .data = (void *)TYPE_GRLIB,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+
#ifdef CONFIG_OF
+/* Read and write functions for the GRLIB port of the controller. Registers are
+ * 32-bit big endian and the PRELOW and PREHIGH registers are merged into one
+ * register. The subsequent registers have their offset decreased accordingly. */
+static u8 oc_getreg_grlib(struct ocores_i2c *i2c, int reg)
+{
+ u32 rd;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ rd = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PREHIGH)
+ return (u8)(rd >> 8);
+ else
+ return (u8)rd;
+}
+
+static void oc_setreg_grlib(struct ocores_i2c *i2c, int reg, u8 value)
+{
+ u32 curr, wr;
+ int rreg = reg;
+ if (reg != OCI2C_PRELOW)
+ rreg--;
+ if (reg == OCI2C_PRELOW || reg == OCI2C_PREHIGH) {
+ curr = ioread32be(i2c->base + (rreg << i2c->reg_shift));
+ if (reg == OCI2C_PRELOW)
+ wr = (curr & 0xff00) | value;
+ else
+ wr = (((u32)value) << 8) | (curr & 0xff);
+ } else {
+ wr = value;
+ }
+ iowrite32be(wr, i2c->base + (rreg << i2c->reg_shift));
+}
+
static int ocores_i2c_of_probe(struct platform_device *pdev,
struct ocores_i2c *i2c)
{
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
u32 val;
if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
@@ -257,17 +329,26 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "reg-io-width",
&i2c->reg_io_width);
+
+ match = of_match_node(ocores_i2c_match, pdev->dev.of_node);
+ if (match && (int)match->data == TYPE_GRLIB) {
+ dev_dbg(&pdev->dev, "GRLIB variant of i2c-ocores\n");
+ i2c->setreg = oc_setreg_grlib;
+ i2c->getreg = oc_getreg_grlib;
+ }
+
return 0;
}
#else
#define ocores_i2c_of_probe(pdev,i2c) -ENODEV
#endif
-static int __devinit ocores_i2c_probe(struct platform_device *pdev)
+static int ocores_i2c_probe(struct platform_device *pdev)
{
struct ocores_i2c *i2c;
struct ocores_i2c_platform_data *pdata;
- struct resource *res, *res2;
+ struct resource *res;
+ int irq;
int ret;
int i;
@@ -275,26 +356,17 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Memory region busy\n");
- return -EBUSY;
- }
-
- i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!i2c->base) {
- dev_err(&pdev->dev, "Unable to map registers\n");
- return -EIO;
- }
+ i2c->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!i2c->base)
+ return -EADDRNOTAVAIL;
pdata = pdev->dev.platform_data;
if (pdata) {
@@ -310,10 +382,34 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (i2c->reg_io_width == 0)
i2c->reg_io_width = 1; /* Set to default value */
+ if (!i2c->setreg || !i2c->getreg) {
+ switch (i2c->reg_io_width) {
+ case 1:
+ i2c->setreg = oc_setreg_8;
+ i2c->getreg = oc_getreg_8;
+ break;
+
+ case 2:
+ i2c->setreg = oc_setreg_16;
+ i2c->getreg = oc_getreg_16;
+ break;
+
+ case 4:
+ i2c->setreg = oc_setreg_32;
+ i2c->getreg = oc_getreg_32;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
+ i2c->reg_io_width);
+ return -EINVAL;
+ }
+ }
+
ocores_init(i2c);
init_waitqueue_head(&i2c->wait);
- ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
@@ -345,7 +441,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ocores_i2c_remove(struct platform_device *pdev)
+static int ocores_i2c_remove(struct platform_device *pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
@@ -387,15 +483,9 @@ static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
#define OCORES_I2C_PM NULL
#endif
-static struct of_device_id ocores_i2c_match[] = {
- { .compatible = "opencores,i2c-ocores", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
- .remove = __devexit_p(ocores_i2c_remove),
+ .remove = ocores_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index f44c83549fe5..484ca771fdff 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -446,7 +446,7 @@ static struct i2c_adapter octeon_i2c_ops = {
/**
* octeon_i2c_setclock - Calculate and set clock divisors.
*/
-static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
+static int octeon_i2c_setclock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -489,7 +489,7 @@ static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c)
return 0;
}
-static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
+static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
{
u8 status;
int tries;
@@ -510,7 +510,7 @@ static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
return -EIO;
}
-static int __devinit octeon_i2c_probe(struct platform_device *pdev)
+static int octeon_i2c_probe(struct platform_device *pdev)
{
int irq, result = 0;
struct octeon_i2c *i2c;
@@ -609,7 +609,7 @@ out:
return result;
};
-static int __devexit octeon_i2c_remove(struct platform_device *pdev)
+static int octeon_i2c_remove(struct platform_device *pdev)
{
struct octeon_i2c *i2c = platform_get_drvdata(pdev);
@@ -628,7 +628,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match);
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
- .remove = __devexit_p(octeon_i2c_remove),
+ .remove = octeon_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 3525c9e62cb0..4cc2f0528c88 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -43,14 +43,16 @@
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
/* I2C controller revisions present on specific hardware */
-#define OMAP_I2C_REV_ON_2430 0x36
-#define OMAP_I2C_REV_ON_3430_3530 0x3C
-#define OMAP_I2C_REV_ON_3630_4430 0x40
+#define OMAP_I2C_REV_ON_2430 0x00000036
+#define OMAP_I2C_REV_ON_3430_3530 0x0000003C
+#define OMAP_I2C_REV_ON_3630 0x00000040
+#define OMAP_I2C_REV_ON_4430_PLUS 0x50400002
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -190,7 +192,6 @@ struct omap_i2c_dev {
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
u32 speed; /* Speed of bus in kHz */
- u32 dtrev; /* extra revision from DT */
u32 flags;
u16 cmd_err;
u8 *buf;
@@ -202,17 +203,18 @@ struct omap_i2c_dev {
* fifo_size==0 implies no fifo
* if set, should be trsh+1
*/
- u8 rev;
+ u32 rev;
unsigned b_hw:1; /* bad h/w fixes */
unsigned receiver:1; /* true when we're in receiver mode */
u16 iestate; /* Saved interrupt register */
u16 pscstate;
u16 scllstate;
u16 sclhstate;
- u16 bufstate;
u16 syscstate;
u16 westate;
u16 errata;
+
+ struct pinctrl *pins;
};
static const u8 reg_map_ip_v1[] = {
@@ -275,16 +277,39 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
-static int omap_i2c_init(struct omap_i2c_dev *dev)
+static void __omap_i2c_init(struct omap_i2c_dev *dev)
+{
+
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+
+ /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
+ omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
+
+ /* SCL low and high time values */
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
+ omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+
+ /* Take the I2C module out of reset: */
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+
+ /*
+ * Don't write to this register if the IE state is 0 as it can
+ * cause deadlock.
+ */
+ if (dev->iestate)
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+}
+
+static int omap_i2c_reset(struct omap_i2c_dev *dev)
{
- u16 psc = 0, scll = 0, sclh = 0, buf = 0;
- u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
- unsigned long fclk_rate = 12000000;
unsigned long timeout;
- unsigned long internal_clk = 0;
- struct clk *fclk;
+ u16 sysc;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
+ sysc = omap_i2c_read_reg(dev, OMAP_I2C_SYSC_REG);
+
/* Disable I2C controller before soft reset */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
@@ -306,32 +331,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
/* SYSC register is cleared by the reset; rewrite it */
- if (dev->rev == OMAP_I2C_REV_ON_2430) {
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- SYSC_AUTOIDLE_MASK);
-
- } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
- dev->syscstate = SYSC_AUTOIDLE_MASK;
- dev->syscstate |= SYSC_ENAWAKEUP_MASK;
- dev->syscstate |= (SYSC_IDLEMODE_SMART <<
- __ffs(SYSC_SIDLEMODE_MASK));
- dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK <<
- __ffs(SYSC_CLOCKACTIVITY_MASK));
-
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
- dev->syscstate);
- /*
- * Enabling all wakup sources to stop I2C freezing on
- * WFI instruction.
- * REVISIT: Some wkup sources might not be needed.
- */
- dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
- dev->westate);
- }
+ omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, sysc);
+
+ }
+ return 0;
+}
+
+static int omap_i2c_init(struct omap_i2c_dev *dev)
+{
+ u16 psc = 0, scll = 0, sclh = 0;
+ u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
+ unsigned long fclk_rate = 12000000;
+ unsigned long internal_clk = 0;
+ struct clk *fclk;
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
+ /*
+ * Enabling all wakup sources to stop I2C freezing on
+ * WFI instruction.
+ * REVISIT: Some wkup sources might not be needed.
+ */
+ dev->westate = OMAP_I2C_WE_ALL;
}
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
@@ -416,28 +437,17 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
}
- /* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
- omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);
-
- /* SCL low and high time values */
- omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);
-
- /* Take the I2C module out of reset: */
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
-
- /* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- dev->pscstate = psc;
- dev->scllstate = scll;
- dev->sclhstate = sclh;
- dev->bufstate = buf;
- }
+
+ dev->pscstate = psc;
+ dev->scllstate = scll;
+ dev->sclhstate = sclh;
+
+ __omap_i2c_init(dev);
+
return 0;
}
@@ -490,7 +500,7 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -586,7 +596,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
OMAP_I2C_TIMEOUT);
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -ETIMEDOUT;
}
@@ -596,7 +607,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
/* We have an error */
if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
OMAP_I2C_STAT_XUDF)) {
- omap_i2c_init(dev);
+ omap_i2c_reset(dev);
+ __omap_i2c_init(dev);
return -EIO;
}
@@ -642,13 +654,14 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
break;
}
- if (dev->set_mpu_wkup_lat != NULL)
- dev->set_mpu_wkup_lat(dev->dev, -1);
-
if (r == 0)
r = num;
omap_i2c_wait_for_bb(dev);
+
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
+
out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -790,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
dev->cmd_err |= OMAP_I2C_STAT_AL;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
}
return -EIO;
@@ -950,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
i2c_omap_errata_i207(dev, stat);
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_RRDY) {
@@ -976,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
break;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_XRDY) {
@@ -1025,9 +1038,7 @@ static const struct i2c_algorithm omap_i2c_algo = {
#ifdef CONFIG_OF
static struct omap_i2c_bus_platform_data omap3_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
+ .flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_i2c_bus_platform_data omap4_pdata = {
@@ -1048,7 +1059,17 @@ static const struct of_device_id omap_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
#endif
-static int __devinit
+#define OMAP_I2C_SCHEME(rev) ((rev & 0xc000) >> 14)
+
+#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev) (rev >> 4)
+#define OMAP_I2C_REV_SCHEME_0_MINOR(rev) (rev & 0xf)
+
+#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev) ((rev & 0x0700) >> 7)
+#define OMAP_I2C_REV_SCHEME_1_MINOR(rev) (rev & 0x1f)
+#define OMAP_I2C_SCHEME_0 0
+#define OMAP_I2C_SCHEME_1 1
+
+static int
omap_i2c_probe(struct platform_device *pdev)
{
struct omap_i2c_dev *dev;
@@ -1060,6 +1081,8 @@ omap_i2c_probe(struct platform_device *pdev)
const struct of_device_id *match;
int irq;
int r;
+ u32 rev;
+ u16 minor, major, scheme;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1091,7 +1114,6 @@ omap_i2c_probe(struct platform_device *pdev)
u32 freq = 100000; /* default to 100000 Hz */
pdata = match->data;
- dev->dtrev = pdata->rev;
dev->flags = pdata->flags;
of_property_read_u32(node, "clock-frequency", &freq);
@@ -1101,7 +1123,16 @@ omap_i2c_probe(struct platform_device *pdev)
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- dev->dtrev = pdata->rev;
+ }
+
+ dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(dev->pins)) {
+ if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
+ PTR_ERR(dev->pins));
+ dev->pins = NULL;
}
dev->dev = &pdev->dev;
@@ -1114,11 +1145,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
- dev->regs = (u8 *)reg_map_ip_v2;
- else
- dev->regs = (u8 *)reg_map_ip_v1;
-
pm_runtime_enable(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, OMAP_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(dev->dev);
@@ -1127,11 +1153,37 @@ omap_i2c_probe(struct platform_device *pdev)
if (IS_ERR_VALUE(r))
goto err_free_mem;
- dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
+ /*
+ * Read the Rev hi bits [15:14], i.e. the scheme: 1 indicates the ver2 IP.
+ * On omap1/2/3, offset 4 is the IE register, whose bits [15:14] are 0 at reset.
+ * Also, since omap_i2c_read_reg uses reg_map_ip_*, a
+ * raw_readw is done.
+ */
+ rev = __raw_readw(dev->base + 0x04);
+
+ scheme = OMAP_I2C_SCHEME(rev);
+ switch (scheme) {
+ case OMAP_I2C_SCHEME_0:
+ dev->regs = (u8 *)reg_map_ip_v1;
+ dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG);
+ minor = OMAP_I2C_REV_SCHEME_0_MINOR(dev->rev);
+ major = OMAP_I2C_REV_SCHEME_0_MAJOR(dev->rev);
+ break;
+ case OMAP_I2C_SCHEME_1:
+ /* FALLTHROUGH */
+ default:
+ dev->regs = (u8 *)reg_map_ip_v2;
+ rev = (rev << 16) |
+ omap_i2c_read_reg(dev, OMAP_I2C_IP_V2_REVNB_LO);
+ minor = OMAP_I2C_REV_SCHEME_1_MINOR(rev);
+ major = OMAP_I2C_REV_SCHEME_1_MAJOR(rev);
+ dev->rev = rev;
+ }
dev->errata = 0;
- if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->rev >= OMAP_I2C_REV_ON_2430 &&
+ dev->rev < OMAP_I2C_REV_ON_4430_PLUS)
dev->errata |= I2C_OMAP_ERRATA_I207;
if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
@@ -1152,7 +1204,7 @@ omap_i2c_probe(struct platform_device *pdev)
dev->fifo_size = (dev->fifo_size / 2);
- if (dev->rev < OMAP_I2C_REV_ON_3630_4430)
+ if (dev->rev < OMAP_I2C_REV_ON_3630)
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
@@ -1195,8 +1247,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_unuse_clocks;
}
- dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", adap->nr,
- dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
+ major, minor, dev->speed);
of_i2c_register_devices(adap);
@@ -1215,7 +1267,7 @@ err_free_mem:
return r;
}
-static int __devexit omap_i2c_remove(struct platform_device *pdev)
+static int omap_i2c_remove(struct platform_device *pdev)
{
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
int ret;
@@ -1239,14 +1291,13 @@ static int omap_i2c_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- u16 iv;
_dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
- iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+ omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
@@ -1262,23 +1313,10 @@ static int omap_i2c_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0);
- omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate);
- omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate);
- omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- }
+ if (!_dev->regs)
+ return 0;
- /*
- * Don't write to this register if the IE state is 0 as it can
- * cause deadlock.
- */
- if (_dev->iestate)
- omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate);
+ __omap_i2c_init(_dev);
return 0;
}
@@ -1295,7 +1333,7 @@ static struct dev_pm_ops omap_i2c_pm_ops = {
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove = __devexit_p(omap_i2c_remove),
+ .remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
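The i2c-omap changes above stop trusting the DT-supplied IP revision and read it from hardware instead: a raw read at offset 0x04 returns either the legacy IE register (scheme bits [15:14] are 0) or REVNB_HI on the version-2 IP, and the scheme then selects both the register map and how major/minor are extracted. A stand-alone decode using the macros from the patch; the two input values fed to it are made-up examples:

#include <stdio.h>

#define OMAP_I2C_SCHEME(rev)			((rev & 0xc000) >> 14)
#define OMAP_I2C_REV_SCHEME_0_MAJOR(rev)	(rev >> 4)
#define OMAP_I2C_REV_SCHEME_0_MINOR(rev)	(rev & 0xf)
#define OMAP_I2C_REV_SCHEME_1_MAJOR(rev)	((rev & 0x0700) >> 7)
#define OMAP_I2C_REV_SCHEME_1_MINOR(rev)	(rev & 0x1f)
#define OMAP_I2C_SCHEME_0			0
#define OMAP_I2C_SCHEME_1			1

static void decode(unsigned int hi, unsigned int lo)
{
	if (OMAP_I2C_SCHEME(hi) == OMAP_I2C_SCHEME_0) {
		/* scheme 0: 'lo' stands in for the legacy 8-bit REV register */
		printf("scheme 0: rev %u.%u\n",
		       OMAP_I2C_REV_SCHEME_0_MAJOR(lo),
		       OMAP_I2C_REV_SCHEME_0_MINOR(lo));
	} else {
		unsigned int rev = (hi << 16) | lo;

		printf("scheme 1: rev %u.%u (raw 0x%08x)\n",
		       OMAP_I2C_REV_SCHEME_1_MAJOR(rev),
		       OMAP_I2C_REV_SCHEME_1_MINOR(rev), rev);
	}
}

int main(void)
{
	decode(0x0000, 0x0040);	/* example scheme-0 read: reports rev 4.0 */
	decode(0x4036, 0x010b);	/* example scheme-1 read: reports rev 2.11 */
	return 0;
}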
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 4b95f7a63a3b..aa9577881925 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -135,7 +135,7 @@ static struct lineop parport_ctrl_irq = {
.port = PORT_CTRL,
};
-static int __devinit i2c_parport_probe(struct platform_device *pdev)
+static int i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -169,7 +169,7 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit i2c_parport_remove(struct platform_device *pdev)
+static int i2c_parport_remove(struct platform_device *pdev)
{
if (ara) {
line_set(0, &parport_ctrl_irq);
@@ -191,7 +191,7 @@ static struct platform_driver i2c_parport_driver = {
.name = DRVNAME,
},
.probe = i2c_parport_probe,
- .remove = __devexit_p(i2c_parport_remove),
+ .remove = i2c_parport_remove,
};
static int __init i2c_parport_device_add(u16 address)
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 12edefd4183a..615f632c846f 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -340,7 +340,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = pasemi_smb_func,
};
-static int __devinit pasemi_smb_probe(struct pci_dev *dev,
+static int pasemi_smb_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct pasemi_smbus *smbus;
@@ -392,7 +392,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
return error;
}
-static void __devexit pasemi_smb_remove(struct pci_dev *dev)
+static void pasemi_smb_remove(struct pci_dev *dev)
{
struct pasemi_smbus *smbus = pci_get_drvdata(dev);
@@ -412,7 +412,7 @@ static struct pci_driver pasemi_smb_driver = {
.name = "i2c-pasemi",
.id_table = pasemi_smb_ids,
.probe = pasemi_smb_probe,
- .remove = __devexit_p(pasemi_smb_remove),
+ .remove = pasemi_smb_remove,
};
module_pci_driver(pasemi_smb_driver);
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 29933f87d8fa..323f061a3163 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -119,7 +119,7 @@ static struct i2c_adapter pca_isa_ops = {
.timeout = HZ,
};
-static int __devinit pca_isa_match(struct device *dev, unsigned int id)
+static int pca_isa_match(struct device *dev, unsigned int id)
{
int match = base != 0;
@@ -132,7 +132,7 @@ static int __devinit pca_isa_match(struct device *dev, unsigned int id)
return match;
}
-static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
+static int pca_isa_probe(struct device *dev, unsigned int id)
{
init_waitqueue_head(&pca_wait);
@@ -174,7 +174,7 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
return -ENODEV;
}
-static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
+static int pca_isa_remove(struct device *dev, unsigned int id)
{
i2c_del_adapter(&pca_isa_ops);
@@ -190,7 +190,7 @@ static int __devexit pca_isa_remove(struct device *dev, unsigned int id)
static struct isa_driver pca_isa_driver = {
.match = pca_isa_match,
.probe = pca_isa_probe,
- .remove = __devexit_p(pca_isa_remove),
+ .remove = pca_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER,
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 675878f49f76..a30d2f613c03 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -131,7 +131,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
}
-static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
+static int i2c_pca_pf_probe(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c;
struct resource *res;
@@ -257,7 +257,7 @@ e_print:
return ret;
}
-static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
+static int i2c_pca_pf_remove(struct platform_device *pdev)
{
struct i2c_pca_pf_data *i2c = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
@@ -279,7 +279,7 @@ static int __devexit i2c_pca_pf_remove(struct platform_device *pdev)
static struct platform_driver i2c_pca_pf_driver = {
.probe = i2c_pca_pf_probe,
- .remove = __devexit_p(i2c_pca_pf_remove),
+ .remove = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8bbd6ece7c41..39ab78c1a02c 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(force_addr,
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
-static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
+static const struct dmi_system_id piix4_dmi_blacklist[] = {
{
.ident = "Sapphire AM2RD790",
.matches = {
@@ -119,7 +119,7 @@ static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
/* The IBM entry is in a separate table because we only check it
on Intel-based systems */
-static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
+static const struct dmi_system_id piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
@@ -131,8 +131,8 @@ struct i2c_piix4_adapdata {
unsigned short smba;
};
-static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
unsigned short piix4_smba;
@@ -204,9 +204,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
*/
pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
temp | 1);
- dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
- "WARNING: SMBus interface has been "
- "FORCEFULLY ENABLED!\n");
+ dev_notice(&PIIX4_dev->dev,
+ "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
} else {
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
@@ -231,8 +230,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
@@ -295,9 +294,9 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int __devinit piix4_setup_aux(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id,
- unsigned short base_reg_addr)
+static int piix4_setup_aux(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id,
+ unsigned short base_reg_addr)
{
/* Set up auxiliary SMBus controllers found on some
* AMD chipsets e.g. SP5100 (SB700 derivative) */
@@ -541,9 +540,8 @@ MODULE_DEVICE_TABLE (pci, piix4_ids);
static struct i2c_adapter *piix4_main_adapter;
static struct i2c_adapter *piix4_aux_adapter;
-static int __devinit piix4_add_adapter(struct pci_dev *dev,
- unsigned short smba,
- struct i2c_adapter **padap)
+static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
struct i2c_piix4_adapdata *adapdata;
@@ -589,8 +587,7 @@ static int __devinit piix4_add_adapter(struct pci_dev *dev,
return 0;
}
-static int __devinit piix4_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
@@ -627,7 +624,7 @@ static int __devinit piix4_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
+static void piix4_adap_remove(struct i2c_adapter *adap)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
@@ -639,7 +636,7 @@ static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
}
}
-static void __devexit piix4_remove(struct pci_dev *dev)
+static void piix4_remove(struct pci_dev *dev)
{
if (piix4_main_adapter) {
piix4_adap_remove(piix4_main_adapter);
@@ -656,7 +653,7 @@ static struct pci_driver piix4_driver = {
.name = "piix4_smbus",
.id_table = piix4_ids,
.probe = piix4_probe,
- .remove = __devexit_p(piix4_remove),
+ .remove = piix4_remove,
};
module_pci_driver(piix4_driver);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 3d71395ae1f7..083d68cfaf0b 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -270,7 +270,7 @@ static irqreturn_t pmcmsptwi_interrupt(int irq, void *ptr)
/*
* Probe for and register the device and return 0 if there is one.
*/
-static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
+static int pmcmsptwi_probe(struct platform_device *pldev)
{
struct resource *res;
int rc = -ENODEV;
@@ -368,7 +368,7 @@ ret_err:
/*
* Release the device and return 0 if there is one.
*/
-static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
+static int pmcmsptwi_remove(struct platform_device *pldev)
{
struct resource *res;
@@ -628,7 +628,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
- .remove = __devexit_p(pmcmsptwi_remove),
+ .remove = pmcmsptwi_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 8488bddfe465..ce4097012e97 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -619,7 +619,7 @@ static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
#define PNX_I2C_PM NULL
#endif
-static int __devinit i2c_pnx_probe(struct platform_device *pdev)
+static int i2c_pnx_probe(struct platform_device *pdev)
{
unsigned long tmp;
int ret = 0;
@@ -765,7 +765,7 @@ err_kzalloc:
return ret;
}
-static int __devexit i2c_pnx_remove(struct platform_device *pdev)
+static int i2c_pnx_remove(struct platform_device *pdev)
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
@@ -797,7 +797,7 @@ static struct platform_driver i2c_pnx_driver = {
.pm = PNX_I2C_PM,
},
.probe = i2c_pnx_probe,
- .remove = __devexit_p(i2c_pnx_remove),
+ .remove = i2c_pnx_remove,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 5285f8565de4..0dd5b334d090 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -210,7 +210,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
};
-static int __devexit i2c_powermac_remove(struct platform_device *dev)
+static int i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
int rc;
@@ -227,7 +227,7 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
-static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
+static u32 i2c_powermac_get_addr(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
struct device_node *node)
{
@@ -255,7 +255,7 @@ static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
return 0xffffffff;
}
-static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
+static void i2c_powermac_create_one(struct i2c_adapter *adap,
const char *type,
u32 addr)
{
@@ -271,7 +271,7 @@ static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
type);
}
-static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
+static void i2c_powermac_add_missing(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus,
bool found_onyx)
{
@@ -297,7 +297,7 @@ static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
}
}
-static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
+static bool i2c_powermac_get_type(struct i2c_adapter *adap,
struct device_node *node,
u32 addr, char *type, int type_size)
{
@@ -336,7 +336,7 @@ static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
return false;
}
-static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
+static void i2c_powermac_register_devices(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus)
{
struct i2c_client *newdev;
@@ -403,7 +403,7 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
i2c_powermac_add_missing(adap, bus, found_onyx);
}
-static int __devinit i2c_powermac_probe(struct platform_device *dev)
+static int i2c_powermac_probe(struct platform_device *dev)
{
struct pmac_i2c_bus *bus = dev->dev.platform_data;
struct device_node *parent = NULL;
@@ -467,7 +467,7 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
- .remove = __devexit_p(i2c_powermac_remove),
+ .remove = i2c_powermac_remove,
.driver = {
.name = "i2c-powermac",
.bus = &platform_bus_type,
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index d8515be00b98..d7c512d717a7 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -184,7 +184,7 @@ static struct i2c_algorithm puv3_i2c_algorithm = {
/*
* Main initialization routine.
*/
-static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+static int puv3_i2c_probe(struct platform_device *pdev)
{
struct i2c_adapter *adapter;
struct resource *mem;
@@ -231,7 +231,7 @@ fail_nomem:
return rc;
}
-static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+static int puv3_i2c_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct resource *mem;
@@ -276,7 +276,7 @@ static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
- .remove = __devexit_p(puv3_i2c_remove),
+ .remove = puv3_i2c_remove,
.driver = {
.name = "PKUnity-v3-I2C",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 4dc9bef17d77..3d4985695aed 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -94,7 +94,7 @@ out:
return ERR_PTR(ret);
}
-static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
+static int ce4100_i2c_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
@@ -135,7 +135,7 @@ err_mem:
return ret;
}
-static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
+static void ce4100_i2c_remove(struct pci_dev *dev)
{
struct ce4100_devices *sds;
unsigned int i;
@@ -160,7 +160,7 @@ static struct pci_driver ce4100_i2c_driver = {
.name = "ce4100_i2c",
.id_table = ce4100_i2c_devices,
.probe = ce4100_i2c_probe,
- .remove = __devexit_p(ce4100_i2c_remove),
+ .remove = ce4100_i2c_remove,
};
module_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index f9399d163af2..9bd4d73d29e3 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -613,7 +613,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
.functionality = rcar_i2c_func,
};
-static int __devinit rcar_i2c_probe(struct platform_device *pdev)
+static int rcar_i2c_probe(struct platform_device *pdev)
{
struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
struct rcar_i2c_priv *priv;
@@ -642,7 +642,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->io = devm_ioremap(dev, res->start, resource_size(res));
+ priv->io = devm_request_and_ioremap(dev, res);
if (!priv->io) {
dev_err(dev, "cannot ioremap\n");
return -ENODEV;
@@ -682,7 +682,7 @@ static int __devinit rcar_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rcar_i2c_remove(struct platform_device *pdev)
+static int rcar_i2c_remove(struct platform_device *pdev)
{
struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -693,16 +693,16 @@ static int __devexit rcar_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver rcar_i2c_drv = {
+static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.owner = THIS_MODULE,
},
.probe = rcar_i2c_probe,
- .remove = __devexit_p(rcar_i2c_remove),
+ .remove = rcar_i2c_remove,
};
-module_platform_driver(rcar_i2c_drv);
+module_platform_driver(rcar_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas R-Car I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 9d902725bac9..a290d089ceaf 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -38,6 +38,7 @@
#include <linux/io.h>
#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/irq.h>
@@ -49,6 +50,9 @@
#define QUIRK_HDMIPHY (1 << 1)
#define QUIRK_NO_GPIO (1 << 2)
+/* Max time to wait for bus to become idle after a xfer (in us) */
+#define S3C2410_IDLE_TIMEOUT 5000
+
/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
@@ -59,7 +63,6 @@ enum s3c24xx_i2c_state {
};
struct s3c24xx_i2c {
- spinlock_t lock;
wait_queue_head_t wait;
unsigned int quirks;
unsigned int suspended:1;
@@ -78,11 +81,11 @@ struct s3c24xx_i2c {
void __iomem *regs;
struct clk *clk;
struct device *dev;
- struct resource *ioarea;
struct i2c_adapter adap;
struct s3c2410_platform_i2c *pdata;
int gpios[2];
+ struct pinctrl *pctrl;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -208,7 +211,7 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
if (msg->flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
- /* todo - check for wether ack wanted or not */
+ /* todo - check for whether ack wanted or not */
s3c24xx_i2c_enable_ack(i2c);
iiccon = readl(i2c->regs + S3C2410_IICCON);
@@ -235,8 +238,47 @@ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
dev_dbg(i2c->dev, "STOP\n");
- /* stop the transfer */
- iicstat &= ~S3C2410_IICSTAT_START;
+ /*
+ * The datasheet says that the STOP sequence should be:
+ * 1) I2CSTAT.5 = 0 - Clear BUSY (or 'generate STOP')
+ * 2) I2CCON.4 = 0 - Clear IRQPEND
+ * 3) Wait until the stop condition takes effect.
+ * 4*) I2CSTAT.4 = 0 - Clear TXRXEN
+ *
+ * Where, step "4*" is only for buses with the "HDMIPHY" quirk.
+ *
+ * However, after much experimentation, it appears that:
+ * a) normal buses automatically clear BUSY and transition from
+ * Master->Slave when they complete generating a STOP condition.
+ * Therefore, step (3) can be done in doxfer() by polling I2CCON.4
+ * after starting the STOP generation here.
+ * b) HDMIPHY bus does neither, so there is no way to do step 3.
+ * There is no indication when this bus has finished generating
+ * STOP.
+ *
+ * In fact, we have found that as soon as the IRQPEND bit is cleared in
+ * step 2, the HDMIPHY bus generates the STOP condition, and then
+ * immediately starts transferring another data byte, even though the
+ * bus is supposedly stopped. This is presumably because the bus is
+ * still in "Master" mode, and its BUSY bit is still set.
+ *
+ * To avoid these extra post-STOP transactions on HDMI phy devices, we
+ * just disable Serial Output on the bus (I2CSTAT.4 = 0) directly,
+ * instead of first generating a proper STOP condition. This should
+ * float SDA & SCK terminating the transfer. Subsequent transfers
+ * start with a proper START condition, and proceed normally.
+ *
+ * The HDMIPHY bus is an internal bus that always has exactly two
+ * devices, the host as Master and the HDMIPHY device as the slave.
+ * Skipping the STOP condition has been tested on this bus and works.
+ */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ /* Stop driving the I2C pins */
+ iicstat &= ~S3C2410_IICSTAT_TXRXEN;
+ } else {
+ /* stop the transfer */
+ iicstat &= ~S3C2410_IICSTAT_START;
+ }
writel(iicstat, i2c->regs + S3C2410_IICSTAT);
i2c->state = STATE_STOP;
@@ -397,7 +439,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
case STATE_READ:
/* we have a byte of data in the data register, do
- * something with it, and then work out wether we are
+ * something with it, and then work out whether we are
* going to do any more read/write
*/
@@ -490,13 +532,6 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
- /* the timeout for HDMIPHY is reduced to 10 ms because
- * the hangup is expected to happen, so waiting 400 ms
- * causes only unnecessary system hangup
- */
- if (i2c->quirks & QUIRK_HDMIPHY)
- timeout = 10;
-
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -506,16 +541,61 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
- /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
- if (i2c->quirks & QUIRK_HDMIPHY) {
- writel(0, i2c->regs + S3C2410_IICCON);
- writel(0, i2c->regs + S3C2410_IICSTAT);
- writel(0, i2c->regs + S3C2410_IICDS);
+ return -ETIMEDOUT;
+}
- return 0;
+/* s3c24xx_i2c_wait_idle
+ *
+ * wait for the i2c bus to become idle.
+*/
+
+static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
+{
+ unsigned long iicstat;
+ ktime_t start, now;
+ unsigned long delay;
+ int spins;
+
+ /* ensure the stop has been through the bus */
+
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ start = now = ktime_get();
+
+ /*
+ * Most of the time, the bus is already idle within a few usec of the
+ * end of a transaction. However, really slow i2c devices can stretch
+ * the clock, delaying STOP generation.
+ *
+ * On slower SoCs this typically happens within a very small number of
+ * instructions so busy wait briefly to avoid scheduling overhead.
+ */
+ spins = 3;
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ while ((iicstat & S3C2410_IICSTAT_START) && --spins) {
+ cpu_relax();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
}
- return -ETIMEDOUT;
+ /*
+ * If we do get an appreciable delay then, as a compromise between idle
+ * detection latency in the normal, fast case and system load in the
+ * slow device case, use an exponential back off in the polling loop,
+ * up to 1/10th of the total timeout, then continue to poll at a
+ * constant rate up to the timeout.
+ */
+ delay = 1;
+ while ((iicstat & S3C2410_IICSTAT_START) &&
+ ktime_us_delta(now, start) < S3C2410_IDLE_TIMEOUT) {
+ usleep_range(delay, 2 * delay);
+ if (delay < S3C2410_IDLE_TIMEOUT / 10)
+ delay <<= 1;
+ now = ktime_get();
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
}
/* s3c24xx_i2c_doxfer
@@ -526,8 +606,7 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long iicstat, timeout;
- int spins = 20;
+ unsigned long timeout;
int ret;
if (i2c->suspended)
@@ -540,8 +619,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
goto out;
}
- spin_lock_irq(&i2c->lock);
-
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
@@ -550,7 +627,6 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
- spin_unlock_irq(&i2c->lock);
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
@@ -564,24 +640,11 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
- /* ensure the stop has been through the bus */
-
- dev_dbg(i2c->dev, "waiting for bus idle\n");
-
- /* first, try busy waiting briefly */
- do {
- cpu_relax();
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
-
- /* if that timed out sleep */
- if (!spins) {
- msleep(1);
- iicstat = readl(i2c->regs + S3C2410_IICSTAT);
- }
+ /* For QUIRK_HDMIPHY, bus is already disabled */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ goto out;
- if (iicstat & S3C2410_IICSTAT_START)
- dev_warn(i2c->dev, "timeout waiting for bus idle\n");
+ s3c24xx_i2c_wait_idle(i2c);
out:
return ret;
@@ -740,7 +803,6 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
- unsigned long flags;
unsigned int got;
int delta_f;
int ret;
@@ -754,9 +816,9 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
(val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- spin_lock_irqsave(&i2c->lock, flags);
+ i2c_lock_adapter(&i2c->adap);
ret = s3c24xx_i2c_clockrate(i2c, &got);
- spin_unlock_irqrestore(&i2c->lock, flags);
+ i2c_unlock_adapter(&i2c->adap);
if (ret < 0)
dev_err(i2c->dev, "cannot find frequency\n");
@@ -858,14 +920,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
pdata = i2c->pdata;
- /* inititalise the gpio */
-
- if (pdata->cfg_gpio)
- pdata->cfg_gpio(to_platform_device(i2c->dev));
- else
- if (s3c24xx_i2c_parse_dt_gpio(i2c))
- return -EINVAL;
-
/* write slave address */
writeb(pdata->slave_addr, i2c->regs + S3C2410_IICADD);
@@ -963,7 +1017,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
i2c->tx_setup = 50;
- spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
@@ -989,36 +1042,38 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
-
- if (i2c->ioarea == NULL) {
- dev_err(&pdev->dev, "cannot request IO\n");
- ret = -ENXIO;
- goto err_clk;
- }
-
- i2c->regs = ioremap(res->start, resource_size(res));
+ i2c->regs = devm_request_and_ioremap(&pdev->dev, res);
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
- goto err_ioarea;
+ goto err_clk;
}
- dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
- i2c->regs, i2c->ioarea, res);
+ dev_dbg(&pdev->dev, "registers %p (%p)\n",
+ i2c->regs, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
+ i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
+
+ /* initialise the i2c gpio lines */
+
+ if (i2c->pdata->cfg_gpio) {
+ i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
+ } else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
+ ret = -EINVAL;
+ goto err_clk;
+ }
+
/* initialise the i2c controller */
ret = s3c24xx_i2c_init(i2c);
if (ret != 0)
- goto err_iomap;
+ goto err_clk;
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
@@ -1027,7 +1082,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto err_iomap;
+ goto err_clk;
}
ret = request_irq(i2c->irq, s3c24xx_i2c_irq, 0,
@@ -1035,7 +1090,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- goto err_iomap;
+ goto err_clk;
}
ret = s3c24xx_i2c_register_cpufreq(i2c);
@@ -1075,13 +1130,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
err_irq:
free_irq(i2c->irq, i2c);
- err_iomap:
- iounmap(i2c->regs);
-
- err_ioarea:
- release_resource(i2c->ioarea);
- kfree(i2c->ioarea);
-
err_clk:
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
@@ -1110,16 +1158,13 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
clk_disable_unprepare(i2c->clk);
clk_put(i2c->clk);
- iounmap(i2c->regs);
-
- release_resource(i2c->ioarea);
- s3c24xx_i2c_dt_gpio_free(i2c);
- kfree(i2c->ioarea);
+ if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
+ s3c24xx_i2c_dt_gpio_free(i2c);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1142,10 +1187,14 @@ static int s3c24xx_i2c_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
+#endif
};
#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
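The new s3c24xx_i2c_wait_idle() above replaces the old fixed spin-then-msleep(1) wait with a short busy-wait followed by an exponential back off: sleeps of roughly 1, 2, 4, ... us, capped at a tenth of the 5 ms S3C2410_IDLE_TIMEOUT, then constant-rate polling until the timeout expires. A condensed sketch of that polling pattern (hypothetical helper, not taken from the driver):

static void poll_bit_clear(void __iomem *reg, u32 mask, unsigned long timeout_us)
{
	ktime_t start = ktime_get();
	unsigned long delay = 1;

	while ((readl(reg) & mask) &&
	       ktime_us_delta(ktime_get(), start) < timeout_us) {
		usleep_range(delay, 2 * delay);
		/* back off exponentially, but never sleep longer than timeout/10 */
		if (delay < timeout_us / 10)
			delay <<= 1;
	}
}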
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index b76a29d1f8e4..008836409efe 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -248,7 +248,7 @@ static struct i2c_algorithm s6i2c_algorithm = {
.functionality = s6i2c_functionality,
};
-static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
+static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
{
u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000;
if (dividend > 0xffff)
@@ -256,7 +256,7 @@ static u16 __devinit nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
return dividend;
}
-static int __devinit s6i2c_probe(struct platform_device *dev)
+static int s6i2c_probe(struct platform_device *dev)
{
struct s6i2c_if *iface = &s6i2c_if;
struct i2c_adapter *p_adap;
@@ -361,7 +361,7 @@ err_out:
return rc;
}
-static int __devexit s6i2c_remove(struct platform_device *pdev)
+static int s6i2c_remove(struct platform_device *pdev)
{
struct s6i2c_if *iface = platform_get_drvdata(pdev);
i2c_wr16(iface, S6_I2C_ENABLE, 0);
@@ -378,7 +378,7 @@ static int __devexit s6i2c_remove(struct platform_device *pdev)
static struct platform_driver s6i2c_driver = {
.probe = s6i2c_probe,
- .remove = __devexit_p(s6i2c_remove),
+ .remove = s6i2c_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index c0c9dffbdb12..3a2253e1bf59 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -390,7 +390,7 @@ static const struct i2c_algorithm sh7760_i2c_algo = {
* iclk = mclk/(CDF + 1). iclk must be < 20MHz.
* scl = iclk/(SCGD*8 + 20).
*/
-static int __devinit calc_CCR(unsigned long scl_hz)
+static int calc_CCR(unsigned long scl_hz)
{
struct clk *mclk;
unsigned long mck, m1, dff, odff, iclk;
@@ -430,7 +430,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
return ((scgdm << 2) | cdfm);
}
-static int __devinit sh7760_i2c_probe(struct platform_device *pdev)
+static int sh7760_i2c_probe(struct platform_device *pdev)
{
struct sh7760_i2c_platdata *pd;
struct resource *res;
@@ -536,7 +536,7 @@ out0:
return ret;
}
-static int __devexit sh7760_i2c_remove(struct platform_device *pdev)
+static int sh7760_i2c_remove(struct platform_device *pdev)
{
struct cami2c *id = platform_get_drvdata(pdev);
@@ -557,7 +557,7 @@ static struct platform_driver sh7760_i2c_drv = {
.owner = THIS_MODULE,
},
.probe = sh7760_i2c_probe,
- .remove = __devexit_p(sh7760_i2c_remove),
+ .remove = sh7760_i2c_remove,
};
module_platform_driver(sh7760_i2c_drv);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8110ca45f342..b6e7a83a8296 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -120,11 +120,12 @@ struct sh_mobile_i2c_data {
void __iomem *reg;
struct i2c_adapter adap;
unsigned long bus_speed;
+ unsigned int clks_per_count;
struct clk *clk;
u_int8_t icic;
- u_int8_t iccl;
- u_int8_t icch;
u_int8_t flags;
+ u_int16_t iccl;
+ u_int16_t icch;
spinlock_t lock;
wait_queue_head_t wait;
@@ -135,7 +136,8 @@ struct sh_mobile_i2c_data {
#define IIC_FLAG_HAS_ICIC67 (1 << 0)
-#define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */
+#define STANDARD_MODE 100000
+#define FAST_MODE 400000
/* Register offsets */
#define ICDR 0x00
@@ -187,57 +189,90 @@ static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs,
iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr);
}
-static void activate_ch(struct sh_mobile_i2c_data *pd)
+static u32 sh_mobile_i2c_iccl(unsigned long count_khz, u32 tLOW, u32 tf, int offset)
{
- unsigned long i2c_clk;
- u_int32_t num;
- u_int32_t denom;
- u_int32_t tmp;
-
- /* Wake up device and enable clock */
- pm_runtime_get_sync(pd->dev);
- clk_enable(pd->clk);
-
- /* Get clock rate after clock is enabled */
- i2c_clk = clk_get_rate(pd->clk);
+ /*
+ * Conditional expression:
+ * ICCL >= COUNT_CLK * (tLOW + tf)
+ *
+ * SH-Mobile IIC hardware starts counting the LOW period of
+ * the SCL signal (tLOW) as soon as it pulls the SCL line.
+ * In order to meet the tLOW timing spec, we need to take into
+ * account the fall time of SCL signal (tf). Default tf value
+ * should be 0.3 us, for safety.
+ */
+ return (((count_khz * (tLOW + tf)) + 5000) / 10000) + offset;
+}
- /* Calculate the value for iccl. From the data sheet:
- * iccl = (p clock / transfer rate) * (L / (L + H))
- * where L and H are the SCL low/high ratio (5/4 in this case).
- * We also round off the result.
+static u32 sh_mobile_i2c_icch(unsigned long count_khz, u32 tHIGH, u32 tf, int offset)
+{
+ /*
+ * Conditional expression:
+ * ICCH >= COUNT_CLK * (tHIGH + tf)
+ *
+ * SH-Mobile IIC hardware is aware of SCL transition period 'tr',
+ * and can ignore it. SH-Mobile IIC controller starts counting
+ * the HIGH period of the SCL signal (tHIGH) after the SCL input
+ * voltage increases at VIH.
+ *
+ * Afterward it turned out calculating ICCH using only tHIGH spec
+ * will result in violation of the tHD;STA timing spec. We need
+ * to take into account the fall time of SDA signal (tf) at START
+ * condition, in order to meet both tHIGH and tHD;STA specs.
*/
- num = i2c_clk * 5;
- denom = pd->bus_speed * 9;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->iccl = (u_int8_t)((num/denom) + 1);
- else
- pd->iccl = (u_int8_t)(num/denom);
+ return (((count_khz * (tHIGH + tf)) + 5000) / 10000) + offset;
+}
- /* one more bit of ICCL in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCLB8;
- else
- pd->icic &= ~ICIC_ICCLB8;
+static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd)
+{
+ unsigned long i2c_clk_khz;
+ u32 tHIGH, tLOW, tf;
+ int offset;
+
+ /* Get clock rate after clock is enabled */
+ clk_enable(pd->clk);
+ i2c_clk_khz = clk_get_rate(pd->clk) / 1000;
+ i2c_clk_khz /= pd->clks_per_count;
+
+ if (pd->bus_speed == STANDARD_MODE) {
+ tLOW = 47; /* tLOW = 4.7 us */
+ tHIGH = 40; /* tHD;STA = tHIGH = 4.0 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else if (pd->bus_speed == FAST_MODE) {
+ tLOW = 13; /* tLOW = 1.3 us */
+ tHIGH = 6; /* tHD;STA = tHIGH = 0.6 us */
+ tf = 3; /* tf = 0.3 us */
+ offset = 0; /* No offset */
+ } else {
+ dev_err(pd->dev, "unrecognized bus speed %lu Hz\n",
+ pd->bus_speed);
+ goto out;
}
- /* Calculate the value for icch. From the data sheet:
- icch = (p clock / transfer rate) * (H / (L + H)) */
- num = i2c_clk * 4;
- tmp = num * 10 / denom;
- if (tmp % 10 >= 5)
- pd->icch = (u_int8_t)((num/denom) + 1);
+ pd->iccl = sh_mobile_i2c_iccl(i2c_clk_khz, tLOW, tf, offset);
+ /* one more bit of ICCL in ICIC */
+ if ((pd->iccl > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCLB8;
else
- pd->icch = (u_int8_t)(num/denom);
+ pd->icic &= ~ICIC_ICCLB8;
+ pd->icch = sh_mobile_i2c_icch(i2c_clk_khz, tHIGH, tf, offset);
/* one more bit of ICCH in ICIC */
- if (pd->flags & IIC_FLAG_HAS_ICIC67) {
- if ((num/denom) > 0xff)
- pd->icic |= ICIC_ICCHB8;
- else
- pd->icic &= ~ICIC_ICCHB8;
- }
+ if ((pd->icch > 0xff) && (pd->flags & IIC_FLAG_HAS_ICIC67))
+ pd->icic |= ICIC_ICCHB8;
+ else
+ pd->icic &= ~ICIC_ICCHB8;
+
+out:
+ clk_disable(pd->clk);
+}
+
+static void activate_ch(struct sh_mobile_i2c_data *pd)
+{
+ /* Wake up device and enable clock */
+ pm_runtime_get_sync(pd->dev);
+ clk_enable(pd->clk);
/* Enable channel and configure rx ack */
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
@@ -246,8 +281,8 @@ static void activate_ch(struct sh_mobile_i2c_data *pd)
iic_wr(pd, ICIC, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
}
static void deactivate_ch(struct sh_mobile_i2c_data *pd)
@@ -434,6 +469,9 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
wake_up(&pd->wait);
}
+ /* defeat write posting to avoid spurious WAIT interrupts */
+ iic_rd(pd, ICSR);
+
return IRQ_HANDLED;
}
@@ -451,8 +489,8 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
iic_set_clr(pd, ICCR, ICCR_ICE, 0);
/* Set the clock */
- iic_wr(pd, ICCL, pd->iccl);
- iic_wr(pd, ICCH, pd->icch);
+ iic_wr(pd, ICCL, pd->iccl & 0xff);
+ iic_wr(pd, ICCH, pd->icch & 0xff);
pd->msg = usr_msg;
pd->pos = -1;
@@ -621,10 +659,13 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_irq;
}
- /* Use platformd data bus speed or NORMAL_SPEED */
- pd->bus_speed = NORMAL_SPEED;
+ /* Use platform data bus speed or STANDARD_MODE */
+ pd->bus_speed = STANDARD_MODE;
if (pdata && pdata->bus_speed)
pd->bus_speed = pdata->bus_speed;
+ pd->clks_per_count = 1;
+ if (pdata && pdata->clks_per_count)
+ pd->clks_per_count = pdata->clks_per_count;
/* The IIC blocks on SH-Mobile ARM processors
* come with two new bits in ICIC.
@@ -632,6 +673,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
if (size > 0x17)
pd->flags |= IIC_FLAG_HAS_ICIC67;
+ sh_mobile_i2c_init(pd);
+
/* Enable Runtime PM for this device.
*
* Also tell the Runtime PM core to ignore children
@@ -667,8 +710,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
goto err_all;
}
- dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
- adap->nr, pd->bus_speed);
+ dev_info(&dev->dev,
+ "I2C adapter %d with bus speed %lu Hz (L/H=%x/%x)\n",
+ adap->nr, pd->bus_speed, pd->iccl, pd->icch);
of_i2c_register_devices(adap);
return 0;
@@ -714,7 +758,7 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
-static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,rmobile-iic", },
{},
};
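To make the new ICCL/ICCH formulas concrete, assume a 104 MHz peripheral clock and clks_per_count == 1 (illustrative figures, not taken from the patch). For standard mode the two expressions evaluate to:

	unsigned long count_khz = 104000000 / 1000;		/* 104000 kHz */

	/* ICCL >= COUNT_CLK * (tLOW + tf), tLOW = 4.7 us, tf = 0.3 us */
	u16 iccl = ((count_khz * (47 + 3)) + 5000) / 10000;	/* = 520 */

	/* ICCH >= COUNT_CLK * (tHIGH + tf), tHIGH = 4.0 us, tf = 0.3 us */
	u16 icch = ((count_khz * (40 + 3)) + 5000) / 10000;	/* = 447 */

Both results exceed 0xff, which is why this patch widens iccl/icch to 16 bits: the ninth bit is carried in ICIC_ICCLB8/ICIC_ICCHB8 while only the low byte is written to the ICCL/ICCH registers.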
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 5574a47792fb..e03381aee34f 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -258,7 +259,7 @@ static const struct i2c_algorithm i2c_sirfsoc_algo = {
.functionality = i2c_sirfsoc_func,
};
-static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
+static int i2c_sirfsoc_probe(struct platform_device *pdev)
{
struct sirfsoc_i2c *siic;
struct i2c_adapter *adap;
@@ -328,6 +329,7 @@ static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
adap->algo = &i2c_sirfsoc_algo;
adap->algo_data = siic;
+ adap->dev.of_node = pdev->dev.of_node;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
@@ -371,6 +373,8 @@ static int __devinit i2c_sirfsoc_probe(struct platform_device *pdev)
clk_disable(clk);
+ of_i2c_register_devices(adap);
+
dev_info(&pdev->dev, " I2C adapter ready to operate\n");
return 0;
@@ -385,7 +389,7 @@ err_get_clk:
return err;
}
-static int __devexit i2c_sirfsoc_remove(struct platform_device *pdev)
+static int i2c_sirfsoc_remove(struct platform_device *pdev)
{
struct i2c_adapter *adapter = platform_get_drvdata(pdev);
struct sirfsoc_i2c *siic = adapter->algo_data;
@@ -433,7 +437,7 @@ static const struct dev_pm_ops i2c_sirfsoc_pm_ops = {
};
#endif
-static const struct of_device_id sirfsoc_i2c_of_match[] __devinitconst = {
+static const struct of_device_id sirfsoc_i2c_of_match[] = {
{ .compatible = "sirf,prima2-i2c", },
{},
};
@@ -449,7 +453,7 @@ static struct platform_driver i2c_sirfsoc_driver = {
.of_match_table = sirfsoc_i2c_of_match,
},
.probe = i2c_sirfsoc_probe,
- .remove = __devexit_p(i2c_sirfsoc_remove),
+ .remove = i2c_sirfsoc_remove,
};
module_platform_driver(i2c_sirfsoc_driver);
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 87e5126d449c..79fd96a04386 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -142,7 +142,7 @@ static void sis5595_write(u8 reg, u8 data)
outb(data, sis5595_base + SMB_DAT);
}
-static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+static int sis5595_setup(struct pci_dev *SIS5595_dev)
{
u16 a;
u8 val;
@@ -376,7 +376,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
MODULE_DEVICE_TABLE (pci, sis5595_ids);
-static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 5d6723b7525e..de6dddb9f865 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -389,7 +389,7 @@ static u32 sis630_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_BLOCK_DATA;
}
-static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+static int sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
@@ -480,7 +480,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
MODULE_DEVICE_TABLE (pci, sis630_ids);
-static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
@@ -496,7 +496,7 @@ static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_i
return i2c_add_adapter(&sis630_adapter);
}
-static void __devexit sis630_remove(struct pci_dev *dev)
+static void sis630_remove(struct pci_dev *dev)
{
if (acpi_base) {
i2c_del_adapter(&sis630_adapter);
@@ -510,7 +510,7 @@ static struct pci_driver sis630_driver = {
.name = "sis630_smbus",
.id_table = sis630_ids,
.probe = sis630_probe,
- .remove = __devexit_p(sis630_remove),
+ .remove = sis630_remove,
};
module_pci_driver(sis630_driver);
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 7b72614a9bc0..b9faf9b6002b 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -252,7 +252,7 @@ static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
MODULE_DEVICE_TABLE (pci, sis96x_ids);
-static int __devinit sis96x_probe(struct pci_dev *dev,
+static int sis96x_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 ww = 0;
@@ -308,7 +308,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
return retval;
}
-static void __devexit sis96x_remove(struct pci_dev *dev)
+static void sis96x_remove(struct pci_dev *dev)
{
if (sis96x_smbus_base) {
i2c_del_adapter(&sis96x_adapter);
@@ -321,7 +321,7 @@ static struct pci_driver sis96x_driver = {
.name = "sis96x_smbus",
.id_table = sis96x_ids,
.probe = sis96x_probe,
- .remove = __devexit_p(sis96x_remove),
+ .remove = sis96x_remove,
};
module_pci_driver(sis96x_driver);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index dcea77bf6f50..7b38877ffec1 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -642,7 +642,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
-static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c", .data = &tegra20_i2c_hw, },
{ .compatible = "nvidia,tegra20-i2c-dvc", .data = &tegra20_i2c_hw, },
@@ -651,7 +651,7 @@ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
#endif
-static int __devinit tegra_i2c_probe(struct platform_device *pdev)
+static int tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -769,7 +769,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tegra_i2c_remove(struct platform_device *pdev)
+static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -817,7 +817,7 @@ static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
- .remove = __devexit_p(tegra_i2c_remove),
+ .remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ffee71ca190..be662511c58b 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -96,7 +96,7 @@ static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
MODULE_DEVICE_TABLE (pci, vt586b_ids);
-static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u16 base;
u8 rev;
@@ -146,7 +146,7 @@ static int __devinit vt586b_probe(struct pci_dev *dev, const struct pci_device_i
return 0;
}
-static void __devexit vt586b_remove(struct pci_dev *dev)
+static void vt586b_remove(struct pci_dev *dev)
{
i2c_del_adapter(&vt586b_adapter);
release_region(I2C_DIR, IOSPACE);
@@ -158,7 +158,7 @@ static struct pci_driver vt586b_driver = {
.name = "vt586b_smbus",
.id_table = vt586b_ids,
.probe = vt586b_probe,
- .remove = __devexit_p(vt586b_remove),
+ .remove = vt586b_remove,
};
module_pci_driver(vt586b_driver);
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 271c9a2b0fd7..b2d90e105f41 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -320,8 +320,8 @@ static struct i2c_adapter vt596_adapter = {
.algo = &smbus_algorithm,
};
-static int __devinit vt596_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int vt596_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
unsigned char temp;
int error;
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
new file mode 100644
index 000000000000..f45c32c1ace6
--- /dev/null
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -0,0 +1,480 @@
+/*
+ * Nano River Technologies viperboard i2c master driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/viperboard.h>
+
+struct vprbrd_i2c {
+ struct i2c_adapter i2c;
+ u8 bus_freq_param;
+};
+
+/* i2c bus frequency module parameter */
+static u8 i2c_bus_param;
+static unsigned int i2c_bus_freq = 100;
+module_param(i2c_bus_freq, int, 0);
+MODULE_PARM_DESC(i2c_bus_freq,
+ "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000");
+
+static int vprbrd_i2c_status(struct i2c_adapter *i2c,
+ struct vprbrd_i2c_status *status, bool prev_error)
+{
+ u16 bytes_xfer;
+ int ret;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+
+ /* check for protocol error */
+ bytes_xfer = sizeof(struct vprbrd_i2c_status);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000,
+ status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != bytes_xfer)
+ prev_error = true;
+
+ if (prev_error) {
+ dev_err(&i2c->dev, "failure in usb communication\n");
+ return -EREMOTEIO;
+ }
+
+ dev_dbg(&i2c->dev, " status = %d\n", status->status);
+ if (status->status != 0x00) {
+ dev_err(&i2c->dev, "failure: i2c protocol error\n");
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_receive(struct usb_device *usb_dev,
+ struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer)
+{
+ int ret, bytes_actual;
+ int error = 0;
+
+ /* send the read request */
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg,
+ sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0)
+ || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ error = -EREMOTEIO;
+ }
+
+ /* read the actual data */
+ ret = usb_bulk_msg(usb_dev,
+ usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) || (bytes_xfer != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure receiving usb\n");
+ error = -EREMOTEIO;
+ }
+ return error;
+}
+
+static int vprbrd_i2c_addr(struct usb_device *usb_dev,
+ struct vprbrd_i2c_addr_msg *amsg)
+{
+ int ret, bytes_actual;
+
+ ret = usb_bulk_msg(usb_dev,
+ usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg,
+ sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual,
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if ((ret < 0) ||
+ (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) {
+ dev_err(&usb_dev->dev, "failure transmitting usb\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret;
+ u16 remain_len, bytes_xfer, len1, len2,
+ start = 0x0000;
+ struct vprbrd_i2c_read_msg *rmsg =
+ (struct vprbrd_i2c_read_msg *)vb->buf;
+
+ remain_len = msg->len;
+ rmsg->header.cmd = VPRBRD_I2C_CMD_READ;
+ while (remain_len > 0) {
+ rmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len <= 255) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len;
+ rmsg->header.len1 = 0x00;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 510) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 255;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0x00;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 512) {
+ len1 = remain_len;
+ len2 = 0x00;
+ rmsg->header.len0 = remain_len - 510;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x00;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 767) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 512;
+ rmsg->header.len4 = 0x00;
+ rmsg->header.len5 = 0x00;
+ bytes_xfer = remain_len;
+ remain_len = 0;
+ } else if (remain_len <= 1022) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 767;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0x00;
+ remain_len = 0;
+ } else if (remain_len <= 1024) {
+ len1 = 512;
+ len2 = remain_len - 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = remain_len - 1022;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len = 0;
+ } else {
+ len1 = 512;
+ len2 = 512;
+ rmsg->header.len0 = 0x02;
+ rmsg->header.len1 = 0xff;
+ rmsg->header.len2 = 0xff;
+ rmsg->header.len3 = 0x02;
+ rmsg->header.len4 = 0xff;
+ rmsg->header.len5 = 0xff;
+ remain_len -= 1024;
+ start += 1024;
+ }
+ rmsg->header.tf1 = cpu_to_le16(len1);
+ rmsg->header.tf2 = cpu_to_le16(len2);
+
+ /* first read transfer */
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start, rmsg, len1);
+
+ /* second read transfer if necessary */
+ if (len2 > 0) {
+ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
+ if (ret < 0)
+ return ret;
+ /* copy the received data */
+ memcpy(msg->buf + start + 512, rmsg, len2);
+ }
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg)
+{
+ int ret, bytes_actual;
+ u16 remain_len, bytes_xfer,
+ start = 0x0000;
+ struct vprbrd_i2c_write_msg *wmsg =
+ (struct vprbrd_i2c_write_msg *)vb->buf;
+
+ remain_len = msg->len;
+ wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE;
+ wmsg->header.last = 0x00;
+ wmsg->header.chan = 0x00;
+ wmsg->header.spi = 0x0000;
+ while (remain_len > 0) {
+ wmsg->header.addr = cpu_to_le16(start + 0x4000);
+ if (remain_len > 503) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = 0xf8;
+ remain_len -= 503;
+ bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr);
+ start += 503;
+ } else if (remain_len > 255) {
+ wmsg->header.len1 = 0xff;
+ wmsg->header.len2 = (remain_len - 255);
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ } else {
+ wmsg->header.len1 = remain_len;
+ wmsg->header.len2 = 0x00;
+ bytes_xfer = remain_len +
+ sizeof(struct vprbrd_i2c_write_hdr);
+ remain_len = 0;
+ }
+ memcpy(wmsg->data, msg->buf + start,
+ bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr));
+
+ ret = usb_bulk_msg(vb->usb_dev,
+ usb_sndbulkpipe(vb->usb_dev,
+ VPRBRD_EP_OUT), wmsg,
+ bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+ if ((ret < 0) || (bytes_xfer != bytes_actual))
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_msg *pmsg;
+ int i, ret,
+ error = 0;
+ struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+ struct vprbrd_i2c_addr_msg *amsg =
+ (struct vprbrd_i2c_addr_msg *)vb->buf;
+ struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf;
+
+ dev_dbg(&i2c->dev, "master xfer %d messages:\n", num);
+
+ for (i = 0 ; i < num ; i++) {
+ pmsg = &msgs[i];
+
+ dev_dbg(&i2c->dev,
+ " %d: %s (flags %d) %d bytes to 0x%02x\n",
+ i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ pmsg->flags, pmsg->len, pmsg->addr);
+
+ /* msgs longer than 2048 bytes are not supported by adapter */
+ if (pmsg->len > 2048)
+ return -EINVAL;
+
+ mutex_lock(&vb->lock);
+ /* directly send the message */
+ if (pmsg->flags & I2C_M_RD) {
+ /* read data */
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x01;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr and the len we're interested in to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_read(vb, pmsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+ /* in case of protocol error, return the error */
+ if (error < 0)
+ goto error;
+ } else {
+ /* write data */
+ ret = vprbrd_i2c_write(vb, pmsg);
+
+ amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+ amsg->unknown2 = 0x00;
+ amsg->unknown3 = 0x00;
+ amsg->addr = pmsg->addr;
+ amsg->unknown1 = 0x00;
+ amsg->len = cpu_to_le16(pmsg->len);
+ /* send the addr, the data goes to the board */
+ ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+ if (ret < 0)
+ error = ret;
+
+ ret = vprbrd_i2c_status(i2c, smsg, error);
+ if (ret < 0)
+ error = ret;
+
+ if (error < 0)
+ goto error;
+ }
+ mutex_unlock(&vb->lock);
+ }
+ return 0;
+error:
+ mutex_unlock(&vb->lock);
+ return error;
+}
+
+static u32 vprbrd_i2c_func(struct i2c_adapter *i2c)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* This is the actual algorithm we define */
+static const struct i2c_algorithm vprbrd_algorithm = {
+ .master_xfer = vprbrd_i2c_xfer,
+ .functionality = vprbrd_i2c_func,
+};
+
+static int vprbrd_i2c_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_i2c *vb_i2c;
+ int ret;
+ int pipe;
+
+ vb_i2c = kzalloc(sizeof(*vb_i2c), GFP_KERNEL);
+ if (vb_i2c == NULL)
+ return -ENOMEM;
+
+ /* setup i2c adapter description */
+ vb_i2c->i2c.owner = THIS_MODULE;
+ vb_i2c->i2c.class = I2C_CLASS_HWMON;
+ vb_i2c->i2c.algo = &vprbrd_algorithm;
+ vb_i2c->i2c.algo_data = vb;
+ /* save the param in usb capable memory */
+ vb_i2c->bus_freq_param = i2c_bus_param;
+
+ snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name),
+ "viperboard at bus %03d device %03d",
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ /* setting the bus frequency */
+ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ)
+ && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) {
+ pipe = usb_sndctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe,
+ VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != 1) {
+ dev_err(&pdev->dev,
+ "failure setting i2c_bus_freq to %d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
+ ret = -EIO;
+ goto error;
+ }
+
+ vb_i2c->i2c.dev.parent = &pdev->dev;
+
+ /* attach to i2c layer */
+ i2c_add_adapter(&vb_i2c->i2c);
+
+ platform_set_drvdata(pdev, vb_i2c);
+
+ return 0;
+
+error:
+ kfree(vb_i2c);
+ return ret;
+}
+
+static int vprbrd_i2c_remove(struct platform_device *pdev)
+{
+ struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&vb_i2c->i2c);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_i2c_driver = {
+ .driver.name = "viperboard-i2c",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_i2c_probe,
+ .remove = vprbrd_i2c_remove,
+};
+
+static int __init vprbrd_i2c_init(void)
+{
+ switch (i2c_bus_freq) {
+ case 6000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ;
+ break;
+ case 3000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ;
+ break;
+ case 1000:
+ i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ;
+ break;
+ case 400:
+ i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ;
+ break;
+ case 200:
+ i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ;
+ break;
+ case 100:
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ break;
+ case 10:
+ i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ;
+ break;
+ default:
+ pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq);
+ i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_i2c_driver);
+}
+subsys_initcall(vprbrd_i2c_init);
+
+static void __exit vprbrd_i2c_exit(void)
+{
+ platform_driver_unregister(&vprbrd_i2c_driver);
+}
+module_exit(vprbrd_i2c_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-i2c");
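The read path above splits a transfer into USB bulk reads of at most 512 bytes, spreading the requested length over the len0..len5 header bytes (which appear to be additive) and announcing the two bulk-in sizes in tf1/tf2. As a worked example, a hypothetical 600-byte read takes the remain_len <= 767 branch and fills the header as:

	rmsg->header.addr = cpu_to_le16(0x4000);	/* start == 0 */
	rmsg->header.len0 = 0x02;			/* 2 + 255 + 255 = 512 for chunk 1 */
	rmsg->header.len1 = 0xff;
	rmsg->header.len2 = 0xff;
	rmsg->header.len3 = 88;				/* 600 - 512 bytes for chunk 2 */
	rmsg->header.len4 = 0x00;
	rmsg->header.len5 = 0x00;
	rmsg->header.tf1 = cpu_to_le16(512);		/* first bulk-in length */
	rmsg->header.tf2 = cpu_to_le16(88);		/* second bulk-in length */

vprbrd_i2c_read() then performs two vprbrd_i2c_receive() calls, copying 512 bytes to msg->buf and the remaining 88 bytes to msg->buf + 512.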
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 641d0e5e3303..f042f6da0ace 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -689,7 +689,7 @@ static struct i2c_adapter xiic_adapter = {
};
-static int __devinit xiic_i2c_probe(struct platform_device *pdev)
+static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
struct xiic_i2c_platform_data *pdata;
@@ -774,7 +774,7 @@ resource_missing:
return -ENOENT;
}
-static int __devexit xiic_i2c_remove(struct platform_device* pdev)
+static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
struct resource *res;
@@ -800,7 +800,7 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
}
#if defined(CONFIG_OF)
-static const struct of_device_id xiic_of_match[] __devinitconst = {
+static const struct of_device_id xiic_of_match[] = {
{ .compatible = "xlnx,xps-iic-2.00.a", },
{},
};
@@ -809,7 +809,7 @@ MODULE_DEVICE_TABLE(of, xiic_of_match);
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
- .remove = __devexit_p(xiic_i2c_remove),
+ .remove = xiic_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 96d3fabd8883..a005265461da 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -214,7 +214,7 @@ static struct i2c_algorithm xlr_i2c_algo = {
.functionality = xlr_func,
};
-static int __devinit xlr_i2c_probe(struct platform_device *pdev)
+static int xlr_i2c_probe(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
struct resource *res;
@@ -251,7 +251,7 @@ static int __devinit xlr_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit xlr_i2c_remove(struct platform_device *pdev)
+static int xlr_i2c_remove(struct platform_device *pdev)
{
struct xlr_i2c_private *priv;
@@ -263,7 +263,7 @@ static int __devexit xlr_i2c_remove(struct platform_device *pdev)
static struct platform_driver xlr_i2c_driver = {
.probe = xlr_i2c_probe,
- .remove = __devexit_p(xlr_i2c_remove),
+ .remove = xlr_i2c_remove,
.driver = {
.name = "xlr-i2cbus",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 08aab57337dd..3862a953239c 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -389,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
static struct scx200_acb_iface *scx200_acb_list;
static DEFINE_MUTEX(scx200_acb_list_mutex);
-static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
+static int scx200_acb_probe(struct scx200_acb_iface *iface)
{
u8 val;
@@ -424,7 +424,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
return 0;
}
-static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
+static struct scx200_acb_iface *scx200_create_iface(const char *text,
struct device *dev, int index)
{
struct scx200_acb_iface *iface;
@@ -449,7 +449,7 @@ static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
return iface;
}
-static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
+static int scx200_acb_create(struct scx200_acb_iface *iface)
{
struct i2c_adapter *adapter;
int rc;
@@ -480,7 +480,7 @@ static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
return 0;
}
-static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+static struct scx200_acb_iface *scx200_create_dev(const char *text,
unsigned long base, int index, struct device *dev)
{
struct scx200_acb_iface *iface;
@@ -508,7 +508,7 @@ static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
return NULL;
}
-static int __devinit scx200_probe(struct platform_device *pdev)
+static int scx200_probe(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
struct resource *res;
@@ -530,14 +530,14 @@ static int __devinit scx200_probe(struct platform_device *pdev)
return 0;
}
-static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+static void scx200_cleanup_iface(struct scx200_acb_iface *iface)
{
i2c_del_adapter(&iface->adapter);
release_region(iface->base, 8);
kfree(iface);
}
-static int __devexit scx200_remove(struct platform_device *pdev)
+static int scx200_remove(struct platform_device *pdev)
{
struct scx200_acb_iface *iface;
@@ -554,7 +554,7 @@ static struct platform_driver scx200_pci_driver = {
.owner = THIS_MODULE,
},
.probe = scx200_probe,
- .remove = __devexit_p(scx200_remove),
+ .remove = scx200_remove,
};
static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index a7edf987a339..e388590b44ab 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -39,6 +39,7 @@
#include <linux/irqflags.h>
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
@@ -78,6 +79,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
driver = to_i2c_driver(drv);
/* match on an id table if there is one */
if (driver->id_table)
@@ -539,6 +544,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
+ ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle);
/* For 10-bit clients, add an arbitrary offset to avoid collisions */
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 566a6757a33d..9f50ef04a4bd 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include <linux/of_i2c.h>
+#include <linux/of_gpio.h>
struct gpiomux {
struct i2c_adapter *parent;
@@ -51,35 +53,116 @@ static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
return 0;
}
-static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip,
+static int match_gpio_chip_by_label(struct gpio_chip *chip,
void *data)
{
return !strcmp(chip->label, data);
}
-static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *adapter_np, *child;
+ struct i2c_adapter *adapter;
+ unsigned *values, *gpios;
+ int i = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(&pdev->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->data.parent = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ mux->data.n_values = of_get_child_count(np);
+
+ values = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.values) * mux->data.n_values,
+ GFP_KERNEL);
+ if (!values) {
+ dev_err(&pdev->dev, "Cannot allocate values array");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ of_property_read_u32(child, "reg", values + i);
+ i++;
+ }
+ mux->data.values = values;
+
+ if (of_property_read_u32(np, "idle-state", &mux->data.idle))
+ mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
+
+ mux->data.n_gpios = of_gpio_named_count(np, "mux-gpios");
+ if (mux->data.n_gpios < 0) {
+ dev_err(&pdev->dev, "Missing mux-gpios property in the DT.\n");
+ return -EINVAL;
+ }
+
+ gpios = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->data.gpios) * mux->data.n_gpios, GFP_KERNEL);
+ if (!gpios) {
+ dev_err(&pdev->dev, "Cannot allocate gpios array");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mux->data.n_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
+
+ mux->data.gpios = gpios;
+
+ return 0;
+}
+#else
+static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state, gpio_base;
int i, ret;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "Missing platform data\n");
- return -ENODEV;
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
+ return -ENOMEM;
}
+ platform_set_drvdata(pdev, mux);
+
+ if (!pdev->dev.platform_data) {
+ ret = i2c_mux_gpio_probe_dt(mux, pdev);
+ if (ret < 0)
+ return ret;
+ } else
+ memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
+
/*
* If a GPIO chip name is provided, the GPIO pin numbers provided are
* relative to its base GPIO number. Otherwise they are absolute.
*/
- if (pdata->gpio_chip) {
+ if (mux->data.gpio_chip) {
struct gpio_chip *gpio;
- gpio = gpiochip_find(pdata->gpio_chip,
+ gpio = gpiochip_find(mux->data.gpio_chip,
match_gpio_chip_by_label);
if (!gpio)
return -EPROBE_DEFER;
@@ -89,49 +172,44 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
gpio_base = 0;
}
- parent = i2c_get_adapter(pdata->parent);
+ parent = i2c_get_adapter(mux->data.parent);
if (!parent) {
dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
- pdata->parent);
+ mux->data.parent);
return -ENODEV;
}
- mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- ret = -ENOMEM;
- goto alloc_failed;
- }
-
mux->parent = parent;
- mux->data = *pdata;
mux->gpio_base = gpio_base;
+
mux->adap = devm_kzalloc(&pdev->dev,
- sizeof(*mux->adap) * pdata->n_values,
+ sizeof(*mux->adap) * mux->data.n_values,
GFP_KERNEL);
if (!mux->adap) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
ret = -ENOMEM;
goto alloc_failed;
}
- if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
- initial_state = pdata->idle;
+ if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
+ initial_state = mux->data.idle;
deselect = i2c_mux_gpio_deselect;
} else {
- initial_state = pdata->values[0];
+ initial_state = mux->data.values[0];
deselect = NULL;
}
- for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio");
+ for (i = 0; i < mux->data.n_gpios; i++) {
+ ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
- gpio_direction_output(gpio_base + pdata->gpios[i],
+ gpio_direction_output(gpio_base + mux->data.gpios[i],
initial_state & (1 << i));
}
- for (i = 0; i < pdata->n_values; i++) {
- u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- unsigned int class = pdata->classes ? pdata->classes[i] : 0;
+ for (i = 0; i < mux->data.n_values; i++) {
+ u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
+ unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
i, class,
@@ -144,26 +222,24 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%d port mux on %s adapter\n",
- pdata->n_values, parent->name);
-
- platform_set_drvdata(pdev, mux);
+ mux->data.n_values, parent->name);
return 0;
add_adapter_failed:
for (; i > 0; i--)
i2c_del_mux_adapter(mux->adap[i - 1]);
- i = pdata->n_gpios;
+ i = mux->data.n_gpios;
err_request_gpio:
for (; i > 0; i--)
- gpio_free(gpio_base + pdata->gpios[i - 1]);
+ gpio_free(gpio_base + mux->data.gpios[i - 1]);
alloc_failed:
i2c_put_adapter(parent);
return ret;
}
-static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
+static int i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -180,12 +256,19 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id i2c_mux_gpio_of_match[] = {
+ { .compatible = "i2c-mux-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_gpio_of_match);
+
static struct platform_driver i2c_mux_gpio_driver = {
.probe = i2c_mux_gpio_probe,
- .remove = __devexit_p(i2c_mux_gpio_remove),
+ .remove = i2c_mux_gpio_remove,
.driver = {
.owner = THIS_MODULE,
.name = "i2c-mux-gpio",
+ .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
},
};
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 7fa5b24b16db..a43c0ce5e3d8 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -129,7 +129,7 @@ static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
}
#endif
-static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux;
int (*deselect)(struct i2c_adapter *, void *, u32);
@@ -167,7 +167,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
}
mux->busses = devm_kzalloc(&pdev->dev,
- sizeof(mux->busses) * mux->pdata->bus_count,
+ sizeof(*mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
@@ -241,7 +241,7 @@ err:
return ret;
}
-static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
{
struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
int i;
@@ -255,7 +255,7 @@ static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+static const struct of_device_id i2c_mux_pinctrl_of_match[] = {
{ .compatible = "i2c-mux-pinctrl", },
{},
};
@@ -269,7 +269,7 @@ static struct platform_driver i2c_mux_pinctrl_driver = {
.of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
},
.probe = i2c_mux_pinctrl_probe,
- .remove = __devexit_p(i2c_mux_pinctrl_remove),
+ .remove = i2c_mux_pinctrl_remove,
};
module_platform_driver(i2c_mux_pinctrl_driver);
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 01451940393b..c7eaf20af926 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
.cable_detect = atp86x_cable_detect,
};
-static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
+static const struct ide_port_info aec62xx_chipsets[] = {
{ /* 0: AEC6210 */
.name = DRV_NAME,
.init_chipset = init_chipset_aec62xx,
@@ -251,7 +251,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
* chips, pass a local copy of 'struct ide_port_info' down the call chain.
*/
-static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct chipset_bus_clock_list_entry *bus_clock;
struct ide_port_info d;
@@ -287,7 +287,7 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
return err;
}
-static void __devexit aec62xx_remove(struct pci_dev *dev)
+static void aec62xx_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
@@ -307,7 +307,7 @@ static struct pci_driver aec62xx_pci_driver = {
.name = "AEC62xx_IDE",
.id_table = aec62xx_pci_tbl,
.probe = aec62xx_init_one,
- .remove = __devexit_p(aec62xx_remove),
+ .remove = aec62xx_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 911a27ca356b..36f76e28a0bf 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -415,7 +415,7 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
* Sparc systems.
*/
-static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
+static void init_hwif_ali15x3(ide_hwif_t *hwif)
{
u8 ideic, inmir;
s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
@@ -464,8 +464,7 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
* Set up the DMA functionality on the ALi 15x3.
*/
-static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int init_dma_ali15x3(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = ide_pci_dma_base(hwif, d);
@@ -512,7 +511,7 @@ static const struct ide_dma_ops ali_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info ali15x3_chipset __devinitconst = {
+static const struct ide_port_info ali15x3_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_ali15x3,
.init_hwif = init_hwif_ali15x3,
@@ -532,7 +531,8 @@ static const struct ide_port_info ali15x3_chipset __devinitconst = {
* hot plug layer.
*/
-static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int alim15x3_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct ide_port_info d = ali15x3_chipset;
u8 rev = dev->revision, idx = id->driver_data;
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 56fc99557ba2..cbfe846911d1 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
+static const struct ide_port_info amd74xx_chipsets[] = {
/* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
/* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
/* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
@@ -235,7 +235,7 @@ static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
/* 6: AMD5536 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
};
-static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index cb43480b1bd5..dbd0f242ec18 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
.cable_detect = atiixp_cable_detect,
};
-static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
+static const struct ide_port_info atiixp_pci_info[] = {
{ /* 0: IXP200/300/400/700 */
.name = DRV_NAME,
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
@@ -168,7 +168,7 @@ static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
}
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index d1fc43802f5d..b127ed60c733 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
+static const struct ide_port_info cmd64x_chipsets[] = {
{ /* 0: CMD643 */
.name = DRV_NAME,
.init_chipset = init_chipset_cmd64x,
@@ -373,7 +373,7 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
}
};
-static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 14447621e60b..6250aee30503 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
.set_dma_mode = cs5520_set_dma_mode,
};
-static const struct ide_port_info cyrix_chipset __devinitconst = {
+static const struct ide_port_info cyrix_chipset = {
.name = DRV_NAME,
.enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
.port_ops = &cs5520_port_ops,
@@ -108,7 +108,7 @@ static const struct ide_port_info cyrix_chipset __devinitconst = {
* work longhand.
*/
-static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &cyrix_chipset;
struct ide_hw hw[2], *hws[] = { NULL, NULL };
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index 49b40ad59d1a..65371599b976 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -226,7 +226,7 @@ out:
* performs channel-specific pre-initialization before drive probing.
*/
-static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
+static void init_hwif_cs5530 (ide_hwif_t *hwif)
{
unsigned long basereg;
u32 d0_timings;
@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
.udma_filter = cs5530_udma_filter,
};
-static const struct ide_port_info cs5530_chipset __devinitconst = {
+static const struct ide_port_info cs5530_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_cs5530,
.init_hwif = init_hwif_cs5530,
@@ -257,7 +257,7 @@ static const struct ide_port_info cs5530_chipset __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &cs5530_chipset, NULL);
}
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
index 18d4c852602b..3bc5b9a34013 100644
--- a/drivers/ide/cs5535.c
+++ b/drivers/ide/cs5535.c
@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
.cable_detect = cs5535_cable_detect,
};
-static const struct ide_port_info cs5535_chipset __devinitconst = {
+static const struct ide_port_info cs5535_chipset = {
.name = DRV_NAME,
.port_ops = &cs5535_port_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
@@ -179,8 +179,7 @@ static const struct ide_port_info cs5535_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit cs5535_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &cs5535_chipset, NULL);
}
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3ffb49dab574..f5820079a286 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -145,7 +145,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_dev_put(dev);
}
-static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+static void init_iops_cy82c693(ide_hwif_t *hwif)
{
static ide_hwif_t *primary;
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
.set_dma_mode = cy82c693_set_dma_mode,
};
-static const struct ide_port_info cy82c693_chipset __devinitconst = {
+static const struct ide_port_info cy82c693_chipset = {
.name = DRV_NAME,
.init_iops = init_iops_cy82c693,
.port_ops = &cy82c693_port_ops,
@@ -173,7 +173,8 @@ static const struct ide_port_info cy82c693_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int cy82c693_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct pci_dev *dev2;
int ret = -ENODEV;
@@ -190,7 +191,7 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
return ret;
}
-static void __devexit cy82c693_remove(struct pci_dev *dev)
+static void cy82c693_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
@@ -209,7 +210,7 @@ static struct pci_driver cy82c693_pci_driver = {
.name = "Cypress_IDE",
.id_table = cy82c693_pci_tbl,
.probe = cy82c693_init_one,
- .remove = __devexit_p(cy82c693_remove),
+ .remove = cy82c693_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index 1e10eba62ceb..7e27d3295e55 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -71,8 +71,7 @@ static const struct ide_port_info delkin_cb_port_info = {
.chipset = ide_pci,
};
-static int __devinit
-delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
+static int delkin_cb_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_host *host;
unsigned long base;
@@ -158,7 +157,7 @@ static int delkin_cb_resume(struct pci_dev *dev)
#define delkin_cb_resume NULL
#endif
-static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
+static struct pci_device_id delkin_cb_pci_tbl[] = {
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0, },
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4aec3b87ff91..696b6c1ec940 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
}
};
-static const struct hpt_info hpt36x __devinitconst = {
+static const struct hpt_info hpt36x = {
.chip_name = "HPT36x",
.chip_type = HPT36x,
.udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitconst = {
.timings = &hpt36x_timings
};
-static const struct hpt_info hpt370 __devinitconst = {
+static const struct hpt_info hpt370 = {
.chip_name = "HPT370",
.chip_type = HPT370,
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt370a __devinitconst = {
+static const struct hpt_info hpt370a = {
.chip_name = "HPT370A",
.chip_type = HPT370A,
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt374 __devinitconst = {
+static const struct hpt_info hpt374 = {
.chip_name = "HPT374",
.chip_type = HPT374,
.udma_mask = ATA_UDMA5,
@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372 __devinitconst = {
+static const struct hpt_info hpt372 = {
.chip_name = "HPT372",
.chip_type = HPT372,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372a __devinitconst = {
+static const struct hpt_info hpt372a = {
.chip_name = "HPT372A",
.chip_type = HPT372A,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt302 __devinitconst = {
+static const struct hpt_info hpt302 = {
.chip_name = "HPT302",
.chip_type = HPT302,
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt371 __devinitconst = {
+static const struct hpt_info hpt371 = {
.chip_name = "HPT371",
.chip_type = HPT371,
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt372n __devinitconst = {
+static const struct hpt_info hpt372n = {
.chip_name = "HPT372N",
.chip_type = HPT372N,
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt302n __devinitconst = {
+static const struct hpt_info hpt302n = {
.chip_name = "HPT302N",
.chip_type = HPT302N,
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitconst = {
.timings = &hpt37x_timings
};
-static const struct hpt_info hpt371n __devinitconst = {
+static const struct hpt_info hpt371n = {
.chip_name = "HPT371N",
.chip_type = HPT371N,
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
@@ -1197,7 +1197,7 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
return (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
+static void init_hwif_hpt366(ide_hwif_t *hwif)
{
struct hpt_info *info = hpt3xx_get_info(hwif->dev);
u8 chip_type = info->chip_type;
@@ -1221,7 +1221,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
}
}
-static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
+static int init_dma_hpt366(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -1265,7 +1265,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
return 0;
}
-static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
+static void hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
{
if (dev2->irq != dev->irq) {
/* FIXME: we need a core pci_set_interrupt() */
@@ -1275,7 +1275,7 @@ static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
}
}
-static void __devinit hpt371_init(struct pci_dev *dev)
+static void hpt371_init(struct pci_dev *dev)
{
u8 mcr1 = 0;
@@ -1290,7 +1290,7 @@ static void __devinit hpt371_init(struct pci_dev *dev)
pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
}
-static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
+static int hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
{
u8 mcr1 = 0, pin1 = 0, pin2 = 0;
@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
+static const struct ide_port_info hpt366_chipsets[] = {
{ /* 0: HPT36x */
.name = DRV_NAME,
.init_chipset = init_chipset_hpt366,
@@ -1402,7 +1402,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*/
-static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct hpt_info *info = NULL;
struct hpt_info *dyn_info;
@@ -1499,7 +1499,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
return ret;
}
-static void __devexit hpt366_remove(struct pci_dev *dev)
+static void hpt366_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct ide_info *info = host->host_priv;
@@ -1510,7 +1510,7 @@ static void __devexit hpt366_remove(struct pci_dev *dev)
kfree(info);
}
-static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = {
+static const struct pci_device_id hpt366_pci_tbl[] = {
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 },
{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 },
@@ -1525,7 +1525,7 @@ static struct pci_driver hpt366_pci_driver = {
.name = "HPT366_IDE",
.id_table = hpt366_pci_tbl,
.probe = hpt366_init_one,
- .remove = __devexit_p(hpt366_remove),
+ .remove = hpt366_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index e640d0ac3af6..9f0a48e39b8a 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -406,8 +406,8 @@ static const struct ide_port_info icside_v5_port_info = {
.chipset = ide_acorn,
};
-static int __devinit
-icside_register_v5(struct icside_state *state, struct expansion_card *ec)
+static int icside_register_v5(struct icside_state *state,
+ struct expansion_card *ec)
{
void __iomem *base;
struct ide_host *host;
@@ -460,8 +460,8 @@ static const struct ide_port_info icside_v6_port_info __initconst = {
.chipset = ide_acorn,
};
-static int __devinit
-icside_register_v6(struct icside_state *state, struct expansion_card *ec)
+static int icside_register_v6(struct icside_state *state,
+ struct expansion_card *ec)
{
void __iomem *ioc_base, *easi_base;
struct ide_host *host;
@@ -537,8 +537,7 @@ out:
return ret;
}
-static int __devinit
-icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct icside_state *state;
void __iomem *idmem;
@@ -604,7 +603,7 @@ icside_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit icside_remove(struct expansion_card *ec)
+static void icside_remove(struct expansion_card *ec)
{
struct icside_state *state = ecard_get_drvdata(ec);
@@ -666,7 +665,7 @@ static const struct ecard_id icside_ids[] = {
static struct ecard_driver icside_driver = {
.probe = icside_probe,
- .remove = __devexit_p(icside_remove),
+ .remove = icside_remove,
.shutdown = icside_shutdown,
.id_table = icside_ids,
.drv = {
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index dab5b670bfbf..673420db953f 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
.udma_mask = ATA_UDMA6, \
}
-static const struct ide_port_info generic_chipsets[] __devinitconst = {
+static const struct ide_port_info generic_chipsets[] = {
/* 0: Unknown */
DECLARE_GENERIC_PCI_DEV(0),
@@ -103,7 +103,7 @@ static const struct ide_port_info generic_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &generic_chipsets[id->driver_data];
int ret = -ENODEV;
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 962693b10a1c..ba4bfbead24b 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -22,11 +22,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
- void __iomem *base,
- void __iomem *ctrl,
- struct pata_platform_info *pdata,
- int irq)
+static void plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base,
+ void __iomem *ctrl,
+ struct pata_platform_info *pdata, int irq)
{
unsigned long port = (unsigned long)base;
int i;
@@ -48,7 +46,7 @@ static const struct ide_port_info platform_ide_port_info = {
.chipset = ide_generic,
};
-static int __devinit plat_ide_probe(struct platform_device *pdev)
+static int plat_ide_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_alt, *res_irq;
void __iomem *base, *alt_base;
@@ -115,7 +113,7 @@ out:
return ret;
}
-static int __devexit plat_ide_remove(struct platform_device *pdev)
+static int plat_ide_remove(struct platform_device *pdev)
{
struct ide_host *host = dev_get_drvdata(&pdev->dev);
@@ -130,7 +128,7 @@ static struct platform_driver platform_ide_driver = {
.owner = THIS_MODULE,
},
.probe = plat_ide_probe,
- .remove = __devexit_p(plat_ide_remove),
+ .remove = plat_ide_remove,
};
static int __init platform_ide_init(void)
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
index d5dd180c4b85..b6f674ab4fb7 100644
--- a/drivers/ide/it8172.c
+++ b/drivers/ide/it8172.c
@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
.set_dma_mode = it8172_set_dma_mode,
};
-static const struct ide_port_info it8172_port_info __devinitconst = {
+static const struct ide_port_info it8172_port_info = {
.name = DRV_NAME,
.port_ops = &it8172_port_ops,
.enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
@@ -125,8 +125,7 @@ static const struct ide_port_info it8172_port_info __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit it8172_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int it8172_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return -ENODEV; /* IT8172 is more than an IDE controller */
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
index 1847aeb5450a..6b92846682fc 100644
--- a/drivers/ide/it8213.c
+++ b/drivers/ide/it8213.c
@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
.cable_detect = it8213_cable_detect,
};
-static const struct ide_port_info it8213_chipset __devinitconst = {
+static const struct ide_port_info it8213_chipset = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80} },
.port_ops = &it8213_port_ops,
@@ -177,7 +177,7 @@ static const struct ide_port_info it8213_chipset __devinitconst = {
* standard helper functions to do almost all the work for us.
*/
-static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &it8213_chipset, NULL);
}
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index c5611dbca342..f01ba4606be0 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -528,7 +528,7 @@ static struct ide_dma_ops it821x_pass_through_dma_ops = {
* ide DMA handlers appropriately
*/
-static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
+static void init_hwif_it821x(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
.cable_detect = it821x_cable_detect,
};
-static const struct ide_port_info it821x_chipset __devinitconst = {
+static const struct ide_port_info it821x_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_it821x,
.init_hwif = init_hwif_it821x,
@@ -647,7 +647,7 @@ static const struct ide_port_info it821x_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct it821x_dev *itdevs;
int rc;
@@ -667,7 +667,7 @@ static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_devic
return rc;
}
-static void __devexit it821x_remove(struct pci_dev *dev)
+static void it821x_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct it821x_dev *itdevs = host->host_priv;
@@ -689,7 +689,7 @@ static struct pci_driver it821x_pci_driver = {
.name = "ITE821x IDE",
.id_table = it821x_pci_tbl,
.probe = it821x_init_one,
- .remove = __devexit_p(it821x_remove),
+ .remove = it821x_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
index efddd7d9f92d..ae6480dcbadf 100644
--- a/drivers/ide/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
.cable_detect = jmicron_cable_detect,
};
-static const struct ide_port_info jmicron_chipset __devinitconst = {
+static const struct ide_port_info jmicron_chipset = {
.name = DRV_NAME,
.enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
.port_ops = &jmicron_port_ops,
@@ -120,7 +120,7 @@ static const struct ide_port_info jmicron_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &jmicron_chipset, NULL);
}
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
index 73f78d872d55..392fd106edf1 100644
--- a/drivers/ide/ns87415.c
+++ b/drivers/ide/ns87415.c
@@ -96,7 +96,7 @@ static const struct ide_tp_ops superio_tp_ops = {
.output_data = ide_output_data,
};
-static void __devinit superio_init_iops(struct hwif_s *hwif)
+static void superio_init_iops(struct hwif_s *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
u32 dma_stat;
@@ -201,7 +201,7 @@ static int ns87415_dma_end(ide_drive_t *drive)
return (dma_stat & 7) != 4;
}
-static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
+static void init_hwif_ns87415 (ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int ctrl, using_inta;
@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
.dma_sff_read_status = superio_dma_sff_read_status,
};
-static const struct ide_port_info ns87415_chipset __devinitconst = {
+static const struct ide_port_info ns87415_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_ns87415,
.tp_ops = &ns87415_tp_ops,
@@ -302,7 +302,7 @@ static const struct ide_port_info ns87415_chipset __devinitconst = {
IDE_HFLAG_NO_ATAPI_DMA,
};
-static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = ns87415_chipset;
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
index 39edc66cb96c..26a45007e535 100644
--- a/drivers/ide/opti621.c
+++ b/drivers/ide/opti621.c
@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
.set_pio_mode = opti621_set_pio_mode,
};
-static const struct ide_port_info opti621_chipset __devinitconst = {
+static const struct ide_port_info opti621_chipset = {
.name = DRV_NAME,
.enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
.port_ops = &opti621_port_ops,
@@ -139,7 +139,7 @@ static const struct ide_port_info opti621_chipset __devinitconst = {
.pio_mask = ATA_PIO4,
};
-static int __devinit opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int opti621_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &opti621_chipset, NULL);
}
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 712c7904d03e..6107cc4ee012 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -220,7 +220,7 @@ static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
}
-static void __devinit palm_bk3710_chipinit(void __iomem *base)
+static void palm_bk3710_chipinit(void __iomem *base)
{
/*
* REVISIT: the ATA reset signal needs to be managed through a
@@ -282,8 +282,7 @@ static u8 palm_bk3710_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int palm_bk3710_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
@@ -301,7 +300,7 @@ static const struct ide_port_ops palm_bk3710_ports_ops = {
.cable_detect = palm_bk3710_cable_detect,
};
-static struct ide_port_info __devinitdata palm_bk3710_port_info = {
+static struct ide_port_info palm_bk3710_port_info = {
.init_dma = palm_bk3710_init_dma,
.port_ops = &palm_bk3710_ports_ops,
.dma_ops = &sff_dma_ops,
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 2e5ceb62fb3b..df73cbd9387e 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -422,7 +422,7 @@ static int init_chipset_pdcnew(struct pci_dev *dev)
return 0;
}
-static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
+static struct pci_dev *pdc20270_get_dev2(struct pci_dev *dev)
{
struct pci_dev *dev2;
@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
+static const struct ide_port_info pdcnew_chipsets[] = {
/* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
/* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
};
@@ -479,7 +479,7 @@ static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
struct pci_dev *bridge = dev->bus->self;
@@ -514,7 +514,7 @@ static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_de
return ide_pci_init_one(dev, d, NULL);
}
-static void __devexit pdc202new_remove(struct pci_dev *dev)
+static void pdc202new_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
@@ -539,7 +539,7 @@ static struct pci_driver pdc202new_pci_driver = {
.name = "Promise_IDE",
.id_table = pdc202new_pci_tbl,
.probe = pdc202new_init_one,
- .remove = __devexit_p(pdc202new_remove),
+ .remove = pdc202new_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 563451096812..224ad46d6cb2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -211,8 +211,7 @@ out:
return 0;
}
-static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
- const char *name)
+static void pdc202ata4_fixup_irq(struct pci_dev *dev, const char *name)
{
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
u8 irq = 0, irq2 = 0;
@@ -270,7 +269,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
.max_sectors = sectors, \
}
-static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
+static const struct ide_port_info pdc202xx_chipsets[] = {
{ /* 0: PDC20246 */
.name = DRV_NAME,
.init_chipset = init_chipset_pdc202xx,
@@ -297,7 +296,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int pdc202xx_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
const struct ide_port_info *d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index fe0fd60cfc09..a671cead6ae7 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -297,7 +297,7 @@ static u8 piix_cable_detect(ide_hwif_t *hwif)
* capabilities of the hardware.
*/
-static void __devinit init_hwif_piix(ide_hwif_t *hwif)
+static void init_hwif_piix(ide_hwif_t *hwif)
{
if (!hwif->dma_base)
return;
@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
.udma_mask = udma, \
}
-static const struct ide_port_info piix_pci_info[] __devinitconst = {
+static const struct ide_port_info piix_pci_info[] = {
/* 0: MPIIX */
{ /*
* MPIIX actually has only a single IDE channel mapped to
@@ -382,7 +382,7 @@ static const struct ide_port_info piix_pci_info[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
}
@@ -394,7 +394,7 @@ static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_
* they are found, disable use of DMA IDE
*/
-static void __devinit piix_check_450nx(void)
+static void piix_check_450nx(void)
{
struct pci_dev *pdev = NULL;
u16 cfg;
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index e944c7f705f7..bf83d7bb6bc6 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1025,8 +1025,7 @@ static const struct ide_port_info pmac_port_info = {
* Setup, register & probe an IDE channel driven by this driver, this is
* called by one of the 2 probe functions (macio or PCI).
*/
-static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
- struct ide_hw *hw)
+static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
{
struct device_node *np = pmif->node;
const int *bidp;
@@ -1126,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
return rc;
}
-static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
+static void pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
{
int i;
@@ -1139,8 +1138,8 @@ static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
/*
* Attach to a macio probed interface
*/
-static int __devinit
-pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int pmac_ide_macio_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
void __iomem *base;
unsigned long regbase;
@@ -1262,8 +1261,8 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
/*
* Attach to a PCI probed interface
*/
-static int __devinit
-pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+static int pmac_ide_pci_attach(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct device_node *np;
pmac_ide_hwif_t *pmif;
@@ -1692,8 +1691,7 @@ static const struct ide_dma_ops pmac_dma_ops = {
* Allocate the data structures needed for using DMA with an interface
* and fill the proper list of functions pointers
*/
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int pmac_ide_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
index 48d976aad7ab..d73c3d10087c 100644
--- a/drivers/ide/rapide.c
+++ b/drivers/ide/rapide.c
@@ -29,8 +29,7 @@ static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
hw->irq = irq;
}
-static int __devinit
-rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
{
void __iomem *base;
struct ide_host *host;
@@ -64,7 +63,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit rapide_remove(struct expansion_card *ec)
+static void rapide_remove(struct expansion_card *ec)
{
struct ide_host *host = ecard_get_drvdata(ec);
@@ -82,7 +81,7 @@ static struct ecard_id rapide_ids[] = {
static struct ecard_driver rapide_driver = {
.probe = rapide_probe,
- .remove = __devexit_p(rapide_remove),
+ .remove = rapide_remove,
.id_table = rapide_ids,
.drv = {
.name = "rapide",
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
index c04173e9fc38..f4b66f7ec9fd 100644
--- a/drivers/ide/rz1000.c
+++ b/drivers/ide/rz1000.c
@@ -22,7 +22,7 @@
#define DRV_NAME "rz1000"
-static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
+static int rz1000_disable_readahead(struct pci_dev *dev)
{
u16 reg;
@@ -38,12 +38,12 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
}
}
-static const struct ide_port_info rz1000_chipset __devinitconst = {
+static const struct ide_port_info rz1000_chipset = {
.name = DRV_NAME,
.host_flags = IDE_HFLAG_NO_DMA,
};
-static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = rz1000_chipset;
int rc;
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
index d4758ebe77da..a5b701818405 100644
--- a/drivers/ide/sc1200.c
+++ b/drivers/ide/sc1200.c
@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info sc1200_chipset __devinitconst = {
+static const struct ide_port_info sc1200_chipset = {
.name = DRV_NAME,
.port_ops = &sc1200_port_ops,
.dma_ops = &sc1200_dma_ops,
@@ -303,7 +303,7 @@ static const struct ide_port_info sc1200_chipset __devinitconst = {
.udma_mask = ATA_UDMA2,
};
-static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct sc1200_saved_state *ss = NULL;
int rc;
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 970103810021..2a2d188b5d5b 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -585,8 +585,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
* Perform the initial set up for this device.
*/
-static int __devinit init_setup_scc(struct pci_dev *dev,
- const struct ide_port_info *d)
+static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d)
{
unsigned long ctl_base;
unsigned long dma_base;
@@ -718,7 +717,7 @@ static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
*
*/
-static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
+static void init_mmio_iops_scc(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct scc_ports *ports = pci_get_drvdata(dev);
@@ -738,7 +737,7 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
* and then do the MMIO setup.
*/
-static void __devinit init_iops_scc(ide_hwif_t *hwif)
+static void init_iops_scc(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -748,8 +747,7 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
init_mmio_iops_scc(hwif);
}
-static int __devinit scc_init_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
return ide_allocate_dma_engine(hwif);
}
@@ -768,7 +766,7 @@ static u8 scc_cable_detect(ide_hwif_t *hwif)
* ide DMA handlers appropriately.
*/
-static void __devinit init_hwif_scc(ide_hwif_t *hwif)
+static void init_hwif_scc(ide_hwif_t *hwif)
{
/* PTERADD */
out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
@@ -811,7 +809,7 @@ static const struct ide_dma_ops scc_dma_ops = {
.dma_sff_read_status = scc_dma_sff_read_status,
};
-static const struct ide_port_info scc_chipset __devinitconst = {
+static const struct ide_port_info scc_chipset = {
.name = "sccIDE",
.init_iops = init_iops_scc,
.init_dma = scc_init_dma,
@@ -834,7 +832,7 @@ static const struct ide_port_info scc_chipset __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return init_setup_scc(dev, &scc_chipset);
}
@@ -846,7 +844,7 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
* Called by the PCI code when it removes an SCC PATA controller.
*/
-static void __devexit scc_remove(struct pci_dev *dev)
+static void scc_remove(struct pci_dev *dev)
{
struct scc_ports *ports = pci_get_drvdata(dev);
struct ide_host *host = ports->host;
@@ -869,7 +867,7 @@ static struct pci_driver scc_pci_driver = {
.name = "SCC IDE",
.id_table = scc_pci_tbl,
.probe = scc_init_one,
- .remove = __devexit_p(scc_remove),
+ .remove = scc_remove,
};
static int __init scc_ide_init(void)
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index 24d72ef23df7..a97affca18ab 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
.cable_detect = svwks_cable_detect,
};
-static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
+static const struct ide_port_info serverworks_chipsets[] = {
{ /* 0: OSB4 */
.name = DRV_NAME,
.init_chipset = init_chipset_svwks,
@@ -391,7 +391,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
* finds a device matching our IDE device tables.
*/
-static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d;
u8 idx = id->driver_data;
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index e3ea591f66d3..a5ca179a83b3 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -307,8 +307,7 @@ static u8 sgiioc4_read_status(ide_hwif_t *hwif)
}
/* Creates a DMA map for the scatter-gather list entries */
-static int __devinit ide_dma_sgiioc4(ide_hwif_t *hwif,
- const struct ide_port_info *d)
+static int ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
@@ -520,7 +519,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
.dma_lost_irq = sgiioc4_dma_lost_irq,
};
-static const struct ide_port_info sgiioc4_port_info __devinitconst = {
+static const struct ide_port_info sgiioc4_port_info = {
.name = DRV_NAME,
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
@@ -532,7 +531,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitconst = {
.mwdma_mask = ATA_MWDMA2_ONLY,
};
-static int __devinit sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+static int sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
{
unsigned long cmd_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
@@ -581,7 +580,7 @@ req_mem_rgn_err:
return rc;
}
-static unsigned int __devinit pci_init_sgiioc4(struct pci_dev *dev)
+static unsigned int pci_init_sgiioc4(struct pci_dev *dev)
{
int ret;
@@ -601,7 +600,7 @@ out:
return ret;
}
-int __devinit ioc4_ide_attach_one(struct ioc4_driver_data *idd)
+int ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
/*
* PCI-RT does not bring out IDE connection.
@@ -613,7 +612,7 @@ int __devinit ioc4_ide_attach_one(struct ioc4_driver_data *idd)
return pci_init_sgiioc4(idd->idd_pdev);
}
-static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
+static struct ioc4_submodule ioc4_ide_submodule = {
.is_name = "IOC4_ide",
.is_owner = THIS_MODULE,
.is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 46f7e30d3790..6a1849bb476c 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -546,7 +546,7 @@ static int init_chipset_siimage(struct pci_dev *dev)
* extended PRD tables. For better SI3112 support use the libata driver
*/
-static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
+static void init_mmio_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -646,7 +646,7 @@ static void sil_quirkproc(ide_drive_t *drive)
* can get the iops right before using them.
*/
-static void __devinit init_iops_siimage(ide_hwif_t *hwif)
+static void init_iops_siimage(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
.udma_mask = ATA_UDMA6, \
}
-static const struct ide_port_info siimage_chipsets[] __devinitconst = {
+static const struct ide_port_info siimage_chipsets[] = {
/* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
/* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
};
@@ -733,8 +733,7 @@ static const struct ide_port_info siimage_chipsets[] __devinitconst = {
* We then use the IDE PCI generic helper to do most of the work.
*/
-static int __devinit siimage_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
void __iomem *ioaddr = NULL;
resource_size_t bar5 = pci_resource_start(dev, 5);
@@ -790,7 +789,7 @@ static int __devinit siimage_init_one(struct pci_dev *dev,
return rc;
}
-static void __devexit siimage_remove(struct pci_dev *dev)
+static void siimage_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
void __iomem *ioaddr = host->host_priv;
@@ -822,7 +821,7 @@ static struct pci_driver siimage_pci_driver = {
.name = "SiI_IDE",
.id_table = siimage_pci_tbl,
.probe = siimage_init_one,
- .remove = __devexit_p(siimage_remove),
+ .remove = siimage_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 09e61b4c5e94..247853ea1368 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -362,7 +362,7 @@ static u8 sis_ata133_udma_filter(ide_drive_t *drive)
return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
}
-static int __devinit sis_find_family(struct pci_dev *dev)
+static int sis_find_family(struct pci_dev *dev)
{
struct pci_dev *host;
int i = 0;
@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
.cable_detect = sis_cable_detect,
};
-static const struct ide_port_info sis5513_chipset __devinitconst = {
+static const struct ide_port_info sis5513_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_sis5513,
.enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
@@ -572,7 +572,7 @@ static const struct ide_port_info sis5513_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = sis5513_chipset;
u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
@@ -595,7 +595,7 @@ static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_devi
return ide_pci_init_one(dev, &d, NULL);
}
-static void __devexit sis5513_remove(struct pci_dev *dev)
+static void sis5513_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
@@ -613,7 +613,7 @@ static struct pci_driver sis5513_pci_driver = {
.name = "SIS_IDE",
.id_table = sis5513_pci_tbl,
.probe = sis5513_init_one,
- .remove = __devexit_p(sis5513_remove),
+ .remove = sis5513_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index d051cd224bdb..8755df3330a0 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info sl82c105_chipset __devinitconst = {
+static const struct ide_port_info sl82c105_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_sl82c105,
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
@@ -313,7 +313,7 @@ static const struct ide_port_info sl82c105_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct ide_port_info d = sl82c105_chipset;
u8 rev = sl82c105_bridge_revision(dev);
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 863a5e9283ca..8af92bbb3dcb 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
.cable_detect = slc90e66_cable_detect,
};
-static const struct ide_port_info slc90e66_chipset __devinitconst = {
+static const struct ide_port_info slc90e66_chipset = {
.name = DRV_NAME,
.enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
.port_ops = &slc90e66_port_ops,
@@ -142,7 +142,8 @@ static const struct ide_port_info slc90e66_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit slc90e66_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int slc90e66_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &slc90e66_chipset, NULL);
}
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index 17946785ebf6..17e6132b99bf 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -144,7 +144,7 @@ static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
+static void init_hwif_tc86c001(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long sc_base = pci_resource_start(dev, 5);
@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
.dma_sff_read_status = ide_dma_sff_read_status,
};
-static const struct ide_port_info tc86c001_chipset __devinitconst = {
+static const struct ide_port_info tc86c001_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_tc86c001,
.port_ops = &tc86c001_port_ops,
@@ -203,8 +203,8 @@ static const struct ide_port_info tc86c001_chipset __devinitconst = {
.udma_mask = ATA_UDMA4,
};
-static int __devinit tc86c001_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int tc86c001_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
int rc;
@@ -232,7 +232,7 @@ out:
return rc;
}
-static void __devexit tc86c001_remove(struct pci_dev *dev)
+static void tc86c001_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_release_region(dev, 5);
@@ -249,7 +249,7 @@ static struct pci_driver tc86c001_pci_driver = {
.name = "TC86C001",
.id_table = tc86c001_pci_tbl,
.probe = tc86c001_init_one,
- .remove = __devexit_p(tc86c001_remove),
+ .remove = tc86c001_remove,
};
static int __init tc86c001_ide_init(void)
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index 55ce1b80efcb..7f1af9493f0e 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
.set_dma_mode = triflex_set_mode,
};
-static const struct ide_port_info triflex_device __devinitconst = {
+static const struct ide_port_info triflex_device = {
.name = DRV_NAME,
.enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
.port_ops = &triflex_port_ops,
@@ -101,8 +101,7 @@ static const struct ide_port_info triflex_device __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit triflex_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &triflex_device, NULL);
}
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
index e494a98a43a9..0069f6ce74cf 100644
--- a/drivers/ide/trm290.c
+++ b/drivers/ide/trm290.c
@@ -231,7 +231,7 @@ static void trm290_dma_host_set(ide_drive_t *drive, int on)
{
}
-static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
+static void init_hwif_trm290(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int cfg_base = pci_resource_start(dev, 4);
@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
.dma_check = trm290_dma_check,
};
-static const struct ide_port_info trm290_chipset __devinitconst = {
+static const struct ide_port_info trm290_chipset = {
.name = DRV_NAME,
.init_hwif = init_hwif_trm290,
.tp_ops = &trm290_tp_ops,
@@ -338,7 +338,7 @@ static const struct ide_port_info trm290_chipset __devinitconst = {
IDE_HFLAG_NO_LBA48,
};
-static int __devinit trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int trm290_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &trm290_chipset, NULL);
}
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index eb7767864d10..01464f1e2339 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
.cable_detect = via82cxxx_cable_detect,
};
-static const struct ide_port_info via82cxxx_chipset __devinitconst = {
+static const struct ide_port_info via82cxxx_chipset = {
.name = DRV_NAME,
.init_chipset = init_chipset_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
@@ -416,7 +416,7 @@ static const struct ide_port_info via82cxxx_chipset __devinitconst = {
.mwdma_mask = ATA_MWDMA2,
};
-static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_dev *isa = NULL;
struct via_isa_bridge *via_config;
@@ -489,7 +489,7 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
return rc;
}
-static void __devexit via_remove(struct pci_dev *dev)
+static void via_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct via82cxxx_dev *vdev = host->host_priv;
@@ -514,7 +514,7 @@ static struct pci_driver via_pci_driver = {
.name = "VIA_IDE",
.id_table = via_pci_tbl,
.probe = via_init_one,
- .remove = __devexit_p(via_remove),
+ .remove = via_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b0f6b4c8ee14..2df9414a72f7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,7 +56,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
-#include <linux/hrtimer.h> /* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
@@ -72,6 +71,7 @@
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
.owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
@@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
unsigned int cstate;
- ktime_t kt_before, kt_after;
- s64 usec_delta;
int cpu = smp_processor_id();
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
@@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- kt_before = ktime_get_real();
-
stop_critical_timings();
if (!need_resched()) {
@@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev,
start_critical_timings();
- kt_after = ktime_get_real();
- usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
-
- local_irq_enable();
-
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
- /* Update cpuidle counters */
- dev->last_residency = (int)usec_delta;
-
return index;
}
@@ -460,8 +448,6 @@ static int intel_idle_probe(void)
else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- register_cpu_notifier(&cpu_hotplug_notifier);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -518,7 +504,7 @@ static int intel_idle_cpuidle_driver_init(void)
if (*cpuidle_state_table[cstate].name == '\0')
pr_debug(PREFIX "unaware of model 0x%x"
" MWAIT %d please"
- " contact lenb@kernel.org",
+ " contact lenb@kernel.org\n",
boot_cpu_data.x86_model, cstate);
continue;
}
@@ -624,6 +610,7 @@ static int __init intel_idle_init(void)
return retval;
}
}
+ register_cpu_notifier(&cpu_hotplug_notifier);
return 0;
}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index fc937aca71fb..b2f963be3993 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -20,6 +20,12 @@ config IIO_BUFFER
if IIO_BUFFER
+config IIO_BUFFER_CB
+boolean "IIO callback buffer used for push in-kernel interfaces"
+ help
+ Should be selected by any drivers that do in-kernel push
+ usage. That is, those where the data is pushed to the consumer.
+
config IIO_KFIFO_BUF
select IIO_TRIGGER
tristate "Industrial I/O buffering based on kfifo"
@@ -57,11 +63,12 @@ config IIO_CONSUMERS_PER_TRIGGER
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
-source "drivers/iio/light/Kconfig"
-source "drivers/iio/frequency/Kconfig"
-source "drivers/iio/dac/Kconfig"
source "drivers/iio/common/Kconfig"
+source "drivers/iio/dac/Kconfig"
+source "drivers/iio/frequency/Kconfig"
source "drivers/iio/gyro/Kconfig"
+source "drivers/iio/imu/Kconfig"
+source "drivers/iio/light/Kconfig"
source "drivers/iio/magnetometer/Kconfig"
endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 761f2b65ac52..a0e8cdd67e4d 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_IIO) += industrialio.o
industrialio-y := industrialio-core.o industrialio-event.o inkern.o
industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
+industrialio-$(CONFIG_IIO_BUFFER_CB) += buffer_cb.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
@@ -13,9 +14,10 @@ obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
obj-y += accel/
obj-y += adc/
obj-y += amplifiers/
-obj-y += light/
-obj-y += frequency/
-obj-y += dac/
obj-y += common/
+obj-y += dac/
obj-y += gyro/
+obj-y += frequency/
+obj-y += imu/
+obj-y += light/
obj-y += magnetometer/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b2510c4d9a5a..05e996fafc9d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -8,7 +8,8 @@ config HID_SENSOR_ACCEL_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
- tristate "HID Acelerometers 3D"
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID Accelerometers 3D"
help
Say yes here to build support for the HID SENSOR
accelerometers 3D.
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 314a4057879e..0b0c3c66f6c0 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -197,21 +197,8 @@ static const struct iio_info accel_3d_info = {
/* Function to push data to buffer */
static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
{
- struct iio_buffer *buffer = indio_dev->buffer;
- int datum_sz;
-
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- if (!buffer) {
- dev_err(&indio_dev->dev, "Buffer == NULL\n");
- return;
- }
- datum_sz = buffer->access->get_bytes_per_datum(buffer);
- if (len > datum_sz) {
- dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
- datum_sz);
- return;
- }
- iio_push_to_buffer(buffer, (u8 *)data);
+ iio_push_to_buffers(indio_dev, (u8 *)data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -291,7 +278,7 @@ static int accel_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_accel_3d_probe(struct platform_device *pdev)
+static int hid_accel_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "accel_3d";
@@ -319,10 +306,10 @@ static int __devinit hid_accel_3d_probe(struct platform_device *pdev)
goto error_free_dev;
}
- channels = kmemdup(accel_3d_channels,
- sizeof(accel_3d_channels),
- GFP_KERNEL);
+ channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels),
+ GFP_KERNEL);
if (!channels) {
+ ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
goto error_free_dev;
}
@@ -388,7 +375,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_accel_3d_remove(struct platform_device *pdev)
+static int hid_accel_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 492758120338..fe822a14d130 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -18,6 +18,18 @@ config AD7266
Say yes here to build support for Analog Devices AD7265 and AD7266
ADCs.
+config AD7298
+ tristate "Analog Devices AD7298 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD7298
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7298.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -30,6 +42,18 @@ config AD7791
To compile this driver as a module, choose M here: the module will be
called ad7791.
+config AD7793
+ tristate "Analog Devices AD7793 and similar ADCs driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7785, AD7792, AD7793,
+ AD7794 and AD7795 SPI analog to digital converters (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7793.
+
config AD7476
tristate "Analog Devices AD7476 and similar 1-channel ADCs driver"
depends on SPI
@@ -45,6 +69,19 @@ config AD7476
To compile this driver as a module, choose M here: the
module will be called ad7476.
+config AD7887
+ tristate "Analog Devices AD7887 ADC driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices
+ AD7887 SPI analog to digital converter (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7887.
+
config AT91_ADC
tristate "Atmel AT91 ADC"
depends on ARCH_AT91
@@ -60,4 +97,46 @@ config LP8788_ADC
help
Say yes here to build support for TI LP8788 ADC.
+config MAX1363
+ tristate "Maxim max1363 ADC driver"
+ depends on I2C
+ select IIO_TRIGGER
+ select MAX1363_RING_BUFFER
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to build support for many Maxim i2c analog to digital
+ converters (ADC). (max1361, max1362, max1363, max1364, max1036,
+ max1037, max1038, max1039, max1136, max1137, max1138,
+ max1139, max1236, max1237, max1238, max1239, max11600, max11601,
+ max11602, max11603, max11604, max11605, max11606, max11607,
+ max11608, max11609, max11610, max11611, max11612, max11613,
+ max11614, max11615, max11616, max11617, max11644, max11645,
+ max11646, max11647). Provides direct access via sysfs and buffered
+ data via the iio dev interface.
+
+config TI_ADC081C
+ tristate "Texas Instruments ADC081C021/027"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADC081C021
+ and ADC081C027 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc081c.
+
+config TI_AM335X_ADC
+ tristate "TI's ADC driver"
+ depends on MFD_TI_AM335X_TSCADC
+ help
+ Say yes here to build support for the Texas Instruments ADC
+ driver, which is also an MFD client.
+
+config VIPERBOARD_ADC
+ tristate "Viperboard ADC support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the ADC part of the Nano River
+ Technologies Viperboard.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 900995d5e179..2d5f10080d8d 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -4,7 +4,14 @@
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7266) += ad7266.o
+obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7476) += ad7476.o
obj-$(CONFIG_AD7791) += ad7791.o
+obj-$(CONFIG_AD7793) += ad7793.o
+obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
+obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index b11f214779a2..bbad9b94cd75 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -91,7 +91,6 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- struct iio_buffer *buffer = indio_dev->buffer;
struct ad7266_state *st = iio_priv(indio_dev);
int ret;
@@ -99,7 +98,7 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
if (ret == 0) {
if (indio_dev->scan_timestamp)
((s64 *)st->data)[1] = pf->timestamp;
- iio_push_to_buffer(buffer, (u8 *)st->data);
+ iio_push_to_buffers(indio_dev, (u8 *)st->data);
}
iio_trigger_notify_done(indio_dev->trig);
@@ -368,7 +367,7 @@ static const struct ad7266_chan_info ad7266_chan_infos[] = {
},
};
-static void __devinit ad7266_init_channels(struct iio_dev *indio_dev)
+static void ad7266_init_channels(struct iio_dev *indio_dev)
{
struct ad7266_state *st = iio_priv(indio_dev);
bool is_differential, is_signed;
@@ -392,7 +391,7 @@ static const char * const ad7266_gpio_labels[] = {
"AD0", "AD1", "AD2",
};
-static int __devinit ad7266_probe(struct spi_device *spi)
+static int ad7266_probe(struct spi_device *spi)
{
struct ad7266_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -412,7 +411,11 @@ static int __devinit ad7266_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- st->vref_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_uv = ret;
} else {
/* Use internal reference */
st->vref_uv = 2500000;
@@ -495,7 +498,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad7266_remove(struct spi_device *spi)
+static int ad7266_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7266_state *st = iio_priv(indio_dev);
@@ -526,7 +529,7 @@ static struct spi_driver ad7266_driver = {
.owner = THIS_MODULE,
},
.probe = ad7266_probe,
- .remove = __devexit_p(ad7266_remove),
+ .remove = ad7266_remove,
.id_table = ad7266_id,
};
module_spi_driver(ad7266_driver);
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/iio/adc/ad7298.c
index 4c75114e7d7c..b34d754994d5 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/iio/adc/ad7298.c
@@ -15,12 +15,48 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-
-#include "ad7298.h"
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ad7298.h>
+
+#define AD7298_WRITE (1 << 15) /* write to the control register */
+#define AD7298_REPEAT (1 << 14) /* repeated conversion enable */
+#define AD7298_CH(x) (1 << (13 - (x))) /* channel select */
+#define AD7298_TSENSE (1 << 5) /* temperature conversion enable */
+#define AD7298_EXTREF (1 << 2) /* external reference enable */
+#define AD7298_TAVG (1 << 1) /* temperature sensor averaging enable */
+#define AD7298_PDD (1 << 0) /* partial power down enable */
+
+#define AD7298_MAX_CHAN 8
+#define AD7298_BITS 12
+#define AD7298_STORAGE_BITS 16
+#define AD7298_INTREF_mV 2500
+
+#define AD7298_CH_TEMP 9
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+struct ad7298_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned ext_ref;
+ struct spi_transfer ring_xfer[10];
+ struct spi_transfer scan_single_xfer[3];
+ struct spi_message ring_msg;
+ struct spi_message scan_single_msg;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 rx_buf[12] ____cacheline_aligned;
+ __be16 tx_buf[2];
+};
#define AD7298_V_CHAN(index) \
{ \
@@ -35,6 +71,7 @@
.sign = 'u', \
.realbits = 12, \
.storagebits = 16, \
+ .endianness = IIO_BE, \
}, \
}
@@ -44,7 +81,8 @@ static const struct iio_chan_spec ad7298_channels[] = {
.indexed = 1,
.channel = 0,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = AD7298_CH_TEMP,
.scan_index = -1,
.scan_type = {
@@ -64,6 +102,84 @@ static const struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(8),
};
+/**
+ * ad7298_update_scan_mode() sets up the SPI transfer buffer for the new scan mask
+ **/
+static int ad7298_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct ad7298_state *st = iio_priv(indio_dev);
+ int i, m;
+ unsigned short command;
+ int scan_count;
+
+ /* Now compute overall size */
+ scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength);
+
+ command = AD7298_WRITE | st->ext_ref;
+
+ for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
+ if (test_bit(i, active_scan_mask))
+ command |= m;
+
+ st->tx_buf[0] = cpu_to_be16(command);
+
+ /* build spi ring message */
+ st->ring_xfer[0].tx_buf = &st->tx_buf[0];
+ st->ring_xfer[0].len = 2;
+ st->ring_xfer[0].cs_change = 1;
+ st->ring_xfer[1].tx_buf = &st->tx_buf[1];
+ st->ring_xfer[1].len = 2;
+ st->ring_xfer[1].cs_change = 1;
+
+ spi_message_init(&st->ring_msg);
+ spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
+ spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
+
+ for (i = 0; i < scan_count; i++) {
+ st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
+ st->ring_xfer[i + 2].len = 2;
+ st->ring_xfer[i + 2].cs_change = 1;
+ spi_message_add_tail(&st->ring_xfer[i + 2], &st->ring_msg);
+ }
+ /* make sure last transfer cs_change is not set */
+ st->ring_xfer[i + 1].cs_change = 0;
+
+ return 0;
+}
+
+/**
+ * ad7298_trigger_handler() bottom half of trigger-launched polling to the ring buffer
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ **/
+static irqreturn_t ad7298_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7298_state *st = iio_priv(indio_dev);
+ s64 time_ns = 0;
+ int b_sent;
+
+ b_sent = spi_sync(st->spi, &st->ring_msg);
+ if (b_sent)
+ goto done;
+
+ if (indio_dev->scan_timestamp) {
+ time_ns = iio_get_time_ns();
+ memcpy((u8 *)st->rx_buf + indio_dev->scan_bytes - sizeof(s64),
+ &time_ns, sizeof(time_ns));
+ }
+
+ iio_push_to_buffers(indio_dev, (u8 *)st->rx_buf);
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
{
int ret;
@@ -79,7 +195,7 @@ static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
- int tmp, ret;
+ int ret;
__be16 buf;
buf = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
@@ -101,24 +217,24 @@ static int ad7298_scan_temp(struct ad7298_state *st, int *val)
if (ret)
return ret;
- tmp = be16_to_cpu(buf) & RES_MASK(AD7298_BITS);
+ *val = sign_extend32(be16_to_cpu(buf), 11);
- /*
- * One LSB of the ADC corresponds to 0.25 deg C.
- * The temperature reading is in 12-bit twos complement format
- */
+ return 0;
+}
- if (tmp & (1 << (AD7298_BITS - 1))) {
- tmp = (4096 - tmp) * 250;
- tmp -= (2 * tmp);
+static int ad7298_get_ref_voltage(struct ad7298_state *st)
+{
+ int vref;
+
+ if (st->ext_ref) {
+ vref = regulator_get_voltage(st->reg);
+ if (vref < 0)
+ return vref;
+ return vref / 1000;
} else {
- tmp *= 250; /* temperature in milli degrees Celsius */
+ return AD7298_INTREF_mV;
}
-
- *val = tmp;
-
- return 0;
}
static int ad7298_read_raw(struct iio_dev *indio_dev,
@@ -129,7 +245,6 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad7298_state *st = iio_priv(indio_dev);
- unsigned int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -154,17 +269,19 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = ad7298_get_ref_voltage(st);
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
- *val = 1;
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = ad7298_get_ref_voltage(st);
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 1093 - 2732500 / ad7298_get_ref_voltage(st);
+ return IIO_VAL_INT;
}
return -EINVAL;
}
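A quick worked check of the temperature reporting above (illustrative only, not part of this patch): with the internal 2.5 V reference the offset term evaluates to zero and the IIO_VAL_FRACTIONAL scale of vref/10 gives 250 milli-degrees C per LSB, i.e. the same 0.25 degC/LSB that the removed open-coded conversion produced. The helper below assumes the standard IIO convention value = (raw + offset) * scale.
/* Illustrative sketch only -- not part of this patch. */
static int ad7298_code_to_millicelsius(int raw, int vref_mv)
{
	int offset = 1093 - 2732500 / vref_mv;	/* 0 when vref_mv == 2500 */

	/* scale exposed above is vref_mv / 10 milli-degC per LSB */
	return (raw + offset) * vref_mv / 10;
}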
@@ -175,20 +292,27 @@ static const struct iio_info ad7298_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad7298_probe(struct spi_device *spi)
+static int ad7298_probe(struct spi_device *spi)
{
struct ad7298_platform_data *pdata = spi->dev.platform_data;
struct ad7298_state *st;
- int ret;
struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ int ret;
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
+ if (pdata && pdata->ext_ref)
+ st->ext_ref = AD7298_EXTREF;
+
+ if (st->ext_ref) {
+ st->reg = regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ ret = PTR_ERR(st->reg);
+ goto error_free;
+ }
ret = regulator_enable(st->reg);
if (ret)
goto error_put_reg;
@@ -221,14 +345,8 @@ static int __devinit ad7298_probe(struct spi_device *spi)
spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
spi_message_add_tail(&st->scan_single_xfer[2], &st->scan_single_msg);
- if (pdata && pdata->vref_mv) {
- st->int_vref_mv = pdata->vref_mv;
- st->ext_ref = AD7298_EXTREF;
- } else {
- st->int_vref_mv = AD7298_INTREF_mV;
- }
-
- ret = ad7298_register_ring_funcs_and_init(indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &ad7298_trigger_handler, NULL);
if (ret)
goto error_disable_reg;
@@ -239,26 +357,27 @@ static int __devinit ad7298_probe(struct spi_device *spi)
return 0;
error_cleanup_ring:
- ad7298_ring_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
+ if (st->ext_ref)
regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(st->reg))
+ if (st->ext_ref)
regulator_put(st->reg);
+error_free:
iio_device_free(indio_dev);
return ret;
}
-static int __devexit ad7298_remove(struct spi_device *spi)
+static int ad7298_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7298_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- ad7298_ring_cleanup(indio_dev);
- if (!IS_ERR(st->reg)) {
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (st->ext_ref) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
@@ -279,7 +398,7 @@ static struct spi_driver ad7298_driver = {
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
- .remove = __devexit_p(ad7298_remove),
+ .remove = ad7298_remove,
.id_table = ad7298_id,
};
module_spi_driver(ad7298_driver);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 7f2f45a0a48d..1491fa6debb2 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -76,7 +76,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
if (indio_dev->scan_timestamp)
((s64 *)st->data)[1] = time_ns;
- iio_push_to_buffer(indio_dev->buffer, st->data);
+ iio_push_to_buffers(indio_dev, st->data);
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -207,7 +207,7 @@ static const struct iio_info ad7476_info = {
.read_raw = &ad7476_read_raw,
};
-static int __devinit ad7476_probe(struct spi_device *spi)
+static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
struct iio_dev *indio_dev;
@@ -277,7 +277,7 @@ error_ret:
return ret;
}
-static int __devexit ad7476_remove(struct spi_device *spi)
+static int ad7476_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7476_state *st = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static struct spi_driver ad7476_driver = {
.owner = THIS_MODULE,
},
.probe = ad7476_probe,
- .remove = __devexit_p(ad7476_remove),
+ .remove = ad7476_remove,
.id_table = ad7476_id,
};
module_spi_driver(ad7476_driver);
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index e93740843b2b..5e8d1da6887f 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -325,8 +325,8 @@ static const struct iio_info ad7791_no_filter_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad7791_setup(struct ad7791_state *st,
- struct ad7791_platform_data *pdata)
+static int ad7791_setup(struct ad7791_state *st,
+ struct ad7791_platform_data *pdata)
{
/* Set to poweron-reset default values */
st->mode = AD7791_MODE_BUFFER;
@@ -349,7 +349,7 @@ static int __devinit ad7791_setup(struct ad7791_state *st,
st->mode);
}
-static int __devinit ad7791_probe(struct spi_device *spi)
+static int ad7791_probe(struct spi_device *spi)
{
struct ad7791_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -418,7 +418,7 @@ err_iio_free:
return ret;
}
-static int __devexit ad7791_remove(struct spi_device *spi)
+static int ad7791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7791_state *st = iio_priv(indio_dev);
@@ -450,7 +450,7 @@ static struct spi_driver ad7791_driver = {
.owner = THIS_MODULE,
},
.probe = ad7791_probe,
- .remove = __devexit_p(ad7791_remove),
+ .remove = ad7791_remove,
.id_table = ad7791_spi_ids,
};
module_spi_driver(ad7791_driver);
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 691a7be6f5cb..334e31ff7a4e 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -25,8 +25,106 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/adc/ad_sigma_delta.h>
-
-#include "ad7793.h"
+#include <linux/platform_data/ad7793.h>
+
+/* Registers */
+#define AD7793_REG_COMM 0 /* Communications Register (WO, 8-bit) */
+#define AD7793_REG_STAT 0 /* Status Register (RO, 8-bit) */
+#define AD7793_REG_MODE 1 /* Mode Register (RW, 16-bit) */
+#define AD7793_REG_CONF 2 /* Configuration Register (RW, 16-bit) */
+#define AD7793_REG_DATA 3 /* Data Register (RO, 16-/24-bit) */
+#define AD7793_REG_ID 4 /* ID Register (RO, 8-bit) */
+#define AD7793_REG_IO 5 /* IO Register (RO, 8-bit) */
+#define AD7793_REG_OFFSET 6 /* Offset Register (RW, 16-bit
+ * (AD7792)/24-bit (AD7793)) */
+#define AD7793_REG_FULLSALE 7 /* Full-Scale Register
+ * (RW, 16-bit (AD7792)/24-bit (AD7793)) */
+
+/* Communications Register Bit Designations (AD7793_REG_COMM) */
+#define AD7793_COMM_WEN (1 << 7) /* Write Enable */
+#define AD7793_COMM_WRITE (0 << 6) /* Write Operation */
+#define AD7793_COMM_READ (1 << 6) /* Read Operation */
+#define AD7793_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
+#define AD7793_COMM_CREAD (1 << 2) /* Continuous Read of Data Register */
+
+/* Status Register Bit Designations (AD7793_REG_STAT) */
+#define AD7793_STAT_RDY (1 << 7) /* Ready */
+#define AD7793_STAT_ERR (1 << 6) /* Error (Overrange, Underrange) */
+#define AD7793_STAT_CH3 (1 << 2) /* Channel 3 */
+#define AD7793_STAT_CH2 (1 << 1) /* Channel 2 */
+#define AD7793_STAT_CH1 (1 << 0) /* Channel 1 */
+
+/* Mode Register Bit Designations (AD7793_REG_MODE) */
+#define AD7793_MODE_SEL(x) (((x) & 0x7) << 13) /* Operation Mode Select */
+#define AD7793_MODE_SEL_MASK (0x7 << 13) /* Operation Mode Select mask */
+#define AD7793_MODE_CLKSRC(x) (((x) & 0x3) << 6) /* ADC Clock Source Select */
+#define AD7793_MODE_RATE(x) ((x) & 0xF) /* Filter Update Rate Select */
+
+#define AD7793_MODE_CONT 0 /* Continuous Conversion Mode */
+#define AD7793_MODE_SINGLE 1 /* Single Conversion Mode */
+#define AD7793_MODE_IDLE 2 /* Idle Mode */
+#define AD7793_MODE_PWRDN 3 /* Power-Down Mode */
+#define AD7793_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */
+#define AD7793_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
+
+#define AD7793_CLK_INT 0 /* Internal 64 kHz Clock not
+ * available at the CLK pin */
+#define AD7793_CLK_INT_CO 1 /* Internal 64 kHz Clock available
+ * at the CLK pin */
+#define AD7793_CLK_EXT 2 /* External 64 kHz Clock */
+#define AD7793_CLK_EXT_DIV2 3 /* External Clock divided by 2 */
+
+/* Configuration Register Bit Designations (AD7793_REG_CONF) */
+#define AD7793_CONF_VBIAS(x) (((x) & 0x3) << 14) /* Bias Voltage
+ * Generator Enable */
+#define AD7793_CONF_BO_EN (1 << 13) /* Burnout Current Enable */
+#define AD7793_CONF_UNIPOLAR (1 << 12) /* Unipolar/Bipolar Enable */
+#define AD7793_CONF_BOOST (1 << 11) /* Boost Enable */
+#define AD7793_CONF_GAIN(x) (((x) & 0x7) << 8) /* Gain Select */
+#define AD7793_CONF_REFSEL(x) ((x) << 6) /* INT/EXT Reference Select */
+#define AD7793_CONF_BUF (1 << 4) /* Buffered Mode Enable */
+#define AD7793_CONF_CHAN(x) ((x) & 0xf) /* Channel select */
+#define AD7793_CONF_CHAN_MASK 0xf /* Channel select mask */
+
+#define AD7793_CH_AIN1P_AIN1M 0 /* AIN1(+) - AIN1(-) */
+#define AD7793_CH_AIN2P_AIN2M 1 /* AIN2(+) - AIN2(-) */
+#define AD7793_CH_AIN3P_AIN3M 2 /* AIN3(+) - AIN3(-) */
+#define AD7793_CH_AIN1M_AIN1M 3 /* AIN1(-) - AIN1(-) */
+#define AD7793_CH_TEMP 6 /* Temp Sensor */
+#define AD7793_CH_AVDD_MONITOR 7 /* AVDD Monitor */
+
+#define AD7795_CH_AIN4P_AIN4M 4 /* AIN4(+) - AIN4(-) */
+#define AD7795_CH_AIN5P_AIN5M 5 /* AIN5(+) - AIN5(-) */
+#define AD7795_CH_AIN6P_AIN6M 6 /* AIN6(+) - AIN6(-) */
+#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */
+
+/* ID Register Bit Designations (AD7793_REG_ID) */
+#define AD7785_ID 0xB
+#define AD7792_ID 0xA
+#define AD7793_ID 0xB
+#define AD7794_ID 0xF
+#define AD7795_ID 0xF
+#define AD7796_ID 0xA
+#define AD7797_ID 0xB
+#define AD7798_ID 0x8
+#define AD7799_ID 0x9
+#define AD7793_ID_MASK 0xF
+
+/* IO (Excitation Current Sources) Register Bit Designations (AD7793_REG_IO) */
+#define AD7793_IO_IEXC1_IOUT1_IEXC2_IOUT2 0 /* IEXC1 connect to IOUT1,
+ * IEXC2 connect to IOUT2 */
+#define AD7793_IO_IEXC1_IOUT2_IEXC2_IOUT1 1 /* IEXC1 connect to IOUT2,
+ * IEXC2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT1 2 /* Both current sources
+ * IEXC1,2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT2 3 /* Both current sources
+ * IEXC1,2 connect to IOUT2 */
+
+#define AD7793_IO_IXCEN_10uA (1 << 0) /* Excitation Current 10uA */
+#define AD7793_IO_IXCEN_210uA (2 << 0) /* Excitation Current 210uA */
+#define AD7793_IO_IXCEN_1mA (3 << 0) /* Excitation Current 1mA */
/* NOTE:
* The AD7792/AD7793 features a dual use data out ready DOUT/RDY output.
@@ -36,9 +134,21 @@
* The DOUT/RDY output must also be wired to an interrupt capable GPIO.
*/
+#define AD7793_FLAG_HAS_CLKSEL BIT(0)
+#define AD7793_FLAG_HAS_REFSEL BIT(1)
+#define AD7793_FLAG_HAS_VBIAS BIT(2)
+#define AD7793_HAS_EXITATION_CURRENT BIT(3)
+#define AD7793_FLAG_HAS_GAIN BIT(4)
+#define AD7793_FLAG_HAS_BUFFER BIT(5)
+
struct ad7793_chip_info {
+ unsigned int id;
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ unsigned int flags;
+
+ const struct iio_info *iio_info;
+ const u16 *sample_freq_avail;
};
struct ad7793_state {
@@ -59,6 +169,10 @@ enum ad7793_supported_device_ids {
ID_AD7793,
ID_AD7794,
ID_AD7795,
+ ID_AD7796,
+ ID_AD7797,
+ ID_AD7798,
+ ID_AD7799,
};
static struct ad7793_state *ad_sigma_delta_to_ad7793(struct ad_sigma_delta *sd)
@@ -110,19 +224,52 @@ static int ad7793_calibrate_all(struct ad7793_state *st)
ARRAY_SIZE(ad7793_calib_arr));
}
-static int ad7793_setup(struct iio_dev *indio_dev,
+static int ad7793_check_platform_data(struct ad7793_state *st,
const struct ad7793_platform_data *pdata)
{
+ if ((pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT1 ||
+ pdata->current_source_direction == AD7793_IEXEC1_IEXEC2_IOUT2) &&
+ ((pdata->exitation_current != AD7793_IX_10uA) &&
+ (pdata->exitation_current != AD7793_IX_210uA)))
+ return -EINVAL;
+
+ if (!(st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL) &&
+ pdata->clock_src != AD7793_CLK_SRC_INT)
+ return -EINVAL;
+
+ if (!(st->chip_info->flags & AD7793_FLAG_HAS_REFSEL) &&
+ pdata->refsel != AD7793_REFSEL_REFIN1)
+ return -EINVAL;
+
+ if (!(st->chip_info->flags & AD7793_FLAG_HAS_VBIAS) &&
+ pdata->bias_voltage != AD7793_BIAS_VOLTAGE_DISABLED)
+ return -EINVAL;
+
+ if (!(st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) &&
+ pdata->exitation_current != AD7793_IX_DISABLED)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ad7793_setup(struct iio_dev *indio_dev,
+ const struct ad7793_platform_data *pdata,
+ unsigned int vref_mv)
+{
struct ad7793_state *st = iio_priv(indio_dev);
int i, ret = -1;
unsigned long long scale_uv;
u32 id;
+ ret = ad7793_check_platform_data(st, pdata);
+ if (ret)
+ return ret;
+
/* reset the serial interface */
ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
if (ret < 0)
goto out;
- msleep(1); /* Wait for at least 500us */
+ usleep_range(500, 2000); /* Wait for at least 500us */
/* write/read test for device presence */
ret = ad_sd_read_reg(&st->sd, AD7793_REG_ID, 1, &id);
@@ -131,13 +278,32 @@ static int ad7793_setup(struct iio_dev *indio_dev,
id &= AD7793_ID_MASK;
- if (!((id == AD7792_ID) || (id == AD7793_ID) || (id == AD7795_ID))) {
+ if (id != st->chip_info->id) {
dev_err(&st->sd.spi->dev, "device ID query failed\n");
goto out;
}
- st->mode = pdata->mode;
- st->conf = pdata->conf;
+ st->mode = AD7793_MODE_RATE(1);
+ st->conf = 0;
+
+ if (st->chip_info->flags & AD7793_FLAG_HAS_CLKSEL)
+ st->mode |= AD7793_MODE_CLKSRC(pdata->clock_src);
+ if (st->chip_info->flags & AD7793_FLAG_HAS_REFSEL)
+ st->conf |= AD7793_CONF_REFSEL(pdata->refsel);
+ if (st->chip_info->flags & AD7793_FLAG_HAS_VBIAS)
+ st->conf |= AD7793_CONF_VBIAS(pdata->bias_voltage);
+ if (pdata->buffered || !(st->chip_info->flags & AD7793_FLAG_HAS_BUFFER))
+ st->conf |= AD7793_CONF_BUF;
+ if (pdata->boost_enable &&
+ (st->chip_info->flags & AD7793_FLAG_HAS_VBIAS))
+ st->conf |= AD7793_CONF_BOOST;
+ if (pdata->burnout_current)
+ st->conf |= AD7793_CONF_BO_EN;
+ if (pdata->unipolar)
+ st->conf |= AD7793_CONF_UNIPOLAR;
+
+ if (!(st->chip_info->flags & AD7793_FLAG_HAS_GAIN))
+ st->conf |= AD7793_CONF_GAIN(7);
ret = ad7793_set_mode(&st->sd, AD_SD_MODE_IDLE);
if (ret)
@@ -147,10 +313,13 @@ static int ad7793_setup(struct iio_dev *indio_dev,
if (ret)
goto out;
- ret = ad_sd_write_reg(&st->sd, AD7793_REG_IO,
- sizeof(pdata->io), pdata->io);
- if (ret)
- goto out;
+ if (st->chip_info->flags & AD7793_HAS_EXITATION_CURRENT) {
+ ret = ad_sd_write_reg(&st->sd, AD7793_REG_IO, 1,
+ pdata->exitation_current |
+ (pdata->current_source_direction << 2));
+ if (ret)
+ goto out;
+ }
ret = ad7793_calibrate_all(st);
if (ret)
@@ -158,7 +327,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
/* Populate available ADC input ranges */
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
- scale_uv = ((u64)st->int_vref_mv * 100000000)
+ scale_uv = ((u64)vref_mv * 100000000)
>> (st->chip_info->channels[0].scan_type.realbits -
(!!(st->conf & AD7793_CONF_UNIPOLAR) ? 0 : 1));
scale_uv >>= i;
@@ -173,8 +342,11 @@ out:
return ret;
}
-static const u16 sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, 33, 19,
- 17, 16, 12, 10, 8, 6, 4};
+static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
+ 33, 19, 17, 16, 12, 10, 8, 6, 4};
+
+static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
+ 33, 0, 17, 16, 12, 10, 8, 6, 4};
static ssize_t ad7793_read_frequency(struct device *dev,
struct device_attribute *attr,
@@ -184,7 +356,7 @@ static ssize_t ad7793_read_frequency(struct device *dev,
struct ad7793_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n",
- sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
+ st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
}
static ssize_t ad7793_write_frequency(struct device *dev,
@@ -204,14 +376,17 @@ static ssize_t ad7793_write_frequency(struct device *dev,
}
mutex_unlock(&indio_dev->mlock);
- ret = strict_strtol(buf, 10, &lval);
+ ret = kstrtol(buf, 10, &lval);
if (ret)
return ret;
+ if (lval == 0)
+ return -EINVAL;
+
ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(sample_freq_avail); i++)
- if (lval == sample_freq_avail[i]) {
+ for (i = 0; i < 16; i++)
+ if (lval == st->chip_info->sample_freq_avail[i]) {
mutex_lock(&indio_dev->mlock);
st->mode &= ~AD7793_MODE_RATE(-1);
st->mode |= AD7793_MODE_RATE(i);
@@ -231,6 +406,9 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
+static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797,
+ sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4");
+
static ssize_t ad7793_show_scale_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -262,6 +440,16 @@ static const struct attribute_group ad7793_attribute_group = {
.attrs = ad7793_attributes,
};
+static struct attribute *ad7797_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7797_attribute_group = {
+ .attrs = ad7797_attributes,
+};
+
static int ad7793_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -292,12 +480,12 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_NANO;
} else {
/* 1170mV / 2^23 * 6 */
- scale_uv = (1170ULL * 100000000ULL * 6ULL);
+ scale_uv = (1170ULL * 1000000000ULL * 6ULL);
}
break;
case IIO_TEMP:
/* 1170mV / 0.81 mV/C / 2^23 */
- scale_uv = 1444444444444ULL;
+ scale_uv = 1444444444444444ULL;
break;
default:
return -EINVAL;
@@ -387,6 +575,15 @@ static const struct iio_info ad7793_info = {
.driver_module = THIS_MODULE,
};
+static const struct iio_info ad7797_info = {
+ .read_raw = &ad7793_read_raw,
+ .write_raw = &ad7793_write_raw,
+ .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+ .attrs = &ad7793_attribute_group,
+ .validate_trigger = ad_sd_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
const struct iio_chan_spec _name##_channels[] = { \
AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
@@ -412,41 +609,143 @@ const struct iio_chan_spec _name##_channels[] = { \
IIO_CHAN_SOFT_TIMESTAMP(9), \
}
+#define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(5), \
+}
+
static DECLARE_AD7793_CHANNELS(ad7785, 20, 32, 4);
static DECLARE_AD7793_CHANNELS(ad7792, 16, 32, 0);
static DECLARE_AD7793_CHANNELS(ad7793, 24, 32, 0);
static DECLARE_AD7795_CHANNELS(ad7794, 16, 32);
static DECLARE_AD7795_CHANNELS(ad7795, 24, 32);
+static DECLARE_AD7797_CHANNELS(ad7796, 16, 16);
+static DECLARE_AD7797_CHANNELS(ad7797, 24, 32);
+static DECLARE_AD7799_CHANNELS(ad7798, 16, 16);
+static DECLARE_AD7799_CHANNELS(ad7799, 24, 32);
static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
[ID_AD7785] = {
+ .id = AD7785_ID,
.channels = ad7785_channels,
.num_channels = ARRAY_SIZE(ad7785_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL |
+ AD7793_FLAG_HAS_REFSEL |
+ AD7793_FLAG_HAS_VBIAS |
+ AD7793_HAS_EXITATION_CURRENT |
+ AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
},
[ID_AD7792] = {
+ .id = AD7792_ID,
.channels = ad7792_channels,
.num_channels = ARRAY_SIZE(ad7792_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL |
+ AD7793_FLAG_HAS_REFSEL |
+ AD7793_FLAG_HAS_VBIAS |
+ AD7793_HAS_EXITATION_CURRENT |
+ AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
},
[ID_AD7793] = {
+ .id = AD7793_ID,
.channels = ad7793_channels,
.num_channels = ARRAY_SIZE(ad7793_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL |
+ AD7793_FLAG_HAS_REFSEL |
+ AD7793_FLAG_HAS_VBIAS |
+ AD7793_HAS_EXITATION_CURRENT |
+ AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
},
[ID_AD7794] = {
+ .id = AD7794_ID,
.channels = ad7794_channels,
.num_channels = ARRAY_SIZE(ad7794_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL |
+ AD7793_FLAG_HAS_REFSEL |
+ AD7793_FLAG_HAS_VBIAS |
+ AD7793_HAS_EXITATION_CURRENT |
+ AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
},
[ID_AD7795] = {
+ .id = AD7795_ID,
.channels = ad7795_channels,
.num_channels = ARRAY_SIZE(ad7795_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL |
+ AD7793_FLAG_HAS_REFSEL |
+ AD7793_FLAG_HAS_VBIAS |
+ AD7793_HAS_EXITATION_CURRENT |
+ AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
+ },
+ [ID_AD7796] = {
+ .id = AD7796_ID,
+ .channels = ad7796_channels,
+ .num_channels = ARRAY_SIZE(ad7796_channels),
+ .iio_info = &ad7797_info,
+ .sample_freq_avail = ad7797_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL,
+ },
+ [ID_AD7797] = {
+ .id = AD7797_ID,
+ .channels = ad7797_channels,
+ .num_channels = ARRAY_SIZE(ad7797_channels),
+ .iio_info = &ad7797_info,
+ .sample_freq_avail = ad7797_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_CLKSEL,
+ },
+ [ID_AD7798] = {
+ .id = AD7798_ID,
+ .channels = ad7798_channels,
+ .num_channels = ARRAY_SIZE(ad7798_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
+ },
+ [ID_AD7799] = {
+ .id = AD7799_ID,
+ .channels = ad7799_channels,
+ .num_channels = ARRAY_SIZE(ad7799_channels),
+ .iio_info = &ad7793_info,
+ .sample_freq_avail = ad7793_sample_freq_avail,
+ .flags = AD7793_FLAG_HAS_GAIN |
+ AD7793_FLAG_HAS_BUFFER,
},
};
-static int __devinit ad7793_probe(struct spi_device *spi)
+static int ad7793_probe(struct spi_device *spi)
{
const struct ad7793_platform_data *pdata = spi->dev.platform_data;
struct ad7793_state *st;
struct iio_dev *indio_dev;
- int ret, voltage_uv = 0;
+ int ret, vref_mv = 0;
if (!pdata) {
dev_err(&spi->dev, "no platform data?\n");
@@ -466,25 +765,31 @@ static int __devinit ad7793_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7793_sigma_delta_info);
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
+ if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
+ st->reg = regulator_get(&spi->dev, "refin");
+ if (IS_ERR(st->reg)) {
+ ret = PTR_ERR(st->reg);
+ goto error_device_free;
+ }
+
ret = regulator_enable(st->reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ vref_mv = regulator_get_voltage(st->reg);
+ if (vref_mv < 0) {
+ ret = vref_mv;
+ goto error_disable_reg;
+ }
+
+ vref_mv /= 1000;
+ } else {
+ vref_mv = 1170; /* Built-in ref */
}
st->chip_info =
&ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- if (pdata && pdata->vref_mv)
- st->int_vref_mv = pdata->vref_mv;
- else if (voltage_uv)
- st->int_vref_mv = voltage_uv / 1000;
- else
- st->int_vref_mv = 1170; /* Build-in ref */
-
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
@@ -492,13 +797,13 @@ static int __devinit ad7793_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
- indio_dev->info = &ad7793_info;
+ indio_dev->info = st->chip_info->iio_info;
ret = ad_sd_setup_buffer_and_trigger(indio_dev);
if (ret)
goto error_disable_reg;
- ret = ad7793_setup(indio_dev, pdata);
+ ret = ad7793_setup(indio_dev, pdata, vref_mv);
if (ret)
goto error_remove_trigger;
@@ -511,26 +816,27 @@ static int __devinit ad7793_probe(struct spi_device *spi)
error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
+ if (pdata->refsel != AD7793_REFSEL_INTERNAL)
regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(st->reg))
+ if (pdata->refsel != AD7793_REFSEL_INTERNAL)
regulator_put(st->reg);
-
+error_device_free:
iio_device_free(indio_dev);
return ret;
}
-static int __devexit ad7793_remove(struct spi_device *spi)
+static int ad7793_remove(struct spi_device *spi)
{
+ const struct ad7793_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7793_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
@@ -546,6 +852,10 @@ static const struct spi_device_id ad7793_id[] = {
{"ad7793", ID_AD7793},
{"ad7794", ID_AD7794},
{"ad7795", ID_AD7795},
+ {"ad7796", ID_AD7796},
+ {"ad7797", ID_AD7797},
+ {"ad7798", ID_AD7798},
+ {"ad7799", ID_AD7799},
{}
};
MODULE_DEVICE_TABLE(spi, ad7793_id);
@@ -556,7 +866,7 @@ static struct spi_driver ad7793_driver = {
.owner = THIS_MODULE,
},
.probe = ad7793_probe,
- .remove = __devexit_p(ad7793_remove),
+ .remove = ad7793_remove,
.id_table = ad7793_id,
};
module_spi_driver(ad7793_driver);
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
new file mode 100644
index 000000000000..a33d5cd1a536
--- /dev/null
+++ b/drivers/iio/adc/ad7887.c
@@ -0,0 +1,378 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ad7887.h>
+
+#define AD7887_REF_DIS (1 << 5) /* on-chip reference disable */
+#define AD7887_DUAL (1 << 4) /* dual-channel mode */
+#define AD7887_CH_AIN1 (1 << 3) /* convert on channel 1, DUAL=1 */
+#define AD7887_CH_AIN0 (0 << 3) /* convert on channel 0, DUAL=0,1 */
+#define AD7887_PM_MODE1 (0) /* CS based shutdown */
+#define AD7887_PM_MODE2 (1) /* full on */
+#define AD7887_PM_MODE3 (2) /* auto shutdown after conversion */
+#define AD7887_PM_MODE4 (3) /* standby mode */
+
+enum ad7887_channels {
+ AD7887_CH0,
+ AD7887_CH0_CH1,
+ AD7887_CH1,
+};
+
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+/**
+ * struct ad7887_chip_info - chip specific information
+ * @int_vref_mv: the internal reference voltage
+ * @channel: channel specification
+ */
+struct ad7887_chip_info {
+ u16 int_vref_mv;
+ struct iio_chan_spec channel[3];
+};
+
+struct ad7887_state {
+ struct spi_device *spi;
+ const struct ad7887_chip_info *chip_info;
+ struct regulator *reg;
+ struct spi_transfer xfer[4];
+ struct spi_message msg[3];
+ struct spi_message *ring_msg;
+ unsigned char tx_cmd_buf[4];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * Buffer needs to be large enough to hold two 16 bit samples and a
+ * 64 bit aligned 64 bit timestamp.
+ */
+ unsigned char data[ALIGN(4, sizeof(s64)) + sizeof(s64)]
+ ____cacheline_aligned;
+};
+
+enum ad7887_supported_device_ids {
+ ID_AD7887
+};
+
+static int ad7887_ring_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7887_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_sw_buffer_preenable(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /* We know this is a single long so can 'cheat' */
+ switch (*indio_dev->active_scan_mask) {
+ case (1 << 0):
+ st->ring_msg = &st->msg[AD7887_CH0];
+ break;
+ case (1 << 1):
+ st->ring_msg = &st->msg[AD7887_CH1];
+ /* Dummy read: push CH1 setting down to hardware */
+ spi_sync(st->spi, st->ring_msg);
+ break;
+ case ((1 << 1) | (1 << 0)):
+ st->ring_msg = &st->msg[AD7887_CH0_CH1];
+ break;
+ }
+
+ return 0;
+}
+
+static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7887_state *st = iio_priv(indio_dev);
+
+ /* dummy read: restore default CH0 setting */
+ return spi_sync(st->spi, &st->msg[AD7887_CH0]);
+}
+
+/**
+ * ad7887_trigger_handler() bottom half of trigger-launched polling to the ring buffer
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ **/
+static irqreturn_t ad7887_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad7887_state *st = iio_priv(indio_dev);
+ s64 time_ns;
+ int b_sent;
+
+ b_sent = spi_sync(st->spi, st->ring_msg);
+ if (b_sent)
+ goto done;
+
+ time_ns = iio_get_time_ns();
+
+ if (indio_dev->scan_timestamp)
+ memcpy(st->data + indio_dev->scan_bytes - sizeof(s64),
+ &time_ns, sizeof(time_ns));
+
+ iio_push_to_buffers(indio_dev, st->data);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = {
+ .preenable = &ad7887_ring_preenable,
+ .postenable = &iio_triggered_buffer_postenable,
+ .predisable = &iio_triggered_buffer_predisable,
+ .postdisable = &ad7887_ring_postdisable,
+};
+
+static int ad7887_scan_direct(struct ad7887_state *st, unsigned ch)
+{
+ int ret = spi_sync(st->spi, &st->msg[ch]);
+ if (ret)
+ return ret;
+
+ return (st->data[(ch * 2)] << 8) | st->data[(ch * 2) + 1];
+}
+
+static int ad7887_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ int ret;
+ struct ad7887_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else
+ ret = ad7887_scan_direct(st, chan->address);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+ *val = ret >> chan->scan_type.shift;
+ *val &= RES_MASK(chan->scan_type.realbits);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (st->reg) {
+ *val = regulator_get_voltage(st->reg);
+ if (*val < 0)
+ return *val;
+ *val /= 1000;
+ } else {
+ *val = st->chip_info->int_vref_mv;
+ }
+
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
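For reference (illustrative only, not part of this patch), IIO_VAL_FRACTIONAL_LOG2 means the reported scale is val / 2^val2, so with the 2.5 V on-chip reference and 12-bit data the LSB size works out to roughly 610 microvolts.
/* Illustrative sketch only -- not part of this patch. */
static int ad7887_lsb_microvolts(int vref_mv, int realbits)
{
	return (vref_mv * 1000) >> realbits;	/* 2500 mV, 12 bits -> ~610 uV */
}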
+
+
+static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+ /*
+ * More devices may be added in the future
+ */
+ [ID_AD7887] = {
+ .channel[0] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ .address = 1,
+ .scan_index = 1,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ .channel[1] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ .address = 0,
+ .scan_index = 0,
+ .scan_type = IIO_ST('u', 12, 16, 0),
+ },
+ .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
+ .int_vref_mv = 2500,
+ },
+};
+
+static const struct iio_info ad7887_info = {
+ .read_raw = &ad7887_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ad7887_probe(struct spi_device *spi)
+{
+ struct ad7887_platform_data *pdata = spi->dev.platform_data;
+ struct ad7887_state *st;
+ struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ uint8_t mode;
+ int ret;
+
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ if (!pdata || !pdata->use_onchip_ref) {
+ st->reg = regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg)) {
+ ret = PTR_ERR(st->reg);
+ goto error_free;
+ }
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+ }
+
+ st->chip_info =
+ &ad7887_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+
+ /* Establish that the iio_dev is a child of the spi device */
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad7887_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* Setup default message */
+
+ mode = AD7887_PM_MODE4;
+ if (!pdata || !pdata->use_onchip_ref)
+ mode |= AD7887_REF_DIS;
+ if (pdata && pdata->en_dual)
+ mode |= AD7887_DUAL;
+
+ st->tx_cmd_buf[0] = AD7887_CH_AIN0 | mode;
+
+ st->xfer[0].rx_buf = &st->data[0];
+ st->xfer[0].tx_buf = &st->tx_cmd_buf[0];
+ st->xfer[0].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH0]);
+ spi_message_add_tail(&st->xfer[0], &st->msg[AD7887_CH0]);
+
+ if (pdata && pdata->en_dual) {
+ st->tx_cmd_buf[2] = AD7887_CH_AIN1 | mode;
+
+ st->xfer[1].rx_buf = &st->data[0];
+ st->xfer[1].tx_buf = &st->tx_cmd_buf[2];
+ st->xfer[1].len = 2;
+
+ st->xfer[2].rx_buf = &st->data[2];
+ st->xfer[2].tx_buf = &st->tx_cmd_buf[0];
+ st->xfer[2].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH0_CH1]);
+ spi_message_add_tail(&st->xfer[1], &st->msg[AD7887_CH0_CH1]);
+ spi_message_add_tail(&st->xfer[2], &st->msg[AD7887_CH0_CH1]);
+
+ st->xfer[3].rx_buf = &st->data[2];
+ st->xfer[3].tx_buf = &st->tx_cmd_buf[2];
+ st->xfer[3].len = 2;
+
+ spi_message_init(&st->msg[AD7887_CH1]);
+ spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
+
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = 3;
+ } else {
+ indio_dev->channels = &st->chip_info->channel[1];
+ indio_dev->num_channels = 2;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &ad7887_trigger_handler, &ad7887_ring_setup_ops);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unregister_ring;
+
+ return 0;
+error_unregister_ring:
+ iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+ if (st->reg)
+ regulator_disable(st->reg);
+error_put_reg:
+ if (st->reg)
+ regulator_put(st->reg);
+error_free:
+ iio_device_free(indio_dev);
+
+ return ret;
+}
+
+static int ad7887_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7887_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (st->reg) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7887_id[] = {
+ {"ad7887", ID_AD7887},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7887_id);
+
+static struct spi_driver ad7887_driver = {
+ .driver = {
+ .name = "ad7887",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7887_probe,
+ .remove = ad7887_remove,
+ .id_table = ad7887_id,
+};
+module_spi_driver(ad7887_driver);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 67baa1363d7a..afe6d78c8ff0 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -391,7 +391,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
break;
}
- iio_push_to_buffer(indio_dev->buffer, (uint8_t *)data);
+ iio_push_to_buffers(indio_dev, (uint8_t *)data);
iio_trigger_notify_done(indio_dev->trig);
sigma_delta->irq_dis = false;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 3ed94bf80596..a526c0e3aaa8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -46,7 +46,6 @@ struct at91_adc_state {
struct clk *clk;
bool done;
int irq;
- bool irq_enabled;
u16 last_value;
struct mutex lock;
u8 num_channels;
@@ -66,7 +65,6 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *idev = pf->indio_dev;
struct at91_adc_state *st = iio_priv(idev);
- struct iio_buffer *buffer = idev->buffer;
int i, j = 0;
for (i = 0; i < idev->masklength; i++) {
@@ -82,10 +80,9 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
*timestamp = pf->timestamp;
}
- buffer->access->store_to(buffer, (u8 *)st->buffer);
+ iio_push_to_buffers(idev, (u8 *)st->buffer);
iio_trigger_notify_done(idev->trig);
- st->irq_enabled = true;
/* Needed to ACK the DRDY interruption */
at91_adc_readl(st, AT91_ADC_LCDR);
@@ -106,7 +103,6 @@ static irqreturn_t at91_adc_eoc_trigger(int irq, void *private)
if (iio_buffer_enabled(idev)) {
disable_irq_nosync(irq);
- st->irq_enabled = false;
iio_trigger_poll(idev->trig, iio_get_time_ns());
} else {
st->last_value = at91_adc_readl(st, AT91_ADC_LCDR);
@@ -518,7 +514,7 @@ static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
};
-static int __devinit at91_adc_probe(struct platform_device *pdev)
+static int at91_adc_probe(struct platform_device *pdev)
{
unsigned int prsc, mstrclk, ticks, adc_clk;
int ret;
@@ -682,7 +678,7 @@ error_ret:
return ret;
}
-static int __devexit at91_adc_remove(struct platform_device *pdev)
+static int at91_adc_remove(struct platform_device *pdev)
{
struct iio_dev *idev = platform_get_drvdata(pdev);
struct at91_adc_state *st = iio_priv(idev);
@@ -706,7 +702,7 @@ MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
- .remove = __devexit_p(at91_adc_remove),
+ .remove = at91_adc_remove,
.driver = {
.name = "at91_adc",
.of_match_table = of_match_ptr(at91_adc_dt_ids),
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index a93aaf0bb841..72955e45e9e0 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -193,7 +193,7 @@ static inline void lp8788_iio_map_unregister(struct iio_dev *indio_dev,
iio_map_array_unregister(indio_dev, adc->map);
}
-static int __devinit lp8788_adc_probe(struct platform_device *pdev)
+static int lp8788_adc_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct iio_dev *indio_dev;
@@ -236,7 +236,7 @@ err_iio_map:
return ret;
}
-static int __devexit lp8788_adc_remove(struct platform_device *pdev)
+static int lp8788_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct lp8788_adc *adc = iio_priv(indio_dev);
@@ -250,7 +250,7 @@ static int __devexit lp8788_adc_remove(struct platform_device *pdev)
static struct platform_driver lp8788_adc_driver = {
.probe = lp8788_adc_probe,
- .remove = __devexit_p(lp8788_adc_remove),
+ .remove = lp8788_adc_remove,
.driver = {
.name = LP8788_DEV_ADC,
.owner = THIS_MODULE,
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/iio/adc/max1363.c
index d7b4ffcfa052..03b25b3dc71e 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/iio/adc/max1363.c
@@ -37,8 +37,151 @@
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/driver.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define MAX1363_SETUP_BYTE(a) ((a) | 0x80)
+
+/* There is a fair bit more defined here than currently
+ * used, but the intention is to support everything these
+ * chips do in the long run */
+
+/* see data sheets */
+/* max1363 and max1236, max1237, max1238, max1239 */
+#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD 0x00
+#define MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF 0x20
+#define MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT 0x40
+#define MAX1363_SETUP_AIN3_IS_REF_REF_IS_INT 0x60
+#define MAX1363_SETUP_POWER_UP_INT_REF 0x10
+#define MAX1363_SETUP_POWER_DOWN_INT_REF 0x00
+
+/* think about including max11600 etc - more settings */
+#define MAX1363_SETUP_EXT_CLOCK 0x08
+#define MAX1363_SETUP_INT_CLOCK 0x00
+#define MAX1363_SETUP_UNIPOLAR 0x00
+#define MAX1363_SETUP_BIPOLAR 0x04
+#define MAX1363_SETUP_RESET 0x00
+#define MAX1363_SETUP_NORESET 0x02
+/* max1363 only - though don't care on others.
+ * For now monitor modes are not implemented as the relevant
+ * line is not connected on my test board.
+ * The definitions are here as I intend to add this soon.
+ */
+#define MAX1363_SETUP_MONITOR_SETUP 0x01
+
+/* Specific to the max1363 */
+#define MAX1363_MON_RESET_CHAN(a) (1 << ((a) + 4))
+#define MAX1363_MON_INT_ENABLE 0x01
+
+/* defined for readability reasons */
+/* All chips */
+#define MAX1363_CONFIG_BYTE(a) ((a))
+
+#define MAX1363_CONFIG_SE 0x01
+#define MAX1363_CONFIG_DE 0x00
+#define MAX1363_CONFIG_SCAN_TO_CS 0x00
+#define MAX1363_CONFIG_SCAN_SINGLE_8 0x20
+#define MAX1363_CONFIG_SCAN_MONITOR_MODE 0x40
+#define MAX1363_CONFIG_SCAN_SINGLE_1 0x60
+/* max123{6-9} only */
+#define MAX1236_SCAN_MID_TO_CHANNEL 0x40
+
+/* max1363 only - merely part of channel selects or don't care for others*/
+#define MAX1363_CONFIG_EN_MON_MODE_READ 0x18
+
+#define MAX1363_CHANNEL_SEL(a) ((a) << 1)
+
+/* max1363 strictly 0x06 - but doesn't matter */
+#define MAX1363_CHANNEL_SEL_MASK 0x1E
+#define MAX1363_SCAN_MASK 0x60
+#define MAX1363_SE_DE_MASK 0x01
+
+#define MAX1363_MAX_CHANNELS 25
+/**
+ * struct max1363_mode - scan mode information
+ * @conf: The corresponding value of the configuration register
+ * @modemask: Bit mask corresponding to channels enabled in this mode
+ */
+struct max1363_mode {
+ int8_t conf;
+ DECLARE_BITMAP(modemask, MAX1363_MAX_CHANNELS);
+};
+
+/* This must be maintained alongside the max1363_mode_table below */
+enum max1363_modes {
+ /* Single read of a single channel */
+ _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+ /* Differential single read */
+ d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+ d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+ /* Scan to channel and mid to channel where overlapping */
+ s0to1, s0to2, s2to3, s0to3, s0to4, s0to5, s0to6,
+ s6to7, s0to7, s6to8, s0to8, s6to9,
+ s0to9, s6to10, s0to10, s6to11, s0to11,
+ /* Differential scan to channel and mid to channel where overlapping */
+ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
+ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
+ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
+ d7m6to11m10, d1m0to11m10,
+};
+
+/**
+ * struct max1363_chip_info - chip specific information
+ * @info: iio core function callbacks structure
+ * @channels: channel specification
+ * @num_channels: number of channels
+ * @mode_list: array of available scan modes
+ * @default_mode: the scan mode in which the chip starts up
+ * @int_vref_mv: the internal reference voltage
+ * @num_modes: number of modes in the mode_list
+ * @bits: resolution of the adc in bits
+ */
+struct max1363_chip_info {
+ const struct iio_info *info;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ const enum max1363_modes *mode_list;
+ enum max1363_modes default_mode;
+ u16 int_vref_mv;
+ u8 num_modes;
+ u8 bits;
+};
-#include "max1363.h"
+/**
+ * struct max1363_state - driver instance specific data
+ * @client: i2c_client
+ * @setupbyte: cache of current device setup byte
+ * @configbyte: cache of current device config byte
+ * @chip_info: chip model specific constants, available modes etc
+ * @current_mode: the scan mode of this chip
+ * @requestedmask: a valid requested set of channels
+ * @reg: supply regulator
+ * @monitor_on: whether monitor mode is enabled
+ * @monitor_speed: parameter corresponding to device monitor speed setting
+ * @mask_high: bitmask for enabled high thresholds
+ * @mask_low: bitmask for enabled low thresholds
+ * @thresh_high: high threshold values
+ * @thresh_low: low threshold values
+ */
+struct max1363_state {
+ struct i2c_client *client;
+ u8 setupbyte;
+ u8 configbyte;
+ const struct max1363_chip_info *chip_info;
+ const struct max1363_mode *current_mode;
+ u32 requestedmask;
+ struct regulator *reg;
+
+ /* Using monitor modes and buffer at the same time is
+ currently not supported */
+ bool monitor_on;
+ unsigned int monitor_speed:3;
+ u8 mask_high;
+ u8 mask_low;
+ /* 4x unipolar first then the four bipolar ones */
+ s16 thresh_high[8];
+ s16 thresh_low[8];
+};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
.conf = MAX1363_CHANNEL_SEL(_num) \
@@ -148,7 +291,7 @@ static const struct max1363_mode max1363_mode_table[] = {
MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(11, 6, 0xFC0000),
};
-const struct max1363_mode
+static const struct max1363_mode
*max1363_match_mode(const unsigned long *mask,
const struct max1363_chip_info *ci)
{
@@ -172,7 +315,7 @@ static int max1363_write_basic_config(struct i2c_client *client,
return i2c_master_send(client, tx_buf, 2);
}
-int max1363_set_scan_mode(struct max1363_state *st)
+static int max1363_set_scan_mode(struct max1363_state *st)
{
st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
| MAX1363_SCAN_MASK
@@ -622,9 +765,9 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
u64 event_code)
{
struct max1363_state *st = iio_priv(indio_dev);
-
int val;
int number = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
+
mutex_lock(&indio_dev->mlock);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
@@ -644,7 +787,7 @@ static int max1363_monitor_mode_update(struct max1363_state *st, int enabled)
const long *modemask;
if (!enabled) {
- /* transition to ring capture is not currently supported */
+ /* transition to buffered capture is not currently supported */
st->setupbyte &= ~MAX1363_SETUP_MONITOR_SETUP;
st->configbyte &= ~MAX1363_SCAN_MASK;
st->monitor_on = false;
@@ -826,8 +969,21 @@ static struct attribute_group max1363_event_attribute_group = {
.name = "events",
};
-#define MAX1363_EVENT_FUNCS \
+static int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ /*
+ * Need to figure out the current mode based upon the requested
+ * scan mask in iio_dev
+ */
+ st->current_mode = max1363_match_mode(scan_mask, st->chip_info);
+ if (!st->current_mode)
+ return -EINVAL;
+ max1363_set_scan_mode(st);
+ return 0;
+}
static const struct iio_info max1238_info = {
.read_raw = &max1363_read_raw,
@@ -1230,8 +1386,6 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
};
-
-
static int max1363_initial_setup(struct max1363_state *st)
{
st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
@@ -1248,7 +1402,7 @@ static int max1363_initial_setup(struct max1363_state *st)
return max1363_set_scan_mode(st);
}
-static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
+static int max1363_alloc_scan_masks(struct iio_dev *indio_dev)
{
struct max1363_state *st = iio_priv(indio_dev);
unsigned long *masks;
@@ -1269,34 +1423,137 @@ static int __devinit max1363_alloc_scan_masks(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit max1363_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+
+static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
- int ret;
- struct max1363_state *st;
- struct iio_dev *indio_dev;
- struct regulator *reg;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct max1363_state *st = iio_priv(indio_dev);
+ s64 time_ns;
+ __u8 *rxbuf;
+ int b_sent;
+ size_t d_size;
+ unsigned long numvals = bitmap_weight(st->current_mode->modemask,
+ MAX1363_MAX_CHANNELS);
+
+ /* Ensure the timestamp is 8 byte aligned */
+ if (st->chip_info->bits != 8)
+ d_size = numvals*2;
+ else
+ d_size = numvals;
+ if (indio_dev->scan_timestamp) {
+ d_size += sizeof(s64);
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
+ /* Monitor mode prevents reading. Whilst monitor mode is not
+ * currently implemented, this test does no harm in the
+ * meantime, so keep it here.
+ */
+ if (numvals == 0)
+ goto done;
+
+ rxbuf = kmalloc(d_size, GFP_KERNEL);
+ if (rxbuf == NULL)
+ goto done;
+ if (st->chip_info->bits != 8)
+ b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
+ else
+ b_sent = i2c_master_recv(st->client, rxbuf, numvals);
+ if (b_sent < 0)
+ goto done_free;
- reg = regulator_get(&client->dev, "vcc");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- goto error_out;
+ time_ns = iio_get_time_ns();
+
+ if (indio_dev->scan_timestamp)
+ memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ iio_push_to_buffers(indio_dev, rxbuf);
+
+done_free:
+ kfree(rxbuf);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_buffer_setup_ops max1363_buffered_setup_ops = {
+ .postenable = &iio_triggered_buffer_postenable,
+ .preenable = &iio_sw_buffer_preenable,
+ .predisable = &iio_triggered_buffer_predisable,
+};
+
+static int max1363_register_buffered_funcs_and_init(struct iio_dev *indio_dev)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ indio_dev->buffer = iio_kfifo_allocate(indio_dev);
+ if (!indio_dev->buffer) {
+ ret = -ENOMEM;
+ goto error_ret;
}
+ indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
+ &max1363_trigger_handler,
+ IRQF_ONESHOT,
+ indio_dev,
+ "%s_consumer%d",
+ st->client->name,
+ indio_dev->id);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_deallocate_sw_rb;
+ }
+ /* Buffer setup functions - trigger related in this case */
+ indio_dev->setup_ops = &max1363_buffered_setup_ops;
- ret = regulator_enable(reg);
- if (ret)
- goto error_put_reg;
+ /* Flag that triggered buffering is possible */
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
+
+ return 0;
+
+error_deallocate_sw_rb:
+ iio_kfifo_free(indio_dev->buffer);
+error_ret:
+ return ret;
+}
+
+static void max1363_buffer_cleanup(struct iio_dev *indio_dev)
+{
+ /* ensure that the trigger has been detached */
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+ iio_kfifo_free(indio_dev->buffer);
+}
+
+static int max1363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct max1363_state *st;
+ struct iio_dev *indio_dev;
indio_dev = iio_device_alloc(sizeof(struct max1363_state));
if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_disable_reg;
+ goto error_out;
}
+
ret = iio_map_array_register(indio_dev, client->dev.platform_data);
if (ret < 0)
goto error_free_device;
+
st = iio_priv(indio_dev);
- st->reg = reg;
+
+ st->reg = regulator_get(&client->dev, "vcc");
+ if (IS_ERR(st->reg)) {
+ ret = PTR_ERR(st->reg);
+ goto error_unregister_map;
+ }
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -1305,7 +1562,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
ret = max1363_alloc_scan_masks(indio_dev);
if (ret)
- goto error_unregister_map;
+ goto error_disable_reg;
/* Establish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
@@ -1320,7 +1577,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
if (ret < 0)
goto error_free_available_scan_masks;
- ret = max1363_register_ring_funcs_and_init(indio_dev);
+ ret = max1363_register_buffered_funcs_and_init(indio_dev);
if (ret)
goto error_free_available_scan_masks;
@@ -1328,7 +1585,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
st->chip_info->channels,
st->chip_info->num_channels);
if (ret)
- goto error_cleanup_ring;
+ goto error_cleanup_buffer;
if (client->irq) {
ret = request_threaded_irq(st->client->irq,
@@ -1339,7 +1596,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
indio_dev);
if (ret)
- goto error_uninit_ring;
+ goto error_uninit_buffer;
}
ret = iio_device_register(indio_dev);
@@ -1348,41 +1605,39 @@ static int __devinit max1363_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(st->client->irq, indio_dev);
-error_uninit_ring:
+ if (client->irq)
+ free_irq(st->client->irq, indio_dev);
+error_uninit_buffer:
iio_buffer_unregister(indio_dev);
-error_cleanup_ring:
- max1363_ring_cleanup(indio_dev);
+error_cleanup_buffer:
+ max1363_buffer_cleanup(indio_dev);
error_free_available_scan_masks:
kfree(indio_dev->available_scan_masks);
+error_disable_reg:
+ regulator_disable(st->reg);
+error_put_reg:
+ regulator_put(st->reg);
error_unregister_map:
iio_map_array_unregister(indio_dev, client->dev.platform_data);
error_free_device:
iio_device_free(indio_dev);
-error_disable_reg:
- regulator_disable(reg);
-error_put_reg:
- regulator_put(reg);
error_out:
return ret;
}
-static int __devexit max1363_remove(struct i2c_client *client)
+static int max1363_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max1363_state *st = iio_priv(indio_dev);
- struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
if (client->irq)
free_irq(st->client->irq, indio_dev);
iio_buffer_unregister(indio_dev);
- max1363_ring_cleanup(indio_dev);
+ max1363_buffer_cleanup(indio_dev);
kfree(indio_dev->available_scan_masks);
- if (!IS_ERR(reg)) {
- regulator_disable(reg);
- regulator_put(reg);
- }
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
iio_map_array_unregister(indio_dev, client->dev.platform_data);
iio_device_free(indio_dev);
@@ -1434,7 +1689,7 @@ static struct i2c_driver max1363_driver = {
.name = "max1363",
},
.probe = max1363_probe,
- .remove = __devexit_p(max1363_remove),
+ .remove = max1363_remove,
.id_table = max1363_id,
};
module_i2c_driver(max1363_driver);
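As an aside on the max1363 trigger handler above: each scan is one byte per enabled channel on the 8-bit parts and two bytes per channel otherwise, and when a timestamp is enabled the total is padded up to an 8-byte boundary so the appended s64 lands aligned. A minimal standalone sketch of that arithmetic (plain userspace C, not part of the patch; the helper name is invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* mirrors the d_size calculation in max1363_trigger_handler() */
static size_t max1363_scan_bytes(unsigned long numvals, int bits,
				 int scan_timestamp)
{
	size_t d_size = (bits != 8) ? numvals * 2 : numvals;

	if (scan_timestamp) {
		d_size += sizeof(int64_t);
		if (d_size % sizeof(int64_t))
			d_size += sizeof(int64_t) - (d_size % sizeof(int64_t));
	}
	return d_size;
}

int main(void)
{
	/* e.g. three 12-bit channels plus a timestamp: 6 data bytes -> 16 total */
	printf("%zu\n", max1363_scan_bytes(3, 12, 1));
	return 0;
}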
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
new file mode 100644
index 000000000000..f4a46dd8f43b
--- /dev/null
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+struct adc081c {
+ struct i2c_client *i2c;
+ struct regulator *ref;
+};
+
+#define REG_CONV_RES 0x00
+
+static int adc081c_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct adc081c *adc = iio_priv(iio);
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = i2c_smbus_read_word_swapped(adc->i2c, REG_CONV_RES);
+ if (err < 0)
+ return err;
+
+ *value = (err >> 4) & 0xff;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ err = regulator_get_voltage(adc->ref);
+ if (err < 0)
+ return err;
+
+ *value = err / 1000;
+ *shift = 8;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec adc081c_channel = {
+ .type = IIO_VOLTAGE,
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+};
+
+static const struct iio_info adc081c_info = {
+ .read_raw = adc081c_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int adc081c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *iio;
+ struct adc081c *adc;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ iio = iio_device_alloc(sizeof(*adc));
+ if (!iio)
+ return -ENOMEM;
+
+ adc = iio_priv(iio);
+ adc->i2c = client;
+
+ adc->ref = regulator_get(&client->dev, "vref");
+ if (IS_ERR(adc->ref)) {
+ err = PTR_ERR(adc->ref);
+ goto iio_free;
+ }
+
+ err = regulator_enable(adc->ref);
+ if (err < 0)
+ goto regulator_put;
+
+ iio->dev.parent = &client->dev;
+ iio->name = dev_name(&client->dev);
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->info = &adc081c_info;
+
+ iio->channels = &adc081c_channel;
+ iio->num_channels = 1;
+
+ err = iio_device_register(iio);
+ if (err < 0)
+ goto regulator_disable;
+
+ i2c_set_clientdata(client, iio);
+
+ return 0;
+
+regulator_disable:
+ regulator_disable(adc->ref);
+regulator_put:
+ regulator_put(adc->ref);
+iio_free:
+ iio_device_free(iio);
+
+ return err;
+}
+
+static int adc081c_remove(struct i2c_client *client)
+{
+ struct iio_dev *iio = i2c_get_clientdata(client);
+ struct adc081c *adc = iio_priv(iio);
+
+ iio_device_unregister(iio);
+ regulator_disable(adc->ref);
+ regulator_put(adc->ref);
+ iio_device_free(iio);
+
+ return 0;
+}
+
+static const struct i2c_device_id adc081c_id[] = {
+ { "adc081c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adc081c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id adc081c_of_match[] = {
+ { .compatible = "ti,adc081c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adc081c_of_match);
+#endif
+
+static struct i2c_driver adc081c_driver = {
+ .driver = {
+ .name = "adc081c",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(adc081c_of_match),
+ },
+ .probe = adc081c_probe,
+ .remove = adc081c_remove,
+ .id_table = adc081c_id,
+};
+module_i2c_driver(adc081c_driver);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("Texas Instruments ADC081C021/027 driver");
+MODULE_LICENSE("GPL v2");
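A note on the scale reported by adc081c_read_raw() above: returning IIO_VAL_FRACTIONAL_LOG2 with the first value set to the reference voltage in millivolts and the shift set to 8 means the reported scale is vref_mV / 2^8, i.e. one LSB of the 8-bit result. A standalone sketch of that arithmetic, assuming a hypothetical 3.3 V supply on "vref" (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int vref_uv = 3300000;	/* assumed 3.3 V reference */
	unsigned int raw = 0x80;	/* mid-scale 8-bit reading */
	/* IIO_VAL_FRACTIONAL_LOG2: scale = val / 2^val2 = (vref/1000) / 256 */
	double scale_mv = (vref_uv / 1000.0) / (1 << 8);

	printf("scale = %.6f mV/LSB, raw %u -> %.3f mV\n",
	       scale_mv, raw, raw * scale_mv);
	return 0;
}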
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
new file mode 100644
index 000000000000..cd030e100c39
--- /dev/null
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -0,0 +1,260 @@
+/*
+ * TI ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+struct tiadc_device {
+ struct ti_tscadc_dev *mfd_tscadc;
+ int channels;
+};
+
+static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
+{
+ return readl(adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_step_config(struct tiadc_device *adc_dev)
+{
+ unsigned int stepconfig;
+ int i, channels = 0, steps;
+
+ /*
+ * There are 16 configurable steps and 8 analog input
+ * lines available which are shared between Touchscreen and ADC.
+ *
+ * Steps are assigned backwards, i.e. from 16 towards 0, by the ADC
+ * depending on the number of input lines needed.
+ * The channel represents which analog input
+ * is fed to the ADC to be digitized.
+ */
+
+ steps = TOTAL_STEPS - adc_dev->channels;
+ channels = TOTAL_CHANNELS - adc_dev->channels;
+
+ stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+
+ for (i = (steps + 1); i <= TOTAL_STEPS; i++) {
+ tiadc_writel(adc_dev, REG_STEPCONFIG(i),
+ stepconfig | STEPCONFIG_INP(channels));
+ tiadc_writel(adc_dev, REG_STEPDELAY(i),
+ STEPCONFIG_OPENDLY);
+ channels++;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+}
+
+static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
+{
+ struct iio_chan_spec *chan_array;
+ int i;
+
+ indio_dev->num_channels = channels;
+ chan_array = kcalloc(indio_dev->num_channels,
+ sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (chan_array == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < (indio_dev->num_channels); i++) {
+ struct iio_chan_spec *chan = chan_array + i;
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = i;
+ chan->info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT;
+ }
+
+ indio_dev->channels = chan_array;
+
+ return indio_dev->num_channels;
+}
+
+static void tiadc_channels_remove(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->channels);
+}
+
+static int tiadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int i;
+ unsigned int fifo1count, readx1;
+
+ /*
+ * When the sub-system is first enabled,
+ * the sequencer will always start with the
+ * lowest step (1) and continue until step (16).
+ * For example: if we have enabled 4 ADC channels and
+ * currently use only 1 of them, the
+ * sequencer still configures all 4 steps,
+ * leading to 3 unwanted samples.
+ * Hence we need to flush out this stale data.
+ */
+
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++) {
+ readx1 = tiadc_readl(adc_dev, REG_FIFO1);
+ if (i == chan->channel)
+ *val = readx1 & 0xfff;
+ }
+ tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info tiadc_info = {
+ .read_raw = &tiadc_read_raw,
+};
+
+static int tiadc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct tiadc_device *adc_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+ if (!pdata || !pdata->adc_init) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ indio_dev = iio_device_alloc(sizeof(struct tiadc_device));
+ if (indio_dev == NULL) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ err = -ENOMEM;
+ goto err_ret;
+ }
+ adc_dev = iio_priv(indio_dev);
+
+ adc_dev->mfd_tscadc = tscadc_dev;
+ adc_dev->channels = pdata->adc_init->adc_channels;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tiadc_info;
+
+ tiadc_step_config(adc_dev);
+
+ err = tiadc_channel_init(indio_dev, adc_dev->channels);
+ if (err < 0)
+ goto err_free_device;
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto err_free_channels;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_free_channels:
+ tiadc_channels_remove(indio_dev);
+err_free_device:
+ iio_device_free(indio_dev);
+err_ret:
+ return err;
+}
+
+static int tiadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ tiadc_channels_remove(indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tiadc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ unsigned int idle;
+
+ if (!device_may_wakeup(tscadc_dev->dev)) {
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
+ }
+
+ return 0;
+}
+
+static int tiadc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ unsigned int restore;
+
+ /* Make sure ADC is powered up */
+ restore = tiadc_readl(adc_dev, REG_CTRL);
+ restore &= ~(CNTRLREG_POWERDOWN);
+ tiadc_writel(adc_dev, REG_CTRL, restore);
+
+ tiadc_step_config(adc_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tiadc_pm_ops = {
+ .suspend = tiadc_suspend,
+ .resume = tiadc_resume,
+};
+#define TIADC_PM_OPS (&tiadc_pm_ops)
+#else
+#define TIADC_PM_OPS NULL
+#endif
+
+static struct platform_driver tiadc_driver = {
+ .driver = {
+ .name = "tiadc",
+ .owner = THIS_MODULE,
+ .pm = TIADC_PM_OPS,
+ },
+ .probe = tiadc_probe,
+ .remove = tiadc_remove,
+};
+
+module_platform_driver(tiadc_driver);
+
+MODULE_DESCRIPTION("TI ADC controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
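To make the step allocation in tiadc_step_config() above concrete: the ADC takes its steps from the top of the sequencer, so with N channels reserved for the ADC, steps TOTAL_STEPS-N+1 .. TOTAL_STEPS are programmed and fed from analog inputs TOTAL_CHANNELS-N upwards. A standalone sketch, with TOTAL_STEPS = 16 and TOTAL_CHANNELS = 8 assumed here (the real values come from ti_am335x_tscadc.h):

#include <stdio.h>

#define TOTAL_STEPS	16	/* assumed, see ti_am335x_tscadc.h */
#define TOTAL_CHANNELS	8	/* assumed, see ti_am335x_tscadc.h */

int main(void)
{
	int adc_channels = 4;	/* e.g. pdata->adc_init->adc_channels */
	int steps = TOTAL_STEPS - adc_channels;
	int channel = TOTAL_CHANNELS - adc_channels;
	int i;

	/* same loop bounds as tiadc_step_config() */
	for (i = steps + 1; i <= TOTAL_STEPS; i++)
		printf("step %2d -> AIN%d\n", i, channel++);
	return 0;
}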
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
new file mode 100644
index 000000000000..ad0261533dee
--- /dev/null
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -0,0 +1,181 @@
+/*
+ * Nano River Technologies viperboard IIO ADC driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_ADC_CMD_GET 0x00
+
+struct vprbrd_adc_msg {
+ u8 cmd;
+ u8 chan;
+ u8 val;
+} __packed;
+
+struct vprbrd_adc {
+ struct vprbrd *vb;
+};
+
+#define VPRBRD_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
+}
+
+static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
+ VPRBRD_ADC_CHANNEL(0),
+ VPRBRD_ADC_CHANNEL(1),
+ VPRBRD_ADC_CHANNEL(2),
+ VPRBRD_ADC_CHANNEL(3),
+};
+
+static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long info)
+{
+ int ret, error = 0;
+ struct vprbrd_adc *adc = iio_priv(iio_dev);
+ struct vprbrd *vb = adc->vb;
+ struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&vb->lock);
+
+ admsg->cmd = VPRBRD_ADC_CMD_GET;
+ admsg->chan = chan->scan_index;
+ admsg->val = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb send error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg,
+ sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ *val = admsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_adc_msg)) {
+ dev_err(&iio_dev->dev, "usb recv error on adc read\n");
+ error = -EREMOTEIO;
+ }
+
+ if (error)
+ goto error;
+
+ return IIO_VAL_INT;
+ default:
+ error = -EINVAL;
+ break;
+ }
+error:
+ return error;
+}
+
+static const struct iio_info vprbrd_adc_iio_info = {
+ .read_raw = &vprbrd_iio_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int vprbrd_adc_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_adc *adc;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ /* registering iio */
+ indio_dev = iio_device_alloc(sizeof(*adc));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(indio_dev);
+ adc->vb = vb;
+ indio_dev->name = "viperboard adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &vprbrd_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vprbrd_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register iio (adc)\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+error:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int vprbrd_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver vprbrd_adc_driver = {
+ .driver = {
+ .name = "viperboard-adc",
+ .owner = THIS_MODULE,
+ },
+ .probe = vprbrd_adc_probe,
+ .remove = vprbrd_adc_remove,
+};
+
+module_platform_driver(vprbrd_adc_driver);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-adc");
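For reference, the viperboard ADC protocol used in vprbrd_iio_read_raw() above is a three byte message sent with a USB control OUT transfer and then read back, into the same buffer, with a control IN transfer; the returned val field carries the 8-bit conversion. A standalone sketch of the message layout (not part of the patch; the returned value is made up):

#include <stdio.h>
#include <stdint.h>

struct vprbrd_adc_msg {
	uint8_t cmd;	/* VPRBRD_ADC_CMD_GET == 0x00 */
	uint8_t chan;	/* analog input 0..3 */
	uint8_t val;	/* filled in by the device on the IN transfer */
} __attribute__((packed));

int main(void)
{
	struct vprbrd_adc_msg msg = { .cmd = 0x00, .chan = 2, .val = 0x00 };

	/* pretend the IN transfer returned 0x9a for channel 2 */
	msg.val = 0x9a;
	printf("channel %u -> raw %u\n", msg.chan, msg.val);
	return 0;
}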
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d8281cdbfc4a..d6c0af23a2a7 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -133,7 +133,7 @@ static const struct iio_chan_spec ad8366_channels[] = {
AD8366_CHAN(1),
};
-static int __devinit ad8366_probe(struct spi_device *spi)
+static int ad8366_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct ad8366_state *st;
@@ -182,7 +182,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad8366_remove(struct spi_device *spi)
+static int ad8366_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad8366_state *st = iio_priv(indio_dev);
@@ -211,7 +211,7 @@ static struct spi_driver ad8366_driver = {
.owner = THIS_MODULE,
},
.probe = ad8366_probe,
- .remove = __devexit_p(ad8366_remove),
+ .remove = ad8366_remove,
.id_table = ad8366_id,
};
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
new file mode 100644
index 000000000000..4d40e24f3721
--- /dev/null
+++ b/drivers/iio/buffer_cb.c
@@ -0,0 +1,113 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+
+struct iio_cb_buffer {
+ struct iio_buffer buffer;
+ int (*cb)(u8 *data, void *private);
+ void *private;
+ struct iio_channel *channels;
+};
+
+static int iio_buffer_cb_store_to(struct iio_buffer *buffer, u8 *data)
+{
+ struct iio_cb_buffer *cb_buff = container_of(buffer,
+ struct iio_cb_buffer,
+ buffer);
+
+ return cb_buff->cb(data, cb_buff->private);
+}
+
+static struct iio_buffer_access_funcs iio_cb_access = {
+ .store_to = &iio_buffer_cb_store_to,
+};
+
+struct iio_cb_buffer *iio_channel_get_all_cb(const char *name,
+ int (*cb)(u8 *data,
+ void *private),
+ void *private)
+{
+ int ret;
+ struct iio_cb_buffer *cb_buff;
+ struct iio_dev *indio_dev;
+ struct iio_channel *chan;
+
+ cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
+ if (cb_buff == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ cb_buff->private = private;
+ cb_buff->cb = cb;
+ cb_buff->buffer.access = &iio_cb_access;
+ INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
+
+ cb_buff->channels = iio_channel_get_all(name);
+ if (IS_ERR(cb_buff->channels)) {
+ ret = PTR_ERR(cb_buff->channels);
+ goto error_free_cb_buff;
+ }
+
+ indio_dev = cb_buff->channels[0].indio_dev;
+ cb_buff->buffer.scan_mask
+ = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
+ GFP_KERNEL);
+ if (cb_buff->buffer.scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+ chan = &cb_buff->channels[0];
+ while (chan->indio_dev) {
+ if (chan->indio_dev != indio_dev) {
+ ret = -EINVAL;
+ goto error_release_channels;
+ }
+ set_bit(chan->channel->scan_index,
+ cb_buff->buffer.scan_mask);
+ chan++;
+ }
+
+ return cb_buff;
+
+error_release_channels:
+ iio_channel_release_all(cb_buff->channels);
+error_free_cb_buff:
+ kfree(cb_buff);
+error_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+
+int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ return iio_update_buffers(cb_buff->channels[0].indio_dev,
+ &cb_buff->buffer,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
+
+void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ iio_update_buffers(cb_buff->channels[0].indio_dev,
+ NULL,
+ &cb_buff->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
+
+void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
+{
+ iio_channel_release_all(cb_buff->channels);
+ kfree(cb_buff);
+}
+EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
+
+struct iio_channel
+*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
+{
+ return cb_buffer->channels;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
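The callback buffer added above gives in-kernel consumers a way to receive scans from a producing IIO device without a character device in between. A hedged sketch of how a consumer might use it - the consumer name "my-consumer" and my_cb() are hypothetical, and error handling is trimmed:

#include <linux/err.h>
#include <linux/iio/consumer.h>

/* invoked for every scan the producing device pushes into the buffer */
static int my_cb(u8 *data, void *private)
{
	return 0;
}

static struct iio_cb_buffer *cb_buff;

static int my_consumer_start(void)
{
	cb_buff = iio_channel_get_all_cb("my-consumer", my_cb, NULL);
	if (IS_ERR(cb_buff))
		return PTR_ERR(cb_buff);
	/* attach the buffer to the producer and start streaming */
	return iio_channel_start_all_cb(cb_buff);
}

static void my_consumer_stop(void)
{
	iio_channel_stop_all_cb(cb_buff);
	iio_channel_release_all_cb(cb_buff);
}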
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 8e63d81d652a..1178121b55b0 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
config HID_SENSOR_IIO_COMMON
tristate "Common modules for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB
- select IIO_TRIGGER if IIO_BUFFER
+ select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
help
Say yes here to build support for HID sensor to use
HID sensor common processing for attributes and IIO triggers.
@@ -14,8 +14,19 @@ config HID_SENSOR_IIO_COMMON
HID sensor drivers, this module contains processing for those
attributes.
+config HID_SENSOR_IIO_TRIGGER
+ tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+ select IIO_TRIGGER
+ help
+ Say yes here to build trigger support for HID sensors.
+ Triggers will be sent once all requested attributes have been read.
+
+ If this driver is compiled as a module, it will be named
+ hid-sensor-trigger.
+
config HID_SENSOR_ENUM_BASE_QUIRKS
- tristate "ENUM base quirks for HID Sensor IIO drivers"
+ bool "ENUM base quirks for HID Sensor IIO drivers"
depends on HID_SENSOR_IIO_COMMON
help
Say yes here to build support for sensor hub FW using
diff --git a/drivers/iio/common/hid-sensors/Makefile b/drivers/iio/common/hid-sensors/Makefile
index 1f463e00c242..22e7c5a82325 100644
--- a/drivers/iio/common/hid-sensors/Makefile
+++ b/drivers/iio/common/hid-sensors/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index d4b790d18efb..d60198a6ca29 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,10 +36,8 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
int state_val;
state_val = state ? 1 : 0;
-#if (defined CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS) || \
- (defined CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS_MODULE)
- ++state_val;
-#endif
+ if (IS_ENABLED(CONFIG_HID_SENSOR_ENUM_BASE_QUIRKS))
+ ++state_val;
st->data_ready = state;
sensor_hub_set_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index b1c0ee5294ca..f4a6f0838327 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -67,6 +67,16 @@ config AD5446
To compile this driver as a module, choose M here: the
module will be called ad5446.
+config AD5449
+ tristate "Analog Devices AD5449 and similar DACs driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5415, AD5426, AD5429,
+ AD5432, AD5439, AD5443, AD5449 Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5449.
+
config AD5504
tristate "Analog Devices AD5504/AD5501 DAC SPI driver"
depends on SPI
@@ -122,7 +132,7 @@ config AD5686
config MAX517
tristate "Maxim MAX517/518/519 DAC driver"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for the Maxim chips MAX517,
MAX518 and MAX519 (I2C 8-Bit DACs with rail-to-rail outputs).
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index c0d333b23ba3..5b528ebb3343 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
+obj-$(CONFIG_AD5449) += ad5449.o
obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index eb281a2c295b..2fe1d4edcb2f 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -424,8 +424,8 @@ static const char * const ad5064_vref_name(struct ad5064_state *st,
return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
}
-static int __devinit ad5064_probe(struct device *dev, enum ad5064_type type,
- const char *name, ad5064_write_func write)
+static int ad5064_probe(struct device *dev, enum ad5064_type type,
+ const char *name, ad5064_write_func write)
{
struct iio_dev *indio_dev;
struct ad5064_state *st;
@@ -495,7 +495,7 @@ error_free:
return ret;
}
-static int __devexit ad5064_remove(struct device *dev)
+static int ad5064_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
@@ -523,7 +523,7 @@ static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
return spi_write(spi, &st->data.spi, sizeof(st->data.spi));
}
-static int __devinit ad5064_spi_probe(struct spi_device *spi)
+static int ad5064_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -531,7 +531,7 @@ static int __devinit ad5064_spi_probe(struct spi_device *spi)
ad5064_spi_write);
}
-static int __devexit ad5064_spi_remove(struct spi_device *spi)
+static int ad5064_spi_remove(struct spi_device *spi)
{
return ad5064_remove(&spi->dev);
}
@@ -563,7 +563,7 @@ static struct spi_driver ad5064_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5064_spi_probe,
- .remove = __devexit_p(ad5064_spi_remove),
+ .remove = ad5064_spi_remove,
.id_table = ad5064_spi_ids,
};
@@ -596,14 +596,14 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
return i2c_master_send(i2c, st->data.i2c, 3);
}
-static int __devinit ad5064_i2c_probe(struct i2c_client *i2c,
+static int ad5064_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
return ad5064_probe(&i2c->dev, id->driver_data, id->name,
ad5064_i2c_write);
}
-static int __devexit ad5064_i2c_remove(struct i2c_client *i2c)
+static int ad5064_i2c_remove(struct i2c_client *i2c)
{
return ad5064_remove(&i2c->dev);
}
@@ -625,7 +625,7 @@ static struct i2c_driver ad5064_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5064_i2c_probe,
- .remove = __devexit_p(ad5064_i2c_remove),
+ .remove = ad5064_i2c_remove,
.id_table = ad5064_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 8fce84fe70b1..54b46fd3aede 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -433,7 +433,7 @@ static const char * const ad5360_vref_name[] = {
"vref0", "vref1", "vref2"
};
-static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
+static int ad5360_alloc_channels(struct iio_dev *indio_dev)
{
struct ad5360_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels;
@@ -456,7 +456,7 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad5360_probe(struct spi_device *spi)
+static int ad5360_probe(struct spi_device *spi)
{
enum ad5360_type type = spi_get_device_id(spi)->driver_data;
struct iio_dev *indio_dev;
@@ -524,7 +524,7 @@ error_free:
return ret;
}
-static int __devexit ad5360_remove(struct spi_device *spi)
+static int ad5360_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5360_state *st = iio_priv(indio_dev);
@@ -560,7 +560,7 @@ static struct spi_driver ad5360_driver = {
.owner = THIS_MODULE,
},
.probe = ad5360_probe,
- .remove = __devexit_p(ad5360_remove),
+ .remove = ad5360_remove,
.id_table = ad5360_ids,
};
module_spi_driver(ad5360_driver);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 14991ac55f26..483fc379a2da 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -338,7 +338,7 @@ static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
},
};
-static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
+static int ad5380_alloc_channels(struct iio_dev *indio_dev)
{
struct ad5380_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels;
@@ -361,8 +361,8 @@ static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
- enum ad5380_type type, const char *name)
+static int ad5380_probe(struct device *dev, struct regmap *regmap,
+ enum ad5380_type type, const char *name)
{
struct iio_dev *indio_dev;
struct ad5380_state *st;
@@ -406,7 +406,11 @@ static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
goto error_free_reg;
}
- st->vref = regulator_get_voltage(st->vref_reg);
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref = ret;
} else {
st->vref = st->chip_info->int_vref;
ctrl |= AD5380_CTRL_INT_VREF_EN;
@@ -441,7 +445,7 @@ error_out:
return ret;
}
-static int __devexit ad5380_remove(struct device *dev)
+static int ad5380_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5380_state *st = iio_priv(indio_dev);
@@ -478,7 +482,7 @@ static const struct regmap_config ad5380_regmap_config = {
#if IS_ENABLED(CONFIG_SPI_MASTER)
-static int __devinit ad5380_spi_probe(struct spi_device *spi)
+static int ad5380_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct regmap *regmap;
@@ -491,7 +495,7 @@ static int __devinit ad5380_spi_probe(struct spi_device *spi)
return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
}
-static int __devexit ad5380_spi_remove(struct spi_device *spi)
+static int ad5380_spi_remove(struct spi_device *spi)
{
return ad5380_remove(&spi->dev);
}
@@ -523,7 +527,7 @@ static struct spi_driver ad5380_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5380_spi_probe,
- .remove = __devexit_p(ad5380_spi_remove),
+ .remove = ad5380_spi_remove,
.id_table = ad5380_spi_ids,
};
@@ -552,8 +556,8 @@ static inline void ad5380_spi_unregister_driver(void)
#if IS_ENABLED(CONFIG_I2C)
-static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ad5380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct regmap *regmap;
@@ -565,7 +569,7 @@ static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
}
-static int __devexit ad5380_i2c_remove(struct i2c_client *i2c)
+static int ad5380_i2c_remove(struct i2c_client *i2c)
{
return ad5380_remove(&i2c->dev);
}
@@ -597,7 +601,7 @@ static struct i2c_driver ad5380_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5380_i2c_probe,
- .remove = __devexit_p(ad5380_i2c_remove),
+ .remove = ad5380_i2c_remove,
.id_table = ad5380_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index cdbc5bf25c31..43be948db83e 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -449,7 +449,7 @@ static const struct iio_info ad5421_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5421_probe(struct spi_device *spi)
+static int ad5421_probe(struct spi_device *spi)
{
struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
struct iio_dev *indio_dev;
@@ -516,7 +516,7 @@ error_free:
return ret;
}
-static int __devexit ad5421_remove(struct spi_device *spi)
+static int ad5421_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
@@ -534,7 +534,7 @@ static struct spi_driver ad5421_driver = {
.owner = THIS_MODULE,
},
.probe = ad5421_probe,
- .remove = __devexit_p(ad5421_remove),
+ .remove = ad5421_remove,
};
module_spi_driver(ad5421_driver);
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 3310cbbd41e7..f5583aedfb59 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -212,8 +212,8 @@ static const struct iio_info ad5446_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5446_probe(struct device *dev, const char *name,
- const struct ad5446_chip_info *chip_info)
+static int ad5446_probe(struct device *dev, const char *name,
+ const struct ad5446_chip_info *chip_info)
{
struct ad5446_state *st;
struct iio_dev *indio_dev;
@@ -226,7 +226,11 @@ static int __devinit ad5446_probe(struct device *dev, const char *name,
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
indio_dev = iio_device_alloc(sizeof(*st));
@@ -461,7 +465,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
-static int __devinit ad5446_spi_probe(struct spi_device *spi)
+static int ad5446_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -469,7 +473,7 @@ static int __devinit ad5446_spi_probe(struct spi_device *spi)
&ad5446_spi_chip_info[id->driver_data]);
}
-static int __devexit ad5446_spi_remove(struct spi_device *spi)
+static int ad5446_spi_remove(struct spi_device *spi)
{
return ad5446_remove(&spi->dev);
}
@@ -480,7 +484,7 @@ static struct spi_driver ad5446_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad5446_spi_probe,
- .remove = __devexit_p(ad5446_spi_remove),
+ .remove = ad5446_spi_remove,
.id_table = ad5446_spi_ids,
};
@@ -539,14 +543,14 @@ static const struct ad5446_chip_info ad5446_i2c_chip_info[] = {
},
};
-static int __devinit ad5446_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ad5446_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
return ad5446_probe(&i2c->dev, id->name,
&ad5446_i2c_chip_info[id->driver_data]);
}
-static int __devexit ad5446_i2c_remove(struct i2c_client *i2c)
+static int ad5446_i2c_remove(struct i2c_client *i2c)
{
return ad5446_remove(&i2c->dev);
}
@@ -568,7 +572,7 @@ static struct i2c_driver ad5446_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad5446_i2c_probe,
- .remove = __devexit_p(ad5446_i2c_remove),
+ .remove = ad5446_i2c_remove,
.id_table = ad5446_i2c_ids,
};
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
new file mode 100644
index 000000000000..c4731b7b577b
--- /dev/null
+++ b/drivers/iio/dac/ad5449.c
@@ -0,0 +1,376 @@
+/*
+ * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
+ * Converter driver.
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/platform_data/ad5449.h>
+
+#define AD5449_MAX_CHANNELS 2
+#define AD5449_MAX_VREFS 2
+
+#define AD5449_CMD_NOOP 0x0
+#define AD5449_CMD_LOAD_AND_UPDATE(x) (0x1 + (x) * 3)
+#define AD5449_CMD_READ(x) (0x2 + (x) * 3)
+#define AD5449_CMD_LOAD(x) (0x3 + (x) * 3)
+#define AD5449_CMD_CTRL 13
+
+#define AD5449_CTRL_SDO_OFFSET 10
+#define AD5449_CTRL_DAISY_CHAIN BIT(9)
+#define AD5449_CTRL_HCLR_TO_MIDSCALE BIT(8)
+#define AD5449_CTRL_SAMPLE_RISING BIT(7)
+
+/**
+ * struct ad5449_chip_info - chip specific information
+ * @channels: Channel specification
+ * @num_channels: Number of channels
+ * @has_ctrl: Chip has a control register
+ */
+struct ad5449_chip_info {
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ bool has_ctrl;
+};
+
+/**
+ * struct ad5449 - driver instance specific data
+ * @spi: the SPI device for this driver instance
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulators
+ * @has_sdo: whether the SDO line is connected
+ * @dac_cache: Cache for the DAC values
+ * @data: spi transfer buffers
+ */
+struct ad5449 {
+ struct spi_device *spi;
+ const struct ad5449_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS];
+
+ bool has_sdo;
+ uint16_t dac_cache[AD5449_MAX_CHANNELS];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data[2] ____cacheline_aligned;
+};
+
+enum ad5449_type {
+ ID_AD5426,
+ ID_AD5429,
+ ID_AD5432,
+ ID_AD5439,
+ ID_AD5443,
+ ID_AD5449,
+};
+
+static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr,
+ unsigned int val)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0] = cpu_to_be16((addr << 12) | val);
+ ret = spi_write(st->spi, st->data, 2);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
+ unsigned int *val)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+ struct spi_message msg;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0],
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1],
+ .rx_buf = &st->data[1],
+ .len = 2,
+ },
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&t[0], &msg);
+ spi_message_add_tail(&t[1], &msg);
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0] = cpu_to_be16(addr << 12);
+ st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
+
+ ret = spi_sync(st->spi, &msg);
+ if (ret < 0)
+ goto out_unlock;
+
+ *val = be16_to_cpu(st->data[1]);
+
+out_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int ad5449_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ struct regulator_bulk_data *reg;
+ int scale_uv;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (st->has_sdo) {
+ ret = ad5449_read(indio_dev,
+ AD5449_CMD_READ(chan->address), val);
+ if (ret)
+ return ret;
+ *val &= 0xfff;
+ } else {
+ *val = st->dac_cache[chan->address];
+ }
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ reg = &st->vref_reg[chan->channel];
+ scale_uv = regulator_get_voltage(reg->consumer);
+ if (scale_uv < 0)
+ return scale_uv;
+
+ *val = scale_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5449_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct ad5449 *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val >= (1 << chan->scan_type.realbits))
+ return -EINVAL;
+
+ ret = ad5449_write(indio_dev,
+ AD5449_CMD_LOAD_AND_UPDATE(chan->address),
+ val << chan->scan_type.shift);
+ if (ret == 0)
+ st->dac_cache[chan->address] = val;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5449_info = {
+ .read_raw = ad5449_read_raw,
+ .write_raw = ad5449_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD5449_CHANNEL(chan, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (chan), \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = (chan), \
+ .scan_type = IIO_ST('u', (bits), 16, 12 - (bits)), \
+}
+
+#define DECLARE_AD5449_CHANNELS(name, bits) \
+const struct iio_chan_spec name[] = { \
+ AD5449_CHANNEL(0, bits), \
+ AD5449_CHANNEL(1, bits), \
+}
+
+static DECLARE_AD5449_CHANNELS(ad5429_channels, 8);
+static DECLARE_AD5449_CHANNELS(ad5439_channels, 10);
+static DECLARE_AD5449_CHANNELS(ad5449_channels, 12);
+
+static const struct ad5449_chip_info ad5449_chip_info[] = {
+ [ID_AD5426] = {
+ .channels = ad5429_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5429] = {
+ .channels = ad5429_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+ [ID_AD5432] = {
+ .channels = ad5439_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5439] = {
+ .channels = ad5439_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+ [ID_AD5443] = {
+ .channels = ad5449_channels,
+ .num_channels = 1,
+ .has_ctrl = false,
+ },
+ [ID_AD5449] = {
+ .channels = ad5449_channels,
+ .num_channels = 2,
+ .has_ctrl = true,
+ },
+};
+
+static const char *ad5449_vref_name(struct ad5449 *st, int n)
+{
+ if (st->chip_info->num_channels == 1)
+ return "VREF";
+
+ if (n == 0)
+ return "VREFA";
+ else
+ return "VREFB";
+}
+
+static int ad5449_spi_probe(struct spi_device *spi)
+{
+ struct ad5449_platform_data *pdata = spi->dev.platform_data;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct ad5449 *st;
+ unsigned int i;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->chip_info = &ad5449_chip_info[id->driver_data];
+ st->spi = spi;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i)
+ st->vref_reg[i].supply = ad5449_vref_name(st, i);
+
+ ret = regulator_bulk_get(&spi->dev, st->chip_info->num_channels,
+ st->vref_reg);
+ if (ret)
+ goto error_free;
+
+ ret = regulator_bulk_enable(st->chip_info->num_channels, st->vref_reg);
+ if (ret)
+ goto error_free_reg;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ad5449_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ if (st->chip_info->has_ctrl) {
+ unsigned int ctrl = 0x00;
+ if (pdata) {
+ if (pdata->hardware_clear_to_midscale)
+ ctrl |= AD5449_CTRL_HCLR_TO_MIDSCALE;
+ ctrl |= pdata->sdo_mode << AD5449_CTRL_SDO_OFFSET;
+ st->has_sdo = pdata->sdo_mode != AD5449_SDO_DISABLED;
+ } else {
+ st->has_sdo = true;
+ }
+ ad5449_write(indio_dev, AD5449_CMD_CTRL, ctrl);
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
+error_free_reg:
+ regulator_bulk_free(st->chip_info->num_channels, st->vref_reg);
+error_free:
+ iio_device_free(indio_dev);
+
+ return ret;
+}
+
+static int ad5449_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5449 *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
+ regulator_bulk_free(st->chip_info->num_channels, st->vref_reg);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5449_spi_ids[] = {
+ { "ad5415", ID_AD5449 },
+ { "ad5426", ID_AD5426 },
+ { "ad5429", ID_AD5429 },
+ { "ad5432", ID_AD5432 },
+ { "ad5439", ID_AD5439 },
+ { "ad5443", ID_AD5443 },
+ { "ad5449", ID_AD5449 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5449_spi_ids);
+
+static struct spi_driver ad5449_spi_driver = {
+ .driver = {
+ .name = "ad5449",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5449_spi_probe,
+ .remove = ad5449_spi_remove,
+ .id_table = ad5449_spi_ids,
+};
+module_spi_driver(ad5449_spi_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5449 and similar DACs");
+MODULE_LICENSE("GPL v2");
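For the ad5449 driver above, every SPI transaction is a single 16-bit word: the 4-bit command sits in the top nibble and the DAC code, left-justified according to the channel's scan_type shift (12 minus the resolution), fills the rest. A worked standalone example for loading code 0xab into channel 1 of an 8-bit part such as the AD5429 (not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define AD5449_CMD_LOAD_AND_UPDATE(x)	(0x1 + (x) * 3)

int main(void)
{
	unsigned int chan = 1, code = 0xab;	/* 8-bit DAC code */
	unsigned int shift = 12 - 8;		/* scan_type.shift for an 8-bit part */
	uint16_t word = (AD5449_CMD_LOAD_AND_UPDATE(chan) << 12) |
			(code << shift);

	printf("SPI word: 0x%04x\n", word);	/* prints 0x4ab0 */
	return 0;
}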
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 242bdc7d0044..0661829f2773 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -277,7 +277,7 @@ static const struct iio_chan_spec ad5504_channels[] = {
AD5504_CHANNEL(3),
};
-static int __devinit ad5504_probe(struct spi_device *spi)
+static int ad5504_probe(struct spi_device *spi)
{
struct ad5504_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -296,7 +296,11 @@ static int __devinit ad5504_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -352,7 +356,7 @@ error_ret:
return ret;
}
-static int __devexit ad5504_remove(struct spi_device *spi)
+static int ad5504_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5504_state *st = iio_priv(indio_dev);
@@ -383,7 +387,7 @@ static struct spi_driver ad5504_driver = {
.owner = THIS_MODULE,
},
.probe = ad5504_probe,
- .remove = __devexit_p(ad5504_remove),
+ .remove = ad5504_remove,
.id_table = ad5504_id,
};
module_spi_driver(ad5504_driver);
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 6a7d6a48cc6d..f6e116627b71 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -220,7 +220,7 @@ static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
},
};
-static int __devinit ad5624r_probe(struct spi_device *spi)
+static int ad5624r_probe(struct spi_device *spi)
{
struct ad5624r_state *st;
struct iio_dev *indio_dev;
@@ -238,7 +238,11 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -282,7 +286,7 @@ error_ret:
return ret;
}
-static int __devexit ad5624r_remove(struct spi_device *spi)
+static int ad5624r_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5624r_state *st = iio_priv(indio_dev);
@@ -314,7 +318,7 @@ static struct spi_driver ad5624r_driver = {
.owner = THIS_MODULE,
},
.probe = ad5624r_probe,
- .remove = __devexit_p(ad5624r_remove),
+ .remove = ad5624r_remove,
.id_table = ad5624r_id,
};
module_spi_driver(ad5624r_driver);
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 6948d75e1036..ca9609d7a15c 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -188,7 +188,7 @@ static ssize_t ad5686_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- if (readin == true)
+ if (readin)
st->pwr_down_mask |= (0x3 << (chan->channel * 2));
else
st->pwr_down_mask &= ~(0x3 << (chan->channel * 2));
@@ -313,7 +313,7 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
};
-static int __devinit ad5686_probe(struct spi_device *spi)
+static int ad5686_probe(struct spi_device *spi)
{
struct ad5686_state *st;
struct iio_dev *indio_dev;
@@ -332,7 +332,11 @@ static int __devinit ad5686_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
st->chip_info =
@@ -379,7 +383,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad5686_remove(struct spi_device *spi)
+static int ad5686_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5686_state *st = iio_priv(indio_dev);
@@ -408,7 +412,7 @@ static struct spi_driver ad5686_driver = {
.owner = THIS_MODULE,
},
.probe = ad5686_probe,
- .remove = __devexit_p(ad5686_remove),
+ .remove = ad5686_remove,
.id_table = ad5686_id,
};
module_spi_driver(ad5686_driver);
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 5db3506034c5..0869bbd27d30 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -447,8 +447,8 @@ static bool ad5755_is_valid_mode(struct ad5755_state *st, enum ad5755_mode mode)
}
}
-static int __devinit ad5755_setup_pdata(struct iio_dev *indio_dev,
- const struct ad5755_platform_data *pdata)
+static int ad5755_setup_pdata(struct iio_dev *indio_dev,
+ const struct ad5755_platform_data *pdata)
{
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int val;
@@ -503,7 +503,7 @@ static int __devinit ad5755_setup_pdata(struct iio_dev *indio_dev,
return 0;
}
-static bool __devinit ad5755_is_voltage_mode(enum ad5755_mode mode)
+static bool ad5755_is_voltage_mode(enum ad5755_mode mode)
{
switch (mode) {
case AD5755_MODE_VOLTAGE_0V_5V:
@@ -516,8 +516,8 @@ static bool __devinit ad5755_is_voltage_mode(enum ad5755_mode mode)
}
}
-static int __devinit ad5755_init_channels(struct iio_dev *indio_dev,
- const struct ad5755_platform_data *pdata)
+static int ad5755_init_channels(struct iio_dev *indio_dev,
+ const struct ad5755_platform_data *pdata)
{
struct ad5755_state *st = iio_priv(indio_dev);
struct iio_chan_spec *channels = st->channels;
@@ -562,7 +562,7 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
-static int __devinit ad5755_probe(struct spi_device *spi)
+static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -614,7 +614,7 @@ error_free:
return ret;
}
-static int __devexit ad5755_remove(struct spi_device *spi)
+static int ad5755_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
@@ -640,7 +640,7 @@ static struct spi_driver ad5755_driver = {
.owner = THIS_MODULE,
},
.probe = ad5755_probe,
- .remove = __devexit_p(ad5755_remove),
+ .remove = ad5755_remove,
.id_table = ad5755_id,
};
module_spi_driver(ad5755_driver);
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index ffce30447445..7f9045e6daa4 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -273,7 +273,7 @@ static const struct iio_info ad5764_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5764_probe(struct spi_device *spi)
+static int ad5764_probe(struct spi_device *spi)
{
enum ad5764_type type = spi_get_device_id(spi)->driver_data;
struct iio_dev *indio_dev;
@@ -340,7 +340,7 @@ error_free:
return ret;
}
-static int __devexit ad5764_remove(struct spi_device *spi)
+static int ad5764_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5764_state *st = iio_priv(indio_dev);
@@ -372,7 +372,7 @@ static struct spi_driver ad5764_driver = {
.owner = THIS_MODULE,
},
.probe = ad5764_probe,
- .remove = __devexit_p(ad5764_remove),
+ .remove = ad5764_remove,
.id_table = ad5764_ids,
};
module_spi_driver(ad5764_driver);
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 2bd2e37280ff..6407b5407ddd 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -346,7 +346,7 @@ static const struct iio_info ad5791_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit ad5791_probe(struct spi_device *spi)
+static int ad5791_probe(struct spi_device *spi)
{
struct ad5791_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -365,7 +365,11 @@ static int __devinit ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+ ret = regulator_get_voltage(st->reg_vdd);
+ if (ret < 0)
+ goto error_disable_reg_pos;
+
+ pos_voltage_uv = ret;
}
st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +378,11 @@ static int __devinit ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+ ret = regulator_get_voltage(st->reg_vss);
+ if (ret < 0)
+ goto error_disable_reg_neg;
+
+ neg_voltage_uv = ret;
}
st->pwr_down = true;
@@ -428,6 +436,7 @@ error_put_reg_neg:
if (!IS_ERR(st->reg_vss))
regulator_put(st->reg_vss);
+error_disable_reg_pos:
if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
error_put_reg_pos:
@@ -439,7 +448,7 @@ error_ret:
return ret;
}
-static int __devexit ad5791_remove(struct spi_device *spi)
+static int ad5791_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad5791_state *st = iio_priv(indio_dev);
@@ -475,7 +484,7 @@ static struct spi_driver ad5791_driver = {
.owner = THIS_MODULE,
},
.probe = ad5791_probe,
- .remove = __devexit_p(ad5791_remove),
+ .remove = ad5791_remove,
.id_table = ad5791_id,
};
module_spi_driver(ad5791_driver);
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index c3d748c25939..352abe2004a4 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -156,7 +156,7 @@ static const struct iio_chan_spec max517_channels[] = {
MAX517_CHANNEL(1)
};
-static int __devinit max517_probe(struct i2c_client *client,
+static int max517_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max517_data *data;
@@ -210,7 +210,7 @@ exit:
return err;
}
-static int __devexit max517_remove(struct i2c_client *client)
+static int max517_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
iio_device_free(i2c_get_clientdata(client));
@@ -232,7 +232,7 @@ static struct i2c_driver max517_driver = {
.pm = MAX517_PM_OPS,
},
.probe = max517_probe,
- .remove = __devexit_p(max517_remove),
+ .remove = max517_remove,
.id_table = max517_id,
};
module_i2c_driver(max517_driver);
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index e0e168bd5b45..8f88cc4059a2 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -141,8 +141,8 @@ static const struct iio_info mcp4725_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit mcp4725_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mcp4725_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct mcp4725_data *data;
struct iio_dev *indio_dev;
@@ -195,7 +195,7 @@ exit:
return err;
}
-static int __devexit mcp4725_remove(struct i2c_client *client)
+static int mcp4725_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -217,7 +217,7 @@ static struct i2c_driver mcp4725_driver = {
.pm = MCP4725_PM_OPS,
},
.probe = mcp4725_probe,
- .remove = __devexit_p(mcp4725_remove),
+ .remove = mcp4725_remove,
.id_table = mcp4725_id,
};
module_i2c_driver(mcp4725_driver);
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index b737c64a402d..80307473e3a9 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -959,7 +959,7 @@ static int ad9523_setup(struct iio_dev *indio_dev)
return 0;
}
-static int __devinit ad9523_probe(struct spi_device *spi)
+static int ad9523_probe(struct spi_device *spi)
{
struct ad9523_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -1020,7 +1020,7 @@ error_put_reg:
return ret;
}
-static int __devexit ad9523_remove(struct spi_device *spi)
+static int ad9523_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad9523_state *st = iio_priv(indio_dev);
@@ -1049,7 +1049,7 @@ static struct spi_driver ad9523_driver = {
.owner = THIS_MODULE,
},
.probe = ad9523_probe,
- .remove = __devexit_p(ad9523_remove),
+ .remove = ad9523_remove,
.id_table = ad9523_id,
};
module_spi_driver(ad9523_driver);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index e35bb8f6fe75..a884252ac66b 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
} while (r_cnt == 0);
- tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+ tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
st->r0_fract = do_div(tmp, st->r1_mod);
st->r0_int = tmp;
@@ -355,7 +355,7 @@ static const struct iio_info adf4350_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit adf4350_probe(struct spi_device *spi)
+static int adf4350_probe(struct spi_device *spi)
{
struct adf4350_platform_data *pdata = spi->dev.platform_data;
struct iio_dev *indio_dev;
@@ -440,7 +440,7 @@ error_put_reg:
return ret;
}
-static int __devexit adf4350_remove(struct spi_device *spi)
+static int adf4350_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct adf4350_state *st = iio_priv(indio_dev);
@@ -476,7 +476,7 @@ static struct spi_driver adf4350_driver = {
.owner = THIS_MODULE,
},
.probe = adf4350_probe,
- .remove = __devexit_p(adf4350_remove),
+ .remove = adf4350_remove,
.id_table = adf4350_id,
};
module_spi_driver(adf4350_driver);
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 21e27e2fc68c..96b68f63a902 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -3,11 +3,21 @@
#
menu "Digital gyroscope sensors"
+config ADIS16136
+ tristate "Analog devices ADIS16136 and similar gyroscopes driver"
+ depends on SPI_MASTER
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for the Analog Devices ADIS16133, ADIS16135,
+ ADIS16136 gyroscope devices.
+
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 8a895d9fcbce..702a058907e3 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -2,4 +2,5 @@
# Makefile for industrial I/O gyroscope sensor drivers
#
+obj-$(CONFIG_ADIS16136) += adis16136.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
new file mode 100644
index 000000000000..8cb0bcbfd609
--- /dev/null
+++ b/drivers/iio/gyro/adis16136.c
@@ -0,0 +1,580 @@
+/*
+ * ADIS16133/ADIS16135/ADIS16136 gyroscope driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/debugfs.h>
+
+#define ADIS16136_REG_FLASH_CNT 0x00
+#define ADIS16136_REG_TEMP_OUT 0x02
+#define ADIS16136_REG_GYRO_OUT2 0x04
+#define ADIS16136_REG_GYRO_OUT 0x06
+#define ADIS16136_REG_GYRO_OFF2 0x08
+#define ADIS16136_REG_GYRO_OFF 0x0A
+#define ADIS16136_REG_ALM_MAG1 0x10
+#define ADIS16136_REG_ALM_MAG2 0x12
+#define ADIS16136_REG_ALM_SAMPL1 0x14
+#define ADIS16136_REG_ALM_SAMPL2 0x16
+#define ADIS16136_REG_ALM_CTRL 0x18
+#define ADIS16136_REG_GPIO_CTRL 0x1A
+#define ADIS16136_REG_MSC_CTRL 0x1C
+#define ADIS16136_REG_SMPL_PRD 0x1E
+#define ADIS16136_REG_AVG_CNT 0x20
+#define ADIS16136_REG_DEC_RATE 0x22
+#define ADIS16136_REG_SLP_CTRL 0x24
+#define ADIS16136_REG_DIAG_STAT 0x26
+#define ADIS16136_REG_GLOB_CMD 0x28
+#define ADIS16136_REG_LOT1 0x32
+#define ADIS16136_REG_LOT2 0x34
+#define ADIS16136_REG_LOT3 0x36
+#define ADIS16136_REG_PROD_ID 0x38
+#define ADIS16136_REG_SERIAL_NUM 0x3A
+
+#define ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL 2
+#define ADIS16136_DIAG_STAT_SPI_FAIL 3
+#define ADIS16136_DIAG_STAT_SELF_TEST_FAIL 5
+#define ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL 6
+
+#define ADIS16136_MSC_CTRL_MEMORY_TEST BIT(11)
+#define ADIS16136_MSC_CTRL_SELF_TEST BIT(10)
+
+struct adis16136_chip_info {
+ unsigned int precision;
+ unsigned int fullscale;
+};
+
+struct adis16136 {
+ const struct adis16136_chip_info *chip_info;
+
+ struct adis adis;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t adis16136_show_serial(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16136 *adis16136 = file->private_data;
+ uint16_t lot1, lot2, lot3, serial;
+ char buf[20];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SERIAL_NUM,
+ &serial);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT1, &lot1);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT2, &lot2);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT3, &lot3);
+ if (ret < 0)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.4x%.4x%.4x-%.4x\n", lot1, lot2,
+ lot3, serial);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16136_serial_fops = {
+ .open = simple_open,
+ .read = adis16136_show_serial,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16136_show_product_id(void *arg, u64 *val)
+{
+ struct adis16136 *adis16136 = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
+ &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16136_product_id_fops,
+ adis16136_show_product_id, NULL, "%llu\n");
+
+static int adis16136_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16136 *adis16136 = arg;
+ uint16_t flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_FLASH_CNT,
+ &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16136_flash_count_fops,
+ adis16136_show_flash_count, NULL, "%lld\n");
+
+static int adis16136_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_serial_fops);
+ debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ adis16136, &adis16136_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+static int adis16136_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+static int adis16136_set_freq(struct adis16136 *adis16136, unsigned int freq)
+{
+ unsigned int t;
+
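+ /* The device samples at 32768 / (SMPL_PRD + 1) Hz; clamp the divider to the register limits */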
+ t = 32768 / freq;
+ if (t < 0xf)
+ t = 0xf;
+ else if (t > 0xffff)
+ t = 0xffff;
+ else
+ t--;
+
+ return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, t);
+}
+
+static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
+{
+ uint16_t t;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
+ *freq = 32768 / (t + 1);
+
+ return 0;
+}
+
+static ssize_t adis16136_write_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val == 0)
+ return -EINVAL;
+
+ ret = adis16136_set_freq(adis16136, val);
+
+ return ret ? ret : len;
+}
+
+static ssize_t adis16136_read_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16136_read_frequency,
+ adis16136_write_frequency);
+
+static const unsigned adis16136_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 6,
+ [2] = 12,
+ [3] = 25,
+ [4] = 50,
+ [5] = 100,
+ [6] = 200,
+ [7] = 200, /* Not a valid setting */
+};
+
+static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ int i, ret;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ return ret;
+
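+ /* Pick the largest divisor whose 3dB frequency is still at or above the requested value */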
+ for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
+ if (freq / adis16136_3db_divisors[i] >= val)
+ break;
+ }
+
+ return adis_write_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, i);
+}
+
+static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int freq;
+ uint16_t val16;
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = adis16136_get_freq(adis16136, &freq);
+ if (ret < 0)
+ goto err_unlock;
+
+ *val = freq / adis16136_3db_divisors[val16 & 0x07];
+
+err_unlock:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : IIO_VAL_INT;
+}
+
+static int adis16136_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ uint32_t val32;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = adis16136->chip_info->precision;
+ *val2 = (adis16136->chip_info->fullscale << 16);
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = 10;
+ *val2 = 697000; /* 0.010697 degree Celsius */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&adis16136->adis,
+ ADIS16136_REG_GYRO_OFF2, &val32);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(val32, 31);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16136_get_filter(indio_dev, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16136_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&adis16136->adis,
+ ADIS16136_REG_GYRO_OFF2, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16136_set_filter(indio_dev, val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+enum {
+ ADIS16136_SCAN_GYRO,
+ ADIS16136_SCAN_TEMP,
+};
+
+static const struct iio_chan_spec adis16136_channels[] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT,
+ .address = ADIS16136_REG_GYRO_OUT2,
+ .scan_index = ADIS16136_SCAN_GYRO,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ .address = ADIS16136_REG_TEMP_OUT,
+ .scan_index = ADIS16136_SCAN_TEMP,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static struct attribute *adis16136_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16136_attribute_group = {
+ .attrs = adis16136_attributes,
+};
+
+static const struct iio_info adis16136_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &adis16136_attribute_group,
+ .read_raw = &adis16136_read_raw,
+ .write_raw = &adis16136_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static int adis16136_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ int ret;
+
+ ret = adis_write_reg_16(&adis16136->adis, ADIS16136_REG_SLP_CTRL, 0xff);
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "Could not power down device: %d\n", ret);
+
+ return ret;
+}
+
+static int adis16136_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+ unsigned int device_id;
+ uint16_t prod_id;
+ int ret;
+
+ ret = adis_initial_startup(&adis16136->adis);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
+ &prod_id);
+ if (ret)
+ return ret;
+
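+ /* The numeric part of the device name is expected to match PROD_ID */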
+ sscanf(indio_dev->name, "adis%u\n", &device_id);
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+ device_id, prod_id);
+
+ return 0;
+}
+
+static const char * const adis16136_status_error_msgs[] = {
+ [ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL] = "Flash update failed",
+ [ADIS16136_DIAG_STAT_SPI_FAIL] = "SPI failure",
+ [ADIS16136_DIAG_STAT_SELF_TEST_FAIL] = "Self test error",
+ [ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL] = "Flash checksum error",
+};
+
+static const struct adis_data adis16136_data = {
+ .diag_stat_reg = ADIS16136_REG_DIAG_STAT,
+ .glob_cmd_reg = ADIS16136_REG_GLOB_CMD,
+ .msc_ctrl_reg = ADIS16136_REG_MSC_CTRL,
+
+ .self_test_mask = ADIS16136_MSC_CTRL_SELF_TEST,
+ .startup_delay = 80,
+
+ .read_delay = 10,
+ .write_delay = 10,
+
+ .status_error_msgs = adis16136_status_error_msgs,
+ .status_error_mask = BIT(ADIS16136_DIAG_STAT_FLASH_UPDATE_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_SPI_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_SELF_TEST_FAIL) |
+ BIT(ADIS16136_DIAG_STAT_FLASH_CHKSUM_FAIL),
+};
+
+enum adis16136_id {
+ ID_ADIS16133,
+ ID_ADIS16135,
+ ID_ADIS16136,
+};
+
+static const struct adis16136_chip_info adis16136_chip_info[] = {
+ [ID_ADIS16133] = {
+ .precision = IIO_DEGREE_TO_RAD(1200),
+ .fullscale = 24000,
+ },
+ [ID_ADIS16135] = {
+ .precision = IIO_DEGREE_TO_RAD(300),
+ .fullscale = 24000,
+ },
+ [ID_ADIS16136] = {
+ .precision = IIO_DEGREE_TO_RAD(450),
+ .fullscale = 24623,
+ },
+};
+
+static int adis16136_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct adis16136 *adis16136;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*adis16136));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ adis16136 = iio_priv(indio_dev);
+
+ adis16136->chip_info = &adis16136_chip_info[id->driver_data];
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = adis16136_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16136_channels);
+ indio_dev->info = &adis16136_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis_setup_buffer_and_trigger(&adis16136->adis, indio_dev, NULL);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis16136_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_stop_device;
+
+ adis16136_debugfs_init(indio_dev);
+
+ return 0;
+
+error_stop_device:
+ adis16136_stop_device(indio_dev);
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int adis16136_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16136 *adis16136 = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16136_stop_device(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16136_ids[] = {
+ { "adis16133", ID_ADIS16133 },
+ { "adis16135", ID_ADIS16135 },
+ { "adis16136", ID_ADIS16136 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16136_ids);
+
+static struct spi_driver adis16136_driver = {
+ .driver = {
+ .name = "adis16136",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16136_ids,
+ .probe = adis16136_probe,
+ .remove = adis16136_remove,
+};
+module_spi_driver(adis16136_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16133/ADIS16135/ADIS16136 gyroscope driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 4c56ada51c39..06e7cc35450c 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -197,21 +197,8 @@ static const struct iio_info gyro_3d_info = {
/* Function to push data to buffer */
static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
{
- struct iio_buffer *buffer = indio_dev->buffer;
- int datum_sz;
-
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- if (!buffer) {
- dev_err(&indio_dev->dev, "Buffer == NULL\n");
- return;
- }
- datum_sz = buffer->access->get_bytes_per_datum(buffer);
- if (len > datum_sz) {
- dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
- datum_sz);
- return;
- }
- iio_push_to_buffer(buffer, (u8 *)data);
+ iio_push_to_buffers(indio_dev, (u8 *)data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -291,7 +278,7 @@ static int gyro_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_gyro_3d_probe(struct platform_device *pdev)
+static int hid_gyro_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "gyro_3d";
@@ -319,10 +306,10 @@ static int __devinit hid_gyro_3d_probe(struct platform_device *pdev)
goto error_free_dev;
}
- channels = kmemdup(gyro_3d_channels,
- sizeof(gyro_3d_channels),
- GFP_KERNEL);
+ channels = kmemdup(gyro_3d_channels, sizeof(gyro_3d_channels),
+ GFP_KERNEL);
if (!channels) {
+ ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
goto error_free_dev;
}
@@ -388,7 +375,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_gyro_3d_remove(struct platform_device *pdev)
+static int hid_gyro_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
new file mode 100644
index 000000000000..3d79a40e916b
--- /dev/null
+++ b/drivers/iio/imu/Kconfig
@@ -0,0 +1,27 @@
+#
+# IIO imu drivers configuration
+#
+menu "Inertial measurement units"
+
+config ADIS16480
+ tristate "Analog Devices ADIS16480 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
+ ADIS16485, ADIS16488 inertial sensors.
+
+endmenu
+
+config IIO_ADIS_LIB
+ tristate
+ help
+ A set of I/O helper functions for the Analog Devices ADIS* device family.
+
+config IIO_ADIS_LIB_BUFFER
+ bool
+ select IIO_TRIGGERED_BUFFER
+ help
+ A set of buffer helper functions for the Analog Devices ADIS* device
+ family.
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
new file mode 100644
index 000000000000..cfe57638f6f9
--- /dev/null
+++ b/drivers/iio/imu/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Inertial Measurement Units
+#
+
+obj-$(CONFIG_ADIS16480) += adis16480.o
+
+adis_lib-y += adis.o
+adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
+adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
+obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
new file mode 100644
index 000000000000..911255d41c1a
--- /dev/null
+++ b/drivers/iio/imu/adis.c
@@ -0,0 +1,440 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS_MSC_CTRL_DATA_RDY_EN BIT(2)
+#define ADIS_MSC_CTRL_DATA_RDY_POL_HIGH BIT(1)
+#define ADIS_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
+#define ADIS_GLOB_CMD_SW_RESET BIT(7)
+
+int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int value, unsigned int size)
+{
+ unsigned int page = reg / ADIS_PAGE_SIZE;
+ int ret, i;
+ struct spi_message msg;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = adis->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 4,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 6,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 8,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->write_delay,
+ },
+ };
+
+ mutex_lock(&adis->txrx_lock);
+
+ spi_message_init(&msg);
+
+ if (adis->current_page != page) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = page;
+ spi_message_add_tail(&xfers[0], &msg);
+ }
+
+ switch (size) {
+ case 4:
+ adis->tx[8] = ADIS_WRITE_REG(reg + 3);
+ adis->tx[9] = (value >> 24) & 0xff;
+ adis->tx[6] = ADIS_WRITE_REG(reg + 2);
+ adis->tx[7] = (value >> 16) & 0xff;
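+ /* fall through - the lower bytes are always written */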
+ case 2:
+ adis->tx[4] = ADIS_WRITE_REG(reg + 1);
+ adis->tx[5] = (value >> 8) & 0xff;
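+ /* fall through */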
+ case 1:
+ adis->tx[2] = ADIS_WRITE_REG(reg);
+ adis->tx[3] = value & 0xff;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ xfers[size].cs_change = 0;
+
+ for (i = 1; i <= size; i++)
+ spi_message_add_tail(&xfers[i], &msg);
+
+ ret = spi_sync(adis->spi, &msg);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to write register 0x%02X: %d\n",
+ reg, ret);
+ } else {
+ adis->current_page = page;
+ }
+
+out_unlock:
+ mutex_unlock(&adis->txrx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_write_reg);
+
+/**
+ * adis_read_reg() - read a register value from the device
+ * @adis: The adis device
+ * @reg: The address of the lowest register of the value
+ * @val: The value read back from the device
+ * @size: Size of the register value in bytes (2 or 4)
+ */
+int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size)
+{
+ unsigned int page = reg / ADIS_PAGE_SIZE;
+ struct spi_message msg;
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = adis->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->write_delay,
+ }, {
+ .tx_buf = adis->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->read_delay,
+ }, {
+ .tx_buf = adis->tx + 4,
+ .rx_buf = adis->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = adis->data->read_delay,
+ }, {
+ .rx_buf = adis->rx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .delay_usecs = adis->data->read_delay,
+ },
+ };
+
+ mutex_lock(&adis->txrx_lock);
+ spi_message_init(&msg);
+
+ if (adis->current_page != page) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = page;
+ spi_message_add_tail(&xfers[0], &msg);
+ }
+
+ switch (size) {
+ case 4:
+ adis->tx[2] = ADIS_READ_REG(reg + 2);
+ adis->tx[3] = 0;
+ spi_message_add_tail(&xfers[1], &msg);
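+ /* fall through - the low word is always read */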
+ case 2:
+ adis->tx[4] = ADIS_READ_REG(reg);
+ adis->tx[5] = 0;
+ spi_message_add_tail(&xfers[2], &msg);
+ spi_message_add_tail(&xfers[3], &msg);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = spi_sync(adis->spi, &msg);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to read register 0x%02X: %d\n",
+ reg, ret);
+ goto out_unlock;
+ } else {
+ adis->current_page = page;
+ }
+
+ switch (size) {
+ case 4:
+ *val = get_unaligned_be32(adis->rx);
+ break;
+ case 2:
+ *val = get_unaligned_be16(adis->rx + 2);
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&adis->txrx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_read_reg);
+
+#ifdef CONFIG_DEBUG_FS
+
+int adis_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval, unsigned int *readval)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+
+ if (readval) {
+ uint16_t val16;
+ int ret;
+
+ ret = adis_read_reg_16(adis, reg, &val16);
+ *readval = val16;
+
+ return ret;
+ } else {
+ return adis_write_reg_16(adis, reg, writeval);
+ }
+}
+EXPORT_SYMBOL(adis_debugfs_reg_access);
+
+#endif
+
+/**
+ * adis_enable_irq() - Enable or disable data ready IRQ
+ * @adis: The adis device
+ * @enable: Whether to enable the IRQ
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+int adis_enable_irq(struct adis *adis, bool enable)
+{
+ int ret = 0;
+ uint16_t msc;
+
+ if (adis->data->enable_irq)
+ return adis->data->enable_irq(adis, enable);
+
+ ret = adis_read_reg_16(adis, adis->data->msc_ctrl_reg, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS_MSC_CTRL_DATA_RDY_POL_HIGH;
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg, msc);
+
+error_ret:
+ return ret;
+}
+EXPORT_SYMBOL(adis_enable_irq);
+
+/**
+ * adis_check_status() - Check the device for error conditions
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int adis_check_status(struct adis *adis)
+{
+ uint16_t status;
+ int ret;
+ int i;
+
+ ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ if (ret < 0)
+ return ret;
+
+ status &= adis->data->status_error_mask;
+
+ if (status == 0)
+ return 0;
+
+ for (i = 0; i < 16; ++i) {
+ if (status & BIT(i)) {
+ dev_err(&adis->spi->dev, "%s.\n",
+ adis->data->status_error_msgs[i]);
+ }
+ }
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(adis_check_status);
+
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int adis_reset(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_write_reg_8(adis, adis->data->glob_cmd_reg,
+ ADIS_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to reset device: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_reset);
+
+static int adis_self_test(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
+ adis->data->self_test_mask);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Failed to initiate self test: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(adis->data->startup_delay);
+
+ return adis_check_status(adis);
+}
+
+/**
+ * adis_initial_startup() - Performs device self-test
+ * @adis: The adis device
+ *
+ * Returns 0 if the device is operational, a negative error code otherwise.
+ *
+ * This function should be called early on in the device initialization sequence
+ * to ensure that the device is in a sane and known state and that it is usable.
+ */
+int adis_initial_startup(struct adis *adis)
+{
+ int ret;
+
+ ret = adis_self_test(adis);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Self-test failed, trying reset.\n");
+ adis_reset(adis);
+ msleep(adis->data->startup_delay);
+ ret = adis_self_test(adis);
+ if (ret) {
+ dev_err(&adis->spi->dev, "Second self-test failed, giving up.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adis_initial_startup);
+
+/**
+ * adis_single_conversion() - Performs a single sample conversion
+ * @indio_dev: The IIO device
+ * @chan: The IIO channel
+ * @error_mask: Mask for the error bit
+ * @val: Result of the conversion
+ *
+ * Returns IIO_VAL_INT on success, a negative error code otherwise.
+ *
+ * The function performs a single conversion on a given channel and
+ * post-processes the value according to the channel spec. If an error_mask is
+ * given, the function checks whether the mask is set in the returned raw value;
+ * if it is, a device status check is performed. If the device does not report
+ * an error bit in the channel's raw value, set error_mask to 0.
+ */
+int adis_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int error_mask, int *val)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ unsigned int uval;
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis_read_reg(adis, chan->address, &uval,
+ chan->scan_type.storagebits / 8);
+ if (ret)
+ goto err_unlock;
+
+ if (uval & error_mask) {
+ ret = adis_check_status(adis);
+ if (ret)
+ goto err_unlock;
+ }
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(uval, chan->scan_type.realbits - 1);
+ else
+ *val = uval & ((1 << chan->scan_type.realbits) - 1);
+
+ ret = IIO_VAL_INT;
+err_unlock:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_single_conversion);
+
+/**
+ * adis_init() - Initialize adis device structure
+ * @adis: The adis device
+ * @indio_dev: The iio device
+ * @spi: The spi device
+ * @data: Chip specific data
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function must be called before any other adis helper function may be
+ * called.
+ */
+int adis_init(struct adis *adis, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct adis_data *data)
+{
+ mutex_init(&adis->txrx_lock);
+ adis->spi = spi;
+ adis->data = data;
+ iio_device_set_drvdata(indio_dev, adis);
+
+ if (data->has_paging) {
+ /* Need to set the page before first read/write */
+ adis->current_page = -1;
+ } else {
+ /* Page will always be 0 */
+ adis->current_page = 0;
+ }
+
+ return adis_enable_irq(adis, false);
+}
+EXPORT_SYMBOL_GPL(adis_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Common library code for ADIS16XXX devices");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
new file mode 100644
index 000000000000..8c26a5f7cd5d
--- /dev/null
+++ b/drivers/iio/imu/adis16480.c
@@ -0,0 +1,924 @@
+/*
+ * ADIS16480 and similar IMUs driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#include <linux/debugfs.h>
+
+#define ADIS16480_PAGE_SIZE 0x80
+
+#define ADIS16480_REG(page, reg) ((page) * ADIS16480_PAGE_SIZE + (reg))
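+/* Register addresses encode the page so the adis core can select the right page automatically */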
+
+#define ADIS16480_REG_PAGE_ID 0x00 /* Same address on each page */
+#define ADIS16480_REG_SEQ_CNT ADIS16480_REG(0x00, 0x06)
+#define ADIS16480_REG_SYS_E_FLA ADIS16480_REG(0x00, 0x08)
+#define ADIS16480_REG_DIAG_STS ADIS16480_REG(0x00, 0x0A)
+#define ADIS16480_REG_ALM_STS ADIS16480_REG(0x00, 0x0C)
+#define ADIS16480_REG_TEMP_OUT ADIS16480_REG(0x00, 0x0E)
+#define ADIS16480_REG_X_GYRO_OUT ADIS16480_REG(0x00, 0x10)
+#define ADIS16480_REG_Y_GYRO_OUT ADIS16480_REG(0x00, 0x14)
+#define ADIS16480_REG_Z_GYRO_OUT ADIS16480_REG(0x00, 0x18)
+#define ADIS16480_REG_X_ACCEL_OUT ADIS16480_REG(0x00, 0x1C)
+#define ADIS16480_REG_Y_ACCEL_OUT ADIS16480_REG(0x00, 0x20)
+#define ADIS16480_REG_Z_ACCEL_OUT ADIS16480_REG(0x00, 0x24)
+#define ADIS16480_REG_X_MAGN_OUT ADIS16480_REG(0x00, 0x28)
+#define ADIS16480_REG_Y_MAGN_OUT ADIS16480_REG(0x00, 0x2A)
+#define ADIS16480_REG_Z_MAGN_OUT ADIS16480_REG(0x00, 0x2C)
+#define ADIS16480_REG_BAROM_OUT ADIS16480_REG(0x00, 0x2E)
+#define ADIS16480_REG_X_DELTAANG_OUT ADIS16480_REG(0x00, 0x40)
+#define ADIS16480_REG_Y_DELTAANG_OUT ADIS16480_REG(0x00, 0x44)
+#define ADIS16480_REG_Z_DELTAANG_OUT ADIS16480_REG(0x00, 0x48)
+#define ADIS16480_REG_X_DELTAVEL_OUT ADIS16480_REG(0x00, 0x4C)
+#define ADIS16480_REG_Y_DELTAVEL_OUT ADIS16480_REG(0x00, 0x50)
+#define ADIS16480_REG_Z_DELTAVEL_OUT ADIS16480_REG(0x00, 0x54)
+#define ADIS16480_REG_PROD_ID ADIS16480_REG(0x00, 0x7E)
+
+#define ADIS16480_REG_X_GYRO_SCALE ADIS16480_REG(0x02, 0x04)
+#define ADIS16480_REG_Y_GYRO_SCALE ADIS16480_REG(0x02, 0x06)
+#define ADIS16480_REG_Z_GYRO_SCALE ADIS16480_REG(0x02, 0x08)
+#define ADIS16480_REG_X_ACCEL_SCALE ADIS16480_REG(0x02, 0x0A)
+#define ADIS16480_REG_Y_ACCEL_SCALE ADIS16480_REG(0x02, 0x0C)
+#define ADIS16480_REG_Z_ACCEL_SCALE ADIS16480_REG(0x02, 0x0E)
+#define ADIS16480_REG_X_GYRO_BIAS ADIS16480_REG(0x02, 0x10)
+#define ADIS16480_REG_Y_GYRO_BIAS ADIS16480_REG(0x02, 0x14)
+#define ADIS16480_REG_Z_GYRO_BIAS ADIS16480_REG(0x02, 0x18)
+#define ADIS16480_REG_X_ACCEL_BIAS ADIS16480_REG(0x02, 0x1C)
+#define ADIS16480_REG_Y_ACCEL_BIAS ADIS16480_REG(0x02, 0x20)
+#define ADIS16480_REG_Z_ACCEL_BIAS ADIS16480_REG(0x02, 0x24)
+#define ADIS16480_REG_X_HARD_IRON ADIS16480_REG(0x02, 0x28)
+#define ADIS16480_REG_Y_HARD_IRON ADIS16480_REG(0x02, 0x2A)
+#define ADIS16480_REG_Z_HARD_IRON ADIS16480_REG(0x02, 0x2C)
+#define ADIS16480_REG_BAROM_BIAS ADIS16480_REG(0x02, 0x40)
+#define ADIS16480_REG_FLASH_CNT ADIS16480_REG(0x02, 0x7C)
+
+#define ADIS16480_REG_GLOB_CMD ADIS16480_REG(0x03, 0x02)
+#define ADIS16480_REG_FNCTIO_CTRL ADIS16480_REG(0x03, 0x06)
+#define ADIS16480_REG_GPIO_CTRL ADIS16480_REG(0x03, 0x08)
+#define ADIS16480_REG_CONFIG ADIS16480_REG(0x03, 0x0A)
+#define ADIS16480_REG_DEC_RATE ADIS16480_REG(0x03, 0x0C)
+#define ADIS16480_REG_SLP_CNT ADIS16480_REG(0x03, 0x10)
+#define ADIS16480_REG_FILTER_BNK0 ADIS16480_REG(0x03, 0x16)
+#define ADIS16480_REG_FILTER_BNK1 ADIS16480_REG(0x03, 0x18)
+#define ADIS16480_REG_ALM_CNFG0 ADIS16480_REG(0x03, 0x20)
+#define ADIS16480_REG_ALM_CNFG1 ADIS16480_REG(0x03, 0x22)
+#define ADIS16480_REG_ALM_CNFG2 ADIS16480_REG(0x03, 0x24)
+#define ADIS16480_REG_XG_ALM_MAGN ADIS16480_REG(0x03, 0x28)
+#define ADIS16480_REG_YG_ALM_MAGN ADIS16480_REG(0x03, 0x2A)
+#define ADIS16480_REG_ZG_ALM_MAGN ADIS16480_REG(0x03, 0x2C)
+#define ADIS16480_REG_XA_ALM_MAGN ADIS16480_REG(0x03, 0x2E)
+#define ADIS16480_REG_YA_ALM_MAGN ADIS16480_REG(0x03, 0x30)
+#define ADIS16480_REG_ZA_ALM_MAGN ADIS16480_REG(0x03, 0x32)
+#define ADIS16480_REG_XM_ALM_MAGN ADIS16480_REG(0x03, 0x34)
+#define ADIS16480_REG_YM_ALM_MAGN ADIS16480_REG(0x03, 0x36)
+#define ADIS16480_REG_ZM_ALM_MAGN ADIS16480_REG(0x03, 0x38)
+#define ADIS16480_REG_BR_ALM_MAGN ADIS16480_REG(0x03, 0x3A)
+#define ADIS16480_REG_FIRM_REV ADIS16480_REG(0x03, 0x78)
+#define ADIS16480_REG_FIRM_DM ADIS16480_REG(0x03, 0x7A)
+#define ADIS16480_REG_FIRM_Y ADIS16480_REG(0x03, 0x7C)
+
+#define ADIS16480_REG_SERIAL_NUM ADIS16480_REG(0x04, 0x20)
+
+/* Each filter coefficient bank spans two pages */
+#define ADIS16480_FIR_COEF(page, x) ((x) < 60 ? ADIS16480_REG(page, (x) + 8) : \
+ ADIS16480_REG((page) + 1, (x) - 60 + 8))
+#define ADIS16480_FIR_COEF_A(x) ADIS16480_FIR_COEF(0x05, (x))
+#define ADIS16480_FIR_COEF_B(x) ADIS16480_FIR_COEF(0x07, (x))
+#define ADIS16480_FIR_COEF_C(x) ADIS16480_FIR_COEF(0x09, (x))
+#define ADIS16480_FIR_COEF_D(x) ADIS16480_FIR_COEF(0x0B, (x))
+
+struct adis16480_chip_info {
+ unsigned int num_channels;
+ const struct iio_chan_spec *channels;
+};
+
+struct adis16480 {
+ const struct adis16480_chip_info *chip_info;
+
+ struct adis adis;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t adis16480_show_firmware_revision(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16480 *adis16480 = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_REV, &rev);
+ if (ret < 0)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16480_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16480_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16480_show_firmware_date(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16480 *adis16480 = file->private_data;
+ u16 md, year;
+ char buf[12];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_Y, &year);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
+ if (ret < 0)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n",
+ md >> 8, md & 0xff, year);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16480_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16480_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16480_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16480 *adis16480 = arg;
+ u16 serial;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_SERIAL_NUM,
+ &serial);
+ if (ret < 0)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_serial_number_fops,
+ adis16480_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16480_show_product_id(void *arg, u64 *val)
+{
+ struct adis16480 *adis16480 = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_PROD_ID,
+ &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_product_id_fops,
+ adis16480_show_product_id, NULL, "%llu\n");
+
+static int adis16480_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16480 *adis16480 = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&adis16480->adis, ADIS16480_REG_FLASH_CNT,
+ &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16480_flash_count_fops,
+ adis16480_show_flash_count, NULL, "%lld\n");
+
+static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16480 *adis16480 = iio_priv(indio_dev);
+
+ debugfs_create_file("firmware_revision", 0400,
+ indio_dev->debugfs_dentry, adis16480,
+ &adis16480_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, indio_dev->debugfs_dentry,
+ adis16480, &adis16480_firmware_date_fops);
+ debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
+ adis16480, &adis16480_serial_number_fops);
+ debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
+ adis16480, &adis16480_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ adis16480, &adis16480_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+static int adis16480_set_freq(struct adis16480 *st, unsigned int freq)
+{
+ unsigned int t;
+
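+ /* freq is in milli-Hz; the base sample rate is 2.46 kHz and DEC_RATE divides it by (t + 1) */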
+ t = 2460000 / freq;
+ if (t > 2048)
+ t = 2048;
+
+ if (t != 0)
+ t--;
+
+ return adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
+}
+
+static int adis16480_get_freq(struct adis16480 *st, unsigned int *freq)
+{
+ uint16_t t;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
+ if (ret < 0)
+ return ret;
+
+ *freq = 2460000 / (t + 1);
+
+ return 0;
+}
+
+static ssize_t adis16480_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16480 *st = iio_priv(indio_dev);
+ unsigned int freq;
+ int ret;
+
+ ret = adis16480_get_freq(st, &freq);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d.%.3d\n", freq / 1000, freq % 1000);
+}
+
+static ssize_t adis16480_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16480 *st = iio_priv(indio_dev);
+ int freq_int, freq_fract;
+ long val;
+ int ret;
+
+ ret = iio_str_to_fixpoint(buf, 100, &freq_int, &freq_fract);
+ if (ret)
+ return ret;
+
+ val = freq_int * 1000 + freq_fract;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ ret = adis16480_set_freq(st, val);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16480_read_frequency,
+ adis16480_write_frequency);
+
+enum {
+ ADIS16480_SCAN_GYRO_X,
+ ADIS16480_SCAN_GYRO_Y,
+ ADIS16480_SCAN_GYRO_Z,
+ ADIS16480_SCAN_ACCEL_X,
+ ADIS16480_SCAN_ACCEL_Y,
+ ADIS16480_SCAN_ACCEL_Z,
+ ADIS16480_SCAN_MAGN_X,
+ ADIS16480_SCAN_MAGN_Y,
+ ADIS16480_SCAN_MAGN_Z,
+ ADIS16480_SCAN_BARO,
+ ADIS16480_SCAN_TEMP,
+};
+
+static const unsigned int adis16480_calibbias_regs[] = {
+ [ADIS16480_SCAN_GYRO_X] = ADIS16480_REG_X_GYRO_BIAS,
+ [ADIS16480_SCAN_GYRO_Y] = ADIS16480_REG_Y_GYRO_BIAS,
+ [ADIS16480_SCAN_GYRO_Z] = ADIS16480_REG_Z_GYRO_BIAS,
+ [ADIS16480_SCAN_ACCEL_X] = ADIS16480_REG_X_ACCEL_BIAS,
+ [ADIS16480_SCAN_ACCEL_Y] = ADIS16480_REG_Y_ACCEL_BIAS,
+ [ADIS16480_SCAN_ACCEL_Z] = ADIS16480_REG_Z_ACCEL_BIAS,
+ [ADIS16480_SCAN_MAGN_X] = ADIS16480_REG_X_HARD_IRON,
+ [ADIS16480_SCAN_MAGN_Y] = ADIS16480_REG_Y_HARD_IRON,
+ [ADIS16480_SCAN_MAGN_Z] = ADIS16480_REG_Z_HARD_IRON,
+ [ADIS16480_SCAN_BARO] = ADIS16480_REG_BAROM_BIAS,
+};
+
+static const unsigned int adis16480_calibscale_regs[] = {
+ [ADIS16480_SCAN_GYRO_X] = ADIS16480_REG_X_GYRO_SCALE,
+ [ADIS16480_SCAN_GYRO_Y] = ADIS16480_REG_Y_GYRO_SCALE,
+ [ADIS16480_SCAN_GYRO_Z] = ADIS16480_REG_Z_GYRO_SCALE,
+ [ADIS16480_SCAN_ACCEL_X] = ADIS16480_REG_X_ACCEL_SCALE,
+ [ADIS16480_SCAN_ACCEL_Y] = ADIS16480_REG_Y_ACCEL_SCALE,
+ [ADIS16480_SCAN_ACCEL_Z] = ADIS16480_REG_Z_ACCEL_SCALE,
+};
+
+static int adis16480_set_calibbias(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int bias)
+{
+ unsigned int reg = adis16480_calibbias_regs[chan->scan_index];
+ struct adis16480 *st = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ case IIO_PRESSURE:
+ if (bias < -0x8000 || bias >= 0x8000)
+ return -EINVAL;
+ return adis_write_reg_16(&st->adis, reg, bias);
+ case IIO_ANGL_VEL:
+ case IIO_ACCEL:
+ return adis_write_reg_32(&st->adis, reg, bias);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int adis16480_get_calibbias(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *bias)
+{
+ unsigned int reg = adis16480_calibbias_regs[chan->scan_index];
+ struct adis16480 *st = iio_priv(indio_dev);
+ uint16_t val16;
+ uint32_t val32;
+ int ret;
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ case IIO_PRESSURE:
+ ret = adis_read_reg_16(&st->adis, reg, &val16);
+ *bias = sign_extend32(val16, 15);
+ break;
+ case IIO_ANGL_VEL:
+ case IIO_ACCEL:
+ ret = adis_read_reg_32(&st->adis, reg, &val32);
+ *bias = sign_extend32(val32, 31);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int adis16480_set_calibscale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int scale)
+{
+ unsigned int reg = adis16480_calibscale_regs[chan->scan_index];
+ struct adis16480 *st = iio_priv(indio_dev);
+
+ if (scale < -0x8000 || scale >= 0x8000)
+ return -EINVAL;
+
+ return adis_write_reg_16(&st->adis, reg, scale);
+}
+
+static int adis16480_get_calibscale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *scale)
+{
+ unsigned int reg = adis16480_calibscale_regs[chan->scan_index];
+ struct adis16480 *st = iio_priv(indio_dev);
+ uint16_t val16;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, reg, &val16);
+ if (ret < 0)
+ return ret;
+
+ *scale = sign_extend32(val16, 15);
+ return IIO_VAL_INT;
+}
+
+static const unsigned int adis16480_def_filter_freqs[] = {
+ 310,
+ 55,
+ 275,
+ 63,
+};
+
+static const unsigned int ad16480_filter_data[][2] = {
+ [ADIS16480_SCAN_GYRO_X] = { ADIS16480_REG_FILTER_BNK0, 0 },
+ [ADIS16480_SCAN_GYRO_Y] = { ADIS16480_REG_FILTER_BNK0, 3 },
+ [ADIS16480_SCAN_GYRO_Z] = { ADIS16480_REG_FILTER_BNK0, 6 },
+ [ADIS16480_SCAN_ACCEL_X] = { ADIS16480_REG_FILTER_BNK0, 9 },
+ [ADIS16480_SCAN_ACCEL_Y] = { ADIS16480_REG_FILTER_BNK0, 12 },
+ [ADIS16480_SCAN_ACCEL_Z] = { ADIS16480_REG_FILTER_BNK1, 0 },
+ [ADIS16480_SCAN_MAGN_X] = { ADIS16480_REG_FILTER_BNK1, 3 },
+ [ADIS16480_SCAN_MAGN_Y] = { ADIS16480_REG_FILTER_BNK1, 6 },
+ [ADIS16480_SCAN_MAGN_Z] = { ADIS16480_REG_FILTER_BNK1, 9 },
+};
+
+static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *freq)
+{
+ struct adis16480 *st = iio_priv(indio_dev);
+ unsigned int enable_mask, offset, reg;
+ uint16_t val;
+ int ret;
+
+ reg = ad16480_filter_data[chan->scan_index][0];
+ offset = ad16480_filter_data[chan->scan_index][1];
+ enable_mask = BIT(offset + 2);
+
+ ret = adis_read_reg_16(&st->adis, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!(val & enable_mask))
+ *freq = 0;
+ else
+ *freq = adis16480_def_filter_freqs[(val >> offset) & 0x3];
+
+ return IIO_VAL_INT;
+}
+
+static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int freq)
+{
+ struct adis16480 *st = iio_priv(indio_dev);
+ unsigned int enable_mask, offset, reg;
+ unsigned int diff, best_diff;
+ unsigned int i, best_freq;
+ uint16_t val;
+ int ret;
+
+ reg = ad16480_filter_data[chan->scan_index][0];
+ offset = ad16480_filter_data[chan->scan_index][1];
+ enable_mask = BIT(offset + 2);
+
+ ret = adis_read_reg_16(&st->adis, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ if (freq == 0) {
+ val &= ~enable_mask;
+ } else {
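+ /* Select the filter setting closest to the requested frequency without going below it */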
+ best_freq = 0;
+ best_diff = 310;
+ for (i = 0; i < ARRAY_SIZE(adis16480_def_filter_freqs); i++) {
+ if (adis16480_def_filter_freqs[i] >= freq) {
+ diff = adis16480_def_filter_freqs[i] - freq;
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_freq = i;
+ }
+ }
+ }
+
+ val &= ~(0x3 << offset);
+ val |= best_freq << offset;
+ val |= enable_mask;
+ }
+
+ return adis_write_reg_16(&st->adis, reg, val);
+}
+
+static int adis16480_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ *val = 0;
+ *val2 = 100; /* 0.0001 gauss */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 5;
+ *val2 = 650000; /* 5.65 milli degree Celsius */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 4000; /* 40ubar = 0.004 kPa */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = 4425; /* 25 degree Celsius = 0x0000 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis16480_get_calibbias(indio_dev, chan, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return adis16480_get_calibscale(indio_dev, chan, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16480_get_filter_freq(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16480_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis16480_set_calibbias(indio_dev, chan, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return adis16480_set_calibscale(indio_dev, chan, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16480_set_filter_freq(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16480_MOD_CHANNEL(_type, _mod, _address, _si, _info, _bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ _info, \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_bits), \
+ .storagebits = (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16480_GYRO_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _GYRO_OUT, ADIS16480_SCAN_GYRO_ ## _mod, \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT, \
+ 32)
+
+#define ADIS16480_ACCEL_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _ACCEL_OUT, ADIS16480_SCAN_ACCEL_ ## _mod, \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT, \
+ 32)
+
+#define ADIS16480_MAGN_CHANNEL(_mod) \
+ ADIS16480_MOD_CHANNEL(IIO_MAGN, IIO_MOD_ ## _mod, \
+ ADIS16480_REG_ ## _mod ## _MAGN_OUT, ADIS16480_SCAN_MAGN_ ## _mod, \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT, \
+ 16)
+
+#define ADIS16480_PRESSURE_CHANNEL() \
+ { \
+ .type = IIO_PRESSURE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = ADIS16480_REG_BAROM_OUT, \
+ .scan_index = ADIS16480_SCAN_BARO, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16480_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT, \
+ .address = ADIS16480_REG_TEMP_OUT, \
+ .scan_index = ADIS16480_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16480_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_MAGN_CHANNEL(X),
+ ADIS16480_MAGN_CHANNEL(Y),
+ ADIS16480_MAGN_CHANNEL(Z),
+ ADIS16480_PRESSURE_CHANNEL(),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16485_channels[] = {
+ ADIS16480_GYRO_CHANNEL(X),
+ ADIS16480_GYRO_CHANNEL(Y),
+ ADIS16480_GYRO_CHANNEL(Z),
+ ADIS16480_ACCEL_CHANNEL(X),
+ ADIS16480_ACCEL_CHANNEL(Y),
+ ADIS16480_ACCEL_CHANNEL(Z),
+ ADIS16480_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+enum adis16480_variant {
+ ADIS16375,
+ ADIS16480,
+ ADIS16485,
+ ADIS16488,
+};
+
+static const struct adis16480_chip_info adis16480_chip_info[] = {
+ [ADIS16375] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ },
+ [ADIS16480] = {
+ .channels = adis16480_channels,
+ .num_channels = ARRAY_SIZE(adis16480_channels),
+ },
+ [ADIS16485] = {
+ .channels = adis16485_channels,
+ .num_channels = ARRAY_SIZE(adis16485_channels),
+ },
+ [ADIS16488] = {
+ .channels = adis16480_channels,
+ .num_channels = ARRAY_SIZE(adis16480_channels),
+ },
+};
+
+static struct attribute *adis16480_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16480_attribute_group = {
+ .attrs = adis16480_attributes,
+};
+
+static const struct iio_info adis16480_info = {
+ .attrs = &adis16480_attribute_group,
+ .read_raw = &adis16480_read_raw,
+ .write_raw = &adis16480_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static int adis16480_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16480 *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16480_REG_SLP_CNT, BIT(9));
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "Could not power down device: %d\n", ret);
+
+ return ret;
+}
+
+static int adis16480_enable_irq(struct adis *adis, bool enable)
+{
+ return adis_write_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL,
+ enable ? BIT(3) : 0);
+}
+
+static int adis16480_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16480 *st = iio_priv(indio_dev);
+ uint16_t prod_id;
+ unsigned int device_id;
+ int ret;
+
+ adis_reset(&st->adis);
+ msleep(70);
+
+ ret = adis_write_reg_16(&st->adis, ADIS16480_REG_GLOB_CMD, BIT(1));
+ if (ret)
+ return ret;
+ msleep(30);
+
+ ret = adis_check_status(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16480_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ sscanf(indio_dev->name, "adis%u\n", &device_id);
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+ device_id, prod_id);
+
+ return 0;
+}
+
+#define ADIS16480_DIAG_STAT_XGYRO_FAIL 0
+#define ADIS16480_DIAG_STAT_YGYRO_FAIL 1
+#define ADIS16480_DIAG_STAT_ZGYRO_FAIL 2
+#define ADIS16480_DIAG_STAT_XACCL_FAIL 3
+#define ADIS16480_DIAG_STAT_YACCL_FAIL 4
+#define ADIS16480_DIAG_STAT_ZACCL_FAIL 5
+#define ADIS16480_DIAG_STAT_XMAGN_FAIL 8
+#define ADIS16480_DIAG_STAT_YMAGN_FAIL 9
+#define ADIS16480_DIAG_STAT_ZMAGN_FAIL 10
+#define ADIS16480_DIAG_STAT_BARO_FAIL 11
+
+static const char * const adis16480_status_error_msgs[] = {
+ [ADIS16480_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16480_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16480_DIAG_STAT_XMAGN_FAIL] = "X-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_YMAGN_FAIL] = "Y-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_ZMAGN_FAIL] = "Z-axis magnetometer self-test failure",
+ [ADIS16480_DIAG_STAT_BARO_FAIL] = "Barometer self-test failure",
+};
+
+static const struct adis_data adis16480_data = {
+ .diag_stat_reg = ADIS16480_REG_DIAG_STS,
+ .glob_cmd_reg = ADIS16480_REG_GLOB_CMD,
+ .has_paging = true,
+
+ .read_delay = 5,
+ .write_delay = 5,
+
+ .status_error_msgs = adis16480_status_error_msgs,
+ .status_error_mask = BIT(ADIS16480_DIAG_STAT_XGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZGYRO_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_XACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZACCL_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_XMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_YMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_ZMAGN_FAIL) |
+ BIT(ADIS16480_DIAG_STAT_BARO_FAIL),
+
+ .enable_irq = adis16480_enable_irq,
+};
+
+static int adis16480_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct adis16480 *st;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st = iio_priv(indio_dev);
+
+ st->chip_info = &adis16480_chip_info[id->driver_data];
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+ indio_dev->info = &adis16480_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis16480_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_stop_device;
+
+ adis16480_debugfs_init(indio_dev);
+
+ return 0;
+
+error_stop_device:
+ adis16480_stop_device(indio_dev);
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int adis16480_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16480 *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16480_stop_device(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16480_ids[] = {
+ { "adis16375", ADIS16375 },
+ { "adis16480", ADIS16480 },
+ { "adis16485", ADIS16485 },
+ { "adis16488", ADIS16488 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16480_ids);
+
+static struct spi_driver adis16480_driver = {
+ .driver = {
+ .name = "adis16480",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16480_ids,
+ .probe = adis16480_probe,
+ .remove = adis16480_remove,
+};
+module_spi_driver(adis16480_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16480 IMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
new file mode 100644
index 000000000000..99d8e0b0dd34
--- /dev/null
+++ b/drivers/iio/imu/adis_buffer.c
@@ -0,0 +1,176 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/imu/adis.h>
+
+int adis_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ const struct iio_chan_spec *chan;
+ unsigned int scan_count;
+ unsigned int i, j;
+ __be16 *tx, *rx;
+
+ kfree(adis->xfer);
+ kfree(adis->buffer);
+
+ scan_count = indio_dev->scan_bytes / 2;
+
+ adis->xfer = kcalloc(scan_count + 1, sizeof(*adis->xfer), GFP_KERNEL);
+ if (!adis->xfer)
+ return -ENOMEM;
+
+ adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL);
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ rx = adis->buffer;
+ /* rx is a __be16 pointer, so step by scan_count entries (= scan_bytes bytes) */
+ tx = rx + scan_count;
+
+ spi_message_init(&adis->msg);
+
+ for (j = 0; j <= scan_count; j++) {
+ adis->xfer[j].bits_per_word = 8;
+ if (j != scan_count)
+ adis->xfer[j].cs_change = 1;
+ adis->xfer[j].len = 2;
+ adis->xfer[j].delay_usecs = adis->data->read_delay;
+ if (j < scan_count)
+ adis->xfer[j].tx_buf = &tx[j];
+ if (j >= 1)
+ adis->xfer[j].rx_buf = &rx[j - 1];
+ spi_message_add_tail(&adis->xfer[j], &adis->msg);
+ }
+
+ chan = indio_dev->channels;
+ for (i = 0; i < indio_dev->num_channels; i++, chan++) {
+ if (!test_bit(chan->scan_index, scan_mask))
+ continue;
+ if (chan->scan_type.storagebits == 32)
+ *tx++ = cpu_to_be16((chan->address + 2) << 8);
+ *tx++ = cpu_to_be16(chan->address << 8);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adis_update_scan_mode);
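Editor's note, not part of the patch: the transfer chain built above pipelines the register reads. Transfer j clocks out the read command for scan element j (for j < scan_count) while clocking in the result of the command issued in transfer j - 1 (for j >= 1), with cs_change toggling chip select between words; a 32-bit channel simply occupies two consecutive 16-bit reads. For two enabled 16-bit channels A and B (scan_count = 2) the three 2-byte transfers look like this:

/*
 * xfer[0]: tx = read(A), rx = -          (cs_change)
 * xfer[1]: tx = read(B), rx = value of A (cs_change)
 * xfer[2]: tx = -,       rx = value of B
 */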
+
+static irqreturn_t adis_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ if (adis->data->has_paging) {
+ mutex_lock(&adis->txrx_lock);
+ if (adis->current_page != 0) {
+ adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);
+ adis->tx[1] = 0;
+ spi_write(adis->spi, adis->tx, 2);
+ }
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to read data: %d", ret);
+
+
+ if (adis->data->has_paging) {
+ adis->current_page = 0;
+ mutex_unlock(&adis->txrx_lock);
+ }
+
+ /* Guaranteed to be aligned to an 8 byte boundary */
+ if (indio_dev->scan_timestamp) {
+ void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64);
+ *(s64 *)b = pf->timestamp;
+ }
+
+ iio_push_to_buffers(indio_dev, adis->buffer);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
+ * @adis: The adis device.
+ * @indio_dev: The IIO device.
+ * @trigger_handler: Optional trigger handler, may be NULL.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function sets up the buffer and trigger for an adis device. If
+ * 'trigger_handler' is NULL, the default trigger handler will be used. The
+ * default trigger handler will simply read the registers assigned to the
+ * currently active channels.
+ *
+ * adis_cleanup_buffer_and_trigger() should be called to free the resources
+ * allocated by this function.
+ */
+int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irqreturn_t (*trigger_handler)(int, void *))
+{
+ int ret;
+
+ if (!trigger_handler)
+ trigger_handler = adis_trigger_handler;
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ if (adis->spi->irq) {
+ ret = adis_probe_trigger(adis, indio_dev);
+ if (ret)
+ goto error_buffer_cleanup;
+ }
+ return 0;
+
+error_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
+
+/**
+ * adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
+ * @adis: The adis device.
+ * @indio_dev: The IIO device.
+ *
+ * Frees resources allocated by adis_setup_buffer_and_trigger()
+ */
+void adis_cleanup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+ if (adis->spi->irq)
+ adis_remove_trigger(adis);
+ kfree(adis->buffer);
+ kfree(adis->xfer);
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+EXPORT_SYMBOL_GPL(adis_cleanup_buffer_and_trigger);
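Editor's note, not part of the patch: the kernel-doc above only describes the non-NULL case in prose, so here is a hedged sketch of a custom trigger handler for a hypothetical "foo" driver built on this library. The IIO calls used all appear in this file or in the IIO core; the foo_state structure and its sample buffer are assumptions.

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_state *st = iio_priv(indio_dev);

	/* read and post-process the samples into st->buffer here */

	iio_push_to_buffers(indio_dev, st->buffer);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

/* In probe, pass it instead of NULL: */
/* adis_setup_buffer_and_trigger(&st->adis, indio_dev, foo_trigger_handler); */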
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
new file mode 100644
index 000000000000..5a24c9cac343
--- /dev/null
+++ b/drivers/iio/imu/adis_trigger.c
@@ -0,0 +1,89 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/export.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/imu/adis.h>
+
+static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis *adis = trig->private_data;
+
+ return adis_enable_irq(adis, state);
+}
+
+static const struct iio_trigger_ops adis_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &adis_data_rdy_trigger_set_state,
+};
+
+/**
+ * adis_probe_trigger() - Sets up trigger for an adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ *
+ * Returns 0 on success or a negative error code
+ *
+ * adis_remove_trigger() should be used to free the trigger.
+ */
+int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+{
+ int ret;
+
+ adis->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+ indio_dev->id);
+ if (adis->trig == NULL)
+ return -ENOMEM;
+
+ ret = request_irq(adis->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ indio_dev->name,
+ adis->trig);
+ if (ret)
+ goto error_free_trig;
+
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ adis->trig->private_data = adis;
+ ret = iio_trigger_register(adis->trig);
+
+ indio_dev->trig = adis->trig;
+ if (ret)
+ goto error_free_irq;
+
+ return 0;
+
+error_free_irq:
+ free_irq(adis->spi->irq, adis->trig);
+error_free_trig:
+ iio_trigger_free(adis->trig);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adis_probe_trigger);
+
+/**
+ * adis_remove_trigger() - Remove trigger for an adis device
+ * @adis: The adis device
+ *
+ * Removes the trigger previously registered with adis_probe_trigger().
+ */
+void adis_remove_trigger(struct adis *adis)
+{
+ iio_trigger_unregister(adis->trig);
+ free_irq(adis->spi->irq, adis->trig);
+ iio_trigger_free(adis->trig);
+}
+EXPORT_SYMBOL_GPL(adis_remove_trigger);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d4ad37455a67..aaadd32f9f0d 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -31,6 +31,18 @@ static const char * const iio_endian_prefix[] = {
[IIO_LE] = "le",
};
+static bool iio_buffer_is_active(struct iio_dev *indio_dev,
+ struct iio_buffer *buf)
+{
+ struct list_head *p;
+
+ list_for_each(p, &indio_dev->buffer_list)
+ if (p == &buf->buffer_list)
+ return true;
+
+ return false;
+}
+
/**
* iio_buffer_read_first_n_outer() - chrdev read for buffer access
*
@@ -134,7 +146,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
+ if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -180,12 +192,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
+ if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
ret = -EBUSY;
goto error_ret;
}
indio_dev->buffer->scan_timestamp = state;
- indio_dev->scan_timestamp = state;
error_ret:
mutex_unlock(&indio_dev->mlock);
@@ -371,12 +382,12 @@ ssize_t iio_buffer_write_length(struct device *dev,
const char *buf,
size_t len)
{
- int ret;
- ulong val;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_buffer *buffer = indio_dev->buffer;
+ unsigned int val;
+ int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
@@ -385,7 +396,7 @@ ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
+ if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
ret = -EBUSY;
} else {
if (buffer->access->set_length)
@@ -398,102 +409,14 @@ ssize_t iio_buffer_write_length(struct device *dev,
}
EXPORT_SYMBOL(iio_buffer_write_length);
-ssize_t iio_buffer_store_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- bool requested_state, current_state;
- int previous_mode;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
-
- mutex_lock(&indio_dev->mlock);
- previous_mode = indio_dev->currentmode;
- requested_state = !(buf[0] == '0');
- current_state = iio_buffer_enabled(indio_dev);
- if (current_state == requested_state) {
- printk(KERN_INFO "iio-buffer, current state requested again\n");
- goto done;
- }
- if (requested_state) {
- if (indio_dev->setup_ops->preenable) {
- ret = indio_dev->setup_ops->preenable(indio_dev);
- if (ret) {
- printk(KERN_ERR
- "Buffer not started: "
- "buffer preenable failed\n");
- goto error_ret;
- }
- }
- if (buffer->access->request_update) {
- ret = buffer->access->request_update(buffer);
- if (ret) {
- printk(KERN_INFO
- "Buffer not started: "
- "buffer parameter update failed\n");
- goto error_ret;
- }
- }
- /* Definitely possible for devices to support both of these. */
- if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
- if (!indio_dev->trig) {
- printk(KERN_INFO
- "Buffer not started: no trigger\n");
- ret = -EINVAL;
- goto error_ret;
- }
- indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
- } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
- indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
- else { /* should never be reached */
- ret = -EINVAL;
- goto error_ret;
- }
-
- if (indio_dev->setup_ops->postenable) {
- ret = indio_dev->setup_ops->postenable(indio_dev);
- if (ret) {
- printk(KERN_INFO
- "Buffer not started: "
- "postenable failed\n");
- indio_dev->currentmode = previous_mode;
- if (indio_dev->setup_ops->postdisable)
- indio_dev->setup_ops->
- postdisable(indio_dev);
- goto error_ret;
- }
- }
- } else {
- if (indio_dev->setup_ops->predisable) {
- ret = indio_dev->setup_ops->predisable(indio_dev);
- if (ret)
- goto error_ret;
- }
- indio_dev->currentmode = INDIO_DIRECT_MODE;
- if (indio_dev->setup_ops->postdisable) {
- ret = indio_dev->setup_ops->postdisable(indio_dev);
- if (ret)
- goto error_ret;
- }
- }
-done:
- mutex_unlock(&indio_dev->mlock);
- return len;
-
-error_ret:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-EXPORT_SYMBOL(iio_buffer_store_enable);
-
ssize_t iio_buffer_show_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
+ return sprintf(buf, "%d\n",
+ iio_buffer_is_active(indio_dev,
+ indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
@@ -537,35 +460,220 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
return bytes;
}
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer)
{
- struct iio_buffer *buffer = indio_dev->buffer;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ int ret;
+ int success = 0;
+ struct iio_buffer *buffer;
+ unsigned long *compound_mask;
+ const unsigned long *old_mask;
- /* How much space will the demuxed element take? */
- indio_dev->scan_bytes =
- iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
- buffer->scan_timestamp);
- buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);
+ /* Wind down existing buffers - iff there are any */
+ if (!list_empty(&indio_dev->buffer_list)) {
+ if (indio_dev->setup_ops->predisable) {
+ ret = indio_dev->setup_ops->predisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (indio_dev->setup_ops->postdisable) {
+ ret = indio_dev->setup_ops->postdisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ }
+ /* Keep a copy of current setup to allow roll back */
+ old_mask = indio_dev->active_scan_mask;
+ if (!indio_dev->available_scan_masks)
+ indio_dev->active_scan_mask = NULL;
+
+ if (remove_buffer)
+ list_del(&remove_buffer->buffer_list);
+ if (insert_buffer)
+ list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);
+
+ /* If no buffers in list, we are done */
+ if (list_empty(&indio_dev->buffer_list)) {
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (indio_dev->available_scan_masks == NULL)
+ kfree(old_mask);
+ return 0;
+ }
/* What scan mask do we actually have ?*/
- if (indio_dev->available_scan_masks)
+ compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(long), GFP_KERNEL);
+ if (compound_mask == NULL) {
+ if (indio_dev->available_scan_masks == NULL)
+ kfree(old_mask);
+ return -ENOMEM;
+ }
+ indio_dev->scan_timestamp = 0;
+
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+ bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
+ indio_dev->masklength);
+ indio_dev->scan_timestamp |= buffer->scan_timestamp;
+ }
+ if (indio_dev->available_scan_masks) {
indio_dev->active_scan_mask =
iio_scan_mask_match(indio_dev->available_scan_masks,
indio_dev->masklength,
- buffer->scan_mask);
- else
- indio_dev->active_scan_mask = buffer->scan_mask;
-
- if (indio_dev->active_scan_mask == NULL)
- return -EINVAL;
+ compound_mask);
+ if (indio_dev->active_scan_mask == NULL) {
+ /*
+ * Roll back.
+ * Note can only occur when adding a buffer.
+ */
+ list_del(&insert_buffer->buffer_list);
+ indio_dev->active_scan_mask = old_mask;
+ success = -EINVAL;
+ }
+ } else {
+ indio_dev->active_scan_mask = compound_mask;
+ }
iio_update_demux(indio_dev);
- if (indio_dev->info->update_scan_mode)
- return indio_dev->info
+ /* Wind up again */
+ if (indio_dev->setup_ops->preenable) {
+ ret = indio_dev->setup_ops->preenable(indio_dev);
+ if (ret) {
+ printk(KERN_ERR
+ "Buffer not started:"
+ "buffer preenable failed\n");
+ goto error_remove_inserted;
+ }
+ }
+ indio_dev->scan_bytes =
+ iio_compute_scan_bytes(indio_dev,
+ indio_dev->active_scan_mask,
+ indio_dev->scan_timestamp);
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+ if (buffer->access->request_update) {
+ ret = buffer->access->request_update(buffer);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "buffer parameter update failed\n");
+ goto error_run_postdisable;
+ }
+ }
+ if (indio_dev->info->update_scan_mode) {
+ ret = indio_dev->info
->update_scan_mode(indio_dev,
indio_dev->active_scan_mask);
+ if (ret < 0) {
+ printk(KERN_INFO "update scan mode failed\n");
+ goto error_run_postdisable;
+ }
+ }
+ /* Definitely possible for devices to support both of these.*/
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+ if (!indio_dev->trig) {
+ printk(KERN_INFO "Buffer not started: no trigger\n");
+ ret = -EINVAL;
+ /* Can only occur on first buffer */
+ goto error_run_postdisable;
+ }
+ indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+ } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
+ indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+ } else { /* should never be reached */
+ ret = -EINVAL;
+ goto error_run_postdisable;
+ }
+
+ if (indio_dev->setup_ops->postenable) {
+ ret = indio_dev->setup_ops->postenable(indio_dev);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started: postenable failed\n");
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->postdisable(indio_dev);
+ goto error_disable_all_buffers;
+ }
+ }
+
+ if (indio_dev->available_scan_masks)
+ kfree(compound_mask);
+ else
+ kfree(old_mask);
+
+ return success;
+
+error_disable_all_buffers:
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+error_run_postdisable:
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->postdisable(indio_dev);
+error_remove_inserted:
+
+ if (insert_buffer)
+ list_del(&insert_buffer->buffer_list);
+ indio_dev->active_scan_mask = old_mask;
+ kfree(compound_mask);
+error_ret:
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_buffers);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ bool requested_state;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *pbuf = indio_dev->buffer;
+ bool inlist;
+
+ ret = strtobool(buf, &requested_state);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ /* Find out if it is in the list */
+ inlist = iio_buffer_is_active(indio_dev, pbuf);
+ /* Already in desired state */
+ if (inlist == requested_state)
+ goto done;
+
+ if (requested_state)
+ ret = iio_update_buffers(indio_dev,
+ indio_dev->buffer, NULL);
+ else
+ ret = iio_update_buffers(indio_dev,
+ NULL, indio_dev->buffer);
+
+ if (ret < 0)
+ goto done;
+done:
+ mutex_unlock(&indio_dev->mlock);
+ return (ret < 0) ? ret : len;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+ unsigned bytes;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+ if (buffer->access->set_bytes_per_datum) {
+ bytes = iio_compute_scan_bytes(indio_dev,
+ buffer->scan_mask,
+ buffer->scan_timestamp);
+
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+ }
return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
@@ -599,7 +707,11 @@ static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
- **/
+ *
+ * Note that at this point we have no way of knowing what other
+ * buffers might request, hence this code only verifies that the
+ * individual buffer's request is plausible.
+ */
int iio_scan_mask_set(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit)
{
@@ -682,13 +794,12 @@ static unsigned char *iio_demux(struct iio_buffer *buffer,
return buffer->demux_bounce;
}
-int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
+static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
unsigned char *dataout = iio_demux(buffer, data);
return buffer->access->store_to(buffer, dataout);
}
-EXPORT_SYMBOL_GPL(iio_push_to_buffer);
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
@@ -699,10 +810,26 @@ static void iio_buffer_demux_free(struct iio_buffer *buffer)
}
}
-int iio_update_demux(struct iio_dev *indio_dev)
+
+int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
+{
+ int ret;
+ struct iio_buffer *buf;
+
+ list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
+ ret = iio_push_to_buffer(buf, data);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffers);
+
+static int iio_buffer_update_demux(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
{
const struct iio_chan_spec *ch;
- struct iio_buffer *buffer = indio_dev->buffer;
int ret, in_ind = -1, out_ind, length;
unsigned in_loc = 0, out_loc = 0;
struct iio_demux_table *p;
@@ -787,4 +914,23 @@ error_clear_mux_table:
return ret;
}
+
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
+ ret = iio_buffer_update_demux(indio_dev, buffer);
+ if (ret < 0)
+ goto error_clear_mux_table;
+ }
+ return 0;
+
+error_clear_mux_table:
+ list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
+ iio_buffer_demux_free(buffer);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(iio_update_demux);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 6eb24dbc081e..8848f16c547b 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -65,6 +65,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_CAPACITANCE] = "capacitance",
[IIO_ALTVOLTAGE] = "altvoltage",
[IIO_CCT] = "cct",
+ [IIO_PRESSURE] = "pressure",
};
static const char * const iio_modifier_names[] = {
@@ -397,11 +398,74 @@ static ssize_t iio_read_channel_info(struct device *dev,
val2 = do_div(tmp, 1000000000LL);
val = tmp;
return sprintf(buf, "%d.%09u\n", val, val2);
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = (s64)val * 1000000000LL >> val2;
+ val2 = do_div(tmp, 1000000000LL);
+ val = tmp;
+ return sprintf(buf, "%d.%09u\n", val, val2);
default:
return 0;
}
}
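Editor's note, not part of the patch: a quick standalone check of the arithmetic in the IIO_VAL_FRACTIONAL_LOG2 case added above, which formats val / 2^val2 with nine decimal places (mirroring the kernel's do_div usage for a non-negative value). With val = 5 and val2 = 2 the sysfs output is "1.250000000".

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int val = 5, val2 = 2;  /* represents 5 / 2^2 = 1.25 */
	int64_t tmp = (int64_t)val * 1000000000LL >> val2;

	printf("%d.%09u\n", (int)(tmp / 1000000000LL),
	       (unsigned int)(tmp % 1000000000LL));  /* prints 1.250000000 */
	return 0;
}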
+/**
+ * iio_str_to_fixpoint() - Parse a fixed-point number from a string
+ * @str: The string to parse
+ * @fract_mult: Multiplier for the first decimal place, should be a power of 10
+ * @integer: The integer part of the number
+ * @fract: The fractional part of the number
+ *
+ * Returns 0 on success, or a negative error code if the string could not be
+ * parsed.
+ */
+int iio_str_to_fixpoint(const char *str, int fract_mult,
+ int *integer, int *fract)
+{
+ int i = 0, f = 0;
+ bool integer_part = true, negative = false;
+
+ if (str[0] == '-') {
+ negative = true;
+ str++;
+ } else if (str[0] == '+') {
+ str++;
+ }
+
+ while (*str) {
+ if ('0' <= *str && *str <= '9') {
+ if (integer_part) {
+ i = i * 10 + *str - '0';
+ } else {
+ f += fract_mult * (*str - '0');
+ fract_mult /= 10;
+ }
+ } else if (*str == '\n') {
+ if (*(str + 1) == '\0')
+ break;
+ else
+ return -EINVAL;
+ } else if (*str == '.' && integer_part) {
+ integer_part = false;
+ } else {
+ return -EINVAL;
+ }
+ str++;
+ }
+
+ if (negative) {
+ if (i)
+ i = -i;
+ else
+ f = -f;
+ }
+
+ *integer = i;
+ *fract = f;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
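Editor's note, not part of the patch: with fract_mult = 100000 (the value iio_write_channel_info() passes below), the fractional part comes back in micro-units, matching IIO_VAL_INT_PLUS_MICRO. Two illustrative calls:

	int integer, fract;

	iio_str_to_fixpoint("-1.5\n", 100000, &integer, &fract);
	/* integer == -1, fract == 500000, i.e. -1.5 */

	iio_str_to_fixpoint("-0.25\n", 100000, &integer, &fract);
	/* integer == 0, fract == -250000; the sign moves to fract when the integer part is 0 */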
+
static ssize_t iio_write_channel_info(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -409,8 +473,8 @@ static ssize_t iio_write_channel_info(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret, integer = 0, fract = 0, fract_mult = 100000;
- bool integer_part = true, negative = false;
+ int ret, fract_mult = 100000;
+ int integer, fract;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -429,39 +493,9 @@ static ssize_t iio_write_channel_info(struct device *dev,
return -EINVAL;
}
- if (buf[0] == '-') {
- negative = true;
- buf++;
- }
-
- while (*buf) {
- if ('0' <= *buf && *buf <= '9') {
- if (integer_part)
- integer = integer*10 + *buf - '0';
- else {
- fract += fract_mult*(*buf - '0');
- if (fract_mult == 1)
- break;
- fract_mult /= 10;
- }
- } else if (*buf == '\n') {
- if (*(buf + 1) == '\0')
- break;
- else
- return -EINVAL;
- } else if (*buf == '.') {
- integer_part = false;
- } else {
- return -EINVAL;
- }
- buf++;
- }
- if (negative) {
- if (integer)
- integer = -integer;
- else
- fract = -fract;
- }
+ ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract);
+ if (ret)
+ return ret;
ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
integer, fract, this_attr->address);
@@ -851,6 +885,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
return NULL;
}
dev_set_name(&dev->dev, "iio:device%d", dev->id);
+ INIT_LIST_HEAD(&dev->buffer_list);
}
return dev;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index fa6543bf6731..261cae00557e 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -239,13 +239,13 @@ static ssize_t iio_ev_value_store(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned long val;
+ int val;
int ret;
if (!indio_dev->info->write_event_value)
return -EINVAL;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
@@ -350,15 +350,10 @@ static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
ret = iio_device_add_event_sysfs(indio_dev,
&indio_dev->channels[j]);
if (ret < 0)
- goto error_clear_attrs;
+ return ret;
attrcount += ret;
}
return attrcount;
-
-error_clear_attrs:
- __iio_remove_event_config_attrs(indio_dev);
-
- return ret;
}
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index f2b78d4fe457..d55e98fb300e 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -78,7 +78,7 @@ int iio_map_array_unregister(struct iio_dev *indio_dev,
found_it = true;
break;
}
- if (found_it == false) {
+ if (!found_it) {
ret = -ENODEV;
goto error_ret;
}
@@ -203,6 +203,7 @@ struct iio_channel *iio_channel_get_all(const char *name)
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
continue;
chans[mapind].indio_dev = c->indio_dev;
+ chans[mapind].data = c->map->consumer_data;
chans[mapind].channel =
iio_chan_spec_from_name(chans[mapind].indio_dev,
c->map->adc_channel_label);
@@ -314,6 +315,9 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
*processed = div_s64(raw64 * (s64)scale_val * scale,
scale_val2);
break;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 1763c9bcb98a..dbf80abc834f 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -47,6 +47,7 @@ config HID_SENSOR_ALS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 164b62b91a4b..d5b9d39d95b2 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -164,7 +164,6 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adjd_s311_data *data = iio_priv(indio_dev);
- struct iio_buffer *buffer = indio_dev->buffer;
s64 time_ns = iio_get_time_ns();
int len = 0;
int i, j = 0;
@@ -187,7 +186,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
if (indio_dev->scan_timestamp)
*(s64 *)((u8 *)data->buffer + ALIGN(len, sizeof(s64)))
= time_ns;
- iio_push_to_buffer(buffer, (u8 *)data->buffer);
+ iio_push_to_buffers(indio_dev, (u8 *)data->buffer);
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -287,8 +286,8 @@ static const struct iio_info adjd_s311_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit adjd_s311_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adjd_s311_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adjd_s311_data *data;
struct iio_dev *indio_dev;
@@ -331,7 +330,7 @@ exit:
return err;
}
-static int __devexit adjd_s311_remove(struct i2c_client *client)
+static int adjd_s311_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct adjd_s311_data *data = iio_priv(indio_dev);
@@ -355,7 +354,7 @@ static struct i2c_driver adjd_s311_driver = {
.name = ADJD_S311_DRV_NAME,
},
.probe = adjd_s311_probe,
- .remove = __devexit_p(adjd_s311_remove),
+ .remove = adjd_s311_remove,
.id_table = adjd_s311_id,
};
module_i2c_driver(adjd_s311_driver);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 96e3691e42c4..e2d042f2a544 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -176,21 +176,8 @@ static const struct iio_info als_info = {
/* Function to push data to buffer */
static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
{
- struct iio_buffer *buffer = indio_dev->buffer;
- int datum_sz;
-
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- if (!buffer) {
- dev_err(&indio_dev->dev, "Buffer == NULL\n");
- return;
- }
- datum_sz = buffer->access->get_bytes_per_datum(buffer);
- if (len > datum_sz) {
- dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
- datum_sz);
- return;
- }
- iio_push_to_buffer(buffer, (u8 *)data);
+ iio_push_to_buffers(indio_dev, (u8 *)data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -258,7 +245,7 @@ static int als_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_als_probe(struct platform_device *pdev)
+static int hid_als_probe(struct platform_device *pdev)
{
int ret = 0;
static const char *name = "als";
@@ -285,10 +272,9 @@ static int __devinit hid_als_probe(struct platform_device *pdev)
goto error_free_dev;
}
- channels = kmemdup(als_channels,
- sizeof(als_channels),
- GFP_KERNEL);
+ channels = kmemdup(als_channels, sizeof(als_channels), GFP_KERNEL);
if (!channels) {
+ ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
goto error_free_dev;
}
@@ -355,7 +341,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_als_remove(struct platform_device *pdev)
+static int hid_als_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index e45712a921ce..7503012ce933 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -718,8 +718,7 @@ static struct attribute_group lm3533_als_attribute_group = {
.attrs = lm3533_als_attributes
};
-static int __devinit lm3533_als_set_input_mode(struct lm3533_als *als,
- bool pwm_mode)
+static int lm3533_als_set_input_mode(struct lm3533_als *als, bool pwm_mode)
{
u8 mask = LM3533_ALS_INPUT_MODE_MASK;
u8 val;
@@ -740,7 +739,7 @@ static int __devinit lm3533_als_set_input_mode(struct lm3533_als *als,
return 0;
}
-static int __devinit lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
+static int lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
{
int ret;
@@ -756,8 +755,8 @@ static int __devinit lm3533_als_set_resistor(struct lm3533_als *als, u8 val)
return 0;
}
-static int __devinit lm3533_als_setup(struct lm3533_als *als,
- struct lm3533_als_platform_data *pdata)
+static int lm3533_als_setup(struct lm3533_als *als,
+ struct lm3533_als_platform_data *pdata)
{
int ret;
@@ -775,7 +774,7 @@ static int __devinit lm3533_als_setup(struct lm3533_als *als,
return 0;
}
-static int __devinit lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
+static int lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
{
u8 mask = LM3533_ALS_INT_ENABLE_MASK;
int ret;
@@ -799,7 +798,7 @@ static int __devinit lm3533_als_setup_irq(struct lm3533_als *als, void *dev)
return 0;
}
-static int __devinit lm3533_als_enable(struct lm3533_als *als)
+static int lm3533_als_enable(struct lm3533_als *als)
{
u8 mask = LM3533_ALS_ENABLE_MASK;
int ret;
@@ -830,7 +829,7 @@ static const struct iio_info lm3533_als_info = {
.read_raw = &lm3533_als_read_raw,
};
-static int __devinit lm3533_als_probe(struct platform_device *pdev)
+static int lm3533_als_probe(struct platform_device *pdev)
{
struct lm3533 *lm3533;
struct lm3533_als_platform_data *pdata;
@@ -901,7 +900,7 @@ err_free_dev:
return ret;
}
-static int __devexit lm3533_als_remove(struct platform_device *pdev)
+static int lm3533_als_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct lm3533_als *als = iio_priv(indio_dev);
@@ -922,7 +921,7 @@ static struct platform_driver lm3533_als_driver = {
.owner = THIS_MODULE,
},
.probe = lm3533_als_probe,
- .remove = __devexit_p(lm3533_als_remove),
+ .remove = lm3533_als_remove,
};
module_platform_driver(lm3533_als_driver);
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index e49cb9784a6f..2aa748fbdc0e 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -150,8 +150,8 @@ static const struct iio_info vcnl4000_info = {
.driver_module = THIS_MODULE,
};
-static int __devinit vcnl4000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int vcnl4000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct vcnl4000_data *data;
struct iio_dev *indio_dev;
@@ -190,7 +190,7 @@ error_free_dev:
return ret;
}
-static int __devexit vcnl4000_remove(struct i2c_client *client)
+static int vcnl4000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -206,7 +206,7 @@ static struct i2c_driver vcnl4000_driver = {
.owner = THIS_MODULE,
},
.probe = vcnl4000_probe,
- .remove = __devexit_p(vcnl4000_remove),
+ .remove = vcnl4000_remove,
.id_table = vcnl4000_id,
};
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index c1f0cdd57037..ff11d68225cf 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -8,6 +8,7 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index c4f0d274f577..7ac2c7483ba8 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -198,21 +198,8 @@ static const struct iio_info magn_3d_info = {
/* Function to push data to buffer */
static void hid_sensor_push_data(struct iio_dev *indio_dev, u8 *data, int len)
{
- struct iio_buffer *buffer = indio_dev->buffer;
- int datum_sz;
-
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- if (!buffer) {
- dev_err(&indio_dev->dev, "Buffer == NULL\n");
- return;
- }
- datum_sz = buffer->access->get_bytes_per_datum(buffer);
- if (len > datum_sz) {
- dev_err(&indio_dev->dev, "Datum size mismatch %d:%d\n", len,
- datum_sz);
- return;
- }
- iio_push_to_buffer(buffer, (u8 *)data);
+ iio_push_to_buffers(indio_dev, (u8 *)data);
}
/* Callback handler to send event after all samples are received and captured */
@@ -292,7 +279,7 @@ static int magn_3d_parse_report(struct platform_device *pdev,
}
/* Function to initialize the processing for usage id */
-static int __devinit hid_magn_3d_probe(struct platform_device *pdev)
+static int hid_magn_3d_probe(struct platform_device *pdev)
{
int ret = 0;
static char *name = "magn_3d";
@@ -320,10 +307,10 @@ static int __devinit hid_magn_3d_probe(struct platform_device *pdev)
goto error_free_dev;
}
- channels = kmemdup(magn_3d_channels,
- sizeof(magn_3d_channels),
- GFP_KERNEL);
+ channels = kmemdup(magn_3d_channels, sizeof(magn_3d_channels),
+ GFP_KERNEL);
if (!channels) {
+ ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
goto error_free_dev;
}
@@ -389,7 +376,7 @@ error_ret:
}
/* Function to deinitialize the processing for usage id */
-static int __devinit hid_magn_3d_remove(struct platform_device *pdev)
+static int hid_magn_3d_remove(struct platform_device *pdev)
{
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a7568c34a1aa..d789eea32168 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -345,17 +345,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu
err = ib_query_port(device, port_num, &props);
if (err)
- return 1;
+ return err;
for (i = 0; i < props.gid_tbl_len; ++i) {
err = ib_query_gid(device, port_num, i, &tmp);
if (err)
- return 1;
+ return err;
if (!memcmp(&tmp, gid, sizeof tmp))
return 0;
}
- return -EAGAIN;
+ return -EADDRNOTAVAIL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,8 +388,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
if (!ret) {
id_priv->id.port_num = port;
goto out;
- } else if (ret == 1)
- break;
+ }
}
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 5ce7b9e8bff6..7275e727e0f5 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -920,8 +920,7 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev,
return netdev;
}
-static int __devinit c2_probe(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
int ret = 0, i;
unsigned long reg0_start, reg0_flags, reg0_len;
@@ -1191,7 +1190,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
return ret;
}
-static void __devexit c2_remove(struct pci_dev *pcidev)
+static void c2_remove(struct pci_dev *pcidev)
{
struct c2_dev *c2dev = pci_get_drvdata(pcidev);
struct net_device *netdev = c2dev->netdev;
@@ -1236,7 +1235,7 @@ static struct pci_driver c2_pci_driver = {
.name = DRV_NAME,
.id_table = c2_pci_table,
.probe = c2_probe,
- .remove = __devexit_p(c2_remove),
+ .remove = c2_remove,
};
static int __init c2_init_module(void)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 6ae698e68775..ba7a1208ff9e 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -498,16 +498,16 @@ extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct ib_send_wr **bad_wr);
extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
struct ib_recv_wr **bad_wr);
-extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
+extern void c2_init_qp_table(struct c2_dev *c2dev);
+extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
extern void c2_set_qp_state(struct c2_qp *, int);
extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
/* PDs */
extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);
+extern int c2_init_pd_table(struct c2_dev *c2dev);
+extern void c2_cleanup_pd_table(struct c2_dev *c2dev);
/* CQs */
extern int c2_init_cq(struct c2_dev *c2dev, int entries,
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 32d34e88d5cf..706cf97cbe8f 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -311,6 +311,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
+ break;
}
default:
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
index 161f2a285351..f3e81dc357bb 100644
--- a/drivers/infiniband/hw/amso1100/c2_pd.c
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -70,7 +70,7 @@ void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
spin_unlock(&c2dev->pd_table.lock);
}
-int __devinit c2_init_pd_table(struct c2_dev *c2dev)
+int c2_init_pd_table(struct c2_dev *c2dev)
{
c2dev->pd_table.last = 0;
@@ -84,7 +84,7 @@ int __devinit c2_init_pd_table(struct c2_dev *c2dev)
return 0;
}
-void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev)
+void c2_cleanup_pd_table(struct c2_dev *c2dev)
{
kfree(c2dev->pd_table.table);
}
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 0d7b6f23caff..28cd5cb51859 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -1010,13 +1010,13 @@ out:
return err;
}
-void __devinit c2_init_qp_table(struct c2_dev *c2dev)
+void c2_init_qp_table(struct c2_dev *c2dev)
{
spin_lock_init(&c2dev->qp_table.lock);
idr_init(&c2dev->qp_table.idr);
}
-void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
+void c2_cleanup_qp_table(struct c2_dev *c2dev)
{
idr_destroy(&c2dev->qp_table.idr);
}
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index e4a73158fc7f..b7c986990053 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -442,7 +442,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
* involves initializing the various limits and resource pools that
* comprise the RNIC instance.
*/
-int __devinit c2_rnic_init(struct c2_dev *c2dev)
+int c2_rnic_init(struct c2_dev *c2dev)
{
int err;
u32 qsize, msgsize;
@@ -611,7 +611,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
/*
* Called by c2_remove to cleanup the RNIC resources.
*/
-void __devexit c2_rnic_term(struct c2_dev *c2dev)
+void c2_rnic_term(struct c2_dev *c2dev)
{
/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index aaf88ef9409c..3e094cd6a0e3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,9 +128,8 @@ static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
+ WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -1756,9 +1755,8 @@ static void ep_timeout(unsigned long arg)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+ WARN(1, "%s unexpected state ep %p state %u\n",
__func__, ep, ep->com.state);
- WARN_ON(1);
abort = 0;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6cfd4d8fd0bd..c13745cde7fa 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/if_vlan.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
+#include <net/tcp.h>
#include "iw_cxgb4.h"
@@ -61,6 +63,14 @@ static char *states[] = {
NULL,
};
+static int nocong;
+module_param(nocong, int, 0644);
+MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
+
+static int enable_ecn;
+module_param(enable_ecn, int, 0644);
+MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
+
static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
@@ -151,9 +161,8 @@ static void stop_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
- printk(KERN_ERR "%s timer stopped when its not running! "
+ WARN(1, "%s timer stopped when it's not running! "
"ep %p state %u\n", __func__, ep, ep->com.state);
- WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -266,6 +275,7 @@ void _c4iw_free_ep(struct kref *kref)
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
+ remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
}
kfree(ep);
}
@@ -442,6 +452,50 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+ (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+ (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
+ struct l2t_entry *l2t)
+{
+ unsigned int ntuple = 0;
+ u32 viid;
+
+ switch (dev->rdev.lldi.filt_mode) {
+
+ /* default filter mode */
+ case HW_TPL_FR_MT_PR_IV_P_FC:
+ if (l2t->vlan == VLAN_NONE)
+ ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+ else {
+ ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ }
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ case HW_TPL_FR_MT_PR_OV_P_FC: {
+ viid = cxgb4_port_viid(l2t->neigh->dev);
+
+ ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+ ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+ ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+ ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+ FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ break;
+ }
+ default:
+ break;
+ }
+ return ntuple;
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -464,7 +518,8 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
@@ -475,6 +530,7 @@ static int send_connect(struct c4iw_ep *ep)
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
@@ -493,8 +549,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = ep->com.local_addr.sin_addr.s_addr;
req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = 0;
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
req->opt2 = cpu_to_be32(opt2);
+ set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -771,6 +828,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
/* setup the hwtid for this connection */
ep->hwtid = tid;
cxgb4_insert_tid(t, ep, tid);
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -778,7 +836,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_emss(ep, ntohs(req->tcp_opt));
/* dealloc the atid */
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
+ set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
send_flowc(ep, NULL);
@@ -804,6 +864,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
@@ -812,6 +873,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
close_complete_upcall(ep);
state_set(&ep->com, ABORTING);
+ set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
@@ -826,6 +888,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
PDBG("peer close delivered ep %p cm_id %p tid %u\n",
ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(DISCONN_UPCALL, &ep->com.history);
}
}
@@ -844,6 +907,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL;
ep->com.qp = NULL;
+ set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -876,6 +940,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
PDBG("%s ep %p tid %u status %d\n", __func__, ep,
ep->hwtid, status);
+ set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
if (status < 0) {
@@ -916,6 +981,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
ep->parent_ep->com.cm_id,
&event);
}
+ set_bit(CONNREQ_UPCALL, &ep->com.history);
c4iw_put_ep(&ep->parent_ep->com);
ep->parent_ep = NULL;
}
@@ -932,6 +998,7 @@ static void established_upcall(struct c4iw_ep *ep)
if (ep->com.cm_id) {
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ set_bit(ESTAB_UPCALL, &ep->com.history);
}
}
@@ -1317,6 +1384,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int dlen = ntohs(hdr->len);
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
+ __u8 status = hdr->status;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1339,9 +1407,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
case MPA_REP_SENT:
break;
default:
- printk(KERN_ERR MOD "%s Unexpected streaming data."
- " ep %p state %d tid %u\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
+ pr_err("%s Unexpected streaming data." \
+ " ep %p state %d tid %u status %d\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid, status);
/*
* The ep will timeout and inform the ULP of the failure.
@@ -1384,6 +1452,63 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+{
+ struct sk_buff *skb;
+ struct fw_ofld_connection_wr *req;
+ unsigned int mtu_idx;
+ int wscale;
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ ep->l2t));
+ req->le.lport = ep->com.local_addr.sin_port;
+ req->le.pport = ep->com.remote_addr.sin_port;
+ req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
+ req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+ req->tcb.cplrxdataack_cplpassacceptrpl =
+ htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+ req->tcb.tx_max = jiffies;
+ req->tcb.rcv_adv = htons(1);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ req->tcb.opt0 = TCAM_BYPASS(1) |
+ (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
+ DELACK(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ ULP_MODE(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ(rcv_win >> 10);
+ req->tcb.opt2 = PACE(1) |
+ TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+ RX_CHANNEL(0) |
+ CCTRL_ECN(enable_ecn) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ req->tcb.opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ req->tcb.opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ req->tcb.opt2 |= WND_SCALE_EN(1);
+ req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ set_bit(ACT_OFLD_CONN, &ep->com.history);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
/*
* Return whether a failed active open has allocated a TID
*/
@@ -1393,6 +1518,111 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_ARP_MISS;
}
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+ int err = 0;
+ struct rtable *rt;
+ struct port_info *pi;
+ struct net_device *pdev;
+ int step;
+ struct neighbour *neigh;
+
+ PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+ init_timer(&ep->timer);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ pr_err("%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+ insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+ /* find a route */
+ rt = find_route(ep->com.dev,
+ ep->com.cm_id->local_addr.sin_addr.s_addr,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->com.cm_id->local_addr.sin_port,
+ ep->com.cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ pr_err("%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->dst;
+
+ neigh = dst_neigh_lookup(ep->dst,
+ &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ /* get a l2t entry */
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ ep->com.cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ neigh, neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(neigh->dev);
+ ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+ 0x7F) << 1;
+ }
+
+ step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = pi->port_id * step;
+ ep->ctrlq_idx = pi->port_id;
+ step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+ if (!ep->l2t) {
+ pr_err("%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ /*
+	 * Remember to send a notification to the upper layer.
+	 * We are in here because the upper layer is not aware that this
+	 * is a reconnect attempt, so it is still waiting for the
+	 * response to the first connect request.
+ */
+ connect_reply_upcall(ep, -ECONNRESET);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
@@ -1413,6 +1643,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+ set_bit(ACT_OPEN_RPL, &ep->com.history);
+
/*
* Log interesting failures.
*/
@@ -1420,6 +1652,29 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_RESET:
case CPL_ERR_CONN_TIMEDOUT:
break;
+ case CPL_ERR_TCAM_FULL:
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.tcam_full++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ send_fw_act_open_req(ep,
+ GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status))));
+ return 0;
+ }
+ break;
+ case CPL_ERR_CONN_EXIST:
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+ atid);
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_reconnect(ep);
+ return 0;
+ }
+ break;
default:
printk(KERN_INFO MOD "Active open failure - "
"atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
@@ -1437,6 +1692,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (status && act_open_has_tid(status))
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
cxgb4_free_atid(t, atid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
@@ -1453,13 +1709,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = lookup_stid(t, stid);
if (!ep) {
- printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
- return 0;
+ PDBG("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
}
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+out:
return 0;
}
@@ -1511,14 +1768,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
skb_get(skb);
cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
wscale = compute_wscale(rcv_win);
- opt0 = KEEP_ALIVE(1) |
+ opt0 = (nocong ? NO_CONG(1) : 0) |
+ KEEP_ALIVE(1) |
DELACK(1) |
WND_SCALE(wscale) |
MSS_IDX(mtu_idx) |
L2T_IDX(ep->l2t->idx) |
TX_CHAN(ep->tx_chan) |
SMAC_SEL(ep->smac_idx) |
- DSCP(ep->tos) |
+ DSCP(ep->tos >> 2) |
ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
@@ -1530,6 +1788,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (enable_ecn) {
+ const struct tcphdr *tcph;
+ u32 hlen = ntohl(req->hdr_len);
+
+ tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
+ G_IP_HDR_LEN(hlen);
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN(1);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
@@ -1646,22 +1913,30 @@ out:
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
- struct c4iw_ep *child_ep, *parent_ep;
+ struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
struct rtable *rt;
- __be32 local_ip, peer_ip;
+ __be32 local_ip, peer_ip = 0;
__be16 local_port, peer_port;
int err;
+ u16 peer_mss = ntohs(req->tcpopt.mss);
parent_ep = lookup_stid(t, stid);
- PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+ if (!parent_ep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+ PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+ "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+ ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+
if (state_read(&parent_ep->com) != LISTEN) {
printk(KERN_ERR "%s - listening ep not in LISTEN\n",
__func__);
@@ -1695,6 +1970,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ if (peer_mss && child_ep->mtu > (peer_mss + 40))
+ child_ep->mtu = peer_mss + 40;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1716,6 +1994,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
accept_cr(child_ep, peer_ip, skb, req);
+ set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
reject:
reject_cr(dev, hwtid, peer_ip, skb);
@@ -1735,12 +2014,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+ PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ ntohs(req->tcp_opt));
+
set_emss(ep, ntohs(req->tcp_opt));
+ insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
dst_confirm(ep->dst);
state_set(&ep->com, MPA_REQ_WAIT);
start_ep_timer(ep);
send_flowc(ep, skb);
+ set_bit(PASS_ESTAB, &ep->com.history);
return 0;
}
@@ -1760,6 +2044,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
+ set_bit(PEER_CLOSE, &ep->com.history);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case MPA_REQ_WAIT:
@@ -1839,74 +2124,6 @@ static int is_neg_adv_abort(unsigned int status)
status == CPL_ERR_PERSIST_NEG_ADVICE;
}
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
- struct rtable *rt;
- int err = 0;
-
- PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
- if (ep->atid == -1) {
- printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(ep->com.dev,
- ep->com.cm_id->local_addr.sin_addr.s_addr,
- ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->com.cm_id->local_addr.sin_port,
- ep->com.cm_id->remote_addr.sin_port, 0);
- if (!rt) {
- printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
-
- err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->dst, ep->com.dev, false);
- if (err) {
- printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail4;
- }
-
- PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
- ep->l2t->idx);
-
- state_set(&ep->com, CONNECTING);
- ep->tos = 0;
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- cxgb4_l2t_release(ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
- /*
- * remember to send notification to upper layer.
- * We are in here so the upper layer is not aware that this is
- * re-connect attempt and so, upper layer is still waiting for
- * response of 1st connect request.
- */
- connect_reply_upcall(ep, -ECONNRESET);
- c4iw_put_ep(&ep->com);
-out:
- return err;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1927,6 +2144,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(PEER_ABORT, &ep->com.history);
/*
* Wake up any threads in rdma_init() or rdma_fini().
@@ -2141,6 +2359,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
+ set_bit(ULP_REJECT, &ep->com.history);
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2170,6 +2389,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
+ set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
abort_connection(ep, NULL, GFP_KERNEL);
@@ -2293,6 +2513,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto fail2;
}
+ insert_handle(dev, &dev->atid_idr, ep, ep->atid);
PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2338,6 +2559,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
fail4:
dst_release(ep->dst);
fail3:
+ remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
cm_id->rem_ref(cm_id);
@@ -2352,7 +2574,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
-
might_sleep();
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2371,30 +2592,54 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (dev->rdev.lldi.enable_fw_ofld_conn)
+ ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+ else
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
err = -ENOMEM;
goto fail2;
}
-
+ insert_handle(dev, &dev->stid_idr, ep, ep->stid);
state_set(&ep->com, LISTEN);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.local_addr.sin_addr.s_addr,
- ep->com.local_addr.sin_port,
- ep->com.dev->rdev.lldi.rxq_ids[0]);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ do {
+ err = cxgb4_create_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0],
+ 0,
+ 0);
+ if (err == -EBUSY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(100));
+ }
+ } while (err == -EBUSY);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+ ep->stid, ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (!err)
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+ &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
if (!err) {
cm_id->provider_data = ep;
goto out;
}
-fail3:
+ pr_err("%s cxgb4_create_server/filter failed err %d " \
+ "stid %d laddr %08x lport %d\n", \
+ __func__, err, ep->stid,
+ ntohl(ep->com.local_addr.sin_addr.s_addr),
+ ntohs(ep->com.local_addr.sin_port));
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
cm_id->rem_ref(cm_id);
@@ -2413,12 +2658,19 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = listen_stop(ep);
- if (err)
- goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
- __func__);
+ if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+ err = cxgb4_remove_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
cm_id->rem_ref(cm_id);
@@ -2482,10 +2734,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
+ set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep);
ret = send_abort(ep, NULL, gfp);
- } else
+ } else {
+ set_bit(EP_DISC_CLOSE, &ep->com.history);
ret = send_halfclose(ep, gfp);
+ }
if (ret)
fatal = 1;
}
@@ -2495,10 +2750,323 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
return ret;
}
-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct c4iw_ep *ep;
+ int atid = be32_to_cpu(req->tid);
+
+ ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+ if (!ep)
+ return;
+
+ switch (req->retval) {
+ case FW_ENOMEM:
+ set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ case FW_EADDRINUSE:
+ set_bit(ACT_RETRY_INUSE, &ep->com.history);
+ if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+ send_fw_act_open_req(ep, atid);
+ return;
+ }
+ break;
+ default:
+ pr_info("%s unexpected ofld conn wr retval %d\n",
+ __func__, req->retval);
+ break;
+ }
+ pr_err("active ofld_connect_wr failure %d atid %d\n",
+ req->retval, atid);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.act_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ connect_reply_upcall(ep, status2errno(req->retval));
+ state_set(&ep->com, DEAD);
+ remove_handle(dev, &dev->atid_idr, atid);
+ cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+ struct sk_buff *rpl_skb;
+ struct cpl_pass_accept_req *cpl;
+ int ret;
+
+ rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+ BUG_ON(!rpl_skb);
+ if (req->retval) {
+ PDBG("%s passive open failure %d\n", __func__, req->retval);
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.pas_ofld_conn_fails++;
+ mutex_unlock(&dev->rdev.stats.lock);
+ kfree_skb(rpl_skb);
+ } else {
+ cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+ htonl(req->tid)));
+ ret = pass_accept_req(dev, rpl_skb);
+ if (!ret)
+ kfree_skb(rpl_skb);
+ }
+ return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
- c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+ switch (rpl->type) {
+ case FW6_TYPE_CQE:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+ req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+ switch (req->t_state) {
+ case TCP_SYN_SENT:
+ active_ofld_conn_reply(dev, skb, req);
+ break;
+ case TCP_SYN_RECV:
+ passive_ofld_conn_reply(dev, skb, req);
+ break;
+ default:
+ pr_err("%s unexpected ofld conn wr state %d\n",
+ __func__, req->t_state);
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
+{
+ u32 l2info;
+ u16 vlantag, len, hdr_len;
+ u8 intf;
+ struct cpl_rx_pkt *cpl = cplhdr(skb);
+ struct cpl_pass_accept_req *req;
+ struct tcp_options_received tmp_opt;
+
+	/* Store values from cpl_rx_pkt in a temporary location. */
+ vlantag = cpl->vlan;
+ len = cpl->len;
+ l2info = cpl->l2info;
+ hdr_len = cpl->hdr_len;
+ intf = cpl->iff;
+
+ __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+ /*
+	 * We need to parse the TCP options from the SYN packet
+	 * in order to generate the cpl_pass_accept_req.
+ */
+ memset(&tmp_opt, 0, sizeof(tmp_opt));
+ tcp_clear_options(&tmp_opt);
+ tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+
+ req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+ V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+ F_SYN_XACT_MATCH);
+ req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
+ V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
+ V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
+ req->vlan = vlantag;
+ req->len = len;
+ req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+ PASS_OPEN_TOS(tos));
+ req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+ if (tmp_opt.wscale_ok)
+ req->tcpopt.wsf = tmp_opt.snd_wscale;
+ req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+ if (tmp_opt.sack_ok)
+ req->tcpopt.sack = 1;
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+ return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+ __be32 laddr, __be16 lport,
+ __be32 raddr, __be16 rport,
+ u32 rcv_isn, u32 filter, u16 window,
+ u32 rss_qid, u8 port_id)
+{
+ struct sk_buff *req_skb;
+ struct fw_ofld_connection_wr *req;
+ struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+ req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+ req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+ req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+ req->le.filter = filter;
+ req->le.lport = lport;
+ req->le.pport = rport;
+ req->le.u.ipv4.lip = laddr;
+ req->le.u.ipv4.pip = raddr;
+ req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+ req->tcb.rcv_adv = htons(window);
+ req->tcb.t_state_to_astid =
+ htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+ V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+ V_FW_OFLD_CONNECTION_WR_ASTID(
+ GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+ /*
+	 * We store the qid in opt2, which the firmware will use
+	 * to send us the wr response.
+ */
+ req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+ /*
+	 * We initialize the MSS index in the TCB to 0xF so that,
+	 * when the driver sends cpl_pass_accept_rpl, the TCB picks
+	 * up the correct value. If this were 0, TP would ignore any
+	 * MSS index value > 0.
+ */
+ req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->cookie = cpu_to_be64((u64)skb);
+
+ set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+ cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for the CPL_RX_PKT message. cpl_rx_pkt messages arrive
+ * when a filter, rather than a server entry, is used to redirect a
+ * SYN packet. Packets that hit the filter are redirected to the
+ * offload queue, and the driver tries to establish the connection
+ * using a firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ int stid;
+ unsigned int filter;
+ struct ethhdr *eh = NULL;
+ struct vlan_ethhdr *vlan_eh = NULL;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct rss_header *rss = (void *)skb->data;
+ struct cpl_rx_pkt *cpl = (void *)skb->data;
+ struct cpl_pass_accept_req *req = (void *)(rss + 1);
+ struct l2t_entry *e;
+ struct dst_entry *dst;
+ struct rtable *rt;
+ struct c4iw_ep *lep;
+ u16 window;
+ struct port_info *pi;
+ struct net_device *pdev;
+ u16 rss_qid;
+ int step;
+ u32 tx_chan;
+ struct neighbour *neigh;
+
+ /* Drop all non-SYN packets */
+ if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+ goto reject;
+
+ /*
+ * Drop all packets which did not hit the filter.
+ * Unlikely to happen.
+ */
+ if (!(rss->filter_hit && rss->filter_tid))
+ goto reject;
+
+ /*
+	 * Calculate the server tid from the filter hit index in cpl_rx_pkt.
+ */
+ stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+ + dev->rdev.lldi.tids->nstids;
+
+ lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+ if (!lep) {
+ PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+ goto reject;
+ }
+
+ if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ skb->vlan_tci = ntohs(cpl->vlan);
+ }
+
+ if (iph->version != 0x4)
+ goto reject;
+
+ tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)rss);
+ skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+ skb_get(skb);
+
+ PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+ ntohs(tcph->source), iph->tos);
+
+ rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+ iph->tos);
+ if (!rt) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->dst;
+ neigh = dst_neigh_lookup_skb(dst, skb);
+
+ if (neigh->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, iph->daddr);
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ tx_chan = cxgb4_port_chan(pdev);
+ dev_put(pdev);
+ } else {
+ e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ neigh->dev, 0);
+ pi = (struct port_info *)netdev_priv(neigh->dev);
+ tx_chan = cxgb4_port_chan(neigh->dev);
+ }
+ if (!e) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ goto free_dst;
+ }
+
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+ window = htons(tcph->window);
+
+	/* Calculate filter portion for LE region. */
+ filter = cpu_to_be32(select_ntuple(dev, dst, e));
+
+ /*
+	 * Synthesize the cpl_pass_accept_req. We have everything except the
+	 * TID. Once the firmware sends a reply with the TID, we update the TID
+	 * field in the cpl and pass it through the regular cpl_pass_accept_req path.
+ */
+ build_cpl_pass_accept_req(skb, stid, iph->tos);
+ send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+ tcph->source, ntohl(tcph->seq), filter, window,
+ rss_qid, pi->port_id);
+ cxgb4_l2t_release(e);
+free_dst:
+ dst_release(dst);
+reject:
return 0;
}
@@ -2521,7 +3089,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
- [CPL_FW6_MSG] = async_event
+ [CPL_FW6_MSG] = deferred_fw6_msg,
+ [CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
@@ -2532,6 +3101,7 @@ static void process_timeout(struct c4iw_ep *ep)
mutex_lock(&ep->com.mutex);
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
ep->com.state);
+ set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
__state_set(&ep->com, ABORTING);
@@ -2551,9 +3121,8 @@ static void process_timeout(struct c4iw_ep *ep)
__state_set(&ep->com, ABORTING);
break;
default:
- printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+ WARN(1, "%s unexpected state ep %p tid %u state %u\n",
__func__, ep, ep->hwtid, ep->com.state);
- WARN_ON(1);
abort = 0;
}
mutex_unlock(&ep->com.mutex);
@@ -2653,7 +3222,7 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s type %u\n", __func__, rpl->type);
switch (rpl->type) {
- case 1:
+ case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
@@ -2661,7 +3230,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_wake_up(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
- case 2:
+ case FW6_TYPE_CQE:
+ case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
sched(dev, skb);
break;
default:
@@ -2724,7 +3294,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
- [CPL_FW6_MSG] = fw6_msg
+ [CPL_FW6_MSG] = fw6_msg,
+ [CPL_RX_PKT] = sched
};
int __init c4iw_cm_init(void)
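The cm.c changes above record every connection-manager event by setting a bit in ep->com.history, which the debugfs "eps" dump added below in device.c prints only as a raw hex value. The following is a minimal, self-contained sketch of how such a bitmask could be decoded into event names when reading that dump; the decode helper is illustrative only and not part of the patch, and the names simply mirror the c4iw_ep_history enum added later in iw_cxgb4.h.

/* Illustrative only: decode an ep->com.history bitmask into event names. */
#include <stdio.h>

static const char * const ep_history_names[] = {
	"ACT_OPEN_REQ", "ACT_OFLD_CONN", "ACT_OPEN_RPL", "ACT_ESTAB",
	"PASS_ACCEPT_REQ", "PASS_ESTAB", "ABORT_UPCALL", "ESTAB_UPCALL",
	"CLOSE_UPCALL", "ULP_ACCEPT", "ULP_REJECT", "TIMEDOUT",
	"PEER_ABORT", "PEER_CLOSE", "CONNREQ_UPCALL", "ABORT_CONN",
	"DISCONN_UPCALL", "EP_DISC_CLOSE", "EP_DISC_ABORT",
	"CONN_RPL_UPCALL", "ACT_RETRY_NOMEM", "ACT_RETRY_INUSE",
};

static void print_ep_history(unsigned long history)
{
	unsigned int i;

	for (i = 0; i < sizeof(ep_history_names) / sizeof(ep_history_names[0]); i++)
		if (history & (1UL << i))
			printf("%s ", ep_history_names[i]);
	printf("\n");
}

int main(void)
{
	/* e.g. "history 0x9" from dump_ep() decodes to ACT_OPEN_REQ ACT_ESTAB */
	print_ep_history(0x9);
	return 0;
}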
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd783700..ba11c76c0b5a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " DB State: %s Transitions %llu\n",
db_state_str[dev->db_state],
dev->rdev.stats.db_state_transitions);
+ seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+ seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.act_ofld_conn_fails);
+ seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+ dev->rdev.stats.pas_ofld_conn_fails);
return 0;
}
@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
dev->rdev.stats.db_empty = 0;
dev->rdev.stats.db_drop = 0;
dev->rdev.stats.db_state_transitions = 0;
+ dev->rdev.stats.tcam_full = 0;
+ dev->rdev.stats.act_ofld_conn_fails = 0;
+ dev->rdev.stats.pas_ofld_conn_fails = 0;
mutex_unlock(&dev->rdev.stats.lock);
return count;
}
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
.write = stats_clear,
};
+static int dump_ep(int id, void *p, void *data)
+{
+ struct c4iw_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+ "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+ ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+ ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port),
+ &ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(ep->com.remote_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+ struct c4iw_listen_ep *ep = p;
+ struct c4iw_debugfs_data *epd = data;
+ int space;
+ int cc;
+
+ space = epd->bufsize - epd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+ "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+ ep->com.flags, ep->stid, ep->backlog,
+ &ep->com.local_addr.sin_addr.s_addr,
+ ntohs(ep->com.local_addr.sin_port));
+ if (cc < space)
+ epd->pos += cc;
+ return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd = file->private_data;
+ if (!epd) {
+ pr_info("%s null qpd?\n", __func__);
+ return 0;
+ }
+ vfree(epd->buf);
+ kfree(epd);
+ return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+ struct c4iw_debugfs_data *epd;
+ int ret = 0;
+ int count = 1;
+
+ epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+ if (!epd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ epd->devp = inode->i_private;
+ epd->pos = 0;
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+ idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+ spin_unlock_irq(&epd->devp->lock);
+
+ epd->bufsize = count * 160;
+ epd->buf = vmalloc(epd->bufsize);
+ if (!epd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&epd->devp->lock);
+ idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+ idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+ spin_unlock_irq(&epd->devp->lock);
+
+ file->private_data = epd;
+ goto out;
+err1:
+ kfree(epd);
+out:
+ return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = ep_open,
+ .release = ep_release,
+ .read = debugfs_read,
+};
+
static int setup_debugfs(struct c4iw_dev *devp)
{
struct dentry *de;
@@ -344,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+ de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &ep_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+
return 0;
}
@@ -475,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
idr_destroy(&ctx->dev->cqidr);
idr_destroy(&ctx->dev->qpidr);
idr_destroy(&ctx->dev->mmidr);
+ idr_destroy(&ctx->dev->hwtid_idr);
+ idr_destroy(&ctx->dev->stid_idr);
+ idr_destroy(&ctx->dev->atid_idr);
iounmap(ctx->dev->rdev.oc_mw_kva);
ib_dealloc_device(&ctx->dev->ibdev);
ctx->dev = NULL;
@@ -532,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
idr_init(&devp->cqidr);
idr_init(&devp->qpidr);
idr_init(&devp->mmidr);
+ idr_init(&devp->hwtid_idr);
+ idr_init(&devp->stid_idr);
+ idr_init(&devp->atid_idr);
spin_lock_init(&devp->lock);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
@@ -577,14 +703,76 @@ out:
return ctx;
}
+static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ const __be64 *rsp,
+ u32 pktshift)
+{
+ struct sk_buff *skb;
+
+ /*
+	 * Allocate space for the cpl_pass_accept_req that will be synthesized
+	 * by the driver. Once the driver synthesizes the request, the skb will
+	 * go through the regular cpl_pass_accept_req processing.
+ * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+ * cpl_rx_pkt.
+ */
+ skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
+
+ /*
+ * This skb will contain:
+ * rss_header from the rspq descriptor (1 flit)
+ * cpl_rx_pkt struct from the rspq descriptor (2 flits)
+ * space for the difference between the size of an
+ * rx_pkt and pass_accept_req cpl (1 flit)
+ * the packet data from the gl
+ */
+ skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header));
+ skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
+ sizeof(struct cpl_pass_accept_req),
+ gl->va + pktshift,
+ gl->tot_len - pktshift);
+ return skb;
+}
+
+static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
+ const __be64 *rsp)
+{
+ unsigned int opcode = *(u8 *)rsp;
+ struct sk_buff *skb;
+
+ if (opcode != CPL_RX_PKT)
+ goto out;
+
+	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
+ if (skb == NULL)
+ goto out;
+
+ if (c4iw_handlers[opcode] == NULL) {
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+ kfree_skb(skb);
+ goto out;
+ }
+ c4iw_handlers[opcode](dev, skb);
+ return 1;
+out:
+ return 0;
+}
+
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct uld_ctx *ctx = handle;
struct c4iw_dev *dev = ctx->dev;
struct sk_buff *skb;
- const struct cpl_act_establish *rpl;
- unsigned int opcode;
+ u8 opcode;
if (gl == NULL) {
/* omit RSS and rsp_ctrl at end of descriptor */
@@ -601,19 +789,29 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
u32 qid = be32_to_cpu(rc->pldbuflen_qid);
c4iw_ev_handler(dev, qid);
return 0;
+ } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
+ if (recv_rx_pkt(dev, gl, rsp))
+ return 0;
+
+ pr_info("%s: unexpected FL contents at %p, " \
+ "RSS %#llx, FL %#llx, len %u\n",
+ pci_name(ctx->lldi.pdev), gl->va,
+ (unsigned long long)be64_to_cpu(*rsp),
+ (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+ gl->tot_len);
+
+ return 0;
} else {
skb = cxgb4_pktgl_to_skb(gl, 128, 128);
if (unlikely(!skb))
goto nomem;
}
- rpl = cplhdr(skb);
- opcode = rpl->ot.opcode;
-
+ opcode = *(u8 *)rsp;
if (c4iw_handlers[opcode])
c4iw_handlers[opcode](dev, skb);
else
- printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+ pr_info("%s no handler opcode 0x%x...\n", __func__,
opcode);
return 0;
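The c4iw_uld_rx_handler() change above reads the opcode from the first byte of the response descriptor and uses it to index the c4iw_handlers[] table, falling back to recv_rx_pkt() when the RSS header and free-list contents disagree. Below is a stripped-down sketch of that table-driven dispatch pattern; the opcode values and handler names are invented for illustration and do not correspond to real CPL opcodes.

/* Illustrative sketch of opcode-indexed dispatch; opcodes and handlers are made up. */
#include <stdio.h>

#define NUM_OPCODES 4

typedef int (*msg_handler)(const unsigned char *msg);

static int handle_estab(const unsigned char *msg)
{
	printf("established\n");
	return 0;
}

static int handle_rx_pkt(const unsigned char *msg)
{
	printf("rx_pkt\n");
	return 0;
}

/* Sparse table: a NULL slot means no handler is registered for that opcode. */
static msg_handler handlers[NUM_OPCODES] = {
	[1] = handle_estab,
	[3] = handle_rx_pkt,
};

static int dispatch(const unsigned char *msg)
{
	unsigned char opcode = msg[0];	/* opcode is the first byte of the message */

	if (opcode < NUM_OPCODES && handlers[opcode])
		return handlers[opcode](msg);
	printf("no handler opcode 0x%x\n", opcode);
	return -1;
}

int main(void)
{
	unsigned char estab_msg[] = { 1 };
	unsigned char unknown_msg[] = { 2 };

	dispatch(estab_msg);	/* prints "established" */
	dispatch(unknown_msg);	/* prints "no handler opcode 0x2" */
	return 0;
}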
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9beb3a9f0336..9c1644fb0259 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -130,6 +130,9 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
+ u64 tcam_full;
+ u64 act_ofld_conn_fails;
+ u64 pas_ofld_conn_fails;
};
struct c4iw_rdev {
@@ -223,6 +226,9 @@ struct c4iw_dev {
struct dentry *debugfs_root;
enum db_state db_state;
int qpcnt;
+ struct idr hwtid_idr;
+ struct idr atid_idr;
+ struct idr stid_idr;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -712,6 +718,31 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
};
+enum c4iw_ep_history {
+ ACT_OPEN_REQ = 0,
+ ACT_OFLD_CONN = 1,
+ ACT_OPEN_RPL = 2,
+ ACT_ESTAB = 3,
+ PASS_ACCEPT_REQ = 4,
+ PASS_ESTAB = 5,
+ ABORT_UPCALL = 6,
+ ESTAB_UPCALL = 7,
+ CLOSE_UPCALL = 8,
+ ULP_ACCEPT = 9,
+ ULP_REJECT = 10,
+ TIMEDOUT = 11,
+ PEER_ABORT = 12,
+ PEER_CLOSE = 13,
+ CONNREQ_UPCALL = 14,
+ ABORT_CONN = 15,
+ DISCONN_UPCALL = 16,
+ EP_DISC_CLOSE = 17,
+ EP_DISC_ABORT = 18,
+ CONN_RPL_UPCALL = 19,
+ ACT_RETRY_NOMEM = 20,
+ ACT_RETRY_INUSE = 21
+};
+
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
@@ -723,6 +754,7 @@ struct c4iw_ep_common {
struct sockaddr_in remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
+ unsigned long history;
};
struct c4iw_listen_ep {
@@ -760,6 +792,7 @@ struct c4iw_ep {
u8 tos;
u8 retry_with_mpa_v1;
u8 tried_with_mpa_v1;
+ unsigned int retry_count;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 832e7a7d0aee..f8a62918a88d 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -713,8 +713,8 @@ static struct attribute_group ehca_dev_attr_grp = {
.attrs = ehca_dev_attrs
};
-static int __devinit ehca_probe(struct platform_device *dev,
- const struct of_device_id *id)
+static int ehca_probe(struct platform_device *dev,
+ const struct of_device_id *id)
{
struct ehca_shca *shca;
const u64 *handle;
@@ -879,7 +879,7 @@ probe1:
return -EINVAL;
}
-static int __devexit ehca_remove(struct platform_device *dev)
+static int ehca_remove(struct platform_device *dev)
{
struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
unsigned long flags;
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 2d41d04fd959..89517ffb4389 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -90,26 +90,6 @@
static DEFINE_SPINLOCK(hcall_lock);
-static u32 get_longbusy_msecs(int longbusy_rc)
-{
- switch (longbusy_rc) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
static long ehca_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index bfca37b2432f..7b371f545ece 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -127,9 +127,8 @@ const char *ipath_ibcstatus_str[] = {
"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};
-static void __devexit ipath_remove_one(struct pci_dev *);
-static int __devinit ipath_init_one(struct pci_dev *,
- const struct pci_device_id *);
+static void ipath_remove_one(struct pci_dev *);
+static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
@@ -148,7 +147,7 @@ MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
static struct pci_driver ipath_driver = {
.name = IPATH_DRV_NAME,
.probe = ipath_init_one,
- .remove = __devexit_p(ipath_remove_one),
+ .remove = ipath_remove_one,
.id_table = ipath_pci_tbl,
.driver = {
.groups = ipath_driver_attr_groups,
@@ -392,8 +391,7 @@ done:
static void cleanup_device(struct ipath_devdata *dd);
-static int __devinit ipath_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret, len, j;
struct ipath_devdata *dd;
@@ -737,7 +735,7 @@ static void cleanup_device(struct ipath_devdata *dd)
kfree(tmp);
}
-static void __devexit ipath_remove_one(struct pci_dev *pdev)
+static void ipath_remove_one(struct pci_dev *pdev)
{
struct ipath_devdata *dd = pci_get_drvdata(pdev);
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 49b09c697c7c..be2a60e142b0 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -719,16 +719,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
goto done;
/*
- * we ignore most issues after reporting them, but have to specially
- * handle hardware-disabled chips.
- */
- if (ret == 2) {
- /* unique error, known to ipath_init_one */
- ret = -EPERM;
- goto done;
- }
-
- /*
* We could bump this to allow for full rcvegrcnt + rcvtidcnt,
* but then it no longer nicely fits power of two, and since
* we now use routines that backend onto __get_free_pages, the
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 80079e5a2e30..dbc99d41605c 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;
- spin_lock_irqsave(&sriov->going_down_lock, flags);
spin_lock(&sriov->id_map_lock);
+ spin_lock_irqsave(&sriov->going_down_lock, flags);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
- spin_unlock(&sriov->id_map_lock);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ spin_unlock(&sriov->id_map_lock);
}
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index c9eb6a6815ce..ae67df35dd4d 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
- return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
+ return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}
static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,8 +77,9 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+ struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
- return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
@@ -99,12 +100,13 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
{
int err;
- err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+ err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
+ buf->entry_size = dev->dev->caps.cqe_size;
err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
&buf->mtt);
if (err)
@@ -120,8 +122,7 @@ err_mtt:
mlx4_mtt_cleanup(dev->dev, &buf->mtt);
err_buf:
- mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
- &buf->buf);
+ mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
out:
return err;
@@ -129,7 +130,7 @@ out:
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
- mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+ mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
@@ -137,8 +138,9 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
u64 buf_addr, int cqe)
{
int err;
+ int cqe_size = dev->dev->caps.cqe_size;
- *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+ *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -331,16 +333,23 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
struct mlx4_cqe *cqe, *new_cqe;
int i;
+ int cqe_size = cq->buf.entry_size;
+ int cqe_inc = cqe_size == 64 ? 1 : 0;
i = cq->mcq.cons_index;
cqe = get_cqe(cq, i & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
(i + 1) & cq->resize_buf->cqe);
- memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+ memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
+ new_cqe += cqe_inc;
+
new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+ cqe += cqe_inc;
}
++cq->mcq.cons_index;
}
@@ -438,6 +447,7 @@ err_buf:
out:
mutex_unlock(&cq->resize_mutex);
+
return err;
}
@@ -586,6 +596,9 @@ repoll:
if (!cqe)
return -EAGAIN;
+ if (cq->buf.entry_size == 64)
+ cqe++;
+
++cq->mcq.cons_index;
/*
@@ -807,6 +820,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
int nfreed = 0;
struct mlx4_cqe *cqe, *dest;
u8 owner_bit;
+ int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
/*
* First we need to find the current producer index, so we
@@ -825,12 +839,16 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
*/
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ cqe += cqe_inc;
+
if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
++nfreed;
} else if (nfreed) {
dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+ dest += cqe_inc;
+
owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
memcpy(dest, cqe, sizeof *cqe);
dest->owner_sr_opcode = owner_bit |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 718ec6b2bad2..e7d81c0d1ac5 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,15 +563,24 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- resp.qp_tab_size = dev->dev->caps.num_qps;
- resp.bf_reg_size = dev->dev->caps.bf_reg_size;
- resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
+ resp_v3.qp_tab_size = dev->dev->caps.num_qps;
+ resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ } else {
+ resp.dev_caps = dev->dev->caps.userspace_caps;
+ resp.qp_tab_size = dev->dev->caps.num_qps;
+ resp.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+ resp.cqe_size = dev->dev->caps.cqe_size;
+ }
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context)
@@ -586,7 +595,11 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
- err = ib_copy_to_udata(udata, &resp, sizeof resp);
+ if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
+ err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
+ else
+ err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
kfree(context);
@@ -1342,7 +1355,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->pdev->dev;
- ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ if (dev->caps.userspace_caps)
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ else
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
+
ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e04cbc9a54a5..dcd845bc30f0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,6 +90,7 @@ struct mlx4_ib_xrcd {
struct mlx4_ib_cq_buf {
struct mlx4_buf buf;
struct mlx4_mtt mtt;
+ int entry_size;
};
struct mlx4_ib_cq_resize {
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index 13beedeeef9f..07e6769ef43b 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -40,7 +40,9 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define MLX4_IB_UVERBS_ABI_VERSION 3
+
+#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
+#define MLX4_IB_UVERBS_ABI_VERSION 4
/*
* Make sure that all structs defined in this file remain laid out so
@@ -50,10 +52,18 @@
* instead.
*/
+struct mlx4_ib_alloc_ucontext_resp_v3 {
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+};
+
struct mlx4_ib_alloc_ucontext_resp {
+ __u32 dev_caps;
__u32 qp_tab_size;
__u16 bf_reg_size;
__u16 bf_regs_per_page;
+ __u32 cqe_size;
};
struct mlx4_ib_alloc_pd_resp {
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index aa12a533ae9e..87897b95666d 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -130,7 +130,7 @@ static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
-static char mthca_version[] __devinitdata =
+static char mthca_version[] =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -1139,8 +1139,7 @@ int __mthca_restart_one(struct pci_dev *pdev)
return __mthca_init_one(pdev, hca_type);
}
-static int __devinit mthca_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
@@ -1162,7 +1161,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
return ret;
}
-static void __devexit mthca_remove_one(struct pci_dev *pdev)
+static void mthca_remove_one(struct pci_dev *pdev)
{
mutex_lock(&mthca_device_mutex);
__mthca_remove_one(pdev);
@@ -1199,7 +1198,7 @@ static struct pci_driver mthca_driver = {
.name = DRV_NAME,
.id_table = mthca_pci_table,
.probe = mthca_init_one,
- .remove = __devexit_p(mthca_remove_one)
+ .remove = mthca_remove_one,
};
static void __init __mthca_check_profile_val(const char *name, int *pval,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 748db2d3e465..5b152a366dff 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -444,7 +444,7 @@ static irqreturn_t nes_interrupt(int irq, void *dev_id)
/**
* nes_probe - Device initialization
*/
-static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
+static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct nes_device *nesdev = NULL;
@@ -749,7 +749,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
/**
* nes_remove - unload from kernel
*/
-static void __devexit nes_remove(struct pci_dev *pcidev)
+static void nes_remove(struct pci_dev *pcidev)
{
struct nes_device *nesdev = pci_get_drvdata(pcidev);
struct net_device *netdev;
@@ -810,7 +810,7 @@ static struct pci_driver nes_pci_driver = {
.name = DRV_NAME,
.id_table = nes_pci_table,
.probe = nes_probe,
- .remove = __devexit_p(nes_remove),
+ .remove = nes_remove,
};
static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 5cac29e6bc1c..33cc58941a3e 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -532,6 +532,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
void nes_recheck_link_status(struct work_struct *work);
+void nes_terminate_timeout(unsigned long context);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index cfaacaf6bf5f..22ea67eea5dc 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -629,11 +629,9 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a
case SEND_RDMA_READ_ZERO:
default:
- if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
- printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
- __func__, __LINE__, cm_node->send_rdma0_op);
- WARN_ON(1);
- }
+ if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
+ WARN(1, "Unsupported RDMA0 len operation=%u\n",
+ cm_node->send_rdma0_op);
nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);
@@ -671,7 +669,6 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core = cm_node->cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
- u32 was_timer_set;
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send)
@@ -723,12 +720,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
}
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
-
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = new_send->timetosend;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, new_send->timetosend);
return ret;
}
@@ -946,10 +939,8 @@ static void nes_cm_timer_tick(unsigned long pass)
}
if (settimer) {
- if (!timer_pending(&cm_core->tcp_timer)) {
- cm_core->tcp_timer.expires = nexttimeout;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, nexttimeout);
}
}
@@ -1314,8 +1305,6 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- u32 was_timer_set;
-
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
@@ -1325,11 +1314,8 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
- was_timer_set = timer_pending(&cm_core->tcp_timer);
- if (!was_timer_set) {
- cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
- add_timer(&cm_core->tcp_timer);
- }
+ if (!timer_pending(&cm_core->tcp_timer))
+ mod_timer(&cm_core->tcp_timer, (jiffies + NES_SHORT_TIME));
return 0;
}
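
The nes_cm.c hunks above replace the open-coded "check timer_pending(), set ->expires, call add_timer()" sequence with mod_timer(), which programs the expiry and (re)activates the timer in one call. A minimal sketch of the two forms, assuming the classic pre-timer_setup() timer API of this kernel generation (illustrative names, not driver code):

	#include <linux/timer.h>

	static struct timer_list demo_timer;	/* assumed set up elsewhere */

	static void demo_arm_old(unsigned long when)
	{
		/* three steps: test, program the expiry, then activate */
		if (!timer_pending(&demo_timer)) {
			demo_timer.expires = when;
			add_timer(&demo_timer);
		}
	}

	static void demo_arm_new(unsigned long when)
	{
		/* mod_timer() programs and activates in one call; the pending
		 * check is kept so an already-armed timer is not pushed out */
		if (!timer_pending(&demo_timer))
			mod_timer(&demo_timer, when);
	}
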
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index fe7965ee4096..67647e264611 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
-static void nes_terminate_timeout(unsigned long context);
static void nes_terminate_start_timer(struct nes_qp *nesqp);
#ifdef CONFIG_INFINIBAND_NES_DEBUG
@@ -3520,7 +3519,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
}
/* Timeout routine in case terminate fails to complete */
-static void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(unsigned long context)
{
struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
@@ -3530,11 +3529,7 @@ static void nes_terminate_timeout(unsigned long context)
/* Set a timer in case hw cannot complete the terminate sequence */
static void nes_terminate_start_timer(struct nes_qp *nesqp)
{
- init_timer(&nesqp->terminate_timer);
- nesqp->terminate_timer.function = nes_terminate_timeout;
- nesqp->terminate_timer.expires = jiffies + HZ;
- nesqp->terminate_timer.data = (unsigned long)nesqp;
- add_timer(&nesqp->terminate_timer);
+ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
}
/**
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 3ba7be369452..416645259b0f 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -210,6 +210,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
}
while (1) {
+ if (skb_queue_empty(&nesqp->pau_list))
+ goto out;
+
seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd);
if (seq == nextseq) {
if (skb->len || processacks)
@@ -218,14 +221,13 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
goto out;
}
- if (skb->next == (struct sk_buff *)&nesqp->pau_list)
- goto out;
-
old_skb = skb;
skb = skb->next;
skb_unlink(old_skb, &nesqp->pau_list);
nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
nes_rem_ref_cm_node(nesqp->cm_node);
+ if (skb == (struct sk_buff *)&nesqp->pau_list)
+ goto out;
}
return skb;
@@ -245,7 +247,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
struct nes_rskb_cb *cb;
struct pau_fpdu_info *fpdu_info = NULL;
struct pau_fpdu_frag frags[MAX_FPDU_FRAGS];
- unsigned long flags;
u32 fpdu_len = 0;
u32 tmp_len;
int frag_cnt = 0;
@@ -260,12 +261,10 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
*pau_fpdu_info = NULL;
- spin_lock_irqsave(&nesqp->pau_lock, flags);
skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- }
+
cb = (struct nes_rskb_cb *)&skb->cb[0];
if (skb->len) {
fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING;
@@ -290,10 +289,9 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
skb = nes_get_next_skb(nesdev, nesqp, skb,
nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd);
- if (!skb) {
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb)
goto out;
- } else if (rst_rcvd) {
+ if (rst_rcvd) {
/* rst received in the middle of fpdu */
for (; i >= 0; i--) {
skb_unlink(frags[i].skb, &nesqp->pau_list);
@@ -320,8 +318,6 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
frag_cnt = 1;
}
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
-
/* Found one */
fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
if (fpdu_info == NULL) {
@@ -383,9 +379,8 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
if (frags[i].skb->len == 0) {
/* Pull skb off the list - it will be freed in the callback */
- spin_lock_irqsave(&nesqp->pau_lock, flags);
- skb_unlink(frags[i].skb, &nesqp->pau_list);
- spin_unlock_irqrestore(&nesqp->pau_lock, flags);
+ if (!skb_queue_empty(&nesqp->pau_list))
+ skb_unlink(frags[i].skb, &nesqp->pau_list);
} else {
/* Last skb still has data so update the seq */
iph = (struct iphdr *)(cb->data_start + ETH_HLEN);
@@ -414,14 +409,18 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
struct pau_fpdu_info *fpdu_info;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
+ unsigned long flags;
u64 u64tmp;
u32 u32tmp;
int rc;
while (1) {
+ spin_lock_irqsave(&nesqp->pau_lock, flags);
rc = get_fpdu_info(nesdev, nesqp, &fpdu_info);
- if (fpdu_info == NULL)
+ if (rc || (fpdu_info == NULL)) {
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
return rc;
+ }
cqp_request = fpdu_info->cqp_request;
cqp_wqe = &cqp_request->cqp_wqe;
@@ -447,7 +446,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
lower_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
- upper_32_bits(u64tmp >> 32));
+ upper_32_bits(u64tmp));
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
lower_32_bits(fpdu_info->frags[0].physaddr));
@@ -475,6 +474,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
atomic_set(&cqp_request->refcount, 1);
nes_post_cqp_request(nesdev, cqp_request);
+ spin_unlock_irqrestore(&nesqp->pau_lock, flags);
}
return 0;
@@ -649,11 +649,9 @@ static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request
nesqp = qh_chg->nesqp;
/* Should we handle the bad completion */
- if (cqp_request->major_code) {
- printk(KERN_ERR PFX "Invalid cqp_request major_code=0x%x\n",
+ if (cqp_request->major_code)
+ WARN(1, PFX "Invalid cqp_request major_code=0x%x\n",
cqp_request->major_code);
- WARN_ON(1);
- }
switch (nesqp->pau_state) {
case PAU_DEL_QH:
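
The nes_mgt.c changes above pull the pau_lock out of get_fpdu_info() and hold it in forward_fpdus() across both the FPDU collection and the CQP post, so the skb queue cannot change between the two steps. A rough sketch of that locking shape, with hypothetical stubs standing in for the driver's helpers:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* protects the pending queue */

	/* stubs standing in for get_fpdu_info() and the CQP post */
	static int demo_collect_next(void **item) { *item = NULL; return 0; }
	static void demo_post(void *item) { }

	static int demo_forward_all(void)
	{
		unsigned long flags;
		void *item;
		int rc;

		for (;;) {
			spin_lock_irqsave(&demo_lock, flags);
			rc = demo_collect_next(&item);	/* no longer locks internally */
			if (rc || !item) {
				spin_unlock_irqrestore(&demo_lock, flags);
				return rc;
			}
			demo_post(item);		/* posted under the same lock */
			spin_unlock_irqrestore(&demo_lock, flags);
		}
	}
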
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 0564be757d82..9542e1644a5c 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -944,12 +944,13 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
- macaddr_high = ((u16) addr[0]) << 8;
- macaddr_high += (u16) addr[1];
- macaddr_low = ((u32) addr[2]) << 24;
- macaddr_low += ((u32) addr[3]) << 16;
- macaddr_low += ((u32) addr[4]) << 8;
- macaddr_low += (u32) addr[5];
+ macaddr_high = ((u8) addr[0]) << 8;
+ macaddr_high += (u8) addr[1];
+ macaddr_low = ((u8) addr[2]) << 24;
+ macaddr_low += ((u8) addr[3]) << 16;
+ macaddr_low += ((u8) addr[4]) << 8;
+ macaddr_low += (u8) addr[5];
+
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index cd0ecb215cca..07e4fbad987a 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+ init_timer(&nesqp->terminate_timer);
+ nesqp->terminate_timer.function = nes_terminate_timeout;
+ nesqp->terminate_timer.data = (unsigned long)nesqp;
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return &nesqp->ibqp;
}
-
/**
* nes_clean_cq
*/
@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ibmr;
case IWNES_MEMREG_TYPE_QP:
case IWNES_MEMREG_TYPE_CQ:
+ if (!region->length) {
+ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
+ ib_umem_release(region);
+ return ERR_PTR(-EINVAL);
+ }
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
if (!nespbl) {
nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 4443adfcd9ee..ddf066d9abb6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1134,9 +1134,8 @@ void qib_disable_after_error(struct qib_devdata *dd)
*dd->devstatusp |= QIB_STATUS_HWERROR;
}
-static void __devexit qib_remove_one(struct pci_dev *);
-static int __devinit qib_init_one(struct pci_dev *,
- const struct pci_device_id *);
+static void qib_remove_one(struct pci_dev *);
+static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
@@ -1153,7 +1152,7 @@ MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
- .remove = __devexit_p(qib_remove_one),
+ .remove = qib_remove_one,
.id_table = qib_pci_tbl,
.err_handler = &qib_pci_err_handler,
};
@@ -1342,8 +1341,7 @@ static void qib_postinit_cleanup(struct qib_devdata *dd)
qib_free_devdata(dd);
}
-static int __devinit qib_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret, j, pidx, initfail;
struct qib_devdata *dd = NULL;
@@ -1448,7 +1446,7 @@ bail:
return ret;
}
-static void __devexit qib_remove_one(struct pci_dev *pdev)
+static void qib_remove_one(struct pci_dev *pdev)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
int ret;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03870c2..35275099cafd 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n];
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- for (; q; qpp = &q->next) {
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock))) != NULL;
+ qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
*qpp = qp->next;
rcu_assign_pointer(qp->next, NULL);
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
break;
}
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- }
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
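
In qib_qp.c above, remove_qp() now reloads the list cursor through rcu_dereference_protected() in the loop condition instead of repeating the reload in two places. A condensed sketch of that unlink pattern for a singly linked, RCU-protected list (illustrative types; writers are assumed to hold demo_lock):

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct demo_node {
		struct demo_node __rcu *next;
	};

	static DEFINE_SPINLOCK(demo_lock);
	static struct demo_node __rcu *demo_head;

	static void demo_unlink(struct demo_node *victim)
	{
		struct demo_node __rcu **pp = &demo_head;
		struct demo_node *p;

		for (; (p = rcu_dereference_protected(*pp,
				lockdep_is_held(&demo_lock))) != NULL;
		     pp = &p->next) {
			if (p == victim) {
				*pp = victim->next;	/* unlink, as in the hunk */
				rcu_assign_pointer(victim->next, NULL);
				break;
			}
		}
	}
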
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 72ae63f0072d..67b0c1d23678 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
tx_req->mapping = addr;
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len);
if (unlikely(rc)) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10221f40803..2cfa76f5d99e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
netif_stop_queue(dev);
}
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen);
if (unlikely(rc)) {
@@ -615,8 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
address->last_send = priv->tx_head;
++priv->tx_head;
- skb_orphan(skb);
-
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
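
Both IPoIB hunks above move skb_orphan() ahead of the send post and add skb_dst_drop(): a posted skb may be held by the hardware until a TX completion arrives much later, so detaching it from the owning socket's send-buffer accounting and dropping its route-cache reference up front avoids pinning those references for the lifetime of the work request. The ordering, in a stripped-down hypothetical transmit path (demo_post_send() is only a stand-in for the driver's post_send()):

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	static int demo_post_send(struct sk_buff *skb) { return 0; }	/* stub */

	static int demo_xmit(struct sk_buff *skb)
	{
		int rc;

		skb_orphan(skb);	/* release socket sndbuf accounting now */
		skb_dst_drop(skb);	/* drop the dst reference now */

		rc = demo_post_send(skb);
		if (unlikely(rc))
			dev_kfree_skb_any(skb);	/* failed post: free immediately */
		return rc;
	}
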
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 922d845f76b0..d5088ce78290 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -222,27 +222,29 @@ static int srp_new_cm_id(struct srp_target_port *target)
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
+ struct ib_cq *recv_cq, *send_cq;
+ struct ib_qp *qp;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
- target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
- if (IS_ERR(target->recv_cq)) {
- ret = PTR_ERR(target->recv_cq);
+ recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(recv_cq)) {
+ ret = PTR_ERR(recv_cq);
goto err;
}
- target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
- if (IS_ERR(target->send_cq)) {
- ret = PTR_ERR(target->send_cq);
+ send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(send_cq)) {
+ ret = PTR_ERR(send_cq);
goto err_recv_cq;
}
- ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
+ ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -251,30 +253,41 @@ static int srp_create_target_ib(struct srp_target_port *target)
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->send_cq;
- init_attr->recv_cq = target->recv_cq;
+ init_attr->send_cq = send_cq;
+ init_attr->recv_cq = recv_cq;
- target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
- if (IS_ERR(target->qp)) {
- ret = PTR_ERR(target->qp);
+ qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto err_send_cq;
}
- ret = srp_init_qp(target, target->qp);
+ ret = srp_init_qp(target, qp);
if (ret)
goto err_qp;
+ if (target->qp)
+ ib_destroy_qp(target->qp);
+ if (target->recv_cq)
+ ib_destroy_cq(target->recv_cq);
+ if (target->send_cq)
+ ib_destroy_cq(target->send_cq);
+
+ target->qp = qp;
+ target->recv_cq = recv_cq;
+ target->send_cq = send_cq;
+
kfree(init_attr);
return 0;
err_qp:
- ib_destroy_qp(target->qp);
+ ib_destroy_qp(qp);
err_send_cq:
- ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(send_cq);
err_recv_cq:
- ib_destroy_cq(target->recv_cq);
+ ib_destroy_cq(recv_cq);
err:
kfree(init_attr);
@@ -289,6 +302,9 @@ static void srp_free_target_ib(struct srp_target_port *target)
ib_destroy_cq(target->send_cq);
ib_destroy_cq(target->recv_cq);
+ target->qp = NULL;
+ target->send_cq = target->recv_cq = NULL;
+
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
for (i = 0; i < SRP_SQ_SIZE; ++i)
@@ -428,34 +444,50 @@ static int srp_send_req(struct srp_target_port *target)
return status;
}
-static void srp_disconnect_target(struct srp_target_port *target)
+static bool srp_queue_remove_work(struct srp_target_port *target)
{
- /* XXX should send SRP_I_LOGOUT request */
+ bool changed = false;
- init_completion(&target->done);
- if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
- shost_printk(KERN_DEBUG, target->scsi_host,
- PFX "Sending CM DREQ failed\n");
- return;
+ spin_lock_irq(&target->lock);
+ if (target->state != SRP_TARGET_REMOVED) {
+ target->state = SRP_TARGET_REMOVED;
+ changed = true;
}
- wait_for_completion(&target->done);
+ spin_unlock_irq(&target->lock);
+
+ if (changed)
+ queue_work(system_long_wq, &target->remove_work);
+
+ return changed;
}
-static bool srp_change_state(struct srp_target_port *target,
- enum srp_target_state old,
- enum srp_target_state new)
+static bool srp_change_conn_state(struct srp_target_port *target,
+ bool connected)
{
bool changed = false;
spin_lock_irq(&target->lock);
- if (target->state == old) {
- target->state = new;
+ if (target->connected != connected) {
+ target->connected = connected;
changed = true;
}
spin_unlock_irq(&target->lock);
+
return changed;
}
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+ if (srp_change_conn_state(target, false)) {
+ /* XXX should send SRP_I_LOGOUT request */
+
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM DREQ failed\n");
+ }
+ }
+}
+
static void srp_free_req_data(struct srp_target_port *target)
{
struct ib_device *ibdev = target->srp_host->srp_dev->dev;
@@ -489,32 +521,50 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
device_remove_file(&shost->shost_dev, *attr);
}
-static void srp_remove_work(struct work_struct *work)
+static void srp_remove_target(struct srp_target_port *target)
{
- struct srp_target_port *target =
- container_of(work, struct srp_target_port, work);
-
- if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
- return;
-
- spin_lock(&target->srp_host->target_lock);
- list_del(&target->list);
- spin_unlock(&target->srp_host->target_lock);
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
+ srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
srp_free_req_data(target);
scsi_host_put(target->scsi_host);
}
+static void srp_remove_work(struct work_struct *work)
+{
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, remove_work);
+
+ WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+ spin_lock(&target->srp_host->target_lock);
+ list_del(&target->list);
+ spin_unlock(&target->srp_host->target_lock);
+
+ srp_remove_target(target);
+}
+
+static void srp_rport_delete(struct srp_rport *rport)
+{
+ struct srp_target_port *target = rport->lld_data;
+
+ srp_queue_remove_work(target);
+}
+
static int srp_connect_target(struct srp_target_port *target)
{
int retries = 3;
int ret;
+ WARN_ON_ONCE(target->connected);
+
+ target->qp_in_error = false;
+
ret = srp_lookup_path(target);
if (ret)
return ret;
@@ -534,6 +584,7 @@ static int srp_connect_target(struct srp_target_port *target)
*/
switch (target->status) {
case 0:
+ srp_change_conn_state(target, true);
return 0;
case SRP_PORT_REDIRECT:
@@ -646,13 +697,14 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
- struct ib_qp_attr qp_attr;
- struct ib_wc wc;
+ struct Scsi_Host *shost = target->scsi_host;
int i, ret;
- if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
+ if (target->state != SRP_TARGET_LIVE)
return -EAGAIN;
+ scsi_target_block(&shost->shost_gendev);
+
srp_disconnect_target(target);
/*
* Now get a new local CM ID so that we avoid confusing the
@@ -660,21 +712,11 @@ static int srp_reconnect_target(struct srp_target_port *target)
*/
ret = srp_new_cm_id(target);
if (ret)
- goto err;
+ goto unblock;
- qp_attr.qp_state = IB_QPS_RESET;
- ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
- if (ret)
- goto err;
-
- ret = srp_init_qp(target, target->qp);
+ ret = srp_create_target_ib(target);
if (ret)
- goto err;
-
- while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
- ; /* nothing */
- while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
- ; /* nothing */
+ goto unblock;
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];
@@ -686,13 +728,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
for (i = 0; i < SRP_SQ_SIZE; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);
- target->qp_in_error = 0;
ret = srp_connect_target(target);
+
+unblock:
+ scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+ SDEV_TRANSPORT_OFFLINE);
+
if (ret)
goto err;
- if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
- ret = -EAGAIN;
+ shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
return ret;
@@ -705,17 +750,8 @@ err:
* However, we have to defer the real removal because we
* are in the context of the SCSI error handler now, which
* will deadlock if we call scsi_remove_host().
- *
- * Schedule our work inside the lock to avoid a race with
- * the flush_scheduled_work() in srp_remove_one().
*/
- spin_lock_irq(&target->lock);
- if (target->state == SRP_TARGET_CONNECTING) {
- target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work);
- queue_work(ib_wq, &target->work);
- }
- spin_unlock_irq(&target->lock);
+ srp_queue_remove_work(target);
return ret;
}
@@ -1262,6 +1298,19 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}
+static void srp_handle_qp_err(enum ib_wc_status wc_status,
+ enum ib_wc_opcode wc_opcode,
+ struct srp_target_port *target)
+{
+ if (target->connected && !target->qp_in_error) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed %s status %d\n",
+ wc_opcode & IB_WC_RECV ? "receive" : "send",
+ wc_status);
+ }
+ target->qp_in_error = true;
+}
+
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
@@ -1269,15 +1318,11 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed receive status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ srp_handle_recv(target, &wc);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- srp_handle_recv(target, &wc);
}
}
@@ -1288,16 +1333,12 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
struct srp_iu *iu;
while (ib_poll_cq(cq, 1, &wc) > 0) {
- if (wc.status) {
- shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed send status %d\n",
- wc.status);
- target->qp_in_error = 1;
- break;
+ if (likely(wc.status == IB_WC_SUCCESS)) {
+ iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+ list_add(&iu->list, &target->free_tx);
+ } else {
+ srp_handle_qp_err(wc.status, wc.opcode, target);
}
-
- iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
- list_add(&iu->list, &target->free_tx);
}
}
@@ -1311,16 +1352,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
unsigned long flags;
int len;
- if (target->state == SRP_TARGET_CONNECTING)
- goto err;
-
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED) {
- scmnd->result = DID_BAD_TARGET << 16;
- scmnd->scsi_done(scmnd);
- return 0;
- }
-
spin_lock_irqsave(&target->lock, flags);
iu = __srp_get_tx_iu(target, SRP_IU_CMD);
if (!iu)
@@ -1377,7 +1408,6 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);
-err:
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1419,6 +1449,33 @@ err:
return -ENOMEM;
}
+static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
+{
+ uint64_t T_tr_ns, max_compl_time_ms;
+ uint32_t rq_tmo_jiffies;
+
+ /*
+ * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
+ * table 91), both the QP timeout and the retry count have to be set
+ * for RC QP's during the RTR to RTS transition.
+ */
+ WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
+ (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
+
+ /*
+ * Set target->rq_tmo_jiffies to one second more than the largest time
+ * it can take before an error completion is generated. See also
+ * C9-140..142 in the IBTA spec for more information about how to
+ * convert the QP Local ACK Timeout value to nanoseconds.
+ */
+ T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
+ max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
+ do_div(max_compl_time_ms, NSEC_PER_MSEC);
+ rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
+
+ return rq_tmo_jiffies;
+}
+
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_login_rsp *lrsp,
struct srp_target_port *target)
@@ -1478,6 +1535,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
if (ret)
goto error_free;
@@ -1599,6 +1658,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
case IB_CM_DREQ_RECEIVED:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "DREQ received - connection closed\n");
+ srp_change_conn_state(target, false);
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
@@ -1608,7 +1668,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
- comp = 1;
target->status = 0;
break;
@@ -1636,10 +1695,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
- if (target->state == SRP_TARGET_DEAD ||
- target->state == SRP_TARGET_REMOVED)
- return -1;
-
init_completion(&target->tsk_mgmt_done);
spin_lock_irq(&target->lock);
@@ -1729,6 +1784,21 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return ret;
}
+static int srp_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct request_queue *q = sdev->request_queue;
+ unsigned long timeout;
+
+ if (sdev->type == TYPE_DISK) {
+ timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
+ blk_queue_rq_timeout(q, timeout);
+ }
+
+ return 0;
+}
+
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1861,6 +1931,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.eh_abort_handler = srp_abort,
@@ -1894,11 +1965,14 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
return PTR_ERR(rport);
}
+ rport->lld_data = target;
+
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
spin_unlock(&host->target_lock);
target->state = SRP_TARGET_LIVE;
+ target->connected = false;
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, 0);
@@ -2188,6 +2262,7 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
+ INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
@@ -2232,7 +2307,6 @@ static ssize_t srp_create_target(struct device *dev,
if (ret)
goto err_free_ib;
- target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret) {
shost_printk(KERN_ERR, target->scsi_host,
@@ -2422,8 +2496,7 @@ static void srp_remove_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct srp_host *host, *tmp_host;
- LIST_HEAD(target_list);
- struct srp_target_port *target, *tmp_target;
+ struct srp_target_port *target;
srp_dev = ib_get_client_data(device, &srp_client);
@@ -2436,35 +2509,17 @@ static void srp_remove_one(struct ib_device *device)
wait_for_completion(&host->released);
/*
- * Mark all target ports as removed, so we stop queueing
- * commands and don't try to reconnect.
+ * Remove all target ports.
*/
spin_lock(&host->target_lock);
- list_for_each_entry(target, &host->target_list, list) {
- spin_lock_irq(&target->lock);
- target->state = SRP_TARGET_REMOVED;
- spin_unlock_irq(&target->lock);
- }
+ list_for_each_entry(target, &host->target_list, list)
+ srp_queue_remove_work(target);
spin_unlock(&host->target_lock);
/*
- * Wait for any reconnection tasks that may have
- * started before we marked our target ports as
- * removed, and any target port removal tasks.
+ * Wait for target port removal tasks.
*/
- flush_workqueue(ib_wq);
-
- list_for_each_entry_safe(target, tmp_target,
- &host->target_list, list) {
- srp_del_scsi_host_attr(target->scsi_host);
- srp_remove_host(target->scsi_host);
- scsi_remove_host(target->scsi_host);
- srp_disconnect_target(target);
- ib_destroy_cm_id(target->cm_id);
- srp_free_target_ib(target);
- srp_free_req_data(target);
- scsi_host_put(target->scsi_host);
- }
+ flush_workqueue(system_long_wq);
kfree(host);
}
@@ -2478,6 +2533,7 @@ static void srp_remove_one(struct ib_device *device)
}
static struct srp_function_template ib_srp_transport_functions = {
+ .rport_delete = srp_rport_delete,
};
static int __init srp_init_module(void)
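
For scale, the srp_compute_rq_tmo() helper added above works out as follows with illustrative (not measured) QP attributes of local ACK timeout 19 and retry_cnt 7: T_tr = 4096 ns * 2^19 is about 2.15 s, the worst-case completion time 7 * 4 * T_tr is about 60.1 s, and rq_tmo_jiffies then corresponds to roughly 61 s once the extra second is added. srp_slave_configure() uses that value, but never less than 30 * HZ, as the block-layer request timeout for SRP disks.
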
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 020caf0c3789..de2d0b3c0bfe 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -80,9 +80,7 @@ enum {
enum srp_target_state {
SRP_TARGET_LIVE,
- SRP_TARGET_CONNECTING,
- SRP_TARGET_DEAD,
- SRP_TARGET_REMOVED
+ SRP_TARGET_REMOVED,
};
enum srp_iu_type {
@@ -163,6 +161,9 @@ struct srp_target_port {
struct ib_sa_query *path_query;
int path_query_id;
+ u32 rq_tmo_jiffies;
+ bool connected;
+
struct ib_cm_id *cm_id;
int max_ti_iu_len;
@@ -173,12 +174,12 @@ struct srp_target_port {
struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
- struct work_struct work;
+ struct work_struct remove_work;
struct list_head list;
struct completion done;
int status;
- int qp_in_error;
+ bool qp_in_error;
struct completion tsk_mgmt_done;
u8 tsk_mgmt_status;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index cf23c46185b2..c09d41b1a2ff 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- kref_init(&ioctx->kref);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rbuf = 0;
@@ -1291,39 +1290,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
-
- BUG_ON(!ioctx);
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- transport_generic_free_cmd(&ioctx->cmd, 0);
-
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
- }
-
- spin_lock_irqsave(&ch->spinlock, flags);
- list_add(&ioctx->free_list, &ch->free_list);
- spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
- srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
-/**
* srpt_abort_cmd() - Abort a SCSI command.
* @ioctx: I/O context associated with the SCSI command.
* @context: Preferred execution context.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (state == SRPT_STATE_DONE)
+ if (state == SRPT_STATE_DONE) {
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ BUG_ON(ch->sess == NULL);
+
+ target_put_sess_cmd(ch->sess, &ioctx->cmd);
goto out;
+ }
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
default:
WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
&& state != SRPT_STATE_DONE))
pr_debug("state = %d\n", state);
- if (state != SRPT_STATE_DONE)
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- else
+ if (state != SRPT_STATE_DONE) {
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+ } else {
printk(KERN_ERR "IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
+ }
}
/**
@@ -1712,10 +1686,10 @@ out_err:
static int srpt_check_stop_free(struct se_cmd *cmd)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
/**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
- int ret;
+ sense_reason_t ret;
+ int rc;
BUG_ON(!send_ioctx);
srp_cmd = recv_ioctx->ioctx.buf;
- kref_get(&send_ioctx->kref);
cmd = &send_ioctx->cmd;
send_ioctx->tag = srp_cmd->tag;
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
- ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
- if (ret) {
+ if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
}
- cmd->data_length = data_len;
- cmd->data_direction = dir;
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
sizeof(srp_cmd->lun));
- if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+ rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+ &send_ioctx->sense_data[0], unpacked_lun, data_len,
+ MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
}
- ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
- if (ret < 0) {
- kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
- if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
- srpt_queue_status(cmd);
- return 0;
- } else
- goto send_sense;
- }
-
- transport_handle_cdb_direct(cmd);
return 0;
send_sense:
- transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
- 0);
+ transport_send_check_condition_and_sense(cmd, ret, 0);
return -1;
}
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
{
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
+ struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
+ uint32_t tag = 0;
int tcm_tmr;
- int res;
+ int rc;
BUG_ON(!send_ioctx);
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto process_tmr;
- }
- res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
- if (res < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
- goto process_tmr;
+ goto fail;
}
-
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
- res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
- if (res) {
- pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
- goto process_tmr;
- }
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
- srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
- kref_get(&send_ioctx->kref);
- if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
- transport_generic_handle_tmr(&send_ioctx->cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+ rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+ if (rc < 0) {
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_DOES_NOT_EXIST;
+ goto fail;
+ }
+ tag = srp_tsk->task_tag;
+ }
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+ srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ TARGET_SCF_ACK_KREF);
+ if (rc != 0) {
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto fail;
+ }
+ return;
+fail:
+ transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
}
- transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG,
- send_ioctx->sense_data);
-
switch (srp_cmd->opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
sdev = ch->sport->sdev;
BUG_ON(!sdev);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
+ se_sess = ch->sess;
+ BUG_ON(!se_sess);
+
+ target_wait_for_sess_cmds(se_sess, 0);
+
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
ch->sess = NULL;
srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
+ struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ unsigned long flags;
+
+ WARN_ON(ioctx->state != SRPT_STATE_DONE);
+ WARN_ON(ioctx->mapped_sg_count != 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
}
/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 61e52b830816..4caf55cda7b1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
- struct kref kref;
struct rdma_iu *rdma_ius;
struct srp_direct_buf *rbufs;
struct srp_direct_buf single_rbuf;
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index daceafe7ee7d..fa7a95c1da0e 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -57,7 +57,7 @@ static const struct pci_device_id emu_tbl[] = {
MODULE_DEVICE_TABLE(pci, emu_tbl);
-static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct emu *emu;
struct gameport *port;
@@ -107,7 +107,7 @@ static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id
return error;
}
-static void __devexit emu_remove(struct pci_dev *pdev)
+static void emu_remove(struct pci_dev *pdev)
{
struct emu *emu = pci_get_drvdata(pdev);
@@ -122,7 +122,7 @@ static struct pci_driver emu_driver = {
.name = "Emu10k1_gameport",
.id_table = emu_tbl,
.probe = emu_probe,
- .remove = __devexit_p(emu_remove),
+ .remove = emu_remove,
};
module_pci_driver(emu_driver);
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 48ad3829ff20..ae912d3aee4e 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -78,7 +78,7 @@ static int fm801_gp_open(struct gameport *gameport, int mode)
return 0;
}
-static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
+static int fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct fm801_gp *gp;
struct gameport *port;
@@ -129,7 +129,7 @@ static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device
return error;
}
-static void __devexit fm801_gp_remove(struct pci_dev *pci)
+static void fm801_gp_remove(struct pci_dev *pci)
{
struct fm801_gp *gp = pci_get_drvdata(pci);
@@ -150,7 +150,7 @@ static struct pci_driver fm801_gp_driver = {
.name = "FM801_gameport",
.id_table = fm801_gp_id_table,
.probe = fm801_gp_probe,
- .remove = __devexit_p(fm801_gp_remove),
+ .remove = fm801_gp_remove,
};
module_pci_driver(fm801_gp_driver);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 1abbc170d8b7..47a6009dbf43 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -194,7 +194,7 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
if (!mt)
return;
- oldest = 0;
+ oldest = NULL;
oldid = mt->trkid;
count = 0;
@@ -251,7 +251,7 @@ void input_mt_sync_frame(struct input_dev *dev)
if (mt->flags & INPUT_MT_DROP_UNUSED) {
for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
- if (s->frame == mt->frame)
+ if (input_mt_is_used(mt, s))
continue;
input_mt_slot(dev, s - mt->slots);
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 53a0ddee7872..ce01332f7b3a 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -534,8 +534,11 @@ EXPORT_SYMBOL(input_grab_device);
static void __input_release_device(struct input_handle *handle)
{
struct input_dev *dev = handle->dev;
+ struct input_handle *grabber;
- if (dev->grab == handle) {
+ grabber = rcu_dereference_protected(dev->grab,
+ lockdep_is_held(&dev->mutex));
+ if (grabber == handle) {
rcu_assign_pointer(dev->grab, NULL);
/* Make sure input_pass_event() notices that grab is gone */
synchronize_rcu();
@@ -1723,7 +1726,7 @@ EXPORT_SYMBOL_GPL(input_class);
/**
* input_allocate_device - allocate memory for new input device
*
- * Returns prepared struct input_dev or NULL.
+ * Returns prepared struct input_dev or %NULL.
*
* NOTE: Use input_free_device() to free devices that have not been
* registered; input_unregister_device() should be used for already
@@ -1750,6 +1753,70 @@ struct input_dev *input_allocate_device(void)
}
EXPORT_SYMBOL(input_allocate_device);
+struct input_devres {
+ struct input_dev *input;
+};
+
+static int devm_input_device_match(struct device *dev, void *res, void *data)
+{
+ struct input_devres *devres = res;
+
+ return devres->input == data;
+}
+
+static void devm_input_device_release(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: dropping reference to %s\n",
+ __func__, dev_name(&input->dev));
+ input_put_device(input);
+}
+
+/**
+ * devm_input_allocate_device - allocate managed input device
+ * @dev: device owning the input device being created
+ *
+ * Returns prepared struct input_dev or %NULL.
+ *
+ * Managed input devices do not need to be explicitly unregistered or
+ * freed as it will be done automatically when owner device unbinds from
+ * its driver (or binding fails). Once managed input device is allocated,
+ * it is ready to be set up and registered in the same fashion as regular
+ * input device. There are no special devm_input_device_[un]register()
+ * variants, regular ones work with both managed and unmanaged devices.
+ *
+ * NOTE: the owner device is set up as parent of input device and users
+ * should not override it.
+ */
+
+struct input_dev *devm_input_allocate_device(struct device *dev)
+{
+ struct input_dev *input;
+ struct input_devres *devres;
+
+ devres = devres_alloc(devm_input_device_release,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return NULL;
+
+ input = input_allocate_device();
+ if (!input) {
+ devres_free(devres);
+ return NULL;
+ }
+
+ input->dev.parent = dev;
+ input->devres_managed = true;
+
+ devres->input = input;
+ devres_add(dev, devres);
+
+ return input;
+}
+EXPORT_SYMBOL(devm_input_allocate_device);
+
/**
* input_free_device - free memory occupied by input_dev structure
* @dev: input device to free
@@ -1766,8 +1833,14 @@ EXPORT_SYMBOL(input_allocate_device);
*/
void input_free_device(struct input_dev *dev)
{
- if (dev)
+ if (dev) {
+ if (dev->devres_managed)
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_release,
+ devm_input_device_match,
+ dev));
input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_free_device);
@@ -1888,6 +1961,38 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
INPUT_CLEANSE_BITMASK(dev, SW, sw);
}
+static void __input_unregister_device(struct input_dev *dev)
+{
+ struct input_handle *handle, *next;
+
+ input_disconnect_device(dev);
+
+ mutex_lock(&input_mutex);
+
+ list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
+ handle->handler->disconnect(handle);
+ WARN_ON(!list_empty(&dev->h_list));
+
+ del_timer_sync(&dev->timer);
+ list_del_init(&dev->node);
+
+ input_wakeup_procfs_readers();
+
+ mutex_unlock(&input_mutex);
+
+ device_del(&dev->dev);
+}
+
+static void devm_input_device_unregister(struct device *dev, void *res)
+{
+ struct input_devres *devres = res;
+ struct input_dev *input = devres->input;
+
+ dev_dbg(dev, "%s: unregistering device %s\n",
+ __func__, dev_name(&input->dev));
+ __input_unregister_device(input);
+}
+
/**
* input_register_device - register device with input core
* @dev: device to be registered
@@ -1903,11 +2008,21 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
int input_register_device(struct input_dev *dev)
{
static atomic_t input_no = ATOMIC_INIT(0);
+ struct input_devres *devres = NULL;
struct input_handler *handler;
unsigned int packet_size;
const char *path;
int error;
+ if (dev->devres_managed) {
+ devres = devres_alloc(devm_input_device_unregister,
+ sizeof(struct input_devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->input = dev;
+ }
+
/* Every input device generates EV_SYN/SYN_REPORT events. */
__set_bit(EV_SYN, dev->evbit);
@@ -1923,8 +2038,10 @@ int input_register_device(struct input_dev *dev)
dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2;
dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
- if (!dev->vals)
- return -ENOMEM;
+ if (!dev->vals) {
+ error = -ENOMEM;
+ goto err_devres_free;
+ }
/*
* If delay and period are pre-set by the driver, then autorepeating
@@ -1949,7 +2066,7 @@ int input_register_device(struct input_dev *dev)
error = device_add(&dev->dev);
if (error)
- return error;
+ goto err_free_vals;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
pr_info("%s as %s\n",
@@ -1958,10 +2075,8 @@ int input_register_device(struct input_dev *dev)
kfree(path);
error = mutex_lock_interruptible(&input_mutex);
- if (error) {
- device_del(&dev->dev);
- return error;
- }
+ if (error)
+ goto err_device_del;
list_add_tail(&dev->node, &input_dev_list);
@@ -1972,7 +2087,21 @@ int input_register_device(struct input_dev *dev)
mutex_unlock(&input_mutex);
+ if (dev->devres_managed) {
+ dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
+ __func__, dev_name(&dev->dev));
+ devres_add(dev->dev.parent, devres);
+ }
return 0;
+
+err_device_del:
+ device_del(&dev->dev);
+err_free_vals:
+ kfree(dev->vals);
+ dev->vals = NULL;
+err_devres_free:
+ devres_free(devres);
+ return error;
}
EXPORT_SYMBOL(input_register_device);
@@ -1985,24 +2114,20 @@ EXPORT_SYMBOL(input_register_device);
*/
void input_unregister_device(struct input_dev *dev)
{
- struct input_handle *handle, *next;
-
- input_disconnect_device(dev);
-
- mutex_lock(&input_mutex);
-
- list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
- handle->handler->disconnect(handle);
- WARN_ON(!list_empty(&dev->h_list));
-
- del_timer_sync(&dev->timer);
- list_del_init(&dev->node);
-
- input_wakeup_procfs_readers();
-
- mutex_unlock(&input_mutex);
-
- device_unregister(&dev->dev);
+ if (dev->devres_managed) {
+ WARN_ON(devres_destroy(dev->dev.parent,
+ devm_input_device_unregister,
+ devm_input_device_match,
+ dev));
+ __input_unregister_device(dev);
+ /*
+ * We do not do input_put_device() here because it will be done
+ * when 2nd devres fires up.
+ */
+ } else {
+ __input_unregister_device(dev);
+ input_put_device(dev);
+ }
}
EXPORT_SYMBOL(input_unregister_device);
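
The input core changes above introduce devres-managed input devices. A minimal sketch of how a driver would use the new helper (hypothetical probe, error handling trimmed): the device is unregistered and freed automatically when its parent unbinds, and the ordinary input_register_device() is used because it detects dev->devres_managed and adds the unregister devres itself.

	#include <linux/device.h>
	#include <linux/input.h>

	static int demo_probe(struct device *dev)
	{
		struct input_dev *input;

		input = devm_input_allocate_device(dev);	/* parent preset to dev */
		if (!input)
			return -ENOMEM;

		input->name = "demo-keys";
		input_set_capability(input, EV_KEY, KEY_POWER);

		/* no devm_ variant of register/unregister is needed */
		return input_register_device(input);
	}
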
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index c96653b58867..121cd63d3334 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -85,7 +85,10 @@ static int as5011_i2c_write(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr, avalue };
struct i2c_msg msg = {
- client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+ .addr = client->addr,
+ .flags = I2C_M_IGNORE_NAK,
+ .len = 2,
+ .buf = (uint8_t *)data
};
int error;
@@ -98,8 +101,18 @@ static int as5011_i2c_read(struct i2c_client *client,
{
uint8_t data[2] = { aregaddr };
struct i2c_msg msg_set[2] = {
- { client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
- { client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_REV_DIR_ADDR,
+ .len = 1,
+ .buf = (uint8_t *)data
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 1,
+ .buf = (uint8_t *)data
+ }
};
int error;
@@ -144,7 +157,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+static int as5011_configure_chip(struct as5011_device *as5011,
const struct as5011_platform_data *plat_dat)
{
struct i2c_client *client = as5011->i2c_client;
@@ -212,8 +225,8 @@ static int __devinit as5011_configure_chip(struct as5011_device *as5011,
return 0;
}
-static int __devinit as5011_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int as5011_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct as5011_platform_data *plat_data;
struct as5011_device *as5011;
@@ -328,7 +341,7 @@ err_free_mem:
return error;
}
-static int __devexit as5011_remove(struct i2c_client *client)
+static int as5011_remove(struct i2c_client *client)
{
struct as5011_device *as5011 = i2c_get_clientdata(client);
@@ -353,7 +366,7 @@ static struct i2c_driver as5011_driver = {
.name = "as5011",
},
.probe = as5011_probe,
- .remove = __devexit_p(as5011_remove),
+ .remove = as5011_remove,
.id_table = as5011_id,
};
diff --git a/drivers/input/joystick/maplecontrol.c b/drivers/input/joystick/maplecontrol.c
index 77cfde571bd9..59c10ec5a2a1 100644
--- a/drivers/input/joystick/maplecontrol.c
+++ b/drivers/input/joystick/maplecontrol.c
@@ -78,7 +78,7 @@ static void dc_pad_close(struct input_dev *dev)
}
/* allow the controller to be used */
-static int __devinit probe_maple_controller(struct device *dev)
+static int probe_maple_controller(struct device *dev)
{
static const short btn_bit[32] = {
BTN_C, BTN_B, BTN_A, BTN_START, -1, -1, -1, -1,
@@ -157,7 +157,7 @@ fail:
return error;
}
-static int __devexit remove_maple_controller(struct device *dev)
+static int remove_maple_controller(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_pad *pad = maple_get_drvdata(mdev);
@@ -175,7 +175,7 @@ static struct maple_driver dc_pad_driver = {
.drv = {
.name = "Dreamcast_controller",
.probe = probe_maple_controller,
- .remove = __devexit_p(remove_maple_controller),
+ .remove = remove_maple_controller,
},
};
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 4dfa1eed4b7c..f8f892b076e8 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev)
struct walkera_dev *w = input_get_drvdata(dev);
parport_disable_irq(w->parport);
+ hrtimer_cancel(&w->timer);
}
static int walkera0701_connect(struct walkera_dev *w, int parport)
@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
if (parport_claim(w->pardevice))
goto init_err1;
+ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ w->timer.function = timer_handler;
+
w->input_dev = input_allocate_device();
if (!w->input_dev)
goto init_err2;
@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
if (err)
goto init_err3;
- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- w->timer.function = timer_handler;
return 0;
init_err3:
@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
static void walkera0701_disconnect(struct walkera_dev *w)
{
- hrtimer_cancel(&w->timer);
input_unregister_device(w->input_dev);
parport_release(w->pardevice);
parport_unregister_device(w->pardevice);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83811e45d633..d6cbfe9df218 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -118,11 +118,12 @@ static const struct xpad_device {
u8 xtype;
} xpad_device[] = {
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
- { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
- { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
@@ -136,9 +137,12 @@ static const struct xpad_device {
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
@@ -148,24 +152,28 @@ static const struct xpad_device {
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
- { 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
- { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
- { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -235,7 +243,7 @@ static const signed short xpad_abs_triggers[] = {
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
-static struct usb_device_id xpad_table [] = {
+static struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
@@ -248,10 +256,11 @@ static struct usb_device_id xpad_table [] = {
 XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
{ }
};
-MODULE_DEVICE_TABLE (usb, xpad_table);
+MODULE_DEVICE_TABLE(usb, xpad_table);
struct usb_xpad {
struct input_dev *dev; /* input device interface */
@@ -783,7 +792,7 @@ static int xpad_open(struct input_dev *dev)
struct usb_xpad *xpad = input_get_drvdata(dev);
/* URB was submitted in probe */
- if(xpad->xtype == XTYPE_XBOX360W)
+ if (xpad->xtype == XTYPE_XBOX360W)
return 0;
xpad->irq_in->dev = xpad->udev;
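Besides re-sorting xpad_device[] by vendor/product ID and adding the new pads, the hunks above extend xpad_table[] with the 0x24c6 (PowerA) vendor and keep MODULE_DEVICE_TABLE(usb, ...) pointing at it, which is what lets udev autoload the module for the new hardware. A trimmed sketch of how such a match table is declared and exported; demo_table and the plain USB_DEVICE() entries stand in for the driver's XPAD_XBOX360_VENDOR() macros:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_table[] = {
	{ USB_DEVICE(0x045e, 0x028e) },	/* Microsoft X-Box 360 pad */
	{ USB_DEVICE(0x24c6, 0x5300) },	/* PowerA MINI PROEX Controller */
	{ }				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, demo_table);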
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index de0874054e9f..5a240c60342d 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -134,7 +134,7 @@ config KEYBOARD_QT1070
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
If you say yes here you get support for Atmel AT42QT2160 Touch
Sensor chip as a keyboard input.
@@ -409,7 +409,7 @@ config KEYBOARD_NEWTON
config KEYBOARD_NOMADIK
tristate "ST-Ericsson Nomadik SKE keyboard"
- depends on PLAT_NOMADIK
+ depends on (ARCH_NOMADIK || ARCH_U8500)
select INPUT_MATRIXKMAP
help
Say Y here if you want to use a keypad provided on the SKE controller
@@ -544,6 +544,7 @@ config KEYBOARD_OMAP
config KEYBOARD_OMAP4
tristate "TI OMAP4+ keypad support"
+ depends on ARCH_OMAP2PLUS
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP4+ keypad.
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index e9e8674dfda1..ef26b17fb159 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -69,7 +69,7 @@ static int adp5520_keys_notifier(struct notifier_block *nb,
return 0;
}
-static int __devinit adp5520_keys_probe(struct platform_device *pdev)
+static int adp5520_keys_probe(struct platform_device *pdev)
{
struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
struct input_dev *input;
@@ -182,7 +182,7 @@ err:
return ret;
}
-static int __devexit adp5520_keys_remove(struct platform_device *pdev)
+static int adp5520_keys_remove(struct platform_device *pdev)
{
struct adp5520_keys *dev = platform_get_drvdata(pdev);
@@ -200,7 +200,7 @@ static struct platform_driver adp5520_keys_driver = {
.owner = THIS_MODULE,
},
.probe = adp5520_keys_probe,
- .remove = __devexit_p(adp5520_keys_remove),
+ .remove = adp5520_keys_remove,
};
module_platform_driver(adp5520_keys_driver);
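From here on, most of the keyboard-driver hunks are the mechanical removal of the __devinit/__devexit section annotations: they disappear from probe/remove and the __devexit_p() wrapper is dropped from .remove, with no functional change. A sketch of the resulting plain platform-driver skeleton after the conversion; the demo_* names are hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* probe code stays as before, just without __devinit */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* no __devexit_p() wrapper needed any more */
	.driver	= {
		.name	= "demo-keypad",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(demo_driver);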
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b083bf10f139..dbd2047f1641 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -145,7 +145,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
+static int adp5588_build_gpiomap(struct adp5588_kpad *kpad,
const struct adp5588_kpad_platform_data *pdata)
{
bool pin_used[ADP5588_MAXGPIO];
@@ -170,7 +170,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
return n_unused;
}
-static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
+static int adp5588_gpio_add(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -224,7 +224,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
return 0;
}
-static void __devexit adp5588_gpio_remove(struct adp5588_kpad *kpad)
+static void adp5588_gpio_remove(struct adp5588_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5588_kpad_platform_data *pdata = dev->platform_data;
@@ -319,7 +319,7 @@ static irqreturn_t adp5588_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5588_setup(struct i2c_client *client)
+static int adp5588_setup(struct i2c_client *client)
{
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data;
@@ -382,7 +382,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
return 0;
}
-static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
+static void adp5588_report_switch_state(struct adp5588_kpad *kpad)
{
int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1);
int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2);
@@ -420,8 +420,8 @@ static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad)
}
-static int __devinit adp5588_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5588_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5588_kpad *kpad;
const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
@@ -587,7 +587,7 @@ static int __devinit adp5588_probe(struct i2c_client *client,
return error;
}
-static int __devexit adp5588_remove(struct i2c_client *client)
+static int adp5588_remove(struct i2c_client *client)
{
struct adp5588_kpad *kpad = i2c_get_clientdata(client);
@@ -650,7 +650,7 @@ static struct i2c_driver adp5588_driver = {
#endif
},
.probe = adp5588_probe,
- .remove = __devexit_p(adp5588_remove),
+ .remove = adp5588_remove,
.id_table = adp5588_id,
};
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 74e603213386..67d12b3427c9 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -464,7 +464,7 @@ static int adp5589_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
-static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
+static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
const struct adp5589_kpad_platform_data *pdata)
{
bool pin_used[ADP5589_MAXGPIO];
@@ -496,7 +496,7 @@ static int __devinit adp5589_build_gpiomap(struct adp5589_kpad *kpad,
return n_unused;
}
-static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
+static int adp5589_gpio_add(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -550,7 +550,7 @@ static int __devinit adp5589_gpio_add(struct adp5589_kpad *kpad)
return 0;
}
-static void __devexit adp5589_gpio_remove(struct adp5589_kpad *kpad)
+static void adp5589_gpio_remove(struct adp5589_kpad *kpad)
{
struct device *dev = &kpad->client->dev;
const struct adp5589_kpad_platform_data *pdata = dev->platform_data;
@@ -641,8 +641,7 @@ static irqreturn_t adp5589_irq(int irq, void *handle)
return IRQ_HANDLED;
}
-static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
- unsigned short key)
+static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
{
int i;
@@ -655,7 +654,7 @@ static int __devinit adp5589_get_evcode(struct adp5589_kpad *kpad,
return -EINVAL;
}
-static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
+static int adp5589_setup(struct adp5589_kpad *kpad)
{
struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
@@ -820,7 +819,7 @@ static int __devinit adp5589_setup(struct adp5589_kpad *kpad)
return 0;
}
-static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
+static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
{
int gpi_stat_tmp, pin_loc;
int i;
@@ -860,8 +859,8 @@ static void __devinit adp5589_report_switch_state(struct adp5589_kpad *kpad)
input_sync(kpad->input);
}
-static int __devinit adp5589_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5589_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adp5589_kpad *kpad;
const struct adp5589_kpad_platform_data *pdata =
@@ -1045,7 +1044,7 @@ err_free_mem:
return error;
}
-static int __devexit adp5589_remove(struct i2c_client *client)
+static int adp5589_remove(struct i2c_client *client)
{
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
@@ -1104,7 +1103,7 @@ static struct i2c_driver adp5589_driver = {
.pm = &adp5589_dev_pm_ops,
},
.probe = adp5589_probe,
- .remove = __devexit_p(adp5589_remove),
+ .remove = adp5589_remove,
.id_table = adp5589_id,
};
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 8eb9116e0a5f..20b9fa91fb9e 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -177,7 +177,7 @@ static irqreturn_t bfin_kpad_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_kpad_probe(struct platform_device *pdev)
+static int bfin_kpad_probe(struct platform_device *pdev)
{
struct bf54x_kpad *bf54x_kpad;
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
@@ -331,7 +331,7 @@ out:
return error;
}
-static int __devexit bfin_kpad_remove(struct platform_device *pdev)
+static int bfin_kpad_remove(struct platform_device *pdev)
{
struct bfin_kpad_platform_data *pdata = pdev->dev.platform_data;
struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
@@ -390,7 +390,7 @@ static struct platform_driver bfin_kpad_device_driver = {
.owner = THIS_MODULE,
},
.probe = bfin_kpad_probe,
- .remove = __devexit_p(bfin_kpad_remove),
+ .remove = bfin_kpad_remove,
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index d5bacbb479b0..4e4e453ea15e 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -303,7 +303,7 @@ fail1:
return error;
}
-static int __devexit davinci_ks_remove(struct platform_device *pdev)
+static int davinci_ks_remove(struct platform_device *pdev)
{
struct davinci_ks *davinci_ks = platform_get_drvdata(pdev);
@@ -326,7 +326,7 @@ static struct platform_driver davinci_ks_driver = {
.name = "davinci_keyscan",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(davinci_ks_remove),
+ .remove = davinci_ks_remove,
};
static int __init davinci_ks_init(void)
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7363402de8d4..9857e8fd0987 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -232,7 +232,7 @@ static int ep93xx_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
ep93xx_keypad_suspend, ep93xx_keypad_resume);
-static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
+static int ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
const struct matrix_keymap_data *keymap_data;
@@ -346,7 +346,7 @@ failed_free:
return err;
}
-static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
+static int ep93xx_keypad_remove(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -380,7 +380,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
- .remove = __devexit_p(ep93xx_keypad_remove),
+ .remove = ep93xx_keypad_remove,
};
module_platform_driver(ep93xx_keypad_driver);
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6a68041c261d..b29ca651a395 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -423,10 +423,10 @@ out:
return IRQ_HANDLED;
}
-static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
- struct input_dev *input,
- struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+static int gpio_keys_setup_key(struct platform_device *pdev,
+ struct input_dev *input,
+ struct gpio_button_data *bdata,
+ const struct gpio_keys_button *button)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -440,21 +440,13 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (gpio_is_valid(button->gpio)) {
- error = gpio_request(button->gpio, desc);
+ error = gpio_request_one(button->gpio, GPIOF_IN, desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
- error = gpio_direction_input(button->gpio);
- if (error < 0) {
- dev_err(dev,
- "Failed to configure direction for GPIO %d, error %d\n",
- button->gpio, error);
- goto fail;
- }
-
if (button->debounce_interval) {
error = gpio_set_debounce(button->gpio,
button->debounce_interval * 1000);
@@ -526,12 +518,35 @@ fail:
return error;
}
+static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
+{
+ struct input_dev *input = ddata->input;
+ int i;
+
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (gpio_is_valid(bdata->button->gpio))
+ gpio_keys_gpio_report_event(bdata);
+ }
+ input_sync(input);
+}
+
static int gpio_keys_open(struct input_dev *input)
{
struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = ddata->pdata;
+ int error;
- return pdata->enable ? pdata->enable(input->dev.parent) : 0;
+ if (pdata->enable) {
+ error = pdata->enable(input->dev.parent);
+ if (error)
+ return error;
+ }
+
+ /* Report current state of buttons that are connected to GPIOs */
+ gpio_keys_report_state(ddata);
+
+ return 0;
}
static void gpio_keys_close(struct input_dev *input)
@@ -551,7 +566,7 @@ static void gpio_keys_close(struct input_dev *input)
/*
* Translate OpenFirmware node properties into platform_data
*/
-static struct gpio_keys_platform_data * __devinit
+static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
@@ -587,6 +602,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
+ int gpio;
enum of_gpio_flags flags;
if (!of_find_property(pp, "gpios", NULL)) {
@@ -595,9 +611,19 @@ gpio_keys_get_devtree_pdata(struct device *dev)
continue;
}
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ goto err_free_pdata;
+ }
+
button = &pdata->buttons[i++];
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
if (of_property_read_u32(pp, "linux,code", &button->code)) {
@@ -658,7 +684,7 @@ static void gpio_remove_key(struct gpio_button_data *bdata)
gpio_free(bdata->button->gpio);
}
-static int __devinit gpio_keys_probe(struct platform_device *pdev)
+static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -731,14 +757,6 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
goto fail3;
}
- /* get current state of buttons that are connected to GPIOs */
- for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
- }
- input_sync(input);
-
device_init_wakeup(&pdev->dev, wakeup);
return 0;
@@ -760,7 +778,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_keys_remove(struct platform_device *pdev)
+static int gpio_keys_remove(struct platform_device *pdev)
{
struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
struct input_dev *input = ddata->input;
@@ -788,6 +806,7 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
static int gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
int i;
if (device_may_wakeup(dev)) {
@@ -796,6 +815,11 @@ static int gpio_keys_suspend(struct device *dev)
if (bdata->button->wakeup)
enable_irq_wake(bdata->irq);
}
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users)
+ gpio_keys_close(input);
+ mutex_unlock(&input->mutex);
}
return 0;
@@ -804,18 +828,27 @@ static int gpio_keys_suspend(struct device *dev)
static int gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
+ struct input_dev *input = ddata->input;
+ int error = 0;
int i;
- for (i = 0; i < ddata->pdata->nbuttons; i++) {
- struct gpio_button_data *bdata = &ddata->data[i];
- if (bdata->button->wakeup && device_may_wakeup(dev))
- disable_irq_wake(bdata->irq);
-
- if (gpio_is_valid(bdata->button->gpio))
- gpio_keys_gpio_report_event(bdata);
+ if (device_may_wakeup(dev)) {
+ for (i = 0; i < ddata->pdata->nbuttons; i++) {
+ struct gpio_button_data *bdata = &ddata->data[i];
+ if (bdata->button->wakeup)
+ disable_irq_wake(bdata->irq);
+ }
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users)
+ error = gpio_keys_open(input);
+ mutex_unlock(&input->mutex);
}
- input_sync(ddata->input);
+ if (error)
+ return error;
+
+ gpio_keys_report_state(ddata);
return 0;
}
#endif
@@ -824,7 +857,7 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
- .remove = __devexit_p(gpio_keys_remove),
+ .remove = gpio_keys_remove,
.driver = {
.name = "gpio-keys",
.owner = THIS_MODULE,
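Two substantive changes sit in the gpio_keys diff above: gpio_request_one(..., GPIOF_IN, ...) folds the old gpio_request() plus gpio_direction_input() pair into one call, and the initial button-state scan moves out of probe into gpio_keys_open()/gpio_keys_report_state() so suspend/resume can reuse it. A small sketch of the request side, assuming the caller already validated the GPIO number; demo_request_button_gpio() is an illustrative helper, not a function in the driver:

#include <linux/device.h>
#include <linux/gpio.h>

static int demo_request_button_gpio(struct device *dev, unsigned int gpio,
				    const char *desc)
{
	int error;

	/* request the line and configure it as an input in one step */
	error = gpio_request_one(gpio, GPIOF_IN, desc);
	if (error < 0)
		dev_err(dev, "Failed to request GPIO %u, error %d\n",
			gpio, error);

	return error;
}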
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index f2142de789e7..21147164874d 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -103,8 +103,7 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
}
#ifdef CONFIG_OF
-static struct gpio_keys_platform_data * __devinit
-gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
@@ -136,6 +135,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
+ int gpio;
enum of_gpio_flags flags;
if (!of_find_property(pp, "gpios", NULL)) {
@@ -144,9 +144,19 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
continue;
}
+ gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (gpio < 0) {
+ error = gpio;
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get gpio flags, error: %d\n",
+ error);
+ goto err_free_pdata;
+ }
+
button = &pdata->buttons[i++];
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ button->gpio = gpio;
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
if (of_property_read_u32(pp, "linux,code", &button->code)) {
@@ -196,7 +206,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev)
}
#endif
-static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
+static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
@@ -246,7 +256,6 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input = poll_dev->input;
- input->evbit[0] = BIT(EV_KEY);
input->name = pdev->name;
input->phys = DRV_NAME"/input0";
input->dev.parent = &pdev->dev;
@@ -256,6 +265,10 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
for (i = 0; i < pdata->nbuttons; i++) {
struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
@@ -268,22 +281,14 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
goto err_free_gpio;
}
- error = gpio_request(gpio,
- button->desc ? button->desc : DRV_NAME);
+ error = gpio_request_one(gpio, GPIOF_IN,
+ button->desc ?: DRV_NAME);
if (error) {
dev_err(dev, "unable to claim gpio %u, err=%d\n",
gpio, error);
goto err_free_gpio;
}
- error = gpio_direction_input(gpio);
- if (error) {
- dev_err(dev,
- "unable to set direction on gpio %u, err=%d\n",
- gpio, error);
- goto err_free_gpio;
- }
-
bdata->can_sleep = gpio_cansleep(gpio);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
@@ -329,7 +334,7 @@ err_free_pdata:
return error;
}
-static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
+static int gpio_keys_polled_remove(struct platform_device *pdev)
{
struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
@@ -357,7 +362,7 @@ static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_keys_polled_driver = {
.probe = gpio_keys_polled_probe,
- .remove = __devexit_p(gpio_keys_polled_remove),
+ .remove = gpio_keys_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
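The polled variant gets the same gpio_request_one() conversion plus an explicit capability setup: EV_KEY is set with __set_bit() and EV_REP only when platform data asks for autorepeat, instead of the old blanket evbit[0] = BIT(EV_KEY). A sketch of that capability declaration; KEY_POWER is just an example keycode:

#include <linux/bitops.h>
#include <linux/input.h>

static void demo_declare_caps(struct input_dev *input, bool autorepeat)
{
	__set_bit(EV_KEY, input->evbit);	/* device generates key events */
	if (autorepeat)
		__set_bit(EV_REP, input->evbit);	/* kernel-side autorepeat */

	input_set_capability(input, EV_KEY, KEY_POWER);	/* one key it can emit */
}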
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 5f72440b50c8..198dc07a1be5 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -200,7 +200,7 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len)
/* initialize HIL */
-static int __devinit hil_keyb_init(void)
+static int hil_keyb_init(void)
{
unsigned char c;
unsigned int i, kbid;
@@ -286,7 +286,7 @@ err1:
return err;
}
-static void __devexit hil_keyb_exit(void)
+static void hil_keyb_exit(void)
{
if (HIL_IRQ)
free_irq(HIL_IRQ, hil_dev.dev_id);
@@ -299,7 +299,7 @@ static void __devexit hil_keyb_exit(void)
}
#if defined(CONFIG_PARISC)
-static int __devinit hil_probe_chip(struct parisc_device *dev)
+static int hil_probe_chip(struct parisc_device *dev)
{
/* Only allow one HIL keyboard */
if (hil_dev.dev)
@@ -320,7 +320,7 @@ static int __devinit hil_probe_chip(struct parisc_device *dev)
return hil_keyb_init();
}
-static int __devexit hil_remove_chip(struct parisc_device *dev)
+static int hil_remove_chip(struct parisc_device *dev)
{
hil_keyb_exit();
@@ -341,7 +341,7 @@ static struct parisc_driver hil_driver = {
.name = "hil",
.id_table = hil_tbl,
.probe = hil_probe_chip,
- .remove = __devexit_p(hil_remove_chip),
+ .remove = hil_remove_chip,
};
static int __init hil_init(void)
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index cdc252612c0b..6d150e3e1f55 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -362,7 +362,8 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
writew(reg_val, keypad->mmio_base + KPSR);
 /* Columns as open drain and disable all rows */
- writew(0xff00, keypad->mmio_base + KPCR);
+ reg_val = (keypad->cols_en_mask & 0xff) << 8;
+ writew(reg_val, keypad->mmio_base + KPCR);
}
static void imx_keypad_close(struct input_dev *dev)
@@ -413,7 +414,7 @@ open_err:
return -EIO;
}
-static int __devinit imx_keypad_probe(struct platform_device *pdev)
+static int imx_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
struct imx_keypad *keypad;
@@ -554,7 +555,7 @@ failed_rel_mem:
return error;
}
-static int __devexit imx_keypad_remove(struct platform_device *pdev)
+static int imx_keypad_remove(struct platform_device *pdev)
{
struct imx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -632,7 +633,7 @@ static struct platform_driver imx_keypad_driver = {
.pm = &imx_kbd_pm_ops,
},
.probe = imx_keypad_probe,
- .remove = __devexit_p(imx_keypad_remove),
+ .remove = imx_keypad_remove,
};
module_platform_driver(imx_keypad_driver);
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 24f3ea01c4d5..74e75a6e8deb 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -179,7 +179,7 @@ static void jornadakbd680_poll(struct input_polled_dev *dev)
memcpy(jornadakbd->old_scan, jornadakbd->new_scan, JORNADA_SCAN_SIZE);
}
-static int __devinit jornada680kbd_probe(struct platform_device *pdev)
+static int jornada680kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_polled_dev *poll_dev;
@@ -240,7 +240,7 @@ static int __devinit jornada680kbd_probe(struct platform_device *pdev)
}
-static int __devexit jornada680kbd_remove(struct platform_device *pdev)
+static int jornada680kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -258,7 +258,7 @@ static struct platform_driver jornada680kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada680kbd_probe,
- .remove = __devexit_p(jornada680kbd_remove),
+ .remove = jornada680kbd_remove,
};
module_platform_driver(jornada680kbd_driver);
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 9d639fa1afbd..5ceef636df2f 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -94,7 +94,7 @@ static irqreturn_t jornada720_kbd_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
};
-static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
+static int jornada720_kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
struct input_dev *input_dev;
@@ -152,7 +152,7 @@ static int __devinit jornada720_kbd_probe(struct platform_device *pdev)
return err;
};
-static int __devexit jornada720_kbd_remove(struct platform_device *pdev)
+static int jornada720_kbd_remove(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd = platform_get_drvdata(pdev);
@@ -173,6 +173,6 @@ static struct platform_driver jornada720_kbd_driver = {
.owner = THIS_MODULE,
},
.probe = jornada720_kbd_probe,
- .remove = __devexit_p(jornada720_kbd_remove),
+ .remove = jornada720_kbd_remove,
};
module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 39ac2787e275..93c812662134 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -624,7 +624,7 @@ static ssize_t lm8323_set_disable(struct device *dev,
}
static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
-static int __devinit lm8323_probe(struct i2c_client *client,
+static int lm8323_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm8323_platform_data *pdata = client->dev.platform_data;
@@ -764,7 +764,7 @@ fail1:
return err;
}
-static int __devexit lm8323_remove(struct i2c_client *client)
+static int lm8323_remove(struct i2c_client *client)
{
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -846,7 +846,7 @@ static struct i2c_driver lm8323_i2c_driver = {
.pm = &lm8323_pm_ops,
},
.probe = lm8323_probe,
- .remove = __devexit_p(lm8323_remove),
+ .remove = lm8323_remove,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 081fd9effa8c..5a8ca35dc9af 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -128,7 +128,7 @@ static irqreturn_t lm8333_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit lm8333_probe(struct i2c_client *client,
+static int lm8333_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct lm8333_platform_data *pdata = client->dev.platform_data;
@@ -202,7 +202,7 @@ static int __devinit lm8333_probe(struct i2c_client *client,
return err;
}
-static int __devexit lm8333_remove(struct i2c_client *client)
+static int lm8333_remove(struct i2c_client *client)
{
struct lm8333 *lm8333 = i2c_get_clientdata(client);
@@ -225,7 +225,7 @@ static struct i2c_driver lm8333_driver = {
.owner = THIS_MODULE,
},
.probe = lm8333_probe,
- .remove = __devexit_p(lm8333_remove),
+ .remove = lm8333_remove,
.id_table = lm8333_id,
};
module_i2c_driver(lm8333_driver);
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index b1ab29861e1c..c94d610b9d78 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -46,7 +46,7 @@ MODULE_LICENSE("GPL");
#define KEY_CENTER KEY_F15
static const unsigned char
-locomokbd_keycode[LOCOMOKBD_NUMKEYS] __devinitconst = {
+locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
0, KEY_ESC, KEY_ACTIVITY, 0, 0, 0, 0, 0, 0, 0, /* 0 - 9 */
0, 0, 0, 0, 0, 0, 0, KEY_MENU, KEY_HOME, KEY_CONTACT, /* 10 - 19 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 29 */
@@ -236,7 +236,7 @@ static void locomokbd_close(struct input_dev *dev)
locomo_writel(r, locomokbd->base + LOCOMO_KIC);
}
-static int __devinit locomokbd_probe(struct locomo_dev *dev)
+static int locomokbd_probe(struct locomo_dev *dev)
{
struct locomokbd *locomokbd;
struct input_dev *input_dev;
@@ -321,7 +321,7 @@ static int __devinit locomokbd_probe(struct locomo_dev *dev)
return err;
}
-static int __devexit locomokbd_remove(struct locomo_dev *dev)
+static int locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
@@ -345,7 +345,7 @@ static struct locomo_driver keyboard_driver = {
},
.devid = LOCOMO_DEVID_KEYBOARD,
.probe = locomokbd_probe,
- .remove = __devexit_p(locomokbd_remove),
+ .remove = locomokbd_remove,
};
static int __init locomokbd_init(void)
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index dd786c8a7584..1b8add6cfb9d 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -139,7 +139,7 @@ static void lpc32xx_kscan_close(struct input_dev *dev)
clk_disable_unprepare(kscandat->clk);
}
-static int __devinit lpc32xx_parse_dt(struct device *dev,
+static int lpc32xx_parse_dt(struct device *dev,
struct lpc32xx_kscan_drv *kscandat)
{
struct device_node *np = dev->of_node;
@@ -166,7 +166,7 @@ static int __devinit lpc32xx_parse_dt(struct device *dev,
return 0;
}
-static int __devinit lpc32xx_kscan_probe(struct platform_device *pdev)
+static int lpc32xx_kscan_probe(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat;
struct input_dev *input;
@@ -310,7 +310,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_kscan_remove(struct platform_device *pdev)
+static int lpc32xx_kscan_remove(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_kscan_match);
static struct platform_driver lpc32xx_kscan_driver = {
.probe = lpc32xx_kscan_probe,
- .remove = __devexit_p(lpc32xx_kscan_remove),
+ .remove = lpc32xx_kscan_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 18b72372028a..f4ff0dda7597 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -23,6 +23,9 @@
#include <linux/gpio.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
struct matrix_keypad {
const struct matrix_keypad_platform_data *pdata;
@@ -37,8 +40,6 @@ struct matrix_keypad {
bool scan_pending;
bool stopped;
bool gpio_all_disabled;
-
- unsigned short keycodes[];
};
/*
@@ -118,6 +119,7 @@ static void matrix_keypad_scan(struct work_struct *work)
struct matrix_keypad *keypad =
container_of(work, struct matrix_keypad, work.work);
struct input_dev *input_dev = keypad->input_dev;
+ const unsigned short *keycodes = input_dev->keycode;
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
@@ -153,7 +155,7 @@ static void matrix_keypad_scan(struct work_struct *work)
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev,
- keypad->keycodes[code],
+ keycodes[code],
new_state[col] & (1 << row));
}
}
@@ -299,8 +301,8 @@ static int matrix_keypad_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
matrix_keypad_suspend, matrix_keypad_resume);
-static int __devinit matrix_keypad_init_gpio(struct platform_device *pdev,
- struct matrix_keypad *keypad)
+static int matrix_keypad_init_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i, err;
@@ -394,33 +396,95 @@ static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
gpio_free(pdata->col_gpios[i]);
}
-static int __devinit matrix_keypad_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ struct matrix_keypad_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+ unsigned int *gpios;
+ int i;
+
+ if (!np) {
+ dev_err(dev, "device lacks DT data\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->num_row_gpios = of_gpio_named_count(np, "row-gpios");
+ pdata->num_col_gpios = of_gpio_named_count(np, "col-gpios");
+ if (!pdata->num_row_gpios || !pdata->num_col_gpios) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_get_property(np, "linux,no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,wakeup", NULL))
+ pdata->wakeup = true;
+ if (of_get_property(np, "gpio-activelow", NULL))
+ pdata->active_low = true;
+
+ of_property_read_u32(np, "debounce-delay-ms", &pdata->debounce_ms);
+ of_property_read_u32(np, "col-scan-delay-us",
+ &pdata->col_scan_delay_us);
+
+ gpios = devm_kzalloc(dev,
+ sizeof(unsigned int) *
+ (pdata->num_row_gpios + pdata->num_col_gpios),
+ GFP_KERNEL);
+ if (!gpios) {
+ dev_err(dev, "could not allocate memory for gpios\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpios[pdata->num_row_gpios + i] =
+ of_get_named_gpio(np, "col-gpios", i);
+
+ pdata->row_gpios = gpios;
+ pdata->col_gpios = &gpios[pdata->num_row_gpios];
+
+ return pdata;
+}
+#else
+static inline struct matrix_keypad_platform_data *
+matrix_keypad_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data defined\n");
+
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int matrix_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
- const struct matrix_keymap_data *keymap_data;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
- unsigned int row_shift;
- size_t keymap_size;
int err;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- keymap_data = pdata->keymap_data;
- if (!keymap_data) {
+ pdata = matrix_keypad_parse_dt(&pdev->dev);
+ if (IS_ERR(pdata)) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ return PTR_ERR(pdata);
+ }
+ } else if (!pdata->keymap_data) {
dev_err(&pdev->dev, "no keymap data defined\n");
return -EINVAL;
}
- row_shift = get_count_order(pdata->num_col_gpios);
- keymap_size = (pdata->num_row_gpios << row_shift) *
- sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(struct matrix_keypad) + keymap_size,
- GFP_KERNEL);
+ keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
input_dev = input_allocate_device();
if (!keypad || !input_dev) {
err = -ENOMEM;
@@ -429,7 +493,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
keypad->pdata = pdata;
- keypad->row_shift = row_shift;
+ keypad->row_shift = get_count_order(pdata->num_col_gpios);
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
spin_lock_init(&keypad->lock);
@@ -440,12 +504,14 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- err = matrix_keypad_build_keymap(keymap_data, NULL,
+ err = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
pdata->num_row_gpios,
pdata->num_col_gpios,
- keypad->keycodes, input_dev);
- if (err)
+ NULL, input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
goto err_free_mem;
+ }
if (!pdata->no_autorepeat)
__set_bit(EV_REP, input_dev->evbit);
@@ -473,7 +539,7 @@ err_free_mem:
return err;
}
-static int __devexit matrix_keypad_remove(struct platform_device *pdev)
+static int matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
@@ -488,13 +554,22 @@ static int __devexit matrix_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id matrix_keypad_dt_match[] = {
+ { .compatible = "gpio-matrix-keypad" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
+#endif
+
static struct platform_driver matrix_keypad_driver = {
.probe = matrix_keypad_probe,
- .remove = __devexit_p(matrix_keypad_remove),
+ .remove = matrix_keypad_remove,
.driver = {
.name = "matrix-keypad",
.owner = THIS_MODULE,
.pm = &matrix_keypad_pm_ops,
+ .of_match_table = of_match_ptr(matrix_keypad_dt_match),
},
};
module_platform_driver(matrix_keypad_driver);
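The matrix_keypad diff is the largest: the per-device keycodes[] array moves into the input core (matrix_keypad_build_keymap() is now passed a NULL buffer), and matrix_keypad_parse_dt() builds platform data straight from the "gpio-matrix-keypad" device-tree binding. A trimmed sketch of the OF helpers it leans on; demo_count_and_check() is illustrative and skips the allocation and column handling:

#include <linux/of.h>
#include <linux/of_gpio.h>

static int demo_count_and_check(struct device_node *np)
{
	int nrows, gpio;

	/* how many entries the "row-gpios" list carries */
	nrows = of_gpio_named_count(np, "row-gpios");
	if (nrows <= 0)
		return -EINVAL;

	/* first row line; negative means error, possibly -EPROBE_DEFER */
	gpio = of_get_named_gpio(np, "row-gpios", 0);
	if (gpio < 0)
		return gpio;

	return nrows;
}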
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 8edada8ae712..7c7af2b01e65 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -179,7 +179,7 @@ static void max7359_initialize(struct i2c_client *client)
max7359_fall_deepsleep(client);
}
-static int __devinit max7359_probe(struct i2c_client *client,
+static int max7359_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
@@ -260,7 +260,7 @@ failed_free_mem:
return error;
}
-static int __devexit max7359_remove(struct i2c_client *client)
+static int max7359_remove(struct i2c_client *client)
{
struct max7359_keypad *keypad = i2c_get_clientdata(client);
@@ -312,7 +312,7 @@ static struct i2c_driver max7359_i2c_driver = {
.pm = &max7359_pm,
},
.probe = max7359_probe,
- .remove = __devexit_p(max7359_remove),
+ .remove = max7359_remove,
.id_table = max7359_ids,
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 0d77f6c84950..7c236f9c6a51 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -97,7 +97,7 @@ static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mcs_touchkey_probe(struct i2c_client *client,
+static int mcs_touchkey_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mcs_platform_data *pdata;
@@ -200,7 +200,7 @@ err_free_mem:
return error;
}
-static int __devexit mcs_touchkey_remove(struct i2c_client *client)
+static int mcs_touchkey_remove(struct i2c_client *client)
{
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
@@ -270,7 +270,7 @@ static struct i2c_driver mcs_touchkey_driver = {
.pm = &mcs_touchkey_pm_ops,
},
.probe = mcs_touchkey_probe,
- .remove = __devexit_p(mcs_touchkey_remove),
+ .remove = mcs_touchkey_remove,
.shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 7613f1cac951..f7f3e9a9fd3f 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -71,7 +71,7 @@ struct mpr121_init_register {
u8 val;
};
-static const struct mpr121_init_register init_reg_table[] __devinitconst = {
+static const struct mpr121_init_register init_reg_table[] = {
{ MHD_RISING_ADDR, 0x1 },
{ NHD_RISING_ADDR, 0x1 },
{ MHD_FALLING_ADDR, 0x1 },
@@ -123,7 +123,7 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
+static int mpr121_phys_init(const struct mpr121_platform_data *pdata,
struct mpr121_touchkey *mpr121,
struct i2c_client *client)
{
@@ -185,8 +185,8 @@ err_i2c_write:
return ret;
}
-static int __devinit mpr_touchkey_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mpr_touchkey_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct mpr121_platform_data *pdata = client->dev.platform_data;
struct mpr121_touchkey *mpr121;
@@ -272,7 +272,7 @@ err_free_mem:
return error;
}
-static int __devexit mpr_touchkey_remove(struct i2c_client *client)
+static int mpr_touchkey_remove(struct i2c_client *client)
{
struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client);
@@ -327,7 +327,7 @@ static struct i2c_driver mpr_touchkey_driver = {
},
.id_table = mpr121_id,
.probe = mpr_touchkey_probe,
- .remove = __devexit_p(mpr_touchkey_remove),
+ .remove = mpr_touchkey_remove,
};
module_i2c_driver(mpr_touchkey_driver);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 49f5fa64e0b1..0e6a8151fee3 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -67,6 +67,7 @@ struct ske_keypad {
const struct ske_keypad_platform_data *board;
unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
struct clk *clk;
+ struct clk *pclk;
spinlock_t ske_keypad_lock;
};
@@ -271,11 +272,18 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
goto err_free_mem_region;
}
+ keypad->pclk = clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(keypad->pclk)) {
+ dev_err(&pdev->dev, "failed to get pclk\n");
+ error = PTR_ERR(keypad->pclk);
+ goto err_iounmap;
+ }
+
keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ goto err_pclk;
}
input->id.bustype = BUS_HOST;
@@ -287,14 +295,25 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->keymap, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_iounmap;
+ goto err_clk;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- clk_enable(keypad->clk);
+ error = clk_prepare_enable(keypad->pclk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable pclk\n");
+ goto err_clk;
+ }
+
+ error = clk_prepare_enable(keypad->clk);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to prepare/enable clk\n");
+ goto err_pclk_disable;
+ }
+
/* go through board initialization helpers */
if (keypad->board->init)
@@ -330,8 +349,13 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
err_free_irq:
free_irq(keypad->irq, keypad);
err_clk_disable:
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
+err_pclk_disable:
+ clk_disable_unprepare(keypad->pclk);
+err_clk:
clk_put(keypad->clk);
+err_pclk:
+ clk_put(keypad->pclk);
err_iounmap:
iounmap(keypad->reg_base);
err_free_mem_region:
@@ -342,7 +366,7 @@ err_free_mem:
return error;
}
-static int __devexit ske_keypad_remove(struct platform_device *pdev)
+static int ske_keypad_remove(struct platform_device *pdev)
{
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -351,7 +375,7 @@ static int __devexit ske_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input);
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
clk_put(keypad->clk);
if (keypad->board->exit)
@@ -403,7 +427,7 @@ static struct platform_driver ske_keypad_driver = {
.owner = THIS_MODULE,
.pm = &ske_keypad_dev_pm_ops,
},
- .remove = __devexit_p(ske_keypad_remove),
+ .remove = ske_keypad_remove,
};
static int __init ske_keypad_init(void)
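The nomadik-ske hunks add the "apb_pclk" bus clock next to the functional clock and switch both to clk_prepare_enable()/clk_disable_unprepare(), with the error gotos unwinding in reverse acquisition order. A sketch of just the enable/unwind pairing; the two-clock split mirrors the diff, the helper name is made up:

#include <linux/clk.h>

static int demo_enable_clocks(struct clk *pclk, struct clk *clk)
{
	int error;

	error = clk_prepare_enable(pclk);	/* bus clock first */
	if (error)
		return error;

	error = clk_prepare_enable(clk);	/* then the functional clock */
	if (error) {
		clk_disable_unprepare(pclk);	/* unwind in reverse order */
		return error;
	}

	return 0;
}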
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 4a5fcc8026f5..d0d5226d9cd4 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -244,7 +244,7 @@ static int omap_kp_resume(struct platform_device *dev)
#define omap_kp_resume NULL
#endif
-static int __devinit omap_kp_probe(struct platform_device *pdev)
+static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
struct input_dev *input_dev;
@@ -357,7 +357,7 @@ err2:
return -EINVAL;
}
-static int __devexit omap_kp_remove(struct platform_device *pdev)
+static int omap_kp_remove(struct platform_device *pdev)
{
struct omap_kp *omap_kp = platform_get_drvdata(pdev);
@@ -379,7 +379,7 @@ static int __devexit omap_kp_remove(struct platform_device *pdev)
static struct platform_driver omap_kp_driver = {
.probe = omap_kp_probe,
- .remove = __devexit_p(omap_kp_remove),
+ .remove = omap_kp_remove,
.suspend = omap_kp_suspend,
.resume = omap_kp_resume,
.driver = {
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c05f98c41410..e25b022692cd 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -211,8 +211,8 @@ static void omap4_keypad_close(struct input_dev *input)
}
#ifdef CONFIG_OF
-static int __devinit omap4_keypad_parse_dt(struct device *dev,
- struct omap4_keypad *keypad_data)
+static int omap4_keypad_parse_dt(struct device *dev,
+ struct omap4_keypad *keypad_data)
{
struct device_node *np = dev->of_node;
@@ -241,7 +241,7 @@ static inline int omap4_keypad_parse_dt(struct device *dev,
}
#endif
-static int __devinit omap4_keypad_probe(struct platform_device *pdev)
+static int omap4_keypad_probe(struct platform_device *pdev)
{
const struct omap4_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -406,7 +406,7 @@ err_free_keypad:
return error;
}
-static int __devexit omap4_keypad_remove(struct platform_device *pdev)
+static int omap4_keypad_remove(struct platform_device *pdev)
{
struct omap4_keypad *keypad_data = platform_get_drvdata(pdev);
struct resource *res;
@@ -440,7 +440,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
static struct platform_driver omap4_keypad_driver = {
.probe = omap4_keypad_probe,
- .remove = __devexit_p(omap4_keypad_remove),
+ .remove = omap4_keypad_remove,
.driver = {
.name = "omap4-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index abe728c7b88e..7ac5f174c6f7 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -37,7 +37,7 @@ static irqreturn_t opencores_kbd_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit opencores_kbd_probe(struct platform_device *pdev)
+static int opencores_kbd_probe(struct platform_device *pdev)
{
struct input_dev *input;
struct opencores_kbd *opencores_kbd;
@@ -139,7 +139,7 @@ static int __devinit opencores_kbd_probe(struct platform_device *pdev)
return error;
}
-static int __devexit opencores_kbd_remove(struct platform_device *pdev)
+static int opencores_kbd_remove(struct platform_device *pdev)
{
struct opencores_kbd *opencores_kbd = platform_get_drvdata(pdev);
@@ -158,7 +158,7 @@ static int __devexit opencores_kbd_remove(struct platform_device *pdev)
static struct platform_driver opencores_kbd_device_driver = {
.probe = opencores_kbd_probe,
- .remove = __devexit_p(opencores_kbd_remove),
+ .remove = opencores_kbd_remove,
.driver = {
.name = "opencores-kbd",
},
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 52c34657d301..74339e139d43 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -397,7 +397,7 @@ static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
+static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
int bits, rc, cycles;
u8 scan_val = 0, ctrl_val = 0;
@@ -447,7 +447,7 @@ static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
}
-static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
+static int pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
{
int rc, i;
@@ -518,7 +518,7 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
* - set irq edge type.
* - enable the keypad controller.
*/
-static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
+static int pmic8xxx_kp_probe(struct platform_device *pdev)
{
const struct pm8xxx_keypad_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -712,7 +712,7 @@ err_alloc_device:
return rc;
}
-static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
+static int pmic8xxx_kp_remove(struct platform_device *pdev)
{
struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
@@ -773,7 +773,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
static struct platform_driver pmic8xxx_kp_driver = {
.probe = pmic8xxx_kp_probe,
- .remove = __devexit_p(pmic8xxx_kp_remove),
+ .remove = pmic8xxx_kp_remove,
.driver = {
.name = PM8XXX_KEYPAD_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index cad9d5dd5973..5330d8fbf6c0 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops pxa27x_keypad_pm_ops = {
};
#endif
-static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
+static int pxa27x_keypad_probe(struct platform_device *pdev)
{
struct pxa27x_keypad_platform_data *pdata = pdev->dev.platform_data;
struct pxa27x_keypad *keypad;
@@ -595,7 +595,7 @@ failed_free:
return error;
}
-static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
+static int pxa27x_keypad_remove(struct platform_device *pdev)
{
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -620,7 +620,7 @@ MODULE_ALIAS("platform:pxa27x-keypad");
static struct platform_driver pxa27x_keypad_driver = {
.probe = pxa27x_keypad_probe,
- .remove = __devexit_p(pxa27x_keypad_remove),
+ .remove = pxa27x_keypad_remove,
.driver = {
.name = "pxa27x-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 41488f9add20..bcad95be73aa 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -82,7 +82,7 @@ static void pxa930_rotary_close(struct input_dev *dev)
clear_sbcr(r);
}
-static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
+static int pxa930_rotary_probe(struct platform_device *pdev)
{
struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
struct pxa930_rotary *r;
@@ -174,7 +174,7 @@ failed_free:
return err;
}
-static int __devexit pxa930_rotary_remove(struct platform_device *pdev)
+static int pxa930_rotary_remove(struct platform_device *pdev)
{
struct pxa930_rotary *r = platform_get_drvdata(pdev);
@@ -193,7 +193,7 @@ static struct platform_driver pxa930_rotary_driver = {
.owner = THIS_MODULE,
},
.probe = pxa930_rotary_probe,
- .remove = __devexit_p(pxa930_rotary_remove),
+ .remove = pxa930_rotary_remove,
};
module_platform_driver(pxa930_rotary_driver);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca68f2992d72..42b773b3125a 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -91,7 +91,7 @@ static int qt1070_write(struct i2c_client *client, u8 reg, u8 data)
return ret;
}
-static bool __devinit qt1070_identify(struct i2c_client *client)
+static bool qt1070_identify(struct i2c_client *client)
{
int id, ver;
@@ -140,7 +140,7 @@ static irqreturn_t qt1070_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit qt1070_probe(struct i2c_client *client,
+static int qt1070_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct qt1070_data *data;
@@ -230,7 +230,7 @@ err_free_mem:
return err;
}
-static int __devexit qt1070_remove(struct i2c_client *client)
+static int qt1070_remove(struct i2c_client *client)
{
struct qt1070_data *data = i2c_get_clientdata(client);
@@ -256,7 +256,7 @@ static struct i2c_driver qt1070_driver = {
},
.id_table = qt1070_id,
.probe = qt1070_probe,
- .remove = __devexit_p(qt1070_remove),
+ .remove = qt1070_remove,
};
module_i2c_driver(qt1070_driver);
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 76b7d430d03a..3dc2b0f27b0c 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -183,7 +183,7 @@ static void qt2160_worker(struct work_struct *work)
qt2160_schedule_read(qt2160);
}
-static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
+static int qt2160_read(struct i2c_client *client, u8 reg)
{
int ret;
@@ -204,29 +204,20 @@ static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
return ret;
}
-static int __devinit qt2160_write(struct i2c_client *client, u8 reg, u8 data)
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
{
- int error;
-
- error = i2c_smbus_write_byte(client, reg);
- if (error) {
- dev_err(&client->dev,
- "couldn't send request. Returned %d\n", error);
- return error;
- }
+ int ret;
- error = i2c_smbus_write_byte(client, data);
- if (error) {
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
dev_err(&client->dev,
- "couldn't write data. Returned %d\n", error);
- return error;
- }
+ "couldn't write data. Returned %d\n", ret);
- return error;
+ return ret;
}
-static bool __devinit qt2160_identify(struct i2c_client *client)
+static bool qt2160_identify(struct i2c_client *client)
{
int id, ver, rev;
@@ -257,7 +248,7 @@ static bool __devinit qt2160_identify(struct i2c_client *client)
return true;
}
-static int __devinit qt2160_probe(struct i2c_client *client,
+static int qt2160_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct qt2160_data *qt2160;
@@ -344,7 +335,7 @@ err_free_mem:
return error;
}
-static int __devexit qt2160_remove(struct i2c_client *client)
+static int qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
@@ -375,7 +366,7 @@ static struct i2c_driver qt2160_driver = {
.id_table = qt2160_idtable,
.probe = qt2160_probe,
- .remove = __devexit_p(qt2160_remove),
+ .remove = qt2160_remove,
};
module_i2c_driver(qt2160_driver);
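The interesting part of the qt2160 diff is qt2160_write(): two back-to-back i2c_smbus_write_byte() calls (register, then payload, as separate transfers) are replaced by a single i2c_smbus_write_byte_data(), which sends the register address and the value in one SMBus "write byte data" transaction. A sketch of that pattern; demo_write_reg() is an illustrative wrapper, not a function from the driver:

#include <linux/i2c.h>

static int demo_write_reg(struct i2c_client *client, u8 reg, u8 val)
{
	/* register address and payload go out in a single transaction */
	int ret = i2c_smbus_write_byte_data(client, reg, val);

	if (ret < 0)
		dev_err(&client->dev,
			"couldn't write 0x%02x to reg 0x%02x: %d\n",
			val, reg, ret);

	return ret;
}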
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 9d7a111486f7..22e357b51024 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -309,7 +309,7 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
struct samsung_keypad *keypad)
{
struct device_node *np = dev->of_node;
- int gpio, ret, row, col;
+ int gpio, error, row, col;
for (row = 0; row < keypad->rows; row++) {
gpio = of_get_named_gpio(np, "row-gpios", row);
@@ -320,10 +320,11 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-row");
- if (ret)
- dev_err(dev, "keypad row[%d] gpio request failed\n",
- row);
+ error = devm_gpio_request(dev, gpio, "keypad-row");
+ if (error)
+ dev_err(dev,
+ "keypad row[%d] gpio request failed: %d\n",
+ row, error);
}
for (col = 0; col < keypad->cols; col++) {
@@ -335,38 +336,22 @@ static void samsung_keypad_parse_dt_gpio(struct device *dev,
continue;
}
- ret = gpio_request(gpio, "keypad-col");
- if (ret)
- dev_err(dev, "keypad column[%d] gpio request failed\n",
- col);
+ error = devm_gpio_request(dev, gpio, "keypad-col");
+ if (error)
+ dev_err(dev,
+ "keypad column[%d] gpio request failed: %d\n",
+ col, error);
}
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
- int cnt;
-
- for (cnt = 0; cnt < keypad->rows; cnt++)
- if (gpio_is_valid(keypad->row_gpios[cnt]))
- gpio_free(keypad->row_gpios[cnt]);
-
- for (cnt = 0; cnt < keypad->cols; cnt++)
- if (gpio_is_valid(keypad->col_gpios[cnt]))
- gpio_free(keypad->col_gpios[cnt]);
-}
#else
static
struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
{
return NULL;
}
-
-static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
-{
-}
#endif
-static int __devinit samsung_keypad_probe(struct platform_device *pdev)
+static int samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -405,36 +390,30 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
row_shift = get_count_order(pdata->cols);
keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);
- keypad = kzalloc(sizeof(*keypad) + keymap_size, GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!keypad || !input_dev)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- error = -ENODEV;
- goto err_free_mem;
- }
+ if (!res)
+ return -ENODEV;
- keypad->base = ioremap(res->start, resource_size(res));
- if (!keypad->base) {
- error = -EBUSY;
- goto err_free_mem;
- }
+ keypad->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!keypad->base)
+ return -EBUSY;
- keypad->clk = clk_get(&pdev->dev, "keypad");
+ keypad->clk = devm_clk_get(&pdev->dev, "keypad");
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_unmap_base;
+ return PTR_ERR(keypad->clk);
}
error = clk_prepare(keypad->clk);
if (error) {
dev_err(&pdev->dev, "keypad clock prepare failed\n");
- goto err_put_clk;
+ return error;
}
keypad->input_dev = input_dev;
@@ -479,14 +458,15 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
error = keypad->irq;
- goto err_put_clk;
+ goto err_unprepare_clk;
}
- error = request_threaded_irq(keypad->irq, NULL, samsung_keypad_irq,
- IRQF_ONESHOT, dev_name(&pdev->dev), keypad);
+ error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
+ samsung_keypad_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), keypad);
if (error) {
dev_err(&pdev->dev, "failed to register keypad interrupt\n");
- goto err_put_clk;
+ goto err_unprepare_clk;
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
@@ -495,7 +475,7 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
error = input_register_device(keypad->input_dev);
if (error)
- goto err_free_irq;
+ goto err_disable_runtime_pm;
if (pdev->dev.of_node) {
devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
@@ -504,26 +484,16 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
return 0;
-err_free_irq:
- free_irq(keypad->irq, keypad);
+err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
err_unprepare_clk:
clk_unprepare(keypad->clk);
-err_put_clk:
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-err_unmap_base:
- iounmap(keypad->base);
-err_free_mem:
- input_free_device(input_dev);
- kfree(keypad);
-
return error;
}
-static int __devexit samsung_keypad_remove(struct platform_device *pdev)
+static int samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
@@ -533,18 +503,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
- /*
- * It is safe to free IRQ after unregistering device because
- * samsung_keypad_close will shut off interrupts.
- */
- free_irq(keypad->irq, keypad);
-
clk_unprepare(keypad->clk);
- clk_put(keypad->clk);
- samsung_keypad_dt_gpio_free(keypad);
-
- iounmap(keypad->base);
- kfree(keypad);
return 0;
}
@@ -685,7 +644,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = __devexit_p(samsung_keypad_remove),
+ .remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
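The samsung-keypad conversion above is representative of the managed-resource (devm_*) rework applied throughout this series: memory, MMIO mappings, clocks and IRQs are bound to the device, so the probe error unwind and the remove path keep only the steps devm cannot cover (clk_unprepare, runtime PM, and so on). A minimal sketch of that shape, with foo_* placeholder names rather than any driver's real symbols:

/* Sketch only: foo_* identifiers are placeholders, not part of this patch. */
struct foo_priv {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/* ... handle the interrupt ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *res;
	int irq, error;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* All devm_* resources are released automatically on unbind. */
	priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!priv->base)
		return -EBUSY;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	error = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				 pdev->name, priv);
	if (error)
		return error;

	platform_set_drvdata(pdev, priv);
	return 0;
}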
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index da54ad5db154..fdb9eb2df380 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -162,7 +162,7 @@ static irqreturn_t sh_keysc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit sh_keysc_probe(struct platform_device *pdev)
+static int sh_keysc_probe(struct platform_device *pdev)
{
struct sh_keysc_priv *priv;
struct sh_keysc_info *pdata;
@@ -272,7 +272,7 @@ static int __devinit sh_keysc_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sh_keysc_remove(struct platform_device *pdev)
+static int sh_keysc_remove(struct platform_device *pdev)
{
struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
@@ -331,7 +331,7 @@ static SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
static struct platform_driver sh_keysc_device_driver = {
.probe = sh_keysc_probe,
- .remove = __devexit_p(sh_keysc_remove),
+ .remove = sh_keysc_remove,
.driver = {
.name = "sh_keysc",
.pm = &sh_keysc_dev_pm_ops,
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index c7ca97f44bfb..695d237417d6 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -55,15 +55,15 @@
struct spear_kbd {
struct input_dev *input;
- struct resource *res;
void __iomem *io_base;
struct clk *clk;
unsigned int irq;
unsigned int mode;
+ unsigned int suspended_rate;
unsigned short last_key;
unsigned short keycodes[NUM_ROWS * NUM_COLS];
bool rep;
- unsigned int suspended_rate;
+ bool irq_wake_enabled;
u32 mode_ctl_reg;
};
@@ -146,7 +146,7 @@ static void spear_kbd_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit spear_kbd_parse_dt(struct platform_device *pdev,
+static int spear_kbd_parse_dt(struct platform_device *pdev,
struct spear_kbd *kbd)
{
struct device_node *np = pdev->dev.of_node;
@@ -181,7 +181,7 @@ static inline int spear_kbd_parse_dt(struct platform_device *pdev,
}
#endif
-static int __devinit spear_kbd_probe(struct platform_device *pdev)
+static int spear_kbd_probe(struct platform_device *pdev)
{
struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
@@ -203,12 +203,16 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
return irq;
}
- kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!kbd || !input_dev) {
- dev_err(&pdev->dev, "out of memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
+ if (!kbd) {
+ dev_err(&pdev->dev, "not enough memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "unable to allocate input device\n");
+ return -ENOMEM;
}
kbd->input = input_dev;
@@ -217,37 +221,25 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
if (!pdata) {
error = spear_kbd_parse_dt(pdev, kbd);
if (error)
- goto err_free_mem;
+ return error;
} else {
kbd->mode = pdata->mode;
kbd->rep = pdata->rep;
kbd->suspended_rate = pdata->suspended_rate;
}
- kbd->res = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!kbd->res) {
- dev_err(&pdev->dev, "keyboard region already claimed\n");
- error = -EBUSY;
- goto err_free_mem;
- }
-
- kbd->io_base = ioremap(res->start, resource_size(res));
+ kbd->io_base = devm_request_and_ioremap(&pdev->dev, res);
if (!kbd->io_base) {
- dev_err(&pdev->dev, "ioremap failed for kbd_region\n");
- error = -ENOMEM;
- goto err_release_mem_region;
+ dev_err(&pdev->dev, "request-ioremap failed for kbd_region\n");
+ return -ENOMEM;
}
- kbd->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(kbd->clk)) {
- error = PTR_ERR(kbd->clk);
- goto err_iounmap;
- }
+ kbd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kbd->clk))
+ return PTR_ERR(kbd->clk);
input_dev->name = "Spear Keyboard";
input_dev->phys = "keyboard/input0";
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0x0001;
input_dev->id.product = 0x0001;
@@ -259,7 +251,7 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
kbd->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_put_clk;
+ return error;
}
if (kbd->rep)
@@ -268,48 +260,36 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, kbd);
- error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
+ error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
+ "keyboard", kbd);
if (error) {
- dev_err(&pdev->dev, "request_irq fail\n");
- goto err_put_clk;
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return error;
}
+ error = clk_prepare(kbd->clk);
+ if (error)
+ return error;
+
error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev, "Unable to register keyboard device\n");
- goto err_free_irq;
+ clk_unprepare(kbd->clk);
+ return error;
}
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, kbd);
return 0;
-
-err_free_irq:
- free_irq(kbd->irq, kbd);
-err_put_clk:
- clk_put(kbd->clk);
-err_iounmap:
- iounmap(kbd->io_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(kbd);
-
- return error;
}
-static int __devexit spear_kbd_remove(struct platform_device *pdev)
+static int spear_kbd_remove(struct platform_device *pdev)
{
struct spear_kbd *kbd = platform_get_drvdata(pdev);
- free_irq(kbd->irq, kbd);
input_unregister_device(kbd->input);
- clk_put(kbd->clk);
- iounmap(kbd->io_base);
- release_mem_region(kbd->res->start, resource_size(kbd->res));
- kfree(kbd);
+ clk_unprepare(kbd->clk);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -333,7 +313,8 @@ static int spear_kbd_suspend(struct device *dev)
mode_ctl_reg = readl_relaxed(kbd->io_base + MODE_CTL_REG);
if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(kbd->irq);
+ if (!enable_irq_wake(kbd->irq))
+ kbd->irq_wake_enabled = true;
/*
* reprogram the keyboard operating frequency as on some
@@ -379,7 +360,10 @@ static int spear_kbd_resume(struct device *dev)
mutex_lock(&input_dev->mutex);
if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(kbd->irq);
+ if (kbd->irq_wake_enabled) {
+ kbd->irq_wake_enabled = false;
+ disable_irq_wake(kbd->irq);
+ }
} else {
if (input_dev->users)
clk_enable(kbd->clk);
@@ -407,7 +391,7 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
- .remove = __devexit_p(spear_kbd_remove),
+ .remove = spear_kbd_remove,
.driver = {
.name = "keyboard",
.owner = THIS_MODULE,
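The spear-keyboard suspend/resume change above records whether enable_irq_wake() actually succeeded and only calls disable_irq_wake() in that case, avoiding an unbalanced IRQ-wake disable on resume. The same guard, sketched with placeholder foo_* names:

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev) && !enable_irq_wake(priv->irq))
		priv->irq_wake_enabled = true;	/* remember that wake was armed */

	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->irq_wake_enabled) {
		priv->irq_wake_enabled = false;
		disable_irq_wake(priv->irq);	/* only undo what suspend armed */
	}

	return 0;
}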
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 470a8778dec1..5cbec56f7720 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -166,7 +166,7 @@ static irqreturn_t stmpe_keypad_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
unsigned int col_gpios = variant->col_gpios;
@@ -207,7 +207,7 @@ static int __devinit stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
}
-static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
+static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -257,105 +257,131 @@ static int __devinit stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
(plat->debounce_ms << 1));
}
-static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
{
- struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ int row, col;
+
+ for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
+ for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ int code = MATRIX_SCAN_CODE(row, col,
+ STMPE_KEYPAD_ROW_SHIFT);
+ if (keypad->keymap[code] != KEY_RESERVED) {
+ keypad->rows |= 1 << row;
+ keypad->cols |= 1 << col;
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_OF
+static const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
struct stmpe_keypad_platform_data *plat;
+
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &plat->scan_count);
+
+ plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
+ return plat;
+}
+#else
+static inline const struct stmpe_keypad_platform_data *
+stmpe_keypad_of_probe(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int stmpe_keypad_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ const struct stmpe_keypad_platform_data *plat;
struct stmpe_keypad *keypad;
struct input_dev *input;
- int ret;
+ int error;
int irq;
- int i;
plat = stmpe->pdata->keypad;
- if (!plat)
- return -ENODEV;
+ if (!plat) {
+ plat = stmpe_keypad_of_probe(&pdev->dev);
+ if (IS_ERR(plat))
+ return PTR_ERR(plat);
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- keypad = kzalloc(sizeof(struct stmpe_keypad), GFP_KERNEL);
+ keypad = devm_kzalloc(&pdev->dev, sizeof(struct stmpe_keypad),
+ GFP_KERNEL);
if (!keypad)
return -ENOMEM;
- input = input_allocate_device();
- if (!input) {
- ret = -ENOMEM;
- goto out_freekeypad;
- }
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
input->name = "STMPE keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- ret = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
- keypad->keymap, input);
- if (ret)
- goto out_freeinput;
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ STMPE_KEYPAD_MAX_ROWS,
+ STMPE_KEYPAD_MAX_COLS,
+ keypad->keymap, input);
+ if (error)
+ return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- for (i = 0; i < plat->keymap_data->keymap_size; i++) {
- unsigned int key = plat->keymap_data->keymap[i];
-
- keypad->cols |= 1 << KEY_COL(key);
- keypad->rows |= 1 << KEY_ROW(key);
- }
+ stmpe_keypad_fill_used_pins(keypad);
keypad->stmpe = stmpe;
keypad->plat = plat;
keypad->input = input;
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
- ret = stmpe_keypad_chip_init(keypad);
- if (ret < 0)
- goto out_freeinput;
+ error = stmpe_keypad_chip_init(keypad);
+ if (error < 0)
+ return error;
- ret = input_register_device(input);
- if (ret) {
- dev_err(&pdev->dev,
- "unable to register input device: %d\n", ret);
- goto out_freeinput;
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, stmpe_keypad_irq,
+ IRQF_ONESHOT, "stmpe-keypad", keypad);
+ if (error) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", error);
+ return error;
}
- ret = request_threaded_irq(irq, NULL, stmpe_keypad_irq, IRQF_ONESHOT,
- "stmpe-keypad", keypad);
- if (ret) {
- dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
- goto out_unregisterinput;
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", error);
+ return error;
}
platform_set_drvdata(pdev, keypad);
return 0;
-
-out_unregisterinput:
- input_unregister_device(input);
- input = NULL;
-out_freeinput:
- input_free_device(input);
-out_freekeypad:
- kfree(keypad);
- return ret;
}
-static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
+static int stmpe_keypad_remove(struct platform_device *pdev)
{
struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
- struct stmpe *stmpe = keypad->stmpe;
- int irq = platform_get_irq(pdev, 0);
-
- stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
- free_irq(irq, keypad);
- input_unregister_device(keypad->input);
- platform_set_drvdata(pdev, NULL);
- kfree(keypad);
+ stmpe_disable(keypad->stmpe, STMPE_BLOCK_KEYPAD);
return 0;
}
@@ -364,7 +390,7 @@ static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
.probe = stmpe_keypad_probe,
- .remove = __devexit_p(stmpe_keypad_remove),
+ .remove = stmpe_keypad_remove,
};
module_platform_driver(stmpe_keypad_driver);
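The stmpe-keypad probe above now falls back to a Device Tree parser when no board platform data is supplied, with a stub that returns ERR_PTR(-EINVAL) when CONFIG_OF is disabled. The general shape of that fallback, sketched with hypothetical foo_* names:

#ifdef CONFIG_OF
static const struct foo_platform_data *foo_of_probe(struct device *dev)
{
	struct foo_platform_data *plat;

	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32(dev->of_node, "debounce-interval",
			     &plat->debounce_ms);
	return plat;
}
#else
static inline const struct foo_platform_data *foo_of_probe(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

	/* in probe: board platform data wins, DT is the fallback */
	plat = dev_get_platdata(&pdev->dev);
	if (!plat) {
		plat = foo_of_probe(&pdev->dev);
		if (IS_ERR(plat))
			return PTR_ERR(plat);
	}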
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 7d498e698508..2fb0d76a04c4 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -299,7 +299,7 @@ static void tc3589x_keypad_close(struct input_dev *input)
tc3589x_keypad_disable(keypad);
}
-static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
+static int tc3589x_keypad_probe(struct platform_device *pdev)
{
struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
struct tc_keypad *keypad;
@@ -382,7 +382,7 @@ err_free_mem:
return error;
}
-static int __devexit tc3589x_keypad_remove(struct platform_device *pdev)
+static int tc3589x_keypad_remove(struct platform_device *pdev)
{
struct tc_keypad *keypad = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -448,7 +448,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.pm = &tc3589x_keypad_dev_pm_ops,
},
.probe = tc3589x_keypad_probe,
- .remove = __devexit_p(tc3589x_keypad_remove),
+ .remove = tc3589x_keypad_remove,
};
module_platform_driver(tc3589x_keypad_driver);
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index c355cdde8d22..bfc832c35a7c 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -166,7 +166,7 @@ static void tca6416_keys_close(struct input_dev *dev)
disable_irq(chip->irqnum);
}
-static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
{
int error;
@@ -197,7 +197,7 @@ static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
return 0;
}
-static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+static int tca6416_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tca6416_keys_platform_data *pdata;
@@ -313,7 +313,7 @@ fail1:
return error;
}
-static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+static int tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
@@ -361,7 +361,7 @@ static struct i2c_driver tca6416_keypad_driver = {
.pm = &tca6416_keypad_dev_pm_ops,
},
.probe = tca6416_keypad_probe,
- .remove = __devexit_p(tca6416_keypad_remove),
+ .remove = tca6416_keypad_remove,
.id_table = tca6416_id,
};
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 893869b29ed9..a34cc6714e5b 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -35,6 +35,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/tca8418_keypad.h>
+#include <linux/of.h>
/* TCA8418 hardware limits */
#define TCA8418_MAX_ROWS 8
@@ -109,25 +110,11 @@
#define KEY_EVENT_CODE 0x7f
#define KEY_EVENT_VALUE 0x80
-
-static const struct i2c_device_id tca8418_id[] = {
- { TCA8418_NAME, 8418, },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tca8418_id);
-
struct tca8418_keypad {
- unsigned int rows;
- unsigned int cols;
- unsigned int keypad_mask; /* Mask for keypad col/rol regs */
- unsigned int irq;
- unsigned int row_shift;
-
struct i2c_client *client;
struct input_dev *input;
- /* Flexible array member, must be at end of struct */
- unsigned short keymap[];
+ unsigned int row_shift;
};
/*
@@ -172,6 +159,8 @@ static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
{
+ struct input_dev *input = keypad_data->input;
+ unsigned short *keymap = input->keycode;
int error, col, row;
u8 reg, state, code;
@@ -190,9 +179,8 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
- input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
- input_report_key(keypad_data->input,
- keypad_data->keymap[code], state);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keymap[code], state);
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
@@ -202,7 +190,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
dev_err(&keypad_data->client->dev,
"unable to read REG_KEY_EVENT_A\n");
- input_sync(keypad_data->input);
+ input_sync(input);
}
/*
@@ -218,16 +206,18 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
if (error) {
dev_err(&keypad_data->client->dev,
"unable to read REG_INT_STAT\n");
- goto exit;
+ return IRQ_NONE;
}
+ if (!reg)
+ return IRQ_NONE;
+
if (reg & INT_STAT_OVR_FLOW_INT)
dev_warn(&keypad_data->client->dev, "overflow occurred\n");
if (reg & INT_STAT_K_INT)
tca8418_read_keypad(keypad_data);
-exit:
/* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
reg = 0xff;
error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
@@ -241,7 +231,8 @@ exit:
/*
* Configure the TCA8418 for keypad operation
*/
-static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+static int tca8418_configure(struct tca8418_keypad *keypad_data,
+ u32 rows, u32 cols)
{
int reg, error;
@@ -253,9 +244,8 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
/* Assemble a mask for row and column registers */
- reg = ~(~0 << keypad_data->rows);
- reg += (~(~0 << keypad_data->cols)) << 8;
- keypad_data->keypad_mask = reg;
+ reg = ~(~0 << rows);
+ reg += (~(~0 << cols)) << 8;
/* Set registers to keypad mode */
error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
@@ -270,145 +260,144 @@ static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
return error;
}
-static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+static int tca8418_keypad_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
const struct tca8418_keypad_platform_data *pdata =
- client->dev.platform_data;
+ dev_get_platdata(dev);
struct tca8418_keypad *keypad_data;
struct input_dev *input;
+ const struct matrix_keymap_data *keymap_data = NULL;
+ u32 rows = 0, cols = 0;
+ bool rep = false;
+ bool irq_is_gpio = false;
+ int irq;
int error, row_shift, max_keys;
/* Copy the platform data */
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
- }
-
- if (!pdata->keymap_data) {
- dev_err(&client->dev, "no keymap data defined\n");
- return -EINVAL;
+ if (pdata) {
+ if (!pdata->keymap_data) {
+ dev_err(dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+ keymap_data = pdata->keymap_data;
+ rows = pdata->rows;
+ cols = pdata->cols;
+ rep = pdata->rep;
+ irq_is_gpio = pdata->irq_is_gpio;
+ } else {
+ struct device_node *np = dev->of_node;
+ of_property_read_u32(np, "keypad,num-rows", &rows);
+ of_property_read_u32(np, "keypad,num-columns", &cols);
+ rep = of_property_read_bool(np, "keypad,autorepeat");
}
- if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
- dev_err(&client->dev, "invalid rows\n");
+ if (!rows || rows > TCA8418_MAX_ROWS) {
+ dev_err(dev, "invalid rows\n");
return -EINVAL;
}
- if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
- dev_err(&client->dev, "invalid columns\n");
+ if (!cols || cols > TCA8418_MAX_COLS) {
+ dev_err(dev, "invalid columns\n");
return -EINVAL;
}
/* Check i2c driver capabilities */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(&client->dev, "%s adapter not supported\n",
+ dev_err(dev, "%s adapter not supported\n",
dev_driver_string(&client->adapter->dev));
return -ENODEV;
}
- row_shift = get_count_order(pdata->cols);
- max_keys = pdata->rows << row_shift;
+ row_shift = get_count_order(cols);
+ max_keys = rows << row_shift;
- /* Allocate memory for keypad_data, keymap and input device */
- keypad_data = kzalloc(sizeof(*keypad_data) +
- max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ /* Allocate memory for keypad_data and input device */
+ keypad_data = devm_kzalloc(dev, sizeof(*keypad_data), GFP_KERNEL);
if (!keypad_data)
return -ENOMEM;
- keypad_data->rows = pdata->rows;
- keypad_data->cols = pdata->cols;
keypad_data->client = client;
keypad_data->row_shift = row_shift;
/* Initialize the chip or fail if chip isn't present */
- error = tca8418_configure(keypad_data);
+ error = tca8418_configure(keypad_data, rows, cols);
if (error < 0)
- goto fail1;
+ return error;
/* Configure input device */
- input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto fail1;
- }
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
keypad_data->input = input;
input->name = client->name;
- input->dev.parent = &client->dev;
-
input->id.bustype = BUS_I2C;
input->id.vendor = 0x0001;
input->id.product = 0x001;
input->id.version = 0x0001;
- error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
- pdata->rows, pdata->cols,
- keypad_data->keymap, input);
+ error = matrix_keypad_build_keymap(keymap_data, NULL, rows, cols,
+ NULL, input);
if (error) {
- dev_dbg(&client->dev, "Failed to build keymap\n");
- goto fail2;
+ dev_err(dev, "Failed to build keymap\n");
+ return error;
}
- if (pdata->rep)
+ if (rep)
__set_bit(EV_REP, input->evbit);
input_set_capability(input, EV_MSC, MSC_SCAN);
input_set_drvdata(input, keypad_data);
- if (pdata->irq_is_gpio)
- client->irq = gpio_to_irq(client->irq);
+ irq = client->irq;
+ if (irq_is_gpio)
+ irq = gpio_to_irq(irq);
- error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, keypad_data);
+ error = devm_request_threaded_irq(dev, irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_SHARED |
+ IRQF_ONESHOT,
+ client->name, keypad_data);
if (error) {
- dev_dbg(&client->dev,
- "Unable to claim irq %d; error %d\n",
+ dev_err(dev, "Unable to claim irq %d; error %d\n",
client->irq, error);
- goto fail2;
+ return error;
}
error = input_register_device(input);
if (error) {
- dev_dbg(&client->dev,
- "Unable to register input device, error: %d\n", error);
- goto fail3;
+ dev_err(dev, "Unable to register input device, error: %d\n",
+ error);
+ return error;
}
- i2c_set_clientdata(client, keypad_data);
return 0;
-
-fail3:
- free_irq(client->irq, keypad_data);
-fail2:
- input_free_device(input);
-fail1:
- kfree(keypad_data);
- return error;
}
-static int __devexit tca8418_keypad_remove(struct i2c_client *client)
-{
- struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
-
- free_irq(keypad_data->client->irq, keypad_data);
-
- input_unregister_device(keypad_data->input);
-
- kfree(keypad_data);
-
- return 0;
-}
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+#ifdef CONFIG_OF
+static const struct of_device_id tca8418_dt_ids[] = {
+ { .compatible = "ti,tca8418", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+#endif
static struct i2c_driver tca8418_keypad_driver = {
.driver = {
.name = TCA8418_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tca8418_dt_ids),
},
.probe = tca8418_keypad_probe,
- .remove = __devexit_p(tca8418_keypad_remove),
.id_table = tca8418_id,
};
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 5faaf2553e33..c76f96872d31 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -87,7 +87,7 @@ struct tegra_kbc {
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] __devinitdata = {
+static const u32 tegra_kbc_default_keymap[] = {
KEY(0, 2, KEY_W),
KEY(0, 3, KEY_S),
KEY(0, 4, KEY_A),
@@ -223,7 +223,7 @@ static const u32 tegra_kbc_default_keymap[] __devinitdata = {
};
static const
-struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
+struct matrix_keymap_data tegra_kbc_default_keymap_data = {
.keymap = tegra_kbc_default_keymap,
.keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
};
@@ -573,7 +573,7 @@ static void tegra_kbc_close(struct input_dev *dev)
return tegra_kbc_stop(kbc);
}
-static bool __devinit
+static bool
tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
struct device *dev, unsigned int *num_rows)
{
@@ -619,7 +619,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
}
#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data * __devinit tegra_kbc_dt_parse_pdata(
+static struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
struct platform_device *pdev)
{
struct tegra_kbc_platform_data *pdata;
@@ -670,7 +670,7 @@ static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
}
#endif
-static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
+static int tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
{
const struct tegra_kbc_platform_data *pdata = kbc->pdata;
const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
@@ -697,7 +697,7 @@ static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
return retval;
}
-static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+static int tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
struct tegra_kbc *kbc;
@@ -838,7 +838,7 @@ err_free_pdata:
return err;
}
-static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+static int tegra_kbc_remove(struct platform_device *pdev)
{
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
struct resource *res;
@@ -954,7 +954,7 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
- .remove = __devexit_p(tegra_kbc_remove),
+ .remove = tegra_kbc_remove,
.driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 4c34f21fbe2d..ee1635011292 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -153,7 +153,7 @@ static void keypad_stop(struct input_dev *dev)
clk_disable(kp->clk);
}
-static int __devinit keypad_probe(struct platform_device *pdev)
+static int keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
@@ -301,7 +301,7 @@ error_res:
return error;
}
-static int __devexit keypad_remove(struct platform_device *pdev)
+static int keypad_remove(struct platform_device *pdev)
{
struct keypad_data *kp = platform_get_drvdata(pdev);
@@ -319,7 +319,7 @@ static int __devexit keypad_remove(struct platform_device *pdev)
static struct platform_driver keypad_driver = {
.probe = keypad_probe,
- .remove = __devexit_p(keypad_remove),
+ .remove = keypad_remove,
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a2c6f79aa101..04f84fd57173 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -271,7 +271,7 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
return IRQ_HANDLED;
}
-static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
+static int twl4030_kp_program(struct twl4030_keypad *kp)
{
u8 reg;
int i;
@@ -328,7 +328,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
* Registers keypad device with input subsystem
* and configures TWL4030 keypad registers
*/
-static int __devinit twl4030_kp_probe(struct platform_device *pdev)
+static int twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
const struct matrix_keymap_data *keymap_data;
@@ -432,7 +432,7 @@ err1:
return error;
}
-static int __devexit twl4030_kp_remove(struct platform_device *pdev)
+static int twl4030_kp_remove(struct platform_device *pdev)
{
struct twl4030_keypad *kp = platform_get_drvdata(pdev);
@@ -452,7 +452,7 @@ static int __devexit twl4030_kp_remove(struct platform_device *pdev)
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
- .remove = __devexit_p(twl4030_kp_remove),
+ .remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index e0f6cd1ad0fd..ee163bee8cce 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -118,7 +118,7 @@ static void w90p910_keypad_close(struct input_dev *dev)
clk_disable(keypad->clk);
}
-static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
+static int w90p910_keypad_probe(struct platform_device *pdev)
{
const struct w90p910_keypad_platform_data *pdata =
pdev->dev.platform_data;
@@ -234,7 +234,7 @@ failed_free:
return error;
}
-static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
+static int w90p910_keypad_remove(struct platform_device *pdev)
{
struct w90p910_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
@@ -257,7 +257,7 @@ static int __devexit w90p910_keypad_remove(struct platform_device *pdev)
static struct platform_driver w90p910_keypad_driver = {
.probe = w90p910_keypad_probe,
- .remove = __devexit_p(w90p910_keypad_remove),
+ .remove = w90p910_keypad_remove,
.driver = {
.name = "nuc900-kpi",
.owner = THIS_MODULE,
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
index d88d9be1d1b7..3ae496ea5fe6 100644
--- a/drivers/input/matrix-keymap.c
+++ b/drivers/input/matrix-keymap.c
@@ -18,6 +18,7 @@
*/
#include <linux/device.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/input.h>
@@ -123,6 +124,11 @@ static int matrix_keypad_parse_of_keymap(const char *propname,
* it will attempt to load the keymap from the property specified by @keymap_name
* argument (or "linux,keymap" if @keymap_name is %NULL).
*
+ * If @keymap is %NULL the function will automatically allocate a managed
+ * block of memory to store the keymap. This memory will be associated with
+ * the parent device and automatically freed when the device unbinds from
+ * the driver.
+ *
* Callers are expected to set up input_dev->dev.parent before calling this
* function.
*/
@@ -133,12 +139,27 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
struct input_dev *input_dev)
{
unsigned int row_shift = get_count_order(cols);
+ size_t max_keys = rows << row_shift;
int i;
int error;
+ if (WARN_ON(!input_dev->dev.parent))
+ return -EINVAL;
+
+ if (!keymap) {
+ keymap = devm_kzalloc(input_dev->dev.parent,
+ max_keys * sizeof(*keymap),
+ GFP_KERNEL);
+ if (!keymap) {
+ dev_err(input_dev->dev.parent,
+ "Unable to allocate memory for keymap");
+ return -ENOMEM;
+ }
+ }
+
input_dev->keycode = keymap;
input_dev->keycodesize = sizeof(*keymap);
- input_dev->keycodemax = rows << row_shift;
+ input_dev->keycodemax = max_keys;
__set_bit(EV_KEY, input_dev->evbit);
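The matrix_keypad_build_keymap() change above lets callers pass a NULL @keymap and have the core allocate a device-managed keymap itself (the tca8418 conversion in this series relies on this). A caller sketch under that assumption; the surrounding variables are placeholders:

	/* input_dev->dev.parent must be set: the managed keymap is tied to it */
	input_dev->dev.parent = &pdev->dev;

	error = matrix_keypad_build_keymap(keymap_data, NULL, rows, cols,
					   NULL /* let the core allocate it */,
					   input_dev);
	if (error)
		return error;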
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
index 7f26e7b6c228..ee43e5b7c881 100644
--- a/drivers/input/misc/88pm80x_onkey.c
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t pm80x_onkey_handler(int irq, void *data)
static SIMPLE_DEV_PM_OPS(pm80x_onkey_pm_ops, pm80x_dev_suspend,
pm80x_dev_resume);
-static int __devinit pm80x_onkey_probe(struct platform_device *pdev)
+static int pm80x_onkey_probe(struct platform_device *pdev)
{
struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -139,7 +139,7 @@ out:
return err;
}
-static int __devexit pm80x_onkey_remove(struct platform_device *pdev)
+static int pm80x_onkey_remove(struct platform_device *pdev)
{
struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
@@ -157,7 +157,7 @@ static struct platform_driver pm80x_onkey_driver = {
.pm = &pm80x_onkey_pm_ops,
},
.probe = pm80x_onkey_probe,
- .remove = __devexit_p(pm80x_onkey_remove),
+ .remove = pm80x_onkey_remove,
};
module_platform_driver(pm80x_onkey_driver);
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index f9ce1835e4d7..abd8453e5212 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -56,7 +56,7 @@ static irqreturn_t pm860x_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
+static int pm860x_onkey_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_onkey_info *info;
@@ -121,7 +121,7 @@ out:
return ret;
}
-static int __devexit pm860x_onkey_remove(struct platform_device *pdev)
+static int pm860x_onkey_remove(struct platform_device *pdev)
{
struct pm860x_onkey_info *info = platform_get_drvdata(pdev);
@@ -161,7 +161,7 @@ static struct platform_driver pm860x_onkey_driver = {
.pm = &pm860x_onkey_pm_ops,
},
.probe = pm860x_onkey_probe,
- .remove = __devexit_p(pm860x_onkey_remove),
+ .remove = pm860x_onkey_remove,
};
module_platform_driver(pm860x_onkey_driver);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7c0f1ecfdd7a..259ef31abb18 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -72,6 +72,16 @@ config INPUT_AD714X_SPI
To compile this driver as a module, choose M here: the
module will be called ad714x-spi.
+config INPUT_ARIZONA_HAPTICS
+ tristate "Arizona haptics support"
+ depends on MFD_ARIZONA && SND_SOC
+ select INPUT_FF_MEMLESS
+ help
+ Say Y to enable support for the haptics module in Arizona CODECs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called arizona-haptics.
+
config INPUT_BMA150
tristate "BMA150/SMB380 acceleration sensor support"
depends on I2C
@@ -290,8 +300,7 @@ config INPUT_ATI_REMOTE2
called ati_remote2.
config INPUT_KEYSPAN_REMOTE
- tristate "Keyspan DMR USB remote control (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Keyspan DMR USB remote control"
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -340,7 +349,6 @@ config INPUT_POWERMATE
config INPUT_YEALINK
tristate "Yealink usb-p1k voip phone"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -356,7 +364,6 @@ config INPUT_YEALINK
config INPUT_CM109
tristate "C-Media CM109 USB I/O Controller"
- depends on EXPERIMENTAL
depends on USB_ARCH_HAS_HCD
select USB
help
@@ -367,6 +374,16 @@ config INPUT_CM109
To compile this driver as a module, choose M here: the module will be
called cm109.
+config INPUT_RETU_PWRBUTTON
+ tristate "Retu Power button Driver"
+ depends on MFD_RETU
+ help
+ Say Y here if you want to enable power key reporting via the
+ Retu chips found in Nokia Internet Tablets (770, N800, N810).
+
+ To compile this driver as a module, choose M here. The module will
+ be called retu-pwrbutton.
+
config INPUT_TWL4030_PWRBUTTON
tristate "TWL4030 Power button Driver"
depends on TWL4030_CORE
@@ -434,7 +451,7 @@ config INPUT_PCF50633_PMU
config INPUT_PCF8574
tristate "PCF8574 Keypad input device"
- depends on I2C && EXPERIMENTAL
+ depends on I2C
help
Say Y here if you want to support a keypad connected via I2C
with a PCF8574.
@@ -444,7 +461,7 @@ config INPUT_PCF8574
config INPUT_PWM_BEEPER
tristate "PWM beeper support"
- depends on HAVE_PWM
+ depends on HAVE_PWM || PWM
help
Say Y here to get support for PWM based beeper devices.
@@ -486,6 +503,16 @@ config INPUT_DA9052_ONKEY
To compile this driver as a module, choose M here: the
module will be called da9052_onkey.
+config INPUT_DA9055_ONKEY
+ tristate "Dialog Semiconductor DA9055 ONKEY"
+ depends on MFD_DA9055
+ help
+ Support the ONKEY of DA9055 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called da9055_onkey.
+
config INPUT_DM355EVM
tristate "TI DaVinci DM355 EVM Keypad and IR Remote"
depends on MFD_DM355EVM_MSP
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 83fe6f5b77d1..1f1e1b109d9d 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o
obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o
obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
+obj-$(CONFIG_INPUT_ARIZONA_HAPTICS) += arizona-haptics.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DA9052_ONKEY) += da9052_onkey.o
+obj-$(CONFIG_INPUT_DA9055_ONKEY) += da9055_onkey.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
@@ -45,6 +47,7 @@ obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
+obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 84ec691c05aa..2f090b46e716 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -45,7 +45,7 @@ static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
+static int ab8500_ponkey_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_ponkey *ponkey;
@@ -118,7 +118,7 @@ err_free_mem:
return error;
}
-static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
+static int ab8500_ponkey_remove(struct platform_device *pdev)
{
struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
@@ -146,7 +146,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.of_match_table = of_match_ptr(ab8500_ponkey_match),
},
.probe = ab8500_ponkey_probe,
- .remove = __devexit_p(ab8500_ponkey_remove),
+ .remove = ab8500_ponkey_remove,
};
module_platform_driver(ab8500_ponkey_driver);
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index c8a79015472a..29d2064c26f2 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -72,7 +72,7 @@ static int ad714x_i2c_read(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+static int ad714x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad714x_chip *chip;
@@ -87,7 +87,7 @@ static int __devinit ad714x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+static int ad714x_i2c_remove(struct i2c_client *client)
{
struct ad714x_chip *chip = i2c_get_clientdata(client);
@@ -112,7 +112,7 @@ static struct i2c_driver ad714x_i2c_driver = {
.pm = &ad714x_i2c_pm,
},
.probe = ad714x_i2c_probe,
- .remove = __devexit_p(ad714x_i2c_remove),
+ .remove = ad714x_i2c_remove,
.id_table = ad714x_id,
};
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 75f6136d608e..bdccca42d138 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -83,7 +83,7 @@ static int ad714x_spi_write(struct ad714x_chip *chip,
return 0;
}
-static int __devinit ad714x_spi_probe(struct spi_device *spi)
+static int ad714x_spi_probe(struct spi_device *spi)
{
struct ad714x_chip *chip;
int err;
@@ -103,7 +103,7 @@ static int __devinit ad714x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad714x_spi_remove(struct spi_device *spi)
+static int ad714x_spi_remove(struct spi_device *spi)
{
struct ad714x_chip *chip = spi_get_drvdata(spi);
@@ -120,7 +120,7 @@ static struct spi_driver ad714x_spi_driver = {
.pm = &ad714x_spi_pm,
},
.probe = ad714x_spi_probe,
- .remove = __devexit_p(ad714x_spi_remove),
+ .remove = ad714x_spi_remove,
};
module_spi_driver(ad714x_spi_driver);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index dd1d1c145a7f..535dda48cace 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -73,7 +73,7 @@ static const struct adxl34x_bus_ops adxl34x_i2c_bops = {
.read_block = adxl34x_i2c_read_block,
};
-static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
+static int adxl34x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adxl34x *ac;
@@ -98,7 +98,7 @@ static int __devinit adxl34x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit adxl34x_i2c_remove(struct i2c_client *client)
+static int adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
@@ -144,7 +144,7 @@ static struct i2c_driver adxl34x_driver = {
.pm = &adxl34x_i2c_pm,
},
.probe = adxl34x_i2c_probe,
- .remove = __devexit_p(adxl34x_i2c_remove),
+ .remove = adxl34x_i2c_remove,
.id_table = adxl34x_id,
};
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index 820a802a1e6e..ad5f40d37e48 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -65,7 +65,7 @@ static const struct adxl34x_bus_ops adxl34x_spi_bops = {
.read_block = adxl34x_spi_read_block,
};
-static int __devinit adxl34x_spi_probe(struct spi_device *spi)
+static int adxl34x_spi_probe(struct spi_device *spi)
{
struct adxl34x *ac;
@@ -87,7 +87,7 @@ static int __devinit adxl34x_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit adxl34x_spi_remove(struct spi_device *spi)
+static int adxl34x_spi_remove(struct spi_device *spi)
{
struct adxl34x *ac = dev_get_drvdata(&spi->dev);
@@ -126,7 +126,7 @@ static struct spi_driver adxl34x_driver = {
.pm = &adxl34x_spi_pm,
},
.probe = adxl34x_spi_probe,
- .remove = __devexit_p(adxl34x_spi_remove),
+ .remove = adxl34x_spi_remove,
};
module_spi_driver(adxl34x_driver);
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
new file mode 100644
index 000000000000..7a04f54ef961
--- /dev/null
+++ b/drivers/input/misc/arizona-haptics.c
@@ -0,0 +1,255 @@
+/*
+ * Arizona haptics driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+struct arizona_haptics {
+ struct arizona *arizona;
+ struct input_dev *input_dev;
+ struct work_struct work;
+
+ struct mutex mutex;
+ u8 intensity;
+};
+
+static void arizona_haptics_work(struct work_struct *work)
+{
+ struct arizona_haptics *haptics = container_of(work,
+ struct arizona_haptics,
+ work);
+ struct arizona *arizona = haptics->arizona;
+ struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
+ int ret;
+
+ if (!haptics->arizona->dapm) {
+ dev_err(arizona->dev, "No DAPM context\n");
+ return;
+ }
+
+ if (haptics->intensity) {
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_PHASE_2_INTENSITY,
+ ARIZONA_PHASE2_INTENSITY_MASK,
+ haptics->intensity);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set intensity: %d\n",
+ ret);
+ return;
+ }
+
+ /* This enable sequence will be a noop if already enabled */
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
+ 1 << ARIZONA_HAP_CTRL_SHIFT);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start haptics: %d\n",
+ ret);
+ return;
+ }
+
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ mutex_unlock(dapm_mutex);
+
+ } else {
+ /* This disable sequence will be a noop if already disabled */
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+ mutex_unlock(dapm_mutex);
+ return;
+ }
+
+ mutex_unlock(dapm_mutex);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
+ 1 << ARIZONA_HAP_CTRL_SHIFT);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to stop haptics: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
+static int arizona_haptics_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct arizona *arizona = haptics->arizona;
+
+ if (!arizona->dapm) {
+ dev_err(arizona->dev, "No DAPM context\n");
+ return -EBUSY;
+ }
+
+ if (effect->u.rumble.strong_magnitude) {
+ /* Scale the magnitude into the range the device supports */
+ if (arizona->pdata.hap_act) {
+ haptics->intensity =
+ effect->u.rumble.strong_magnitude >> 9;
+ if (effect->direction < 0x8000)
+ haptics->intensity += 0x7f;
+ } else {
+ haptics->intensity =
+ effect->u.rumble.strong_magnitude >> 8;
+ }
+ } else {
+ haptics->intensity = 0;
+ }
+
+ schedule_work(&haptics->work);
+
+ return 0;
+}
+
+static void arizona_haptics_close(struct input_dev *input)
+{
+ struct arizona_haptics *haptics = input_get_drvdata(input);
+ struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
+
+ cancel_work_sync(&haptics->work);
+
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ if (haptics->arizona->dapm)
+ snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+
+ mutex_unlock(dapm_mutex);
+}
+
+static int arizona_haptics_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct arizona_haptics *haptics;
+ int ret;
+
+ haptics = devm_kzalloc(&pdev->dev, sizeof(*haptics), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->arizona = arizona;
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_ACT, arizona->pdata.hap_act);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set haptics actuator: %d\n",
+ ret);
+ return ret;
+ }
+
+ INIT_WORK(&haptics->work, arizona_haptics_work);
+
+ haptics->input_dev = input_allocate_device();
+ if (haptics->input_dev == NULL) {
+ dev_err(arizona->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(haptics->input_dev, haptics);
+
+ haptics->input_dev->name = "arizona:haptics";
+ haptics->input_dev->dev.parent = pdev->dev.parent;
+ haptics->input_dev->close = arizona_haptics_close;
+ __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
+
+ ret = input_ff_create_memless(haptics->input_dev, NULL,
+ arizona_haptics_play);
+ if (ret < 0) {
+ dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
+ ret);
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(haptics->input_dev);
+ if (ret < 0) {
+ dev_err(arizona->dev, "couldn't register input device: %d\n",
+ ret);
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, haptics);
+
+ return 0;
+
+err_iff:
+ if (haptics->input_dev)
+ input_ff_destroy(haptics->input_dev);
+err_ialloc:
+ input_free_device(haptics->input_dev);
+
+ return ret;
+}
+
+static int arizona_haptics_remove(struct platform_device *pdev)
+{
+ struct arizona_haptics *haptics = platform_get_drvdata(pdev);
+
+ input_unregister_device(haptics->input_dev);
+
+ return 0;
+}
+
+static struct platform_driver arizona_haptics_driver = {
+ .probe = arizona_haptics_probe,
+ .remove = arizona_haptics_remove,
+ .driver = {
+ .name = "arizona-haptics",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(arizona_haptics_driver);
+
+MODULE_ALIAS("platform:arizona-haptics");
+MODULE_DESCRIPTION("Arizona haptics driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
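The new arizona-haptics driver registers a memless FF_RUMBLE force-feedback device. For reference, a userspace sketch of exercising such a device through the standard evdev force-feedback interface; this is not part of the patch and the event-device path is hypothetical:

/* Userspace sketch (standard evdev force-feedback API), not part of this patch. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int rumble(const char *evdev_path)	/* e.g. "/dev/input/eventN" */
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open(evdev_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* let the kernel assign an id */
	effect.u.rumble.strong_magnitude = 0x8000;
	effect.replay.length = 200;		/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0) {	/* upload the effect */
		close(fd);
		return -1;
	}

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play)) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}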
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 1c4146fccfdf..a6666e142a91 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -90,7 +90,7 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit bfin_rotary_probe(struct platform_device *pdev)
+static int bfin_rotary_probe(struct platform_device *pdev)
{
struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
struct bfin_rot *rotary;
@@ -196,7 +196,7 @@ out1:
return error;
}
-static int __devexit bfin_rotary_remove(struct platform_device *pdev)
+static int bfin_rotary_remove(struct platform_device *pdev)
{
struct bfin_rot *rotary = platform_get_drvdata(pdev);
@@ -255,7 +255,7 @@ static const struct dev_pm_ops bfin_rotary_pm_ops = {
static struct platform_driver bfin_rotary_device_driver = {
.probe = bfin_rotary_probe,
- .remove = __devexit_p(bfin_rotary_remove),
+ .remove = bfin_rotary_remove,
.driver = {
.name = "bfin-rotary",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index e2f1e9f952b1..08ffcabd7220 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -158,7 +158,7 @@ struct bma150_data {
* are stated and verified by Bosch Sensortec where they are configured
* to provide a generic sensitivity performance.
*/
-static struct bma150_cfg default_cfg __devinitdata = {
+static struct bma150_cfg default_cfg = {
.any_motion_int = 1,
.hg_int = 1,
.lg_int = 1,
@@ -224,7 +224,7 @@ static int bma150_set_mode(struct bma150_data *bma150, u8 mode)
return 0;
}
-static int __devinit bma150_soft_reset(struct bma150_data *bma150)
+static int bma150_soft_reset(struct bma150_data *bma150)
{
int error;
@@ -237,19 +237,19 @@ static int __devinit bma150_soft_reset(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_set_range(struct bma150_data *bma150, u8 range)
+static int bma150_set_range(struct bma150_data *bma150, u8 range)
{
return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS,
BMA150_RANGE_MSK, BMA150_RANGE_REG);
}
-static int __devinit bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
+static int bma150_set_bandwidth(struct bma150_data *bma150, u8 bw)
{
return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS,
BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG);
}
-static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_low_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -273,7 +273,7 @@ static int __devinit bma150_set_low_g_interrupt(struct bma150_data *bma150,
BMA150_LOW_G_EN_REG);
}
-static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
+static int bma150_set_high_g_interrupt(struct bma150_data *bma150,
u8 enable, u8 hyst, u8 dur, u8 thres)
{
int error;
@@ -300,7 +300,7 @@ static int __devinit bma150_set_high_g_interrupt(struct bma150_data *bma150,
}
-static int __devinit bma150_set_any_motion_interrupt(struct bma150_data *bma150,
+static int bma150_set_any_motion_interrupt(struct bma150_data *bma150,
u8 enable, u8 dur, u8 thres)
{
int error;
@@ -424,7 +424,7 @@ static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
bma150_close(bma150);
}
-static int __devinit bma150_initialize(struct bma150_data *bma150,
+static int bma150_initialize(struct bma150_data *bma150,
const struct bma150_cfg *cfg)
{
int error;
@@ -465,7 +465,7 @@ static int __devinit bma150_initialize(struct bma150_data *bma150,
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static void __devinit bma150_init_input_device(struct bma150_data *bma150,
+static void bma150_init_input_device(struct bma150_data *bma150,
struct input_dev *idev)
{
idev->name = BMA150_DRIVER;
@@ -479,7 +479,7 @@ static void __devinit bma150_init_input_device(struct bma150_data *bma150,
input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
}
-static int __devinit bma150_register_input_device(struct bma150_data *bma150)
+static int bma150_register_input_device(struct bma150_data *bma150)
{
struct input_dev *idev;
int error;
@@ -504,7 +504,7 @@ static int __devinit bma150_register_input_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
+static int bma150_register_polled_device(struct bma150_data *bma150)
{
struct input_polled_dev *ipoll_dev;
int error;
@@ -535,7 +535,7 @@ static int __devinit bma150_register_polled_device(struct bma150_data *bma150)
return 0;
}
-static int __devinit bma150_probe(struct i2c_client *client,
+static int bma150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct bma150_platform_data *pdata = client->dev.platform_data;
@@ -613,7 +613,7 @@ err_free_mem:
return error;
}
-static int __devexit bma150_remove(struct i2c_client *client)
+static int bma150_remove(struct i2c_client *client)
{
struct bma150_data *bma150 = i2c_get_clientdata(client);
@@ -670,7 +670,7 @@ static struct i2c_driver bma150_driver = {
.class = I2C_CLASS_HWMON,
.id_table = bma150_id,
.probe = bma150_probe,
- .remove = __devexit_p(bma150_remove),
+ .remove = bma150_remove,
};
module_i2c_driver(bma150_driver);
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index fe9b85f07792..4fdef98ceb56 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -55,7 +55,7 @@ static const struct cma3000_bus_ops cma3000_i2c_bops = {
.write = cma3000_i2c_set,
};
-static int __devinit cma3000_i2c_probe(struct i2c_client *client,
+static int cma3000_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cma3000_accl_data *data;
@@ -69,7 +69,7 @@ static int __devinit cma3000_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cma3000_i2c_remove(struct i2c_client *client)
+static int cma3000_i2c_remove(struct i2c_client *client)
{
struct cma3000_accl_data *data = i2c_get_clientdata(client);
@@ -114,7 +114,7 @@ MODULE_DEVICE_TABLE(i2c, cma3000_i2c_id);
static struct i2c_driver cma3000_i2c_driver = {
.probe = cma3000_i2c_probe,
- .remove = __devexit_p(cma3000_i2c_remove),
+ .remove = cma3000_i2c_remove,
.id_table = cma3000_i2c_id,
.driver = {
.name = "cma3000_i2c_accl",
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 53e43d295148..4f77f87847e8 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -73,7 +73,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
+static int cobalt_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -135,7 +135,7 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit cobalt_buttons_remove(struct platform_device *pdev)
+static int cobalt_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -157,7 +157,7 @@ MODULE_ALIAS("platform:Cobalt buttons");
static struct platform_driver cobalt_buttons_driver = {
.probe = cobalt_buttons_probe,
- .remove = __devexit_p(cobalt_buttons_remove),
+ .remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 3c843cd725fa..020569a499f2 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -24,7 +24,6 @@ struct da9052_onkey {
struct da9052 *da9052;
struct input_dev *input;
struct delayed_work work;
- unsigned int irq;
};
static void da9052_onkey_query(struct da9052_onkey *onkey)
@@ -71,12 +70,11 @@ static irqreturn_t da9052_onkey_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit da9052_onkey_probe(struct platform_device *pdev)
+static int da9052_onkey_probe(struct platform_device *pdev)
{
struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
struct da9052_onkey *onkey;
struct input_dev *input_dev;
- int irq;
int error;
if (!da9052) {
@@ -84,13 +82,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return -EINVAL;
}
- irq = platform_get_irq_byname(pdev, "ONKEY");
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Failed to get an IRQ for input device, %d\n", irq);
- return -EINVAL;
- }
-
onkey = kzalloc(sizeof(*onkey), GFP_KERNEL);
input_dev = input_allocate_device();
if (!onkey || !input_dev) {
@@ -101,7 +92,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
onkey->input = input_dev;
onkey->da9052 = da9052;
- onkey->irq = irq;
INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);
input_dev->name = "da9052-onkey";
@@ -111,13 +101,11 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY);
__set_bit(KEY_POWER, input_dev->keybit);
- error = request_threaded_irq(onkey->irq, NULL, da9052_onkey_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ONKEY", onkey);
+ error = da9052_request_irq(onkey->da9052, DA9052_IRQ_NONKEY, "ONKEY",
+ da9052_onkey_irq, onkey);
if (error < 0) {
dev_err(onkey->da9052->dev,
- "Failed to register ONKEY IRQ %d, error = %d\n",
- onkey->irq, error);
+ "Failed to register ONKEY IRQ: %d\n", error);
goto err_free_mem;
}
@@ -132,7 +120,7 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
return 0;
err_free_irq:
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
err_free_mem:
input_free_device(input_dev);
@@ -141,11 +129,11 @@ err_free_mem:
return error;
}
-static int __devexit da9052_onkey_remove(struct platform_device *pdev)
+static int da9052_onkey_remove(struct platform_device *pdev)
{
struct da9052_onkey *onkey = platform_get_drvdata(pdev);
- free_irq(onkey->irq, onkey);
+ da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
cancel_delayed_work_sync(&onkey->work);
input_unregister_device(onkey->input);
@@ -156,7 +144,7 @@ static int __devexit da9052_onkey_remove(struct platform_device *pdev)
static struct platform_driver da9052_onkey_driver = {
.probe = da9052_onkey_probe,
- .remove = __devexit_p(da9052_onkey_remove),
+ .remove = da9052_onkey_remove,
.driver = {
.name = "da9052-onkey",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
new file mode 100644
index 000000000000..ee6ae3a00174
--- /dev/null
+++ b/drivers/input/misc/da9055_onkey.c
@@ -0,0 +1,171 @@
+/*
+ * ON pin driver for Dialog DA9055 PMICs
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9055/core.h>
+#include <linux/mfd/da9055/reg.h>
+
+struct da9055_onkey {
+ struct da9055 *da9055;
+ struct input_dev *input;
+ struct delayed_work work;
+};
+
+static void da9055_onkey_query(struct da9055_onkey *onkey)
+{
+ int key_stat;
+
+ key_stat = da9055_reg_read(onkey->da9055, DA9055_REG_STATUS_A);
+ if (key_stat < 0) {
+ dev_err(onkey->da9055->dev,
+ "Failed to read onkey event %d\n", key_stat);
+ } else {
+ key_stat &= DA9055_NOKEY_STS;
+ /*
+	 * Onkey status bit is cleared when onkey button is released.
+ */
+ if (!key_stat) {
+ input_report_key(onkey->input, KEY_POWER, 0);
+ input_sync(onkey->input);
+ }
+ }
+
+ /*
+ * Interrupt is generated only when the ONKEY pin is asserted.
+	 * Hence the deassertion of the pin is simulated through a work queue.
+ */
+ if (key_stat)
+ schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
+
+}
+
+static void da9055_onkey_work(struct work_struct *work)
+{
+ struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
+ work.work);
+
+ da9055_onkey_query(onkey);
+}
+
+static irqreturn_t da9055_onkey_irq(int irq, void *data)
+{
+ struct da9055_onkey *onkey = data;
+
+ input_report_key(onkey->input, KEY_POWER, 1);
+ input_sync(onkey->input);
+
+ da9055_onkey_query(onkey);
+
+ return IRQ_HANDLED;
+}
+
+static int da9055_onkey_probe(struct platform_device *pdev)
+{
+ struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
+ struct da9055_onkey *onkey;
+ struct input_dev *input_dev;
+ int irq, err;
+
+ irq = platform_get_irq_byname(pdev, "ONKEY");
+ if (irq < 0) {
+ dev_err(&pdev->dev,
+ "Failed to get an IRQ for input device, %d\n", irq);
+ return -EINVAL;
+ }
+
+ onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onkey->input = input_dev;
+ onkey->da9055 = da9055;
+ input_dev->name = "da9055-onkey";
+ input_dev->phys = "da9055-onkey/input0";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ __set_bit(KEY_POWER, input_dev->keybit);
+
+ INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
+
+ irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ONKEY", onkey);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to register ONKEY IRQ %d, error = %d\n",
+ irq, err);
+ goto err_free_input;
+ }
+
+ err = input_register_device(input_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to register input device, %d\n",
+ err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, onkey);
+
+ return 0;
+
+err_free_irq:
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+err_free_input:
+ input_free_device(input_dev);
+
+ return err;
+}
+
+static int da9055_onkey_remove(struct platform_device *pdev)
+{
+ struct da9055_onkey *onkey = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, "ONKEY");
+
+ irq = regmap_irq_get_virq(onkey->da9055->irq_data, irq);
+ free_irq(irq, onkey);
+ cancel_delayed_work_sync(&onkey->work);
+ input_unregister_device(onkey->input);
+
+ return 0;
+}
+
+static struct platform_driver da9055_onkey_driver = {
+ .probe = da9055_onkey_probe,
+ .remove = da9055_onkey_remove,
+ .driver = {
+ .name = "da9055-onkey",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9055_onkey_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Onkey driver for DA9055");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9055-onkey");
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index c1313d8535c3..a309a5c0899e 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -173,7 +173,7 @@ static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
/*----------------------------------------------------------------------*/
-static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
+static int dm355evm_keys_probe(struct platform_device *pdev)
{
struct dm355evm_keys *keys;
struct input_dev *input;
@@ -239,7 +239,7 @@ fail1:
return status;
}
-static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
+static int dm355evm_keys_remove(struct platform_device *pdev)
{
struct dm355evm_keys *keys = platform_get_drvdata(pdev);
@@ -262,7 +262,7 @@ static int __devexit dm355evm_keys_remove(struct platform_device *pdev)
*/
static struct platform_driver dm355evm_keys_driver = {
.probe = dm355evm_keys_probe,
- .remove = __devexit_p(dm355evm_keys_remove),
+ .remove = dm355evm_keys_remove,
.driver = {
.owner = THIS_MODULE,
.name = "dm355evm_keys",
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index b6664cfa340a..fe30bd0fe4bd 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -98,7 +98,7 @@ static void gp2a_device_close(struct input_dev *dev)
"unable to deactivate, err %d\n", error);
}
-static int __devinit gp2a_initialize(struct gp2a_data *dt)
+static int gp2a_initialize(struct gp2a_data *dt)
{
int error;
@@ -122,7 +122,7 @@ static int __devinit gp2a_initialize(struct gp2a_data *dt)
return error;
}
-static int __devinit gp2a_probe(struct i2c_client *client,
+static int gp2a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct gp2a_platform_data *pdata = client->dev.platform_data;
@@ -205,7 +205,7 @@ err_hw_shutdown:
return error;
}
-static int __devexit gp2a_remove(struct i2c_client *client)
+static int gp2a_remove(struct i2c_client *client)
{
struct gp2a_data *dt = i2c_get_clientdata(client);
const struct gp2a_platform_data *pdata = dt->pdata;
@@ -277,7 +277,7 @@ static struct i2c_driver gp2a_i2c_driver = {
.pm = &gp2a_pm,
},
.probe = gp2a_probe,
- .remove = __devexit_p(gp2a_remove),
+ .remove = gp2a_remove,
.id_table = gp2a_i2c_id,
};
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
index 277a0574c199..da05cca8b562 100644
--- a/drivers/input/misc/gpio_tilt_polled.c
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -96,7 +96,7 @@ static void gpio_tilt_polled_close(struct input_polled_dev *dev)
pdata->disable(tdev->dev);
}
-static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+static int gpio_tilt_polled_probe(struct platform_device *pdev)
{
const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -179,7 +179,7 @@ err_free_tdev:
return error;
}
-static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+static int gpio_tilt_polled_remove(struct platform_device *pdev)
{
struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
const struct gpio_tilt_platform_data *pdata = tdev->pdata;
@@ -198,7 +198,7 @@ static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
static struct platform_driver gpio_tilt_polled_driver = {
.probe = gpio_tilt_polled_probe,
- .remove = __devexit_p(gpio_tilt_polled_remove),
+ .remove = gpio_tilt_polled_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 50e283068301..6ab3decc86e6 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -87,7 +87,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
+static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -132,7 +132,7 @@ static int __devinit ixp4xx_spkr_probe(struct platform_device *dev)
return err;
}
-static int __devexit ixp4xx_spkr_remove(struct platform_device *dev)
+static int ixp4xx_spkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
@@ -165,7 +165,7 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ixp4xx_spkr_probe,
- .remove = __devexit_p(ixp4xx_spkr_remove),
+ .remove = ixp4xx_spkr_remove,
.shutdown = ixp4xx_spkr_shutdown,
};
module_platform_driver(ixp4xx_spkr_platform_driver);
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index f46139f19ff1..a993b67a8a5b 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -295,7 +295,7 @@ static void kxtj9_input_close(struct input_dev *dev)
kxtj9_disable(tj9);
}
-static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
+static void kxtj9_init_input_device(struct kxtj9_data *tj9,
struct input_dev *input_dev)
{
__set_bit(EV_ABS, input_dev->evbit);
@@ -308,7 +308,7 @@ static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
input_dev->dev.parent = &tj9->client->dev;
}
-static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_input_device(struct kxtj9_data *tj9)
{
struct input_dev *input_dev;
int err;
@@ -433,7 +433,7 @@ static void kxtj9_polled_input_close(struct input_polled_dev *dev)
kxtj9_disable(tj9);
}
-static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+static int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
{
int err;
struct input_polled_dev *poll_dev;
@@ -466,7 +466,7 @@ static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
return 0;
}
-static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
+static void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
{
input_unregister_polled_device(tj9->poll_dev);
input_free_polled_device(tj9->poll_dev);
@@ -485,7 +485,7 @@ static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
#endif
-static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
+static int kxtj9_verify(struct kxtj9_data *tj9)
{
int retval;
@@ -506,7 +506,7 @@ out:
return retval;
}
-static int __devinit kxtj9_probe(struct i2c_client *client,
+static int kxtj9_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct kxtj9_platform_data *pdata = client->dev.platform_data;
@@ -594,7 +594,7 @@ err_free_mem:
return err;
}
-static int __devexit kxtj9_remove(struct i2c_client *client)
+static int kxtj9_remove(struct i2c_client *client)
{
struct kxtj9_data *tj9 = i2c_get_clientdata(client);
@@ -663,7 +663,7 @@ static struct i2c_driver kxtj9_driver = {
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
- .remove = __devexit_p(kxtj9_remove),
+ .remove = kxtj9_remove,
.id_table = kxtj9_id,
};
diff --git a/drivers/input/misc/m68kspkr.c b/drivers/input/misc/m68kspkr.c
index 0c64d9bb718e..b40ee4b47f4f 100644
--- a/drivers/input/misc/m68kspkr.c
+++ b/drivers/input/misc/m68kspkr.c
@@ -48,7 +48,7 @@ static int m68kspkr_event(struct input_dev *dev, unsigned int type, unsigned int
return 0;
}
-static int __devinit m68kspkr_probe(struct platform_device *dev)
+static int m68kspkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
int err;
@@ -80,7 +80,7 @@ static int __devinit m68kspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit m68kspkr_remove(struct platform_device *dev)
+static int m68kspkr_remove(struct platform_device *dev)
{
struct input_dev *input_dev = platform_get_drvdata(dev);
@@ -104,7 +104,7 @@ static struct platform_driver m68kspkr_platform_driver = {
.owner = THIS_MODULE,
},
.probe = m68kspkr_probe,
- .remove = __devexit_p(m68kspkr_remove),
+ .remove = m68kspkr_remove,
.shutdown = m68kspkr_shutdown,
};
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 0a12b74140d3..369a39de4ff3 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -62,7 +62,7 @@ static irqreturn_t max8925_onkey_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+static int max8925_onkey_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_onkey_info *info;
@@ -141,7 +141,7 @@ err_free_mem:
return error;
}
-static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+static int max8925_onkey_remove(struct platform_device *pdev)
{
struct max8925_onkey_info *info = platform_get_drvdata(pdev);
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -195,7 +195,7 @@ static struct platform_driver max8925_onkey_driver = {
.pm = &max8925_onkey_pm_ops,
},
.probe = max8925_onkey_probe,
- .remove = __devexit_p(max8925_onkey_remove),
+ .remove = max8925_onkey_remove,
};
module_platform_driver(max8925_onkey_driver);
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 05b7b8bfaf0a..e973133212a5 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -241,7 +241,7 @@ static void max8997_haptic_close(struct input_dev *dev)
max8997_haptic_disable(chip);
}
-static int __devinit max8997_haptic_probe(struct platform_device *pdev)
+static int max8997_haptic_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
@@ -354,7 +354,7 @@ err_free_mem:
return error;
}
-static int __devexit max8997_haptic_remove(struct platform_device *pdev)
+static int max8997_haptic_remove(struct platform_device *pdev)
{
struct max8997_haptic *chip = platform_get_drvdata(pdev);
@@ -396,7 +396,7 @@ static struct platform_driver max8997_haptic_driver = {
.pm = &max8997_haptic_pm_ops,
},
.probe = max8997_haptic_probe,
- .remove = __devexit_p(max8997_haptic_remove),
+ .remove = max8997_haptic_remove,
.id_table = max8997_haptic_id,
};
module_platform_driver(max8997_haptic_driver);
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 8428f1e8e83e..0906ca593d5f 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -89,7 +89,7 @@ static irqreturn_t button_irq(int irq, void *_priv)
return IRQ_HANDLED;
}
-static int __devinit mc13783_pwrbutton_probe(struct platform_device *pdev)
+static int mc13783_pwrbutton_probe(struct platform_device *pdev)
{
const struct mc13xxx_buttons_platform_data *pdata;
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
@@ -230,7 +230,7 @@ free_input_dev:
return err;
}
-static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
+static int mc13783_pwrbutton_remove(struct platform_device *pdev)
{
struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
const struct mc13xxx_buttons_platform_data *pdata;
@@ -257,7 +257,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
- .remove = __devexit_p(mc13783_pwrbutton_remove),
+ .remove = mc13783_pwrbutton_remove,
.driver = {
.name = "mc13783-pwrbutton",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 873ebced544e..480557f14f23 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -167,7 +167,7 @@ static void mma8450_close(struct input_polled_dev *dev)
/*
* I2C init/probing/exit functions
*/
-static int __devinit mma8450_probe(struct i2c_client *c,
+static int mma8450_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
struct input_polled_dev *idev;
@@ -212,7 +212,7 @@ err_free_mem:
return err;
}
-static int __devexit mma8450_remove(struct i2c_client *c)
+static int mma8450_remove(struct i2c_client *c)
{
struct mma8450 *m = i2c_get_clientdata(c);
struct input_polled_dev *idev = m->idev;
@@ -243,7 +243,7 @@ static struct i2c_driver mma8450_driver = {
.of_match_table = mma8450_dt_ids,
},
.probe = mma8450_probe,
- .remove = __devexit_p(mma8450_remove),
+ .remove = mma8450_remove,
.id_table = mma8450_id,
};
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 306f84c2d8fb..dce0d95943c5 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -257,7 +257,7 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
*
* Called during device probe; configures the sampling method.
*/
-static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+static int mpu3050_hw_init(struct mpu3050_sensor *sensor)
{
struct i2c_client *client = sensor->client;
int ret;
@@ -306,7 +306,7 @@ static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
*
* If present install the relevant sysfs interfaces and input device.
*/
-static int __devinit mpu3050_probe(struct i2c_client *client,
+static int mpu3050_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mpu3050_sensor *sensor;
@@ -402,7 +402,7 @@ err_free_mem:
*
* Our sensor is going away, clean up the resources.
*/
-static int __devexit mpu3050_remove(struct i2c_client *client)
+static int mpu3050_remove(struct i2c_client *client)
{
struct mpu3050_sensor *sensor = i2c_get_clientdata(client);
@@ -471,7 +471,7 @@ static struct i2c_driver mpu3050_i2c_driver = {
.of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
- .remove = __devexit_p(mpu3050_remove),
+ .remove = mpu3050_remove,
.id_table = mpu3050_ids,
};
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index e09b4fe81913..40ac9a5adf89 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -48,7 +48,7 @@ static irqreturn_t pcap_keys_handler(int irq, void *_pcap_keys)
return IRQ_HANDLED;
}
-static int __devinit pcap_keys_probe(struct platform_device *pdev)
+static int pcap_keys_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
struct pcap_keys *pcap_keys;
@@ -104,7 +104,7 @@ fail:
return err;
}
-static int __devexit pcap_keys_remove(struct platform_device *pdev)
+static int pcap_keys_remove(struct platform_device *pdev)
{
struct pcap_keys *pcap_keys = platform_get_drvdata(pdev);
@@ -119,7 +119,7 @@ static int __devexit pcap_keys_remove(struct platform_device *pdev)
static struct platform_driver pcap_keys_device_driver = {
.probe = pcap_keys_probe,
- .remove = __devexit_p(pcap_keys_remove),
+ .remove = pcap_keys_remove,
.driver = {
.name = "pcap-keys",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 53891de80b0e..73b13ebabe56 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -53,7 +53,7 @@ pcf50633_input_irq(int irq, void *data)
input_sync(input->input_dev);
}
-static int __devinit pcf50633_input_probe(struct platform_device *pdev)
+static int pcf50633_input_probe(struct platform_device *pdev)
{
struct pcf50633_input *input;
struct input_dev *input_dev;
@@ -93,7 +93,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_input_remove(struct platform_device *pdev)
+static int pcf50633_input_remove(struct platform_device *pdev)
{
struct pcf50633_input *input = platform_get_drvdata(pdev);
@@ -111,7 +111,7 @@ static struct platform_driver pcf50633_input_driver = {
.name = "pcf50633-input",
},
.probe = pcf50633_input_probe,
- .remove = __devexit_p(pcf50633_input_remove),
+ .remove = pcf50633_input_remove,
};
module_platform_driver(pcf50633_input_driver);
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 544c6635abe9..e37392976fdd 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -82,7 +82,7 @@ static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int i, ret;
struct input_dev *idev;
@@ -156,7 +156,7 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
return ret;
}
-static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+static int pcf8574_kp_remove(struct i2c_client *client)
{
struct kp_data *lp = i2c_get_clientdata(client);
@@ -212,7 +212,7 @@ static struct i2c_driver pcf8574_kp_driver = {
#endif
},
.probe = pcf8574_kp_probe,
- .remove = __devexit_p(pcf8574_kp_remove),
+ .remove = pcf8574_kp_remove,
.id_table = pcf8574_kp_id,
};
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index b2484aa07f32..199db78acc4f 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -63,7 +63,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
return 0;
}
-static int __devinit pcspkr_probe(struct platform_device *dev)
+static int pcspkr_probe(struct platform_device *dev)
{
struct input_dev *pcspkr_dev;
int err;
@@ -95,7 +95,7 @@ static int __devinit pcspkr_probe(struct platform_device *dev)
return 0;
}
-static int __devexit pcspkr_remove(struct platform_device *dev)
+static int pcspkr_remove(struct platform_device *dev)
{
struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
@@ -131,7 +131,7 @@ static struct platform_driver pcspkr_platform_driver = {
.pm = &pcspkr_pm_ops,
},
.probe = pcspkr_probe,
- .remove = __devexit_p(pcspkr_remove),
+ .remove = pcspkr_remove,
.shutdown = pcspkr_shutdown,
};
module_platform_driver(pcspkr_platform_driver);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index dfbfb463ea5d..a9da65e41c5b 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -178,7 +178,7 @@ static int pm8xxx_vib_play_effect(struct input_dev *dev, void *data,
return 0;
}
-static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
+static int pm8xxx_vib_probe(struct platform_device *pdev)
{
struct pm8xxx_vib *vib;
@@ -242,7 +242,7 @@ err_free_mem:
return error;
}
-static int __devexit pm8xxx_vib_remove(struct platform_device *pdev)
+static int pm8xxx_vib_remove(struct platform_device *pdev)
{
struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
@@ -270,7 +270,7 @@ static SIMPLE_DEV_PM_OPS(pm8xxx_vib_pm_ops, pm8xxx_vib_suspend, NULL);
static struct platform_driver pm8xxx_vib_driver = {
.probe = pm8xxx_vib_probe,
- .remove = __devexit_p(pm8xxx_vib_remove),
+ .remove = pm8xxx_vib_remove,
.driver = {
.name = "pm8xxx-vib",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 0f83d0f1d015..4b811be73974 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -81,7 +81,7 @@ static int pmic8xxx_pwrkey_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
-static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
{
struct input_dev *pwr;
int key_release_irq = platform_get_irq(pdev, 0);
@@ -187,7 +187,7 @@ free_pwrkey:
return err;
}
-static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
+static int pmic8xxx_pwrkey_remove(struct platform_device *pdev)
{
struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
int key_release_irq = platform_get_irq(pdev, 0);
@@ -206,7 +206,7 @@ static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
static struct platform_driver pmic8xxx_pwrkey_driver = {
.probe = pmic8xxx_pwrkey_probe,
- .remove = __devexit_p(pmic8xxx_pwrkey_remove),
+ .remove = pmic8xxx_pwrkey_remove,
.driver = {
.name = PM8XXX_PWRKEY_DEV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index fc84c8a51147..0808868461de 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -65,7 +65,7 @@ static int pwm_beeper_event(struct input_dev *input,
return 0;
}
-static int __devinit pwm_beeper_probe(struct platform_device *pdev)
+static int pwm_beeper_probe(struct platform_device *pdev)
{
unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
struct pwm_beeper *beeper;
@@ -75,7 +75,11 @@ static int __devinit pwm_beeper_probe(struct platform_device *pdev)
if (!beeper)
return -ENOMEM;
- beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ beeper->pwm = pwm_get(&pdev->dev, NULL);
+ if (IS_ERR(beeper->pwm)) {
+ dev_dbg(&pdev->dev, "unable to request PWM, trying legacy API\n");
+ beeper->pwm = pwm_request(pwm_id, "pwm beeper");
+ }
if (IS_ERR(beeper->pwm)) {
error = PTR_ERR(beeper->pwm);
@@ -125,7 +129,7 @@ err_free:
return error;
}
-static int __devexit pwm_beeper_remove(struct platform_device *pdev)
+static int pwm_beeper_remove(struct platform_device *pdev)
{
struct pwm_beeper *beeper = platform_get_drvdata(pdev);
@@ -171,13 +175,21 @@ static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
#define PWM_BEEPER_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id pwm_beeper_match[] = {
+ { .compatible = "pwm-beeper", },
+ { },
+};
+#endif
+
static struct platform_driver pwm_beeper_driver = {
.probe = pwm_beeper_probe,
- .remove = __devexit_p(pwm_beeper_remove),
+ .remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
.owner = THIS_MODULE,
.pm = PWM_BEEPER_PM_OPS,
+ .of_match_table = of_match_ptr(pwm_beeper_match),
},
};
module_platform_driver(pwm_beeper_driver);
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index aeb02bcf7233..fb4f8ac3343b 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -51,7 +51,7 @@ static void rb532_button_poll(struct input_polled_dev *poll_dev)
input_sync(poll_dev->input);
}
-static int __devinit rb532_button_probe(struct platform_device *pdev)
+static int rb532_button_probe(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev;
int error;
@@ -81,7 +81,7 @@ static int __devinit rb532_button_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit rb532_button_remove(struct platform_device *pdev)
+static int rb532_button_remove(struct platform_device *pdev)
{
struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
@@ -94,7 +94,7 @@ static int __devexit rb532_button_remove(struct platform_device *pdev)
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
- .remove = __devexit_p(rb532_button_remove),
+ .remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/retu-pwrbutton.c b/drivers/input/misc/retu-pwrbutton.c
new file mode 100644
index 000000000000..7ca09baa0016
--- /dev/null
+++ b/drivers/input/misc/retu-pwrbutton.c
@@ -0,0 +1,99 @@
+/*
+ * Retu power button driver.
+ *
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Original code written by Ari Saastamoinen, Juha Yrjölä and Felipe Balbi.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#define RETU_STATUS_PWRONX (1 << 5)
+
+static irqreturn_t retu_pwrbutton_irq(int irq, void *_pwr)
+{
+ struct input_dev *idev = _pwr;
+ struct retu_dev *rdev = input_get_drvdata(idev);
+ bool state;
+
+ state = !(retu_read(rdev, RETU_REG_STATUS) & RETU_STATUS_PWRONX);
+ input_report_key(idev, KEY_POWER, state);
+ input_sync(idev);
+
+ return IRQ_HANDLED;
+}
+
+static int retu_pwrbutton_probe(struct platform_device *pdev)
+{
+ struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *idev;
+ int irq;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = "retu-pwrbutton";
+ idev->dev.parent = &pdev->dev;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+ input_set_drvdata(idev, rdev);
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, retu_pwrbutton_irq, 0,
+ "retu-pwrbutton", idev);
+ if (error)
+ return error;
+
+ error = input_register_device(idev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int retu_pwrbutton_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver retu_pwrbutton_driver = {
+ .probe = retu_pwrbutton_probe,
+ .remove = retu_pwrbutton_remove,
+ .driver = {
+ .name = "retu-pwrbutton",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(retu_pwrbutton_driver);
+
+MODULE_ALIAS("platform:retu-pwrbutton");
+MODULE_DESCRIPTION("Retu Power Button");
+MODULE_AUTHOR("Ari Saastamoinen");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 99a49e4968d2..aff47b2c38ff 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -149,8 +149,7 @@ static struct of_device_id rotary_encoder_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-static struct rotary_encoder_platform_data * __devinit
-rotary_encoder_parse_dt(struct device *dev)
+static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
{
const struct of_device_id *of_id =
of_match_device(rotary_encoder_of_match, dev);
@@ -192,7 +191,7 @@ rotary_encoder_parse_dt(struct device *dev)
}
#endif
-static int __devinit rotary_encoder_probe(struct platform_device *pdev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
@@ -302,7 +301,7 @@ exit_free_mem:
return err;
}
-static int __devexit rotary_encoder_remove(struct platform_device *pdev)
+static int rotary_encoder_remove(struct platform_device *pdev)
{
struct rotary_encoder *encoder = platform_get_drvdata(pdev);
const struct rotary_encoder_platform_data *pdata = encoder->pdata;
@@ -325,7 +324,7 @@ static int __devexit rotary_encoder_remove(struct platform_device *pdev)
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = __devexit_p(rotary_encoder_remove),
+ .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 5d9fd5571199..ad6415ceaf5f 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -91,7 +91,7 @@ static void handle_buttons(struct input_polled_dev *dev)
}
}
-static int __devinit sgi_buttons_probe(struct platform_device *pdev)
+static int sgi_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -143,7 +143,7 @@ static int __devinit sgi_buttons_probe(struct platform_device *pdev)
return error;
}
-static int __devexit sgi_buttons_remove(struct platform_device *pdev)
+static int sgi_buttons_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct buttons_dev *bdev = dev_get_drvdata(dev);
@@ -158,7 +158,7 @@ static int __devexit sgi_buttons_remove(struct platform_device *pdev)
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
- .remove = __devexit_p(sgi_buttons_remove),
+ .remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0122f5351577..a53586a7fbdb 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -139,7 +139,7 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return 0;
}
-static int __devinit sparcspkr_probe(struct device *dev)
+static int sparcspkr_probe(struct device *dev)
{
struct sparcspkr_state *state = dev_get_drvdata(dev);
struct input_dev *input_dev;
@@ -182,7 +182,7 @@ static void sparcspkr_shutdown(struct platform_device *dev)
state->event(input_dev, EV_SND, SND_BELL, 0);
}
-static int __devinit bbc_beep_probe(struct platform_device *op)
+static int bbc_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct bbc_beep_info *info;
@@ -229,7 +229,7 @@ out_err:
return err;
}
-static int __devexit bbc_remove(struct platform_device *op)
+static int bbc_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct input_dev *input_dev = state->input_dev;
@@ -263,11 +263,11 @@ static struct platform_driver bbc_beep_driver = {
.of_match_table = bbc_beep_match,
},
.probe = bbc_beep_probe,
- .remove = __devexit_p(bbc_remove),
+ .remove = bbc_remove,
.shutdown = sparcspkr_shutdown,
};
-static int __devinit grover_beep_probe(struct platform_device *op)
+static int grover_beep_probe(struct platform_device *op)
{
struct sparcspkr_state *state;
struct grover_beep_info *info;
@@ -310,7 +310,7 @@ out_err:
return err;
}
-static int __devexit grover_remove(struct platform_device *op)
+static int grover_remove(struct platform_device *op)
{
struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
struct grover_beep_info *info = &state->u.grover;
@@ -345,7 +345,7 @@ static struct platform_driver grover_beep_driver = {
.of_match_table = grover_beep_match,
},
.probe = grover_beep_probe,
- .remove = __devexit_p(grover_remove),
+ .remove = grover_remove,
.shutdown = sparcspkr_shutdown,
};
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index b3dd96d6448b..27c2bc8aa890 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,8 +39,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
int err;
u8 value;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
- STS_HW_CONDITIONS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &value, STS_HW_CONDITIONS);
if (!err) {
pm_wakeup_event(pwr->dev.parent, 0);
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 2194a3c7236a..78eb6b30580a 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -207,7 +207,7 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
return false;
}
-static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
+static int twl4030_vibra_probe(struct platform_device *pdev)
{
struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct device_node *twl4030_core_node = pdev->dev.parent->of_node;
@@ -269,7 +269,7 @@ err_kzalloc:
return ret;
}
-static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
+static int twl4030_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -283,7 +283,7 @@ static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
- .remove = __devexit_p(twl4030_vibra_remove),
+ .remove = twl4030_vibra_remove,
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index c8a288ae1d5b..71a28ee699f3 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -255,7 +255,7 @@ static int twl6040_vibra_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
-static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
+static int twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
struct device *twl6040_core_dev = pdev->dev.parent;
@@ -418,7 +418,7 @@ err_kzalloc:
return ret;
}
-static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
+static int twl6040_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
@@ -433,7 +433,7 @@ static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
- .remove = __devexit_p(twl6040_vibra_remove),
+ .remove = twl6040_vibra_remove,
.driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index e2bdfd4bea70..56536f4b9572 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -170,7 +170,7 @@ static u16 bios_pop_queue(void)
return regs.eax;
}
-static void __devinit bios_attach(void)
+static void bios_attach(void)
{
struct regs regs;
@@ -190,7 +190,7 @@ static void bios_detach(void)
call_bios(&regs);
}
-static u8 __devinit bios_get_cmos_address(void)
+static u8 bios_get_cmos_address(void)
{
struct regs regs;
@@ -202,7 +202,7 @@ static u8 __devinit bios_get_cmos_address(void)
return regs.ecx;
}
-static u16 __devinit bios_get_default_setting(u8 subsys)
+static u16 bios_get_default_setting(u8 subsys)
{
struct regs regs;
@@ -1052,7 +1052,7 @@ static struct led_classdev wistron_wifi_led = {
.brightness_set = wistron_wifi_led_set,
};
-static void __devinit wistron_led_init(struct device *parent)
+static void wistron_led_init(struct device *parent)
{
if (leds_present & FE_WIFI_LED) {
u16 wifi = bios_get_default_setting(WIFI);
@@ -1077,7 +1077,7 @@ static void __devinit wistron_led_init(struct device *parent)
}
}
-static void __devexit wistron_led_remove(void)
+static void wistron_led_remove(void)
{
if (leds_present & FE_MAIL_LED)
led_classdev_unregister(&wistron_mail_led);
@@ -1168,7 +1168,7 @@ static void wistron_poll(struct input_polled_dev *dev)
dev->poll_interval = POLL_INTERVAL_DEFAULT;
}
-static int __devinit wistron_setup_keymap(struct input_dev *dev,
+static int wistron_setup_keymap(struct input_dev *dev,
struct key_entry *entry)
{
switch (entry->type) {
@@ -1199,7 +1199,7 @@ static int __devinit wistron_setup_keymap(struct input_dev *dev,
return 0;
}
-static int __devinit setup_input_dev(void)
+static int setup_input_dev(void)
{
struct input_dev *input_dev;
int error;
@@ -1237,7 +1237,7 @@ static int __devinit setup_input_dev(void)
/* Driver core */
-static int __devinit wistron_probe(struct platform_device *dev)
+static int wistron_probe(struct platform_device *dev)
{
int err;
@@ -1277,7 +1277,7 @@ static int __devinit wistron_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wistron_remove(struct platform_device *dev)
+static int wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
input_unregister_polled_device(wistron_idev);
@@ -1334,7 +1334,7 @@ static struct platform_driver wistron_driver = {
#endif
},
.probe = wistron_probe,
- .remove = __devexit_p(wistron_remove),
+ .remove = wistron_remove,
};
static int __init wb_module_init(void)
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 6790a812a1db..558767d8ebf4 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -69,14 +69,15 @@ static irqreturn_t wm831x_on_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit wm831x_on_probe(struct platform_device *pdev)
+static int wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
- wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
+ wm831x_on = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_on),
+ GFP_KERNEL);
if (!wm831x_on) {
dev_err(&pdev->dev, "Can't allocate data\n");
return -ENOMEM;
@@ -120,11 +121,10 @@ err_irq:
err_input_dev:
input_free_device(wm831x_on->dev);
err:
- kfree(wm831x_on);
return ret;
}
-static int __devexit wm831x_on_remove(struct platform_device *pdev)
+static int wm831x_on_remove(struct platform_device *pdev)
{
struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -132,14 +132,13 @@ static int __devexit wm831x_on_remove(struct platform_device *pdev)
free_irq(irq, wm831x_on);
cancel_delayed_work_sync(&wm831x_on->work);
input_unregister_device(wm831x_on->dev);
- kfree(wm831x_on);
return 0;
}
static struct platform_driver wm831x_on_driver = {
.probe = wm831x_on_probe,
- .remove = __devexit_p(wm831x_on_remove),
+ .remove = wm831x_on_remove,
.driver = {
.name = "wm831x-on",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6f7d99013031..e21c1816a8f9 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -104,7 +104,7 @@ static irqreturn_t input_handler(int rq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit xenkbd_probe(struct xenbus_device *dev,
+static int xenkbd_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
int ret, i, abs;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index cf5af1f495ec..e229fa3cad96 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -767,9 +767,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
psmouse->packet[5]) & 0x80) ||
(!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5], psmouse->packet[6]);
+ "refusing packet %4ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
return PSMOUSE_BAD_DATA;
}
@@ -831,9 +830,8 @@ static void alps_flush_packet(unsigned long data)
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) {
psmouse_dbg(psmouse,
- "refusing packet %x %x %x (suspected interleaved ps/2)\n",
- psmouse->packet[3], psmouse->packet[4],
- psmouse->packet[5]);
+ "refusing packet %3ph (suspected interleaved ps/2)\n",
+ psmouse->packet + 3);
} else {
alps_process_packet(psmouse);
}
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 39fe9b737cae..532eaca4cc56 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -46,7 +46,7 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
input_sync(input);
}
-static int __devinit gpio_mouse_probe(struct platform_device *pdev)
+static int gpio_mouse_probe(struct platform_device *pdev)
{
struct gpio_mouse_platform_data *pdata = pdev->dev.platform_data;
struct input_polled_dev *input_poll;
@@ -150,7 +150,7 @@ static int __devinit gpio_mouse_probe(struct platform_device *pdev)
return error;
}
-static int __devexit gpio_mouse_remove(struct platform_device *pdev)
+static int gpio_mouse_remove(struct platform_device *pdev)
{
struct input_polled_dev *input = platform_get_drvdata(pdev);
struct gpio_mouse_platform_data *pdata = input->private;
@@ -172,7 +172,7 @@ static int __devexit gpio_mouse_remove(struct platform_device *pdev)
static struct platform_driver gpio_mouse_device_driver = {
.probe = gpio_mouse_probe,
- .remove = __devexit_p(gpio_mouse_remove),
+ .remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
index 5f278176eb9b..0a60717b91c6 100644
--- a/drivers/input/mouse/maplemouse.c
+++ b/drivers/input/mouse/maplemouse.c
@@ -64,7 +64,7 @@ static void dc_mouse_close(struct input_dev *dev)
}
/* allow the mouse to be used */
-static int __devinit probe_maple_mouse(struct device *dev)
+static int probe_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
@@ -114,7 +114,7 @@ fail:
return error;
}
-static int __devexit remove_maple_mouse(struct device *dev)
+static int remove_maple_mouse(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct dc_mouse *mse = maple_get_drvdata(mdev);
@@ -132,7 +132,7 @@ static struct maple_driver dc_mouse_driver = {
.drv = {
.name = "Dreamcast_mouse",
.probe = probe_maple_mouse,
- .remove = __devexit_p(remove_maple_mouse),
+ .remove = remove_maple_mouse,
},
};
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index c29ae7654d5e..8e1b98ea5648 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -206,7 +206,7 @@ static void navpoint_close(struct input_dev *input)
navpoint_down(navpoint);
}
-static int __devinit navpoint_probe(struct platform_device *pdev)
+static int navpoint_probe(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -299,7 +299,7 @@ err_free_gpio:
return error;
}
-static int __devexit navpoint_remove(struct platform_device *pdev)
+static int navpoint_remove(struct platform_device *pdev)
{
const struct navpoint_platform_data *pdata =
dev_get_platdata(&pdev->dev);
@@ -353,7 +353,7 @@ static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
static struct platform_driver navpoint_driver = {
.probe = navpoint_probe,
- .remove = __devexit_p(navpoint_remove),
+ .remove = navpoint_remove,
.driver = {
.name = "navpoint",
.owner = THIS_MODULE,
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 4fe055f2c536..0ecb9e7945eb 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -143,7 +143,7 @@ static void pxa930_trkball_close(struct input_dev *dev)
pxa930_trkball_disable(trkball);
}
-static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
+static int pxa930_trkball_probe(struct platform_device *pdev)
{
struct pxa930_trkball *trkball;
struct input_dev *input;
@@ -230,7 +230,7 @@ failed:
return error;
}
-static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
+static int pxa930_trkball_remove(struct platform_device *pdev)
{
struct pxa930_trkball *trkball = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -248,7 +248,7 @@ static struct platform_driver pxa930_trkball_driver = {
.name = "pxa930-trkball",
},
.probe = pxa930_trkball_probe,
- .remove = __devexit_p(pxa930_trkball_remove),
+ .remove = pxa930_trkball_remove,
};
module_platform_driver(pxa930_trkball_driver);
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index e582922bacf7..cc7e0d4a8f93 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -791,7 +791,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y);
fsp_set_slot(dev, 1, false, 0, 0);
}
- if (fgrs > 0) {
+ if (fgrs == 1 || (fgrs == 2 && !(packet[0] & FSP_PB0_MFMC_FGR2))) {
input_report_abs(dev, ABS_X, abs_x);
input_report_abs(dev, ABS_Y, abs_y);
}
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 063a174d3a88..ad822608f6ee 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -535,7 +535,7 @@ static struct synaptics_i2c *synaptics_i2c_touch_create(struct i2c_client *clien
return touch;
}
-static int __devinit synaptics_i2c_probe(struct i2c_client *client,
+static int synaptics_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
int ret;
@@ -601,7 +601,7 @@ err_mem_free:
return ret;
}
-static int __devexit synaptics_i2c_remove(struct i2c_client *client)
+static int synaptics_i2c_remove(struct i2c_client *client)
{
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -662,7 +662,7 @@ static struct i2c_driver synaptics_i2c_driver = {
},
.probe = synaptics_i2c_probe,
- .remove = __devexit_p(synaptics_i2c_remove),
+ .remove = synaptics_i2c_remove,
.id_table = synaptics_i2c_id_table,
};
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 55f2c2293ec6..4a4e182c33e7 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -234,4 +234,13 @@ config SERIO_PS2MULT
To compile this driver as a module, choose M here: the
module will be called ps2mult.
+config SERIO_ARC_PS2
+ tristate "ARC PS/2 support"
+ help
+ Say Y here if you have an ARC FPGA platform with a PS/2
+ controller in it.
+
+ To compile this driver as a module, choose M here; the module
+ will be called arc_ps2.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index dbbe37616c92..4b0c8f84f1c1 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SERIO_RAW) += serio_raw.o
obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
+obj-$(CONFIG_SERIO_ARC_PS2) += arc_ps2.o
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index cc11f4efe119..479ce5fe8955 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -81,7 +81,7 @@ static void altera_ps2_close(struct serio *io)
/*
* Add one device to this driver.
*/
-static int __devinit altera_ps2_probe(struct platform_device *pdev)
+static int altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -159,7 +159,7 @@ static int __devinit altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int __devexit altera_ps2_remove(struct platform_device *pdev)
+static int altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match);
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = __devexit_p(altera_ps2_remove),
+ .remove = altera_ps2_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 2e77246c2e5a..4e2fd44865e1 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable_unprepare(kmi->clk);
}
-static int __devinit amba_kmi_probe(struct amba_device *dev,
+static int amba_kmi_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct amba_kmi_port *kmi;
@@ -163,7 +163,7 @@ static int __devinit amba_kmi_probe(struct amba_device *dev,
return ret;
}
-static int __devexit amba_kmi_remove(struct amba_device *dev)
+static int amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -204,7 +204,7 @@ static struct amba_driver ambakmi_driver = {
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
- .remove = __devexit_p(amba_kmi_remove),
+ .remove = amba_kmi_remove,
.resume = amba_kmi_resume,
};
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
new file mode 100644
index 000000000000..b571eb3e4efc
--- /dev/null
+++ b/drivers/input/serio/arc_ps2.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Originally developed by Pavel Sokolov <psokolov@synopsys.com>
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#define ARC_PS2_PORTS 2
+
+#define ARC_ARC_PS2_ID 0x0001f609
+
+#define STAT_TIMEOUT 128
+
+#define PS2_STAT_RX_FRM_ERR (1)
+#define PS2_STAT_RX_BUF_OVER (1 << 1)
+#define PS2_STAT_RX_INT_EN (1 << 2)
+#define PS2_STAT_RX_VAL (1 << 3)
+#define PS2_STAT_TX_ISNOT_FUL (1 << 4)
+#define PS2_STAT_TX_INT_EN (1 << 5)
+
+struct arc_ps2_port {
+ void __iomem *data_addr;
+ void __iomem *status_addr;
+ struct serio *io;
+};
+
+struct arc_ps2_data {
+ struct arc_ps2_port port[ARC_PS2_PORTS];
+ void __iomem *addr;
+ unsigned int frame_error;
+ unsigned int buf_overflow;
+ unsigned int total_int;
+};
+
+static void arc_ps2_check_rx(struct arc_ps2_data *arc_ps2,
+ struct arc_ps2_port *port)
+{
+ unsigned int timeout = 1000;
+ unsigned int flag, status;
+ unsigned char data;
+
+ do {
+ status = ioread32(port->status_addr);
+ if (!(status & PS2_STAT_RX_VAL))
+ return;
+
+ data = ioread32(port->data_addr) & 0xff;
+
+ flag = 0;
+ arc_ps2->total_int++;
+ if (status & PS2_STAT_RX_FRM_ERR) {
+ arc_ps2->frame_error++;
+ flag |= SERIO_PARITY;
+ } else if (status & PS2_STAT_RX_BUF_OVER) {
+ arc_ps2->buf_overflow++;
+ flag |= SERIO_FRAME;
+ }
+
+ serio_interrupt(port->io, data, flag);
+ } while (--timeout);
+
+ dev_err(&port->io->dev, "PS/2 hardware stuck\n");
+}
+
+static irqreturn_t arc_ps2_interrupt(int irq, void *dev)
+{
+ struct arc_ps2_data *arc_ps2 = dev;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ arc_ps2_check_rx(arc_ps2, &arc_ps2->port[i]);
+
+ return IRQ_HANDLED;
+}
+
+static int arc_ps2_write(struct serio *io, unsigned char val)
+{
+ unsigned status;
+ struct arc_ps2_port *port = io->port_data;
+ int timeout = STAT_TIMEOUT;
+
+ do {
+ status = ioread32(port->status_addr);
+ cpu_relax();
+
+ if (status & PS2_STAT_TX_ISNOT_FUL) {
+ iowrite32(val & 0xff, port->data_addr);
+ return 0;
+ }
+
+ } while (--timeout);
+
+ dev_err(&io->dev, "write timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int arc_ps2_open(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(PS2_STAT_RX_INT_EN, port->status_addr);
+
+ return 0;
+}
+
+static void arc_ps2_close(struct serio *io)
+{
+ struct arc_ps2_port *port = io->port_data;
+
+ iowrite32(ioread32(port->status_addr) & ~PS2_STAT_RX_INT_EN,
+ port->status_addr);
+}
+
+static void __iomem *arc_ps2_calc_addr(struct arc_ps2_data *arc_ps2,
+ int index, bool status)
+{
+ void __iomem *addr;
+
+ addr = arc_ps2->addr + 4 + 4 * index;
+ if (status)
+ addr += ARC_PS2_PORTS * 4;
+
+ return addr;
+}
+
+static void arc_ps2_inhibit_ports(struct arc_ps2_data *arc_ps2)
+{
+ void __iomem *addr;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ addr = arc_ps2_calc_addr(arc_ps2, i, true);
+ val = ioread32(addr);
+ val &= ~(PS2_STAT_RX_INT_EN | PS2_STAT_TX_INT_EN);
+ iowrite32(val, addr);
+ }
+}
+
+static int arc_ps2_create_port(struct platform_device *pdev,
+ struct arc_ps2_data *arc_ps2,
+ int index)
+{
+ struct arc_ps2_port *port = &arc_ps2->port[index];
+ struct serio *io;
+
+ io = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!io)
+ return -ENOMEM;
+
+ io->id.type = SERIO_8042;
+ io->write = arc_ps2_write;
+ io->open = arc_ps2_open;
+ io->close = arc_ps2_close;
+ snprintf(io->name, sizeof(io->name), "ARC PS/2 port%d", index);
+ snprintf(io->phys, sizeof(io->phys), "arc/serio%d", index);
+ io->port_data = port;
+
+ port->io = io;
+
+ port->data_addr = arc_ps2_calc_addr(arc_ps2, index, false);
+ port->status_addr = arc_ps2_calc_addr(arc_ps2, index, true);
+
+ dev_dbg(&pdev->dev, "port%d is allocated (data = 0x%p, status = 0x%p)\n",
+ index, port->data_addr, port->status_addr);
+
+ serio_register_port(port->io);
+ return 0;
+}
+
+static int arc_ps2_probe(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2;
+ struct resource *res;
+ int irq;
+ int error, id, i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no IO memory defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq_byname(pdev, "arc_ps2_irq");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ defined\n");
+ return -EINVAL;
+ }
+
+ arc_ps2 = devm_kzalloc(&pdev->dev, sizeof(struct arc_ps2_data),
+ GFP_KERNEL);
+ if (!arc_ps2) {
+ dev_err(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ arc_ps2->addr = devm_request_and_ioremap(&pdev->dev, res);
+ if (!arc_ps2->addr)
+ return -EBUSY;
+
+ dev_info(&pdev->dev, "irq = %d, address = 0x%p, ports = %i\n",
+ irq, arc_ps2->addr, ARC_PS2_PORTS);
+
+ id = ioread32(arc_ps2->addr);
+ if (id != ARC_ARC_PS2_ID) {
+ dev_err(&pdev->dev, "device id does not match\n");
+ return -ENXIO;
+ }
+
+ arc_ps2_inhibit_ports(arc_ps2);
+
+ error = devm_request_irq(&pdev->dev, irq, arc_ps2_interrupt,
+ 0, "arc_ps2", arc_ps2);
+ if (error) {
+ dev_err(&pdev->dev, "Could not allocate IRQ\n");
+ return error;
+ }
+
+ for (i = 0; i < ARC_PS2_PORTS; i++) {
+ error = arc_ps2_create_port(pdev, arc_ps2, i);
+ if (error) {
+ while (--i >= 0)
+ serio_unregister_port(arc_ps2->port[i].io);
+ return error;
+ }
+ }
+
+ platform_set_drvdata(pdev, arc_ps2);
+
+ return 0;
+}
+
+static int arc_ps2_remove(struct platform_device *pdev)
+{
+ struct arc_ps2_data *arc_ps2 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARC_PS2_PORTS; i++)
+ serio_unregister_port(arc_ps2->port[i].io);
+
+ dev_dbg(&pdev->dev, "interrupt count = %i\n", arc_ps2->total_int);
+ dev_dbg(&pdev->dev, "frame error count = %i\n", arc_ps2->frame_error);
+ dev_dbg(&pdev->dev, "buffer overflow count = %i\n",
+ arc_ps2->buf_overflow);
+
+ return 0;
+}
+
+static struct platform_driver arc_ps2_driver = {
+ .driver = {
+ .name = "arc_ps2",
+ .owner = THIS_MODULE,
+ },
+ .probe = arc_ps2_probe,
+ .remove = arc_ps2_remove,
+};
+
+module_platform_driver(arc_ps2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Sokolov <psokolov@synopsys.com>");
+MODULE_DESCRIPTION("ARC PS/2 Driver");
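
For orientation: the new arc_ps2.c above never spells out its register map; the layout below is only inferred from arc_ps2_calc_addr() and the probe routine (an ID word at offset 0, one 32-bit data register per port, then one 32-bit status register per port). A minimal sketch under that assumption; the macro names are illustrative and not part of the driver:

    /* Register layout implied by arc_ps2_calc_addr(); names are hypothetical */
    #define ARC_PS2_REG_ID            0x00                   /* read in probe, compared against ARC_ARC_PS2_ID */
    #define ARC_PS2_REG_DATA(port)    (0x04 + 4 * (port))    /* RX/TX data for ports 0..ARC_PS2_PORTS-1 */
    #define ARC_PS2_REG_STATUS(port)  (0x04 + 4 * (ARC_PS2_PORTS + (port)))   /* status bits PS2_STAT_* */
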
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 852816567241..cfe549d4eaa5 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -175,7 +175,7 @@ static int __init ct82c710_detect(void)
return 0;
}
-static int __devinit ct82c710_probe(struct platform_device *dev)
+static int ct82c710_probe(struct platform_device *dev)
{
ct82c710_port = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ct82c710_port)
@@ -199,7 +199,7 @@ static int __devinit ct82c710_probe(struct platform_device *dev)
return 0;
}
-static int __devexit ct82c710_remove(struct platform_device *dev)
+static int ct82c710_remove(struct platform_device *dev)
{
serio_unregister_port(ct82c710_port);
@@ -212,7 +212,7 @@ static struct platform_driver ct82c710_driver = {
.owner = THIS_MODULE,
},
.probe = ct82c710_probe,
- .remove = __devexit_p(ct82c710_remove),
+ .remove = ct82c710_remove,
};
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 4225f5d6b15f..8d9ba0c3827c 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -327,7 +327,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int __devinit gscps2_probe(struct parisc_device *dev)
+static int gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -414,7 +414,7 @@ fail_nomem:
* @return: success/error report
*/
-static int __devexit gscps2_remove(struct parisc_device *dev)
+static int gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
@@ -444,7 +444,7 @@ static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = __devexit_p(gscps2_remove),
+ .remove = gscps2_remove,
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index bfd3865d886b..65605e4ef3cf 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -686,13 +686,12 @@ static int hilse_donode(hil_mlc *mlc)
write_lock_irqsave(&mlc->lock, flags);
pack = node->object.packet;
out:
- if (mlc->istarted)
- goto out2;
- /* Prepare to receive input */
- if ((node + 1)->act & HILSE_IN)
- hilse_setup_input(mlc, node + 1);
+ if (!mlc->istarted) {
+ /* Prepare to receive input */
+ if ((node + 1)->act & HILSE_IN)
+ hilse_setup_input(mlc, node + 1);
+ }
- out2:
write_unlock_irqrestore(&mlc->lock, flags);
if (down_trylock(&mlc->osem)) {
@@ -1010,8 +1009,6 @@ static int __init hil_mlc_init(void)
static void __exit hil_mlc_exit(void)
{
del_timer_sync(&hil_mlcs_kicker);
-
- tasklet_disable(&hil_mlcs_tasklet);
tasklet_kill(&hil_mlcs_tasklet);
}
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 5d48bb66aa73..a5eed2ade53d 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -76,7 +76,7 @@ static inline int i8042_platform_init(void)
if (check_legacy_ioport(I8042_DATA_REG))
return -ENODEV;
#endif
-#if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__)
+#if !defined(__sh__) && !defined(__alpha__)
if (!request_region(I8042_DATA_REG, 16, "i8042"))
return -EBUSY;
#endif
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 395a9af3adcd..d6aa4c67dbb6 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -49,7 +49,7 @@ static inline void i8042_write_command(int val)
#define OBP_PS2MS_NAME1 "kdmouse"
#define OBP_PS2MS_NAME2 "mouse"
-static int __devinit sparc_i8042_probe(struct platform_device *op)
+static int sparc_i8042_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -80,7 +80,7 @@ static int __devinit sparc_i8042_probe(struct platform_device *op)
return 0;
}
-static int __devexit sparc_i8042_remove(struct platform_device *op)
+static int sparc_i8042_remove(struct platform_device *op)
{
of_iounmap(kbd_res, kbd_iobase, 8);
@@ -102,7 +102,7 @@ static struct platform_driver sparc_i8042_driver = {
.of_match_table = sparc_i8042_match,
},
.probe = sparc_i8042_probe,
- .remove = __devexit_p(sparc_i8042_remove),
+ .remove = sparc_i8042_remove,
};
static int __init i8042_platform_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index d6cc77a53c7e..5f306f79da0c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void)
int retval;
#ifdef CONFIG_X86
+ u8 a20_on = 0xdf;
/* Just return if pre-detection shows no i8042 controller exist */
if (!x86_platform.i8042_detect())
return -ENODEV;
@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+ * resume from S3. So we do it here and hope that nothing breaks.
+ */
+ i8042_command(&a20_on, 0x10d1);
+ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
#endif /* CONFIG_X86 */
return retval;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 86564414b75a..78e4de42efaa 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1284,7 +1284,7 @@ static void __init i8042_register_ports(void)
}
}
-static void __devexit i8042_unregister_ports(void)
+static void i8042_unregister_ports(void)
{
int i;
@@ -1437,7 +1437,7 @@ static int __init i8042_probe(struct platform_device *dev)
return error;
}
-static int __devexit i8042_remove(struct platform_device *dev)
+static int i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
@@ -1455,7 +1455,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
- .remove = __devexit_p(i8042_remove),
+ .remove = i8042_remove,
.shutdown = i8042_shutdown,
};
diff --git a/drivers/input/serio/maceps2.c b/drivers/input/serio/maceps2.c
index 61da763b1209..bc85e1cc66d8 100644
--- a/drivers/input/serio/maceps2.c
+++ b/drivers/input/serio/maceps2.c
@@ -116,7 +116,7 @@ static void maceps2_close(struct serio *dev)
}
-static struct serio * __devinit maceps2_allocate_port(int idx)
+static struct serio *maceps2_allocate_port(int idx)
{
struct serio *serio;
@@ -135,7 +135,7 @@ static struct serio * __devinit maceps2_allocate_port(int idx)
return serio;
}
-static int __devinit maceps2_probe(struct platform_device *dev)
+static int maceps2_probe(struct platform_device *dev)
{
maceps2_port[0] = maceps2_allocate_port(0);
maceps2_port[1] = maceps2_allocate_port(1);
@@ -151,7 +151,7 @@ static int __devinit maceps2_probe(struct platform_device *dev)
return 0;
}
-static int __devexit maceps2_remove(struct platform_device *dev)
+static int maceps2_remove(struct platform_device *dev)
{
serio_unregister_port(maceps2_port[0]);
serio_unregister_port(maceps2_port[1]);
@@ -165,7 +165,7 @@ static struct platform_driver maceps2_driver = {
.owner = THIS_MODULE,
},
.probe = maceps2_probe,
- .remove = __devexit_p(maceps2_remove),
+ .remove = maceps2_remove,
};
static int __init maceps2_init(void)
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 0c42497aaaf4..76f83836fd5a 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -127,7 +127,7 @@ static void pcips2_close(struct serio *io)
free_irq(ps2if->dev->irq, ps2if);
}
-static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int pcips2_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pcips2_data *ps2if;
struct serio *serio;
@@ -176,7 +176,7 @@ static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_i
return ret;
}
-static void __devexit pcips2_remove(struct pci_dev *dev)
+static void pcips2_remove(struct pci_dev *dev)
{
struct pcips2_data *ps2if = pci_get_drvdata(dev);
@@ -212,7 +212,7 @@ static struct pci_driver pcips2_driver = {
.name = "pcips2",
.id_table = pcips2_ids,
.probe = pcips2_probe,
- .remove = __devexit_p(pcips2_remove),
+ .remove = pcips2_remove,
};
module_pci_driver(pcips2_driver);
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index 0c0df7f73802..70fe542839fb 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -122,7 +122,7 @@ static void q40kbd_close(struct serio *port)
q40kbd_flush(q40kbd);
}
-static int __devinit q40kbd_probe(struct platform_device *pdev)
+static int q40kbd_probe(struct platform_device *pdev)
{
struct q40kbd *q40kbd;
struct serio *port;
@@ -168,7 +168,7 @@ err_free_mem:
return error;
}
-static int __devexit q40kbd_remove(struct platform_device *pdev)
+static int q40kbd_remove(struct platform_device *pdev)
{
struct q40kbd *q40kbd = platform_get_drvdata(pdev);
@@ -190,7 +190,7 @@ static struct platform_driver q40kbd_driver = {
.name = "q40kbd",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(q40kbd_remove),
+ .remove = q40kbd_remove,
};
static int __init q40kbd_init(void)
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 2af5df6a8fba..567566ae0dae 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -114,7 +114,7 @@ static void rpckbd_close(struct serio *port)
* Allocate and initialize serio structure for subsequent registration
* with serio core.
*/
-static int __devinit rpckbd_probe(struct platform_device *dev)
+static int rpckbd_probe(struct platform_device *dev)
{
struct rpckbd_data *rpckbd;
struct serio *serio;
@@ -153,7 +153,7 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
return 0;
}
-static int __devexit rpckbd_remove(struct platform_device *dev)
+static int rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
struct rpckbd_data *rpckbd = serio->port_data;
@@ -166,7 +166,7 @@ static int __devexit rpckbd_remove(struct platform_device *dev)
static struct platform_driver rpckbd_driver = {
.probe = rpckbd_probe,
- .remove = __devexit_p(rpckbd_remove),
+ .remove = rpckbd_remove,
.driver = {
.name = "kart",
.owner = THIS_MODULE,
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 389766707534..b3e688911fd9 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -193,7 +193,7 @@ static void ps2_close(struct serio *io)
/*
* Clear the input buffer.
*/
-static void __devinit ps2_clear_input(struct ps2if *ps2if)
+static void ps2_clear_input(struct ps2if *ps2if)
{
int maxread = 100;
@@ -203,7 +203,7 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
}
}
-static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
+static unsigned int ps2_test_one(struct ps2if *ps2if,
unsigned int mask)
{
unsigned int val;
@@ -220,7 +220,7 @@ static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
-static int __devinit ps2_test(struct ps2if *ps2if)
+static int ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
@@ -251,7 +251,7 @@ static int __devinit ps2_test(struct ps2if *ps2if)
/*
* Add one device to this driver.
*/
-static int __devinit ps2_probe(struct sa1111_dev *dev)
+static int ps2_probe(struct sa1111_dev *dev)
{
struct ps2if *ps2if;
struct serio *serio;
@@ -334,7 +334,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int __devexit ps2_remove(struct sa1111_dev *dev)
+static int ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -357,7 +357,7 @@ static struct sa1111_driver ps2_driver = {
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
- .remove = __devexit_p(ps2_remove),
+ .remove = ps2_remove,
};
static int __init ps2_init(void)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index d0f7533dbf88..25fc5971f426 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -891,8 +891,6 @@ static int serio_bus_match(struct device *dev, struct device_driver *drv)
return serio_match_port(serio_drv->id_table, serio);
}
-#ifdef CONFIG_HOTPLUG
-
#define SERIO_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(env, fmt, val); \
@@ -920,15 +918,6 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#undef SERIO_ADD_UEVENT_VAR
-#else
-
-static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_HOTPLUG */
-
#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 1e983bec7d86..17be85948ffd 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -233,7 +233,7 @@ static void sxps2_close(struct serio *pserio)
* It returns 0, if the driver is bound to the PS/2 device, or a negative
* value if there is an error.
*/
-static int __devinit xps2_of_probe(struct platform_device *ofdev)
+static int xps2_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -333,7 +333,7 @@ failed1:
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*/
-static int __devexit xps2_of_remove(struct platform_device *of_dev)
+static int xps2_of_remove(struct platform_device *of_dev)
{
struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
@@ -355,7 +355,7 @@ static int __devexit xps2_of_remove(struct platform_device *of_dev)
}
/* Match table for of_platform binding */
-static const struct of_device_id xps2_of_match[] __devinitconst = {
+static const struct of_device_id xps2_of_match[] = {
{ .compatible = "xlnx,xps-ps2-1.00.a", },
{ /* end of list */ },
};
@@ -368,7 +368,7 @@ static struct platform_driver xps2_of_driver = {
.of_match_table = xps2_of_match,
},
.probe = xps2_of_probe,
- .remove = __devexit_p(xps2_of_remove),
+ .remove = xps2_of_remove,
};
module_platform_driver(xps2_of_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 858ad446de91..f92d34f45a1c 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -386,23 +386,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
if (usage == WCM_DESKTOP) {
if (finger) {
features->device_type = BTN_TOOL_FINGER;
- if (features->type == TABLETPC2FG) {
- /* need to reset back */
+
+ switch (features->type) {
+ case TABLETPC2FG:
features->pktlen = WACOM_PKGLEN_TPC2FG;
- }
+ break;
- if (features->type == MTSCREEN || features->type == WACOM_24HDT)
+ case MTSCREEN:
+ case WACOM_24HDT:
features->pktlen = WACOM_PKGLEN_MTOUCH;
+ break;
- if (features->type == BAMBOO_PT) {
- /* need to reset back */
+ case MTTPC:
+ features->pktlen = WACOM_PKGLEN_MTTPC;
+ break;
+
+ case BAMBOO_PT:
features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ break;
+
+ default:
+ features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ break;
+ }
+
+ switch (features->type) {
+ case BAMBOO_PT:
features->x_phy =
get_unaligned_le16(&report[i + 5]);
features->x_max =
get_unaligned_le16(&report[i + 8]);
i += 15;
- } else if (features->type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -410,7 +427,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i - 1];
features->unitExpo = report[i - 3];
i += 12;
- } else {
+ break;
+
+ default:
features->x_max =
get_unaligned_le16(&report[i + 3]);
features->x_phy =
@@ -418,10 +437,11 @@ static int wacom_parse_hid(struct usb_interface *intf,
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
+ break;
}
} else if (pen) {
/* penabled only accepts exact bytes of data */
- if (features->type == TABLETPC2FG)
+ if (features->type >= TABLETPC)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
@@ -434,32 +454,40 @@ static int wacom_parse_hid(struct usb_interface *intf,
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- int type = features->type;
-
- if (type == TABLETPC2FG || type == MTSCREEN) {
+ switch (features->type) {
+ case TABLETPC2FG:
+ case MTSCREEN:
+ case MTTPC:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
- } else if (type == WACOM_24HDT) {
+ break;
+
+ case WACOM_24HDT:
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i - 2]);
i += 7;
- } else if (type == BAMBOO_PT) {
+ break;
+
+ case BAMBOO_PT:
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
get_unaligned_le16(&report[i + 6]);
i += 12;
- } else {
+ break;
+
+ default:
features->y_max =
features->x_max;
features->y_phy =
get_unaligned_le16(&report[i + 3]);
i += 4;
+ break;
}
} else if (pen) {
features->y_max =
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0a67031ffc13..264138f3217e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -467,9 +467,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
- (features->type >= WACOM_21UX2 && features->type <= WACOM_24HD)) {
+ if (features->type >= INTUOS4S && features->type <= WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -877,6 +875,11 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
int i;
int current_num_contacts = data[2];
int contacts_to_send = 0;
+ int x_offset = 0;
+
+ /* MTTPC does not support Height and Width */
+ if (wacom->features.type == MTTPC)
+ x_offset = -4;
/*
* First packet resets the counter since only the first
@@ -889,7 +892,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
contacts_to_send = min(5, wacom->num_contacts_left);
for (i = 0; i < contacts_to_send; i++) {
- int offset = (WACOM_BYTES_PER_MT_PACKET * i) + 3;
+ int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
bool touch = data[offset] & 0x1;
int id = le16_to_cpup((__le16 *)&data[offset + 1]);
int slot = find_slot_from_contactid(wacom, id);
@@ -900,8 +903,8 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = le16_to_cpup((__le16 *)&data[offset + 7]);
- int y = le16_to_cpup((__le16 *)&data[offset + 9]);
+ int x = le16_to_cpup((__le16 *)&data[offset + x_offset + 7]);
+ int y = le16_to_cpup((__le16 *)&data[offset + x_offset + 9]);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
@@ -1336,6 +1339,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case TABLETPCE:
case TABLETPC2FG:
case MTSCREEN:
+ case MTTPC:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1657,6 +1661,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case MTSCREEN:
+ case MTTPC:
if (features->device_type == BTN_TOOL_FINGER) {
wacom_wac->slots = kmalloc(features->touch_max *
sizeof(int),
@@ -2018,6 +2023,15 @@ static const struct wacom_features wacom_features_0xED =
static const struct wacom_features wacom_features_0xEF =
{ "Wacom ISDv4 EF", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x100 =
+ { "Wacom ISDv4 100", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x101 =
+ { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x4001 =
+ { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
+ 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2034,7 +2048,8 @@ static const struct wacom_features wacom_features_0xD1 =
.touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
@@ -2194,6 +2209,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0xED) },
{ USB_DEVICE_WACOM(0xEF) },
+ { USB_DEVICE_WACOM(0x100) },
+ { USB_DEVICE_WACOM(0x101) },
+ { USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
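
As a cross-check of the wacom_mt_touch() arithmetic above: with WACOM_BYTES_PER_MT_PACKET equal to 11 and x_offset forced to -4 for MTTPC (which, per the new comment, carries no Height/Width fields), each MTTPC contact record shrinks to 7 bytes and the X/Y words move 4 bytes closer to the start of the record. A small standalone sketch of that bookkeeping, using only values visible in the patch:

    #include <stdio.h>

    /* Mirrors wacom_mt_touch(): contact i starts at
     * (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3, and X/Y sit at
     * +7/+9 from that start, shifted by x_offset for MTTPC.
     */
    int main(void)
    {
            const int bytes_per_packet = 11;     /* WACOM_BYTES_PER_MT_PACKET */
            const int x_offsets[] = { 0, -4 };   /* MTSCREEN/WACOM_24HDT, MTTPC */
            const char *names[] = { "MTSCREEN", "MTTPC" };

            for (int t = 0; t < 2; t++) {
                    int off = x_offsets[t];
                    printf("%-8s %2d bytes per contact, X at +%d, Y at +%d\n",
                           names[t], bytes_per_packet + off, 7 + off, 9 + off);
            }
            return 0;
    }
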
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 345f1e76975e..9396d7769f86 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -26,6 +26,7 @@
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
#define WACOM_PKGLEN_MTOUCH 62
+#define WACOM_PKGLEN_MTTPC 40
/* wacom data size per MT contact */
#define WACOM_BYTES_PER_MT_PACKET 11
@@ -88,6 +89,7 @@ enum {
TABLETPCE,
TABLETPC2FG,
MTSCREEN,
+ MTTPC,
MAX_TYPE
};
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 326218dbd6e6..c7068942ebe8 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -115,7 +115,7 @@ static void pm860x_touch_close(struct input_dev *dev)
}
#ifdef CONFIG_OF
-static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
+static int pm860x_touch_dt_init(struct platform_device *pdev,
struct pm860x_chip *chip,
int *res_x)
{
@@ -169,7 +169,7 @@ static int __devinit pm860x_touch_dt_init(struct platform_device *pdev,
#define pm860x_touch_dt_init(x, y, z) (-1)
#endif
-static int __devinit pm860x_touch_probe(struct platform_device *pdev)
+static int pm860x_touch_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm860x_touch_pdata *pdata = pdev->dev.platform_data;
@@ -293,7 +293,7 @@ out:
return ret;
}
-static int __devexit pm860x_touch_remove(struct platform_device *pdev)
+static int pm860x_touch_remove(struct platform_device *pdev)
{
struct pm860x_touch *touch = platform_get_drvdata(pdev);
@@ -310,7 +310,7 @@ static struct platform_driver pm860x_touch_driver = {
.owner = THIS_MODULE,
},
.probe = pm860x_touch_probe,
- .remove = __devexit_p(pm860x_touch_remove),
+ .remove = pm860x_touch_remove,
};
module_platform_driver(pm860x_touch_driver);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f7668b24c378..515cfe790543 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -111,18 +111,6 @@ config TOUCHSCREEN_AUO_PIXCIR
To compile this driver as a module, choose M here: the
module will be called auo-pixcir-ts.
-config TOUCHSCREEN_BITSY
- tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
- depends on SA1100_BITSY
- select SERIO
- help
- Say Y here if you have the h3600 (Bitsy) touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called h3600_ts_input.
-
config TOUCHSCREEN_BU21013
tristate "BU21013 based touch panel controllers"
depends on I2C
@@ -529,9 +517,9 @@ config TOUCHSCREEN_TOUCHWIN
To compile this driver as a module, choose M here: the
module will be called touchwin.
-config TOUCHSCREEN_TI_TSCADC
+config TOUCHSCREEN_TI_AM335X_TSC
tristate "TI Touchscreen Interface"
- depends on ARCH_OMAP2PLUS
+ depends on MFD_TI_AM335X_TSCADC
help
Say Y here if you have 4/5/8 wire touchscreen controller
to be connected to the ADC controller on your TI AM335x SoC.
@@ -539,7 +527,7 @@ config TOUCHSCREEN_TI_TSCADC
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called ti_tscadc.
+ module will be called ti_am335x_tsc.
config TOUCHSCREEN_ATMEL_TSADCC
tristate "Atmel Touchscreen Interface"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 178eb128d90f..6bfbeab67c9f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
-obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
@@ -52,7 +51,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
-obj-$(CONFIG_TOUCHSCREEN_TI_TSCADC) += ti_tscadc.o
+obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 2c7692108e6c..23fa829b869d 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -682,7 +682,7 @@ static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
}
}
-static int __devinit ad7877_probe(struct spi_device *spi)
+static int ad7877_probe(struct spi_device *spi)
{
struct ad7877 *ts;
struct input_dev *input_dev;
@@ -810,7 +810,7 @@ err_free_mem:
return err;
}
-static int __devexit ad7877_remove(struct spi_device *spi)
+static int ad7877_remove(struct spi_device *spi)
{
struct ad7877 *ts = dev_get_drvdata(&spi->dev);
@@ -857,7 +857,7 @@ static struct spi_driver ad7877_driver = {
.pm = &ad7877_pm,
},
.probe = ad7877_probe,
- .remove = __devexit_p(ad7877_remove),
+ .remove = ad7877_remove,
};
module_spi_driver(ad7877_driver);
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index 3054354d0dd3..dcf390771549 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -54,7 +54,7 @@ static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
.write = ad7879_i2c_write,
};
-static int __devinit ad7879_i2c_probe(struct i2c_client *client,
+static int ad7879_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7879 *ts;
@@ -75,7 +75,7 @@ static int __devinit ad7879_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit ad7879_i2c_remove(struct i2c_client *client)
+static int ad7879_i2c_remove(struct i2c_client *client)
{
struct ad7879 *ts = i2c_get_clientdata(client);
@@ -98,7 +98,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
- .remove = __devexit_p(ad7879_i2c_remove),
+ .remove = ad7879_i2c_remove,
.id_table = ad7879_id,
};
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index db49abf056ba..606da5bd6115 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -110,7 +110,7 @@ static const struct ad7879_bus_ops ad7879_spi_bus_ops = {
.write = ad7879_spi_write,
};
-static int __devinit ad7879_spi_probe(struct spi_device *spi)
+static int ad7879_spi_probe(struct spi_device *spi)
{
struct ad7879 *ts;
int err;
@@ -137,7 +137,7 @@ static int __devinit ad7879_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit ad7879_spi_remove(struct spi_device *spi)
+static int ad7879_spi_remove(struct spi_device *spi)
{
struct ad7879 *ts = spi_get_drvdata(spi);
@@ -154,7 +154,7 @@ static struct spi_driver ad7879_spi_driver = {
.pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
- .remove = __devexit_p(ad7879_spi_remove),
+ .remove = ad7879_spi_remove,
};
module_spi_driver(ad7879_spi_driver);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 78e5d9ab0ba7..4f702b3ec1a3 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -955,7 +955,7 @@ static int ads7846_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
-static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+static int ads7846_setup_pendown(struct spi_device *spi,
struct ads7846 *ts)
{
struct ads7846_platform_data *pdata = spi->dev.platform_data;
@@ -997,7 +997,7 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi,
* Set up the transfers to read touchscreen state; this assumes we
* use formula #2 for pressure, not #3.
*/
-static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
+static void ads7846_setup_spi_msg(struct ads7846 *ts,
const struct ads7846_platform_data *pdata)
{
struct spi_message *m = &ts->msg[0];
@@ -1196,7 +1196,7 @@ static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts,
spi_message_add_tail(x, m);
}
-static int __devinit ads7846_probe(struct spi_device *spi)
+static int ads7846_probe(struct spi_device *spi)
{
struct ads7846 *ts;
struct ads7846_packet *packet;
@@ -1390,7 +1390,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
return err;
}
-static int __devexit ads7846_remove(struct spi_device *spi)
+static int ads7846_remove(struct spi_device *spi)
{
struct ads7846 *ts = dev_get_drvdata(&spi->dev);
@@ -1434,7 +1434,7 @@ static struct spi_driver ads7846_driver = {
.pm = &ads7846_pm,
},
.probe = ads7846_probe,
- .remove = __devexit_p(ads7846_remove),
+ .remove = ads7846_remove,
};
module_spi_driver(ads7846_driver);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1df2396af008..d04f810cb1dd 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1095,7 +1095,7 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-static int __devinit mxt_probe(struct i2c_client *client,
+static int mxt_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct mxt_platform_data *pdata = client->dev.platform_data;
@@ -1200,7 +1200,7 @@ err_free_mem:
return error;
}
-static int __devexit mxt_remove(struct i2c_client *client)
+static int mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
@@ -1270,7 +1270,7 @@ static struct i2c_driver mxt_driver = {
.pm = &mxt_pm_ops,
},
.probe = mxt_probe,
- .remove = __devexit_p(mxt_remove),
+ .remove = mxt_remove,
.id_table = mxt_id,
};
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 201b2d2ec1b3..95f6785a94b0 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -22,7 +22,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <mach/board.h>
+#include <linux/platform_data/atmel.h>
#include <mach/cpu.h>
/* Register definitions based on AT91SAM9RL64 preliminary draft datasheet */
@@ -177,7 +177,7 @@ static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev)
* The functions for inserting/removing us as a module.
*/
-static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
+static int atmel_tsadcc_probe(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev;
struct input_dev *input_dev;
@@ -323,7 +323,7 @@ err_free_mem:
return err;
}
-static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
+static int atmel_tsadcc_remove(struct platform_device *pdev)
{
struct atmel_tsadcc *ts_dev = dev_get_drvdata(&pdev->dev);
struct resource *res;
@@ -346,7 +346,7 @@ static int __devexit atmel_tsadcc_remove(struct platform_device *pdev)
static struct platform_driver atmel_tsadcc_driver = {
.probe = atmel_tsadcc_probe,
- .remove = __devexit_p(atmel_tsadcc_remove),
+ .remove = atmel_tsadcc_remove,
.driver = {
.name = "atmel_tsadcc",
},
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index c7047b6bb020..c6e19a96348e 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -286,7 +286,7 @@ static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
return 0;
}
-static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+static int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
int int_setting)
{
struct i2c_client *client = ts->client;
@@ -482,7 +482,7 @@ unlock:
static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
auo_pixcir_resume);
-static int __devinit auo_pixcir_probe(struct i2c_client *client,
+static int auo_pixcir_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -599,7 +599,7 @@ err_gpio_int:
return ret;
}
-static int __devexit auo_pixcir_remove(struct i2c_client *client)
+static int auo_pixcir_remove(struct i2c_client *client)
{
struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
@@ -631,7 +631,7 @@ static struct i2c_driver auo_pixcir_driver = {
.pm = &auo_pixcir_pm_ops,
},
.probe = auo_pixcir_probe,
- .remove = __devexit_p(auo_pixcir_remove),
+ .remove = auo_pixcir_remove,
.id_table = auo_pixcir_idtable,
};
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 5c487d23f11c..b9b5ddad6658 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -14,6 +14,9 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#define PEN_DOWN_INTR 0
#define MAX_FINGERS 2
@@ -148,11 +151,12 @@
struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
- bool touch_stopped;
const struct bu21013_platform_device *chip;
struct input_dev *in_dev;
- unsigned int intr_pin;
struct regulator *regulator;
+ unsigned int irq;
+ unsigned int intr_pin;
+ bool touch_stopped;
};
/**
@@ -262,7 +266,7 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
return IRQ_NONE;
}
- data->intr_pin = data->chip->irq_read_val();
+ data->intr_pin = gpio_get_value(data->chip->touch_pin);
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
msecs_to_jiffies(2));
@@ -418,8 +422,70 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
{
bu21013_data->touch_stopped = true;
wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
+ free_irq(bu21013_data->irq, bu21013_data);
+}
+
+/**
+ * bu21013_cs_disable() - deconfigures the touch panel controller
+ * @bu21013_data: device structure pointer
+ *
+ * This function is used to deconfigure the chip selection
+ * for touch panel controller.
+ */
+static void bu21013_cs_disable(struct bu21013_ts_data *bu21013_data)
+{
+ int error;
+
+ error = gpio_direction_output(bu21013_data->chip->cs_pin, 0);
+ if (error < 0)
+ dev_warn(&bu21013_data->client->dev,
+ "%s: gpio direction failed, error: %d\n",
+ __func__, error);
+ else
+ gpio_set_value(bu21013_data->chip->cs_pin, 0);
+
+ gpio_free(bu21013_data->chip->cs_pin);
+}
+
+#ifdef CONFIG_OF
+static const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct bu21013_platform_device *pdata;
+
+ if (!np) {
+ dev_err(dev, "no device tree or platform data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->y_flip = pdata->x_flip = false;
+
+ pdata->x_flip = of_property_read_bool(np, "rohm,flip-x");
+ pdata->y_flip = of_property_read_bool(np, "rohm,flip-y");
+
+ of_property_read_u32(np, "rohm,touch-max-x", &pdata->touch_x_max);
+ of_property_read_u32(np, "rohm,touch-max-y", &pdata->touch_y_max);
+
+ pdata->touch_pin = of_get_named_gpio(np, "touch-gpio", 0);
+ pdata->cs_pin = of_get_named_gpio(np, "reset-gpio", 0);
+
+ pdata->ext_clk = false;
+
+ return pdata;
}
+#else
+static inline const struct bu21013_platform_device *
+bu21013_parse_dt(struct device *dev)
+{
+ dev_err(dev, "no platform data available\n");
+ return ERR_PTR(-EINVAL);
+}
+#endif
/**
* bu21013_probe() - initializes the i2c-client touchscreen driver
@@ -429,13 +495,13 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
 * This function initializes the i2c-client touchscreen
 * driver and returns an integer.
*/
-static int __devinit bu21013_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bu21013_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
+ const struct bu21013_platform_device *pdata =
+ dev_get_platdata(&client->dev);
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
- client->dev.platform_data;
int error;
if (!i2c_check_functionality(client->adapter,
@@ -445,7 +511,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
}
if (!pdata) {
- dev_err(&client->dev, "platform data not defined\n");
+ pdata = bu21013_parse_dt(&client->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
+ if (!gpio_is_valid(pdata->touch_pin)) {
+ dev_err(&client->dev, "invalid touch_pin supplied\n");
return -EINVAL;
}
@@ -460,8 +532,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
+ bu21013_data->irq = gpio_to_irq(pdata->touch_pin);
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
dev_err(&client->dev, "regulator_get failed\n");
error = PTR_ERR(bu21013_data->regulator);
@@ -478,12 +551,11 @@ static int __devinit bu21013_probe(struct i2c_client *client,
init_waitqueue_head(&bu21013_data->wait);
/* configure the gpio pins */
- if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
- dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
- }
+ error = gpio_request_one(pdata->cs_pin, GPIOF_OUT_INIT_HIGH,
+ "touchp_reset");
+ if (error < 0) {
+ dev_err(&client->dev, "Unable to request gpio reset_pin\n");
+ goto err_disable_regulator;
}
/* configure the touch panel controller */
@@ -508,12 +580,13 @@ static int __devinit bu21013_probe(struct i2c_client *client,
pdata->touch_y_max, 0, 0);
input_set_drvdata(in_dev, bu21013_data);
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ error = request_threaded_irq(bu21013_data->irq, NULL, bu21013_gpio_irq,
IRQF_TRIGGER_FALLING | IRQF_SHARED |
IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
- dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
+ dev_err(&client->dev, "request irq %d failed\n",
+ bu21013_data->irq);
goto err_cs_disable;
}
@@ -531,7 +604,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
err_free_irq:
bu21013_free_irq(bu21013_data);
err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
+ bu21013_cs_disable(bu21013_data);
err_disable_regulator:
regulator_disable(bu21013_data->regulator);
err_put_regulator:
@@ -549,13 +622,13 @@ err_free_mem:
* This function removes the i2c-client
* touchscreen driver and returns an integer.
*/
-static int __devexit bu21013_remove(struct i2c_client *client)
+static int bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
bu21013_free_irq(bu21013_data);
- bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
+ bu21013_cs_disable(bu21013_data);
input_unregister_device(bu21013_data->in_dev);
@@ -584,9 +657,9 @@ static int bu21013_suspend(struct device *dev)
bu21013_data->touch_stopped = true;
if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
+ enable_irq_wake(bu21013_data->irq);
else
- disable_irq(bu21013_data->chip->irq);
+ disable_irq(bu21013_data->irq);
regulator_disable(bu21013_data->regulator);
@@ -621,9 +694,9 @@ static int bu21013_resume(struct device *dev)
bu21013_data->touch_stopped = false;
if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
+ disable_irq_wake(bu21013_data->irq);
else
- enable_irq(bu21013_data->chip->irq);
+ enable_irq(bu21013_data->irq);
return 0;
}
@@ -649,7 +722,7 @@ static struct i2c_driver bu21013_driver = {
#endif
},
.probe = bu21013_probe,
- .remove = __devexit_p(bu21013_remove),
+ .remove = bu21013_remove,
.id_table = bu21013_id,
};
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 464f1bf4b61d..96e0eedcc7e5 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -99,9 +99,18 @@ static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc,
int ret;
struct i2c_msg msg[2] = {
/* first write slave position to i2c devices */
- { client->addr, 0, 1, &cmd },
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &cmd
+ },
/* Second read data from position */
- { client->addr, I2C_M_RD, len, data }
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data
+ }
};
ret = i2c_transfer(client->adapter, msg, 2);
@@ -166,7 +175,7 @@ static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit cy8ctmg110_probe(struct i2c_client *client,
+static int cy8ctmg110_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct cy8ctmg110_pdata *pdata = client->dev.platform_data;
@@ -314,7 +323,7 @@ static int cy8ctmg110_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
#endif
-static int __devexit cy8ctmg110_remove(struct i2c_client *client)
+static int cy8ctmg110_remove(struct i2c_client *client)
{
struct cy8ctmg110 *ts = i2c_get_clientdata(client);
@@ -348,7 +357,7 @@ static struct i2c_driver cy8ctmg110_driver = {
},
.id_table = cy8ctmg110_idtable,
.probe = cy8ctmg110_probe,
- .remove = __devexit_p(cy8ctmg110_remove),
+ .remove = cy8ctmg110_remove,
};
module_i2c_driver(cy8ctmg110_driver);
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 2af1d0c52bcd..4dbdf44b8fc5 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -81,7 +81,7 @@ static const struct cyttsp_bus_ops cyttsp_i2c_bus_ops = {
.read = cyttsp_i2c_read_block_data,
};
-static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
+static int cyttsp_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cyttsp *ts;
@@ -102,7 +102,7 @@ static int __devinit cyttsp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit cyttsp_i2c_remove(struct i2c_client *client)
+static int cyttsp_i2c_remove(struct i2c_client *client)
{
struct cyttsp *ts = i2c_get_clientdata(client);
@@ -124,7 +124,7 @@ static struct i2c_driver cyttsp_i2c_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
- .remove = __devexit_p(cyttsp_i2c_remove),
+ .remove = cyttsp_i2c_remove,
.id_table = cyttsp_i2c_id,
};
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 9f263410407b..638e20310f12 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -147,7 +147,7 @@ static const struct cyttsp_bus_ops cyttsp_spi_bus_ops = {
.read = cyttsp_spi_read_block_data,
};
-static int __devinit cyttsp_spi_probe(struct spi_device *spi)
+static int cyttsp_spi_probe(struct spi_device *spi)
{
struct cyttsp *ts;
int error;
@@ -172,7 +172,7 @@ static int __devinit cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit cyttsp_spi_remove(struct spi_device *spi)
+static int cyttsp_spi_remove(struct spi_device *spi)
{
struct cyttsp *ts = spi_get_drvdata(spi);
@@ -188,7 +188,7 @@ static struct spi_driver cyttsp_spi_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
- .remove = __devexit_p(cyttsp_spi_remove),
+ .remove = cyttsp_spi_remove,
};
module_spi_driver(cyttsp_spi_driver);
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 36b65cf10d7f..34ad84105e6e 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -297,7 +297,7 @@ static void da9034_touch_close(struct input_dev *dev)
}
-static int __devinit da9034_touch_probe(struct platform_device *pdev)
+static int da9034_touch_probe(struct platform_device *pdev)
{
struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
struct da9034_touch *touch;
@@ -361,7 +361,7 @@ err_free_touch:
return ret;
}
-static int __devexit da9034_touch_remove(struct platform_device *pdev)
+static int da9034_touch_remove(struct platform_device *pdev)
{
struct da9034_touch *touch = platform_get_drvdata(pdev);
@@ -377,7 +377,7 @@ static struct platform_driver da9034_touch_driver = {
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
- .remove = __devexit_p(da9034_touch_remove),
+ .remove = da9034_touch_remove,
};
module_platform_driver(da9034_touch_driver);
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
index e8df341090c0..8f561e22bdd4 100644
--- a/drivers/input/touchscreen/da9052_tsi.c
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -27,8 +27,6 @@ struct da9052_tsi {
struct input_dev *dev;
struct delayed_work ts_pen_work;
struct mutex mutex;
- unsigned int irq_pendwn;
- unsigned int irq_datardy;
bool stopped;
bool adc_on;
};
@@ -45,8 +43,8 @@ static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
if (!tsi->stopped) {
/* Mask PEN_DOWN event and unmask TSI_READY event */
- disable_irq_nosync(tsi->irq_pendwn);
- enable_irq(tsi->irq_datardy);
+ da9052_disable_irq_nosync(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, true);
@@ -137,13 +135,13 @@ static void da9052_ts_pen_work(struct work_struct *work)
return;
/* Mask TSI_READY event and unmask PEN_DOWN event */
- disable_irq(tsi->irq_datardy);
- enable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
}
}
-static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
+static int da9052_ts_configure_gpio(struct da9052 *da9052)
{
int error;
@@ -162,7 +160,7 @@ static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
return 0;
}
-static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
+static int da9052_configure_tsi(struct da9052_tsi *tsi)
{
int error;
@@ -197,7 +195,7 @@ static int da9052_ts_input_open(struct input_dev *input_dev)
mb();
/* Unmask PEN_DOWN event */
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
/* Enable Pen Detect Circuit */
return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
@@ -210,11 +208,11 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
tsi->stopped = true;
mb();
- disable_irq(tsi->irq_pendwn);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
cancel_delayed_work_sync(&tsi->ts_pen_work);
if (tsi->adc_on) {
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
da9052_ts_adc_toggle(tsi, false);
/*
@@ -222,33 +220,24 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
* twice and we need to enable it to keep enable/disable
* counter balanced. IRQ is still off though.
*/
- enable_irq(tsi->irq_pendwn);
+ da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
}
/* Disable Pen Detect Circuit */
da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
}
-static int __devinit da9052_ts_probe(struct platform_device *pdev)
+static int da9052_ts_probe(struct platform_device *pdev)
{
struct da9052 *da9052;
struct da9052_tsi *tsi;
struct input_dev *input_dev;
- int irq_pendwn;
- int irq_datardy;
int error;
da9052 = dev_get_drvdata(pdev->dev.parent);
if (!da9052)
return -EINVAL;
- irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
- irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
- if (irq_pendwn < 0 || irq_datardy < 0) {
- dev_err(da9052->dev, "Unable to determine device interrupts\n");
- return -ENXIO;
- }
-
tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
input_dev = input_allocate_device();
if (!tsi || !input_dev) {
@@ -258,8 +247,6 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
tsi->da9052 = da9052;
tsi->dev = input_dev;
- tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
- tsi->irq_datardy = da9052->irq_base + irq_datardy;
tsi->stopped = true;
INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
@@ -287,31 +274,25 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
/* Disable ADC */
da9052_ts_adc_toggle(tsi, false);
- error = request_threaded_irq(tsi->irq_pendwn,
- NULL, da9052_ts_pendwn_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "PENDWN", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_PENDOWN,
+ "pendown-irq", da9052_ts_pendwn_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register PENDWN IRQ %d, error = %d\n",
- tsi->irq_pendwn, error);
+ "Failed to register PENDWN IRQ: %d\n", error);
goto err_free_mem;
}
- error = request_threaded_irq(tsi->irq_datardy,
- NULL, da9052_ts_datardy_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "TSIRDY", tsi);
+ error = da9052_request_irq(tsi->da9052, DA9052_IRQ_TSIREADY,
+ "tsiready-irq", da9052_ts_datardy_irq, tsi);
if (error) {
dev_err(tsi->da9052->dev,
- "Failed to register TSIRDY IRQ %d, error = %d\n",
- tsi->irq_datardy, error);
+				"Failed to register TSIRDY IRQ: %d\n", error);
goto err_free_pendwn_irq;
}
/* Mask PEN_DOWN and TSI_READY events */
- disable_irq(tsi->irq_pendwn);
- disable_irq(tsi->irq_datardy);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
+ da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
error = da9052_configure_tsi(tsi);
if (error)
@@ -326,9 +307,9 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
return 0;
err_free_datardy_irq:
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
err_free_pendwn_irq:
- free_irq(tsi->irq_pendwn, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
err_free_mem:
kfree(tsi);
input_free_device(input_dev);
@@ -336,14 +317,14 @@ err_free_mem:
return error;
}
-static int __devexit da9052_ts_remove(struct platform_device *pdev)
+static int da9052_ts_remove(struct platform_device *pdev)
{
struct da9052_tsi *tsi = platform_get_drvdata(pdev);
da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
- free_irq(tsi->irq_pendwn, tsi);
- free_irq(tsi->irq_datardy, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
+ da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
input_unregister_device(tsi->dev);
kfree(tsi);
@@ -355,7 +336,7 @@ static int __devexit da9052_ts_remove(struct platform_device *pdev)
static struct platform_driver da9052_tsi_driver = {
.probe = da9052_ts_probe,
- .remove = __devexit_p(da9052_ts_remove),
+ .remove = da9052_ts_remove,
.driver = {
.name = "da9052-tsi",
.owner = THIS_MODULE,
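
The da9052_tsi conversion above drops the driver's own VIRQ bookkeeping in favour of helpers exported by the da9052 MFD core. A minimal sketch of the calling pattern, with hypothetical function names (example_bind, pen_handler) and the handler body elided; the helper signatures are inferred from the hunks above rather than quoted from the header:

static irqreturn_t pen_handler(int irq, void *data)
{
	/* runs in thread context, like a request_threaded_irq() thread fn */
	return IRQ_HANDLED;
}

static int example_bind(struct da9052 *da9052, void *priv)
{
	int error;

	error = da9052_request_irq(da9052, DA9052_IRQ_PENDOWN,
				   "pendown-irq", pen_handler, priv);
	if (error)
		return error;

	/* keep the source masked until the input device is opened */
	da9052_disable_irq(da9052, DA9052_IRQ_PENDOWN);

	/* later: da9052_enable_irq() on open, da9052_free_irq() on teardown */
	return 0;
}
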
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 099d144ab7c9..a9170157b442 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -491,14 +491,6 @@ static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode)
DEFINE_SIMPLE_ATTRIBUTE(debugfs_mode_fops, edt_ft5x06_debugfs_mode_get,
edt_ft5x06_debugfs_mode_set, "%llu\n");
-static int edt_ft5x06_debugfs_raw_data_open(struct inode *inode,
- struct file *file)
-{
- file->private_data = inode->i_private;
-
- return 0;
-}
-
static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file,
char __user *buf, size_t count, loff_t *off)
{
@@ -579,11 +571,11 @@ out:
static const struct file_operations debugfs_raw_data_fops = {
- .open = edt_ft5x06_debugfs_raw_data_open,
+ .open = simple_open,
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void __devinit
+static void
edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
const char *debugfs_name)
{
@@ -600,7 +592,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
}
-static void __devexit
+static void
edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
if (tsdata->debug_dir)
@@ -625,7 +617,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
-static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
+static int edt_ft5x06_ts_reset(struct i2c_client *client,
int reset_pin)
{
int error;
@@ -649,7 +641,7 @@ static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
return 0;
}
-static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
+static int edt_ft5x06_ts_identify(struct i2c_client *client,
char *model_name,
char *fw_version)
{
@@ -683,7 +675,7 @@ static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
-static void __devinit
+static void
edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
const struct edt_ft5x06_platform_data *pdata)
{
@@ -697,7 +689,7 @@ edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE);
}
-static void __devinit
+static void
edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
{
tsdata->threshold = edt_ft5x06_register_read(tsdata,
@@ -710,7 +702,7 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y);
}
-static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client,
+static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct edt_ft5x06_platform_data *pdata =
@@ -830,7 +822,7 @@ err_free_mem:
return error;
}
-static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
+static int edt_ft5x06_ts_remove(struct i2c_client *client)
{
const struct edt_ft5x06_platform_data *pdata =
dev_get_platdata(&client->dev);
@@ -891,7 +883,7 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
- .remove = __devexit_p(edt_ft5x06_ts_remove),
+ .remove = edt_ft5x06_ts_remove,
};
module_i2c_driver(edt_ft5x06_ts_driver);
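
The edt-ft5x06 hunks replace a hand-rolled debugfs open() with the generic simple_open() from fs/libfs.c, which performs the same private_data plumbing. Paraphrased from memory rather than quoted, the helper amounts to:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
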
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 908407efc672..55255a940072 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -154,7 +154,7 @@ static void eeti_ts_close(struct input_dev *dev)
eeti_ts_stop(priv);
}
-static int __devinit eeti_ts_probe(struct i2c_client *client,
+static int eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
struct eeti_ts_platform_data *pdata = client->dev.platform_data;
@@ -248,7 +248,7 @@ err0:
return err;
}
-static int __devexit eeti_ts_remove(struct i2c_client *client)
+static int eeti_ts_remove(struct i2c_client *client)
{
struct eeti_ts_priv *priv = i2c_get_clientdata(client);
@@ -321,7 +321,7 @@ static struct i2c_driver eeti_ts_driver = {
#endif
},
.probe = eeti_ts_probe,
- .remove = __devexit_p(eeti_ts_remove),
+ .remove = eeti_ts_remove,
.id_table = eeti_ts_id,
};
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 13fa62fdfb0b..17c9097f3b5d 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -153,7 +153,7 @@ static int egalax_wake_up_device(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_firmware_version(struct i2c_client *client)
+static int egalax_firmware_version(struct i2c_client *client)
{
static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
int ret;
@@ -165,7 +165,7 @@ static int __devinit egalax_firmware_version(struct i2c_client *client)
return 0;
}
-static int __devinit egalax_ts_probe(struct i2c_client *client,
+static int egalax_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct egalax_ts *ts;
@@ -246,7 +246,7 @@ err_free_ts:
return error;
}
-static __devexit int egalax_ts_remove(struct i2c_client *client)
+static int egalax_ts_remove(struct i2c_client *client)
{
struct egalax_ts *ts = i2c_get_clientdata(client);
@@ -301,7 +301,7 @@ static struct i2c_driver egalax_ts_driver = {
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
- .remove = __devexit_p(egalax_ts_remove),
+ .remove = egalax_ts_remove,
};
module_i2c_driver(egalax_ts_driver);
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
deleted file mode 100644
index b9e8686a6f1c..000000000000
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Copyright (c) 2001 "Crazy" James Simmons jsimmons@transvirtual.com
- *
- * Sponsored by Transvirtual Technology.
- *
- * Derived from the code in h3600_ts.[ch] by Charles Flynn
- */
-
-/*
- * Driver for the h3600 Touch Screen and other Atmel controlled devices.
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so by
- * e-mail - mail your message to <jsimmons@transvirtual.com>.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/serio.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-/* SA1100 serial defines */
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#define DRIVER_DESC "H3600 touchscreen driver"
-
-MODULE_AUTHOR("James Simmons <jsimmons@transvirtual.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
-/*
- * Definitions & global arrays.
- */
-
-/* The start and end of frame characters SOF and EOF */
-#define CHAR_SOF 0x02
-#define CHAR_EOF 0x03
-#define FRAME_OVERHEAD 3 /* CHAR_SOF,CHAR_EOF,LENGTH = 3 */
-
-/*
- Atmel events and response IDs contained in frame.
- Programmer has no control over these numbers.
- TODO there are holes - specifically 1,7,0x0a
-*/
-#define VERSION_ID 0 /* Get Version (request/response) */
-#define KEYBD_ID 2 /* Keyboard (event) */
-#define TOUCHS_ID 3 /* Touch Screen (event)*/
-#define EEPROM_READ_ID 4 /* (request/response) */
-#define EEPROM_WRITE_ID 5 /* (request/response) */
-#define THERMAL_ID 6 /* (request/response) */
-#define NOTIFY_LED_ID 8 /* (request/response) */
-#define BATTERY_ID 9 /* (request/response) */
-#define SPI_READ_ID 0x0b /* ( request/response) */
-#define SPI_WRITE_ID 0x0c /* ( request/response) */
-#define FLITE_ID 0x0d /* backlight ( request/response) */
-#define STX_ID 0xa1 /* extension pack status (req/resp) */
-
-#define MAX_ID 14
-
-#define H3600_MAX_LENGTH 16
-#define H3600_KEY 0xf
-
-#define H3600_SCANCODE_RECORD 1 /* 1 -> record button */
-#define H3600_SCANCODE_CALENDAR 2 /* 2 -> calendar */
-#define H3600_SCANCODE_CONTACTS 3 /* 3 -> contact */
-#define H3600_SCANCODE_Q 4 /* 4 -> Q button */
-#define H3600_SCANCODE_START 5 /* 5 -> start menu */
-#define H3600_SCANCODE_UP 6 /* 6 -> up */
-#define H3600_SCANCODE_RIGHT 7 /* 7 -> right */
-#define H3600_SCANCODE_LEFT 8 /* 8 -> left */
-#define H3600_SCANCODE_DOWN 9 /* 9 -> down */
-
-/*
- * Per-touchscreen data.
- */
-struct h3600_dev {
- struct input_dev *dev;
- struct serio *serio;
- unsigned char event; /* event ID from packet */
- unsigned char chksum;
- unsigned char len;
- unsigned char idx;
- unsigned char buf[H3600_MAX_LENGTH];
- char phys[32];
-};
-
-static irqreturn_t action_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_ACTION_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- input_report_key(dev, KEY_ENTER, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t npower_button_handler(int irq, void *dev_id)
-{
- int down = (GPLR & GPIO_BITSY_NPOWER_BUTTON) ? 0 : 1;
- struct input_dev *dev = dev_id;
-
- /*
- * This interrupt is only called when we release the key. So we have
- * to fake a key press.
- */
- input_report_key(dev, KEY_SUSPEND, 1);
- input_report_key(dev, KEY_SUSPEND, down);
- input_sync(dev);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-
-static int flite_brightness = 25;
-
-enum flite_pwr {
- FLITE_PWR_OFF = 0,
- FLITE_PWR_ON = 1
-};
-
-/*
- * h3600_flite_power: enables or disables power to frontlight, using last bright */
-unsigned int h3600_flite_power(struct input_dev *dev, enum flite_pwr pwr)
-{
- unsigned char brightness = (pwr == FLITE_PWR_OFF) ? 0 : flite_brightness;
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- /* Must be in this order */
- serio_write(ts->serio, 1);
- serio_write(ts->serio, pwr);
- serio_write(ts->serio, brightness);
-
- return 0;
-}
-
-#endif
-
-/*
- * This function translates the native event packets to linux input event
- * packets. Some packets coming from serial are not touchscreen related. In
- * this case we send them off to be processed elsewhere.
- */
-static void h3600ts_process_packet(struct h3600_dev *ts)
-{
- struct input_dev *dev = ts->dev;
- static int touched = 0;
- int key, down = 0;
-
- switch (ts->event) {
- /*
- Buttons - returned as a single byte
- 7 6 5 4 3 2 1 0
- S x x x N N N N
-
- S switch state ( 0=pressed 1=released)
- x Unused.
- NNNN switch number 0-15
-
- Note: This is true for non interrupt generated key events.
- */
- case KEYBD_ID:
- down = (ts->buf[0] & 0x80) ? 0 : 1;
-
- switch (ts->buf[0] & 0x7f) {
- case H3600_SCANCODE_RECORD:
- key = KEY_RECORD;
- break;
- case H3600_SCANCODE_CALENDAR:
- key = KEY_PROG1;
- break;
- case H3600_SCANCODE_CONTACTS:
- key = KEY_PROG2;
- break;
- case H3600_SCANCODE_Q:
- key = KEY_Q;
- break;
- case H3600_SCANCODE_START:
- key = KEY_PROG3;
- break;
- case H3600_SCANCODE_UP:
- key = KEY_UP;
- break;
- case H3600_SCANCODE_RIGHT:
- key = KEY_RIGHT;
- break;
- case H3600_SCANCODE_LEFT:
- key = KEY_LEFT;
- break;
- case H3600_SCANCODE_DOWN:
- key = KEY_DOWN;
- break;
- default:
- key = 0;
- }
- if (key)
- input_report_key(dev, key, down);
- break;
- /*
- * Native touchscreen event data is formatted as shown below:-
- *
- * +-------+-------+-------+-------+
- * | Xmsb | Xlsb | Ymsb | Ylsb |
- * +-------+-------+-------+-------+
- * byte 0 1 2 3
- */
- case TOUCHS_ID:
- if (!touched) {
- input_report_key(dev, BTN_TOUCH, 1);
- touched = 1;
- }
-
- if (ts->len) {
- unsigned short x, y;
-
- x = ts->buf[0]; x <<= 8; x += ts->buf[1];
- y = ts->buf[2]; y <<= 8; y += ts->buf[3];
-
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
- } else {
- input_report_key(dev, BTN_TOUCH, 0);
- touched = 0;
- }
- break;
- default:
- /* Send a non input event elsewhere */
- break;
- }
-
- input_sync(dev);
-}
-
-/*
- * h3600ts_event() handles events from the input module.
- */
-static int h3600ts_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
-{
-#if 0
- struct h3600_dev *ts = input_get_drvdata(dev);
-
- switch (type) {
- case EV_LED: {
- // serio_write(ts->serio, SOME_CMD);
- return 0;
- }
- }
- return -1;
-#endif
- return 0;
-}
-
-/*
- Frame format
- byte 1 2 3 len + 4
- +-------+---------------+---------------+--=------------+
- |SOF |id |len | len bytes | Chksum |
- +-------+---------------+---------------+--=------------+
- bit 0 7 8 11 12 15 16
-
- +-------+---------------+-------+
- |SOF |id |0 |Chksum | - Note Chksum does not include SOF
- +-------+---------------+-------+
- bit 0 7 8 11 12 15 16
-
-*/
-
-static int state;
-
-/* decode States */
-#define STATE_SOF 0 /* start of FRAME */
-#define STATE_ID 1 /* state where we decode the ID & len */
-#define STATE_DATA 2 /* state where we decode data */
-#define STATE_EOF 3 /* state where we decode checksum or EOF */
-
-static irqreturn_t h3600ts_interrupt(struct serio *serio, unsigned char data,
- unsigned int flags)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- /*
- * We have a new frame coming in.
- */
- switch (state) {
- case STATE_SOF:
- if (data == CHAR_SOF)
- state = STATE_ID;
- break;
- case STATE_ID:
- ts->event = (data & 0xf0) >> 4;
- ts->len = (data & 0xf);
- ts->idx = 0;
- if (ts->event >= MAX_ID) {
- state = STATE_SOF;
- break;
- }
- ts->chksum = data;
- state = (ts->len > 0) ? STATE_DATA : STATE_EOF;
- break;
- case STATE_DATA:
- ts->chksum += data;
- ts->buf[ts->idx]= data;
- if (++ts->idx == ts->len)
- state = STATE_EOF;
- break;
- case STATE_EOF:
- state = STATE_SOF;
- if (data == CHAR_EOF || data == ts->chksum)
- h3600ts_process_packet(ts);
- break;
- default:
- printk("Error3\n");
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * h3600ts_connect() is the routine that is called when someone adds a
- * new serio device that supports H3600 protocol and registers it as
- * an input device.
- */
-static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
-{
- struct h3600_dev *ts;
- struct input_dev *input_dev;
- int err;
-
- ts = kzalloc(sizeof(struct h3600_dev), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto fail1;
- }
-
- ts->serio = serio;
- ts->dev = input_dev;
- snprintf(ts->phys, sizeof(ts->phys), "%s/input0", serio->phys);
-
- input_dev->name = "H3600 TouchScreen";
- input_dev->phys = ts->phys;
- input_dev->id.bustype = BUS_RS232;
- input_dev->id.vendor = SERIO_H3600;
- input_dev->id.product = 0x0666; /* FIXME !!! We can ask the hardware */
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &serio->dev;
-
- input_set_drvdata(input_dev, ts);
-
- input_dev->event = h3600ts_event;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_LED) | BIT_MASK(EV_PWR);
- input_dev->ledbit[0] = BIT_MASK(LED_SLEEP);
- input_set_abs_params(input_dev, ABS_X, 60, 985, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 35, 1024, 0, 0);
-
- set_bit(KEY_RECORD, input_dev->keybit);
- set_bit(KEY_Q, input_dev->keybit);
- set_bit(KEY_PROG1, input_dev->keybit);
- set_bit(KEY_PROG2, input_dev->keybit);
- set_bit(KEY_PROG3, input_dev->keybit);
- set_bit(KEY_UP, input_dev->keybit);
- set_bit(KEY_RIGHT, input_dev->keybit);
- set_bit(KEY_LEFT, input_dev->keybit);
- set_bit(KEY_DOWN, input_dev->keybit);
- set_bit(KEY_ENTER, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(BTN_TOUCH, input_dev->keybit);
-
- /* Device specific stuff */
- set_GPIO_IRQ_edge(GPIO_BITSY_ACTION_BUTTON, GPIO_BOTH_EDGES);
- set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
-
- if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
- IRQF_SHARED, "h3600_action", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
- err = -EBUSY;
- goto fail1;
- }
-
- if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
- IRQF_SHARED, "h3600_suspend", ts->dev)) {
- printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
- err = -EBUSY;
- goto fail2;
- }
-
- serio_set_drvdata(serio, ts);
-
- err = serio_open(serio, drv);
- if (err)
- goto fail3;
-
- //h3600_flite_control(1, 25); /* default brightness */
- err = input_register_device(ts->dev);
- if (err)
- goto fail4;
-
- return 0;
-
-fail4: serio_close(serio);
-fail3: serio_set_drvdata(serio, NULL);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
-fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: input_free_device(input_dev);
- kfree(ts);
- return err;
-}
-
-/*
- * h3600ts_disconnect() is the opposite of h3600ts_connect()
- */
-
-static void h3600ts_disconnect(struct serio *serio)
-{
- struct h3600_dev *ts = serio_get_drvdata(serio);
-
- free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
- free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
- input_get_device(ts->dev);
- input_unregister_device(ts->dev);
- serio_close(serio);
- serio_set_drvdata(serio, NULL);
- input_put_device(ts->dev);
- kfree(ts);
-}
-
-/*
- * The serio driver structure.
- */
-
-static struct serio_device_id h3600ts_serio_ids[] = {
- {
- .type = SERIO_RS232,
- .proto = SERIO_H3600,
- .id = SERIO_ANY,
- .extra = SERIO_ANY,
- },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(serio, h3600ts_serio_ids);
-
-static struct serio_driver h3600ts_drv = {
- .driver = {
- .name = "h3600ts",
- },
- .description = DRIVER_DESC,
- .id_table = h3600ts_serio_ids,
- .interrupt = h3600ts_interrupt,
- .connect = h3600ts_connect,
- .disconnect = h3600ts_disconnect,
-};
-
-module_serio_driver(h3600ts_drv);
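
For reference, the serio byte decoder in the driver deleted above waits for SOF, latches the event ID and length, accumulates the payload into a running checksum, and accepts the frame when the final byte is either CHAR_EOF or the checksum itself. A compressed user-space sketch of that state machine (hypothetical struct and function names):

enum { STATE_SOF, STATE_ID, STATE_DATA, STATE_EOF };

struct frame {
	unsigned char event, len, idx, chksum;
	unsigned char buf[16];
};

/* Feed one received byte; returns 1 when a complete, valid frame is ready. */
static int feed(struct frame *f, int *state, unsigned char c)
{
	switch (*state) {
	case STATE_SOF:
		if (c == 0x02)			/* CHAR_SOF */
			*state = STATE_ID;
		return 0;
	case STATE_ID:
		f->event = c >> 4;
		f->len = c & 0xf;
		f->idx = 0;
		f->chksum = c;			/* checksum covers ID byte onward */
		*state = f->len ? STATE_DATA : STATE_EOF;
		return 0;
	case STATE_DATA:
		f->chksum += c;
		f->buf[f->idx++] = c;
		if (f->idx == f->len)
			*state = STATE_EOF;
		return 0;
	case STATE_EOF:
		*state = STATE_SOF;
		return c == 0x03 || c == f->chksum;	/* CHAR_EOF or checksum */
	}
	return 0;
}
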
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index d13143b68b3e..6c4fb8436957 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -102,7 +102,7 @@ static void htcpen_close(struct input_dev *dev)
synchronize_irq(HTCPEN_IRQ);
}
-static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
+static int htcpen_isa_probe(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev;
int err = -EBUSY;
@@ -174,7 +174,7 @@ static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id)
+static int htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
@@ -210,7 +210,7 @@ static int htcpen_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver htcpen_isa_driver = {
.probe = htcpen_isa_probe,
- .remove = __devexit_p(htcpen_isa_remove),
+ .remove = htcpen_isa_remove,
#ifdef CONFIG_PM
.suspend = htcpen_isa_suspend,
.resume = htcpen_isa_resume,
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4ac69760ec08..1418bdda61bb 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -180,7 +180,7 @@ static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
};
-static int __devinit ili210x_i2c_probe(struct i2c_client *client,
+static int ili210x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
@@ -298,7 +298,7 @@ err_free_mem:
return error;
}
-static int __devexit ili210x_i2c_remove(struct i2c_client *client)
+static int ili210x_i2c_remove(struct i2c_client *client)
{
struct ili210x *priv = i2c_get_clientdata(client);
@@ -350,7 +350,7 @@ static struct i2c_driver ili210x_ts_driver = {
},
.id_table = ili210x_i2c_id,
.probe = ili210x_i2c_probe,
- .remove = __devexit_p(ili210x_i2c_remove),
+ .remove = ili210x_i2c_remove,
};
module_i2c_driver(ili210x_ts_driver);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index cf299377fc49..465db5dba8b4 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -427,7 +427,7 @@ out:
}
/* Utility to read PMIC ID */
-static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
+static int mrstouch_read_pmic_id(uint *vendor, uint *rev)
{
int err;
u8 r;
@@ -446,7 +446,7 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
* Parse ADC channels to find end of the channel configured by other ADC user
* NEC and MAXIM requires 4 channels and FreeScale needs 18 channels
*/
-static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
+static int mrstouch_chan_parse(struct mrstouch_dev *tsdev)
{
int found = 0;
int err, i;
@@ -478,7 +478,7 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
/*
* Writes touch screen channels to ADC address selection registers
*/
-static int __devinit mrstouch_ts_chan_set(uint offset)
+static int mrstouch_ts_chan_set(uint offset)
{
u16 chan;
@@ -494,7 +494,7 @@ static int __devinit mrstouch_ts_chan_set(uint offset)
}
/* Initialize ADC */
-static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
+static int mrstouch_adc_init(struct mrstouch_dev *tsdev)
{
int err, start;
u8 ra, rm;
@@ -568,7 +568,7 @@ static int __devinit mrstouch_adc_init(struct mrstouch_dev *tsdev)
/* Probe function for touch screen driver */
-static int __devinit mrstouch_probe(struct platform_device *pdev)
+static int mrstouch_probe(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev;
struct input_dev *input;
@@ -643,7 +643,7 @@ err_free_mem:
return err;
}
-static int __devexit mrstouch_remove(struct platform_device *pdev)
+static int mrstouch_remove(struct platform_device *pdev)
{
struct mrstouch_dev *tsdev = platform_get_drvdata(pdev);
@@ -662,7 +662,7 @@ static struct platform_driver mrstouch_driver = {
.owner = THIS_MODULE,
},
.probe = mrstouch_probe,
- .remove = __devexit_p(mrstouch_remove),
+ .remove = mrstouch_remove,
};
module_platform_driver(mrstouch_driver);
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 7f03d1bd916e..282d7c7ad2fc 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -99,7 +99,7 @@ static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit jornada720_ts_probe(struct platform_device *pdev)
+static int jornada720_ts_probe(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts;
struct input_dev *input_dev;
@@ -151,7 +151,7 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
return error;
}
-static int __devexit jornada720_ts_remove(struct platform_device *pdev)
+static int jornada720_ts_remove(struct platform_device *pdev)
{
struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
@@ -168,7 +168,7 @@ MODULE_ALIAS("platform:jornada_ts");
static struct platform_driver jornada720_ts_driver = {
.probe = jornada720_ts_probe,
- .remove = __devexit_p(jornada720_ts_remove),
+ .remove = jornada720_ts_remove,
.driver = {
.name = "jornada_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 4c2b8ed3bf16..9101ee529c92 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -203,7 +203,7 @@ static void lpc32xx_ts_close(struct input_dev *dev)
lpc32xx_stop_tsc(tsc);
}
-static int __devinit lpc32xx_ts_probe(struct platform_device *pdev)
+static int lpc32xx_ts_probe(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc;
struct input_dev *input;
@@ -309,7 +309,7 @@ err_free_mem:
return error;
}
-static int __devexit lpc32xx_ts_remove(struct platform_device *pdev)
+static int lpc32xx_ts_remove(struct platform_device *pdev)
{
struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
struct resource *res;
@@ -394,7 +394,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
- .remove = __devexit_p(lpc32xx_ts_remove),
+ .remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4eab50b856d7..00bc6caa27f5 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -156,7 +156,7 @@ out:
return IRQ_HANDLED;
}
-static void __devinit max11801_ts_phy_init(struct max11801_data *data)
+static void max11801_ts_phy_init(struct max11801_data *data)
{
struct i2c_client *client = data->client;
@@ -174,7 +174,7 @@ static void __devinit max11801_ts_phy_init(struct max11801_data *data)
max11801_write_reg(client, OP_MODE_CONF_REG, 0x36);
}
-static int __devinit max11801_ts_probe(struct i2c_client *client,
+static int max11801_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max11801_data *data;
@@ -228,7 +228,7 @@ err_free_mem:
return error;
}
-static __devexit int max11801_ts_remove(struct i2c_client *client)
+static int max11801_ts_remove(struct i2c_client *client)
{
struct max11801_data *data = i2c_get_clientdata(client);
@@ -252,7 +252,7 @@ static struct i2c_driver max11801_ts_driver = {
},
.id_table = max11801_ts_id,
.probe = max11801_ts_probe,
- .remove = __devexit_p(max11801_ts_remove),
+ .remove = max11801_ts_remove,
};
module_i2c_driver(max11801_ts_driver);
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 48dc5b0d26f1..02103b6abb39 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -229,7 +229,7 @@ err_free_mem:
return ret;
}
-static int __devexit mc13783_ts_remove(struct platform_device *pdev)
+static int mc13783_ts_remove(struct platform_device *pdev)
{
struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
@@ -243,7 +243,7 @@ static int __devexit mc13783_ts_remove(struct platform_device *pdev)
}
static struct platform_driver mc13783_ts_driver = {
- .remove = __devexit_p(mc13783_ts_remove),
+ .remove = mc13783_ts_remove,
.driver = {
.owner = THIS_MODULE,
.name = MC13783_TS_NAME,
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index b528511861ce..f9f4e0c56eda 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -187,7 +187,7 @@ static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
OP_MODE_ACTIVE | REPORT_RATE_80);
}
-static int __devinit mcs5000_ts_probe(struct i2c_client *client,
+static int mcs5000_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct mcs5000_ts_data *data;
@@ -249,7 +249,7 @@ err_free_mem:
return ret;
}
-static int __devexit mcs5000_ts_remove(struct i2c_client *client)
+static int mcs5000_ts_remove(struct i2c_client *client)
{
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
static struct i2c_driver mcs5000_ts_driver = {
.probe = mcs5000_ts_probe,
- .remove = __devexit_p(mcs5000_ts_remove),
+ .remove = mcs5000_ts_remove,
.driver = {
.name = "mcs5000_ts",
#ifdef CONFIG_PM
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 560cf09d1c5a..98841d8aa635 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i2c/mms114.h>
#include <linux/input/mt.h>
@@ -360,14 +361,63 @@ static void mms114_input_close(struct input_dev *dev)
mms114_stop(data);
}
-static int __devinit mms114_probe(struct i2c_client *client,
+#ifdef CONFIG_OF
+static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ struct mms114_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate platform data\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
+ dev_err(dev, "failed to get x-size property\n");
+ return NULL;
+ };
+
+ if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
+ dev_err(dev, "failed to get y-size property\n");
+ return NULL;
+ };
+
+ of_property_read_u32(np, "contact-threshold",
+ &pdata->contact_threshold);
+ of_property_read_u32(np, "moving-threshold",
+ &pdata->moving_threshold);
+
+ if (of_find_property(np, "x-invert", NULL))
+ pdata->x_invert = true;
+ if (of_find_property(np, "y-invert", NULL))
+ pdata->y_invert = true;
+
+ return pdata;
+}
+#else
+static inline struct mms114_platform_data *mms114_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static int mms114_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct mms114_platform_data *pdata;
struct mms114_data *data;
struct input_dev *input_dev;
int error;
- if (!client->dev.platform_data) {
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata)
+ pdata = mms114_parse_dt(&client->dev);
+
+ if (!pdata) {
dev_err(&client->dev, "Need platform data\n");
return -EINVAL;
}
@@ -389,7 +439,7 @@ static int __devinit mms114_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- data->pdata = client->dev.platform_data;
+ data->pdata = pdata;
	input_dev->name = "MELFAS MMS114 Touchscreen";
input_dev->id.bustype = BUS_I2C;
@@ -458,7 +508,7 @@ err_free_mem:
return error;
}
-static int __devexit mms114_remove(struct i2c_client *client)
+static int mms114_remove(struct i2c_client *client)
{
struct mms114_data *data = i2c_get_clientdata(client);
@@ -525,14 +575,22 @@ static const struct i2c_device_id mms114_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mms114_id);
+#ifdef CONFIG_OF
+static struct of_device_id mms114_dt_match[] = {
+ { .compatible = "melfas,mms114" },
+ { }
+};
+#endif
+
static struct i2c_driver mms114_driver = {
.driver = {
.name = "mms114",
.owner = THIS_MODULE,
.pm = &mms114_pm_ops,
+ .of_match_table = of_match_ptr(mms114_dt_match),
},
.probe = mms114_probe,
- .remove = __devexit_p(mms114_remove),
+ .remove = mms114_remove,
.id_table = mms114_id,
};
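
The mms114 changes make board-file platform data optional by falling back to a devicetree parse. The probe-side pattern, reduced to its essentials (example_probe is a hypothetical name; the platdata and parse calls are the ones used in the hunks above):

static int example_probe(struct i2c_client *client)
{
	const struct mms114_platform_data *pdata;

	pdata = dev_get_platdata(&client->dev);		/* board file wins */
	if (!pdata)
		pdata = mms114_parse_dt(&client->dev);	/* else parse the DT node */
	if (!pdata)
		return -EINVAL;				/* neither source available */

	/* ... use pdata->x_size, pdata->y_size, thresholds, invert flags ... */
	return 0;
}
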
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index f57aeb80f7e3..f22e04dd4e16 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -137,7 +137,7 @@ static void pcap_ts_close(struct input_dev *dev)
pcap_ts->read_state << PCAP_ADC_TS_M_SHIFT);
}
-static int __devinit pcap_ts_probe(struct platform_device *pdev)
+static int pcap_ts_probe(struct platform_device *pdev)
{
struct input_dev *input_dev;
struct pcap_ts *pcap_ts;
@@ -202,7 +202,7 @@ fail:
return err;
}
-static int __devexit pcap_ts_remove(struct platform_device *pdev)
+static int pcap_ts_remove(struct platform_device *pdev)
{
struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
@@ -245,7 +245,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = {
static struct platform_driver pcap_ts_driver = {
.probe = pcap_ts_probe,
- .remove = __devexit_p(pcap_ts_remove),
+ .remove = pcap_ts_remove,
.driver = {
.name = "pcap-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 953b4c105cad..6cc6b36663ff 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -125,7 +125,7 @@ static int pixcir_i2c_ts_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
-static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
@@ -189,7 +189,7 @@ err_free_mem:
return error;
}
-static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+static int pixcir_i2c_ts_remove(struct i2c_client *client)
{
struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
@@ -218,7 +218,7 @@ static struct i2c_driver pixcir_i2c_ts_driver = {
.pm = &pixcir_dev_pm_ops,
},
.probe = pixcir_i2c_ts_probe,
- .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .remove = pixcir_i2c_ts_remove,
.id_table = pixcir_i2c_ts_id,
};
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 549fa29548f8..b061af2c8376 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -238,7 +238,7 @@ static void s3c24xx_ts_select(struct s3c_adc_client *client, unsigned select)
* Initialise, find and allocate any resources we need to run and then
* register with the ADC and input systems.
*/
-static int __devinit s3c2410ts_probe(struct platform_device *pdev)
+static int s3c2410ts_probe(struct platform_device *pdev)
{
struct s3c2410_ts_mach_info *info;
struct device *dev = &pdev->dev;
@@ -365,7 +365,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
*
* Free up our state ready to be removed.
*/
-static int __devexit s3c2410ts_remove(struct platform_device *pdev)
+static int s3c2410ts_remove(struct platform_device *pdev)
{
free_irq(ts.irq_tc, ts.input);
del_timer_sync(&touch_timer);
@@ -430,7 +430,7 @@ static struct platform_driver s3c_ts_driver = {
},
.id_table = s3cts_driver_ids,
.probe = s3c2410ts_probe,
- .remove = __devexit_p(s3c2410ts_remove),
+ .remove = s3c2410ts_remove,
};
module_platform_driver(s3c_ts_driver);
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6cb68a1981bf..d9d05e222428 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -139,7 +139,7 @@ end:
return IRQ_HANDLED;
}
-static int __devinit st1232_ts_probe(struct i2c_client *client,
+static int st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
@@ -206,7 +206,7 @@ err_free_mem:
return error;
}
-static int __devexit st1232_ts_remove(struct i2c_client *client)
+static int st1232_ts_remove(struct i2c_client *client)
{
struct st1232_ts_data *ts = i2c_get_clientdata(client);
@@ -255,7 +255,7 @@ static const struct i2c_device_id st1232_ts_id[] = {
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
#ifdef CONFIG_OF
-static const struct of_device_id st1232_ts_dt_ids[] __devinitconst = {
+static const struct of_device_id st1232_ts_dt_ids[] = {
{ .compatible = "sitronix,st1232", },
{ }
};
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
- .remove = __devexit_p(st1232_ts_remove),
+ .remove = st1232_ts_remove,
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 692b685720ce..84d884b4ec3e 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -1,4 +1,5 @@
-/* STMicroelectronics STMPE811 Touchscreen Driver
+/*
+ * STMicroelectronics STMPE811 Touchscreen Driver
*
* (C) 2010 Luotao Fu <l.fu@pengutronix.de>
* All rights reserved.
@@ -16,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -166,7 +168,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit stmpe_init_hw(struct stmpe_touch *ts)
+static int stmpe_init_hw(struct stmpe_touch *ts)
{
int ret;
u8 adc_ctrl1, adc_ctrl1_mask, tsc_cfg, tsc_cfg_mask;
@@ -261,41 +263,18 @@ static void stmpe_ts_close(struct input_dev *dev)
STMPE_TSC_CTRL_TSC_EN, 0);
}
-static int __devinit stmpe_input_probe(struct platform_device *pdev)
+static void stmpe_ts_get_platform_info(struct platform_device *pdev,
+ struct stmpe_touch *ts)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- struct stmpe_platform_data *pdata = stmpe->pdata;
- struct stmpe_touch *ts;
- struct input_dev *idev;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_ts_platform_data *ts_pdata = NULL;
- int ret;
- int ts_irq;
-
- ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
- if (ts_irq < 0)
- return ts_irq;
-
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- if (!ts) {
- ret = -ENOMEM;
- goto err_out;
- }
- idev = input_allocate_device();
- if (!idev) {
- ret = -ENOMEM;
- goto err_free_ts;
- }
-
- platform_set_drvdata(pdev, ts);
ts->stmpe = stmpe;
- ts->idev = idev;
- ts->dev = &pdev->dev;
- if (pdata)
- ts_pdata = pdata->ts;
+ if (stmpe->pdata && stmpe->pdata->ts) {
+ ts_pdata = stmpe->pdata->ts;
- if (ts_pdata) {
ts->sample_time = ts_pdata->sample_time;
ts->mod_12b = ts_pdata->mod_12b;
ts->ref_sel = ts_pdata->ref_sel;
@@ -305,22 +284,71 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
ts->settling = ts_pdata->settling;
ts->fraction_z = ts_pdata->fraction_z;
ts->i_drive = ts_pdata->i_drive;
+ } else if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "st,sample-time", &val))
+ ts->sample_time = val;
+ if (!of_property_read_u32(np, "st,mod-12b", &val))
+ ts->mod_12b = val;
+ if (!of_property_read_u32(np, "st,ref-sel", &val))
+ ts->ref_sel = val;
+ if (!of_property_read_u32(np, "st,adc-freq", &val))
+ ts->adc_freq = val;
+ if (!of_property_read_u32(np, "st,ave-ctrl", &val))
+ ts->ave_ctrl = val;
+ if (!of_property_read_u32(np, "st,touch-det-delay", &val))
+ ts->touch_det_delay = val;
+ if (!of_property_read_u32(np, "st,settling", &val))
+ ts->settling = val;
+ if (!of_property_read_u32(np, "st,fraction-z", &val))
+ ts->fraction_z = val;
+ if (!of_property_read_u32(np, "st,i-drive", &val))
+ ts->i_drive = val;
}
+}
+
+static int stmpe_input_probe(struct platform_device *pdev)
+{
+ struct stmpe_touch *ts;
+ struct input_dev *idev;
+ int error;
+ int ts_irq;
+
+ ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
+ if (ts_irq < 0)
+ return ts_irq;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(&pdev->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ts);
+ ts->idev = idev;
+ ts->dev = &pdev->dev;
+
+ stmpe_ts_get_platform_info(pdev, ts);
INIT_DELAYED_WORK(&ts->work, stmpe_work);
- ret = request_threaded_irq(ts_irq, NULL, stmpe_ts_handler,
- IRQF_ONESHOT, STMPE_TS_NAME, ts);
- if (ret) {
+ error = devm_request_threaded_irq(&pdev->dev, ts_irq,
+ NULL, stmpe_ts_handler,
+ IRQF_ONESHOT, STMPE_TS_NAME, ts);
+ if (error) {
dev_err(&pdev->dev, "Failed to request IRQ %d\n", ts_irq);
- goto err_free_input;
+ return error;
}
- ret = stmpe_init_hw(ts);
- if (ret)
- goto err_free_irq;
+ error = stmpe_init_hw(ts);
+ if (error)
+ return error;
idev->name = STMPE_TS_NAME;
+ idev->phys = STMPE_TS_NAME"/input0";
idev->id.bustype = BUS_I2C;
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
@@ -334,40 +362,21 @@ static int __devinit stmpe_input_probe(struct platform_device *pdev)
input_set_abs_params(idev, ABS_Y, 0, XY_MASK, 0, 0);
input_set_abs_params(idev, ABS_PRESSURE, 0x0, 0xff, 0, 0);
- ret = input_register_device(idev);
- if (ret) {
+ error = input_register_device(idev);
+ if (error) {
dev_err(&pdev->dev, "Could not register input device\n");
- goto err_free_irq;
+ return error;
}
- return ret;
-
-err_free_irq:
- free_irq(ts_irq, ts);
-err_free_input:
- input_free_device(idev);
- platform_set_drvdata(pdev, NULL);
-err_free_ts:
- kfree(ts);
-err_out:
- return ret;
+ return 0;
}
-static int __devexit stmpe_ts_remove(struct platform_device *pdev)
+static int stmpe_ts_remove(struct platform_device *pdev)
{
struct stmpe_touch *ts = platform_get_drvdata(pdev);
- unsigned int ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
- free_irq(ts_irq, ts);
-
- platform_set_drvdata(pdev, NULL);
-
- input_unregister_device(ts->idev);
-
- kfree(ts);
-
return 0;
}
@@ -377,7 +386,7 @@ static struct platform_driver stmpe_ts_driver = {
.owner = THIS_MODULE,
},
.probe = stmpe_input_probe,
- .remove = __devexit_p(stmpe_ts_remove),
+ .remove = stmpe_ts_remove,
};
module_platform_driver(stmpe_ts_driver);
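
The stmpe-ts rework relies on managed (devm_*) resources, which is why the error unwinding labels and most of stmpe_ts_remove() disappear. The shape of such a probe, as a minimal sketch with hardware setup omitted (example_probe is a hypothetical name):

static int example_probe(struct platform_device *pdev)
{
	struct stmpe_touch *ts;
	struct input_dev *idev;
	int ts_irq, error;

	ts_irq = platform_get_irq_byname(pdev, "FIFO_TH");
	if (ts_irq < 0)
		return ts_irq;

	ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
	idev = devm_input_allocate_device(&pdev->dev);
	if (!ts || !idev)
		return -ENOMEM;		/* anything already allocated is freed automatically */

	error = devm_request_threaded_irq(&pdev->dev, ts_irq,
					  NULL, stmpe_ts_handler, IRQF_ONESHOT,
					  STMPE_TS_NAME, ts);
	if (error)
		return error;		/* no explicit free_irq() on this path */

	/* a devm-allocated input device is also unregistered automatically */
	return input_register_device(idev);
}
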
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
new file mode 100644
index 000000000000..51e7b87827a4
--- /dev/null
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -0,0 +1,398 @@
+/*
+ * TI Touch Screen driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+
+#define ADCFSM_STEPID 0x10
+#define SEQ_SETTLE 275
+#define MAX_12BIT ((1 << 12) - 1)
+
+struct titsc {
+ struct input_dev *input;
+ struct ti_tscadc_dev *mfd_tscadc;
+ unsigned int irq;
+ unsigned int wires;
+ unsigned int x_plate_resistance;
+ bool pen_down;
+ int steps_to_configure;
+};
+
+static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
+{
+ return readl(ts->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_writel(struct titsc *tsc, unsigned int reg,
+ unsigned int val)
+{
+ writel(val, tsc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_step_config(struct titsc *ts_dev)
+{
+ unsigned int config;
+ int i, total_steps;
+
+ /* Configure the Step registers */
+ total_steps = 2 * ts_dev->steps_to_configure;
+
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_XPP;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ case 5:
+ config |= STEPCONFIG_YNN |
+ STEPCONFIG_INP_AN4 | STEPCONFIG_XNN |
+ STEPCONFIG_YPP;
+ break;
+ case 8:
+ config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+ break;
+ }
+
+ for (i = 1; i <= ts_dev->steps_to_configure; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YNN |
+ STEPCONFIG_INM_ADCREFM | STEPCONFIG_FIFO1;
+ switch (ts_dev->wires) {
+ case 4:
+ config |= STEPCONFIG_YPP;
+ break;
+ case 5:
+ config |= STEPCONFIG_XPP | STEPCONFIG_INP_AN4 |
+ STEPCONFIG_XNP | STEPCONFIG_YPN;
+ break;
+ case 8:
+ config |= STEPCONFIG_YPP;
+ break;
+ }
+
+ for (i = (ts_dev->steps_to_configure + 1); i <= total_steps; i++) {
+ titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ }
+
+ config = 0;
+ /* Charge step configuration */
+ config = STEPCONFIG_XPP | STEPCONFIG_YNN |
+ STEPCHARGE_RFP_XPUL | STEPCHARGE_RFM_XNUR |
+ STEPCHARGE_INM_AN1 | STEPCHARGE_INP_AN1;
+
+ titsc_writel(ts_dev, REG_CHARGECONFIG, config);
+ titsc_writel(ts_dev, REG_CHARGEDELAY, CHARGEDLY_OPENDLY);
+
+ config = 0;
+ /* Configure to calculate pressure */
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | STEPCONFIG_YPP |
+ STEPCONFIG_XNN | STEPCONFIG_INM_ADCREFM;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 1), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 1),
+ STEPCONFIG_OPENDLY);
+
+ config |= STEPCONFIG_INP_AN3 | STEPCONFIG_FIFO1;
+ titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 2), config);
+ titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 2),
+ STEPCONFIG_OPENDLY);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+}
+
+static void titsc_read_coordinates(struct titsc *ts_dev,
+ unsigned int *x, unsigned int *y)
+{
+ unsigned int fifocount = titsc_readl(ts_dev, REG_FIFO0CNT);
+ unsigned int prev_val_x = ~0, prev_val_y = ~0;
+ unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
+ unsigned int read, diff;
+ unsigned int i, channel;
+
+	/*
+	 * A delta filter removes large jumps in the values sampled from
+	 * the ADC.  For each new reading the filter takes the absolute
+	 * difference from the previous reading; the reading with the
+	 * smallest such difference seen so far in this burst is kept and
+	 * reported to the input subsystem as the coordinate.
+	 */
+ for (i = 0; i < fifocount - 1; i++) {
+ read = titsc_readl(ts_dev, REG_FIFO0);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= 0) && (channel < ts_dev->steps_to_configure)) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_x);
+ if (diff < prev_diff_x) {
+ prev_diff_x = diff;
+ *x = read;
+ }
+ prev_val_x = read;
+ }
+
+ read = titsc_readl(ts_dev, REG_FIFO1);
+ channel = read & 0xf0000;
+ channel = channel >> 0x10;
+ if ((channel >= ts_dev->steps_to_configure) &&
+ (channel < (2 * ts_dev->steps_to_configure - 1))) {
+ read &= 0xfff;
+ diff = abs(read - prev_val_y);
+ if (diff < prev_diff_y) {
+ prev_diff_y = diff;
+ *y = read;
+ }
+ prev_val_y = read;
+ }
+ }
+}
+
+static irqreturn_t titsc_irq(int irq, void *dev)
+{
+ struct titsc *ts_dev = dev;
+ struct input_dev *input_dev = ts_dev->input;
+ unsigned int status, irqclr = 0;
+ unsigned int x = 0, y = 0;
+ unsigned int z1, z2, z;
+ unsigned int fsm;
+ unsigned int fifo1count, fifo0count;
+ int i;
+
+ status = titsc_readl(ts_dev, REG_IRQSTATUS);
+ if (status & IRQENB_FIFO0THRES) {
+ titsc_read_coordinates(ts_dev, &x, &y);
+
+ z1 = titsc_readl(ts_dev, REG_FIFO0) & 0xfff;
+ z2 = titsc_readl(ts_dev, REG_FIFO1) & 0xfff;
+
+ fifo1count = titsc_readl(ts_dev, REG_FIFO1CNT);
+ for (i = 0; i < fifo1count; i++)
+ titsc_readl(ts_dev, REG_FIFO1);
+
+ fifo0count = titsc_readl(ts_dev, REG_FIFO0CNT);
+ for (i = 0; i < fifo0count; i++)
+ titsc_readl(ts_dev, REG_FIFO0);
+
+ if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
+ /*
+			 * Calculate pressure using the formula
+			 * Resistance(touch) = x-plate resistance *
+			 * (x position / 4096) * ((z2 / z1) - 1)
+ */
+ z = z2 - z1;
+ z *= x;
+ z *= ts_dev->x_plate_resistance;
+ z /= z1;
+ z = (z + 2047) >> 12;
+
+ if (z <= MAX_12BIT) {
+ input_report_abs(input_dev, ABS_X, x);
+ input_report_abs(input_dev, ABS_Y, y);
+ input_report_abs(input_dev, ABS_PRESSURE, z);
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_sync(input_dev);
+ }
+ }
+ irqclr |= IRQENB_FIFO0THRES;
+ }
+
+ /*
+	 * Give the sequencer time to settle so that its
+	 * state is read back correctly.
+ */
+ udelay(SEQ_SETTLE);
+
+ status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
+ if (status & IRQENB_PENUP) {
+ /* Pen up event */
+ fsm = titsc_readl(ts_dev, REG_ADCFSM);
+ if (fsm == ADCFSM_STEPID) {
+ ts_dev->pen_down = false;
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ } else {
+ ts_dev->pen_down = true;
+ }
+ irqclr |= IRQENB_PENUP;
+ }
+
+ titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
+
+ titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The functions for inserting/removing driver as a module.
+ */
+
+static int titsc_probe(struct platform_device *pdev)
+{
+ struct titsc *ts_dev;
+ struct input_dev *input_dev;
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct mfd_tscadc_board *pdata;
+ int err;
+
+ pdata = tscadc_dev->dev->platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ts_dev || !input_dev) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tscadc_dev->tsc = ts_dev;
+ ts_dev->mfd_tscadc = tscadc_dev;
+ ts_dev->input = input_dev;
+ ts_dev->irq = tscadc_dev->irq;
+ ts_dev->wires = pdata->tsc_init->wires;
+ ts_dev->x_plate_resistance = pdata->tsc_init->x_plate_resistance;
+ ts_dev->steps_to_configure = pdata->tsc_init->steps_to_configure;
+
+ err = request_irq(ts_dev->irq, titsc_irq,
+ 0, pdev->dev.driver->name, ts_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_free_mem;
+ }
+
+ titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR, ts_dev->steps_to_configure);
+
+ input_dev->name = "ti-tsc";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+
+ /* register to the input system */
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, ts_dev);
+ return 0;
+
+err_free_irq:
+ free_irq(ts_dev->irq, ts_dev);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(ts_dev);
+ return err;
+}
+
+static int titsc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ free_irq(ts_dev->irq, ts_dev);
+
+ input_unregister_device(ts_dev->input);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ts_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int titsc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+ unsigned int idle;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ idle = titsc_readl(ts_dev, REG_IRQENABLE);
+ titsc_writel(ts_dev, REG_IRQENABLE,
+ (idle | IRQENB_HW_PEN));
+ titsc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
+ }
+ return 0;
+}
+
+static int titsc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+ struct titsc *ts_dev = tscadc_dev->tsc;
+
+ if (device_may_wakeup(tscadc_dev->dev)) {
+ titsc_writel(ts_dev, REG_IRQWAKEUP,
+ 0x00);
+ titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
+ }
+ titsc_step_config(ts_dev);
+ titsc_writel(ts_dev, REG_FIFO0THR,
+ ts_dev->steps_to_configure);
+ return 0;
+}
+
+static const struct dev_pm_ops titsc_pm_ops = {
+ .suspend = titsc_suspend,
+ .resume = titsc_resume,
+};
+#define TITSC_PM_OPS (&titsc_pm_ops)
+#else
+#define TITSC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tsc_driver = {
+ .probe = titsc_probe,
+ .remove = titsc_remove,
+ .driver = {
+ .name = "tsc",
+ .owner = THIS_MODULE,
+ .pm = TITSC_PM_OPS,
+ },
+};
+module_platform_driver(ti_tsc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ti_tscadc.c b/drivers/input/touchscreen/ti_tscadc.c
deleted file mode 100644
index d229c741d544..000000000000
--- a/drivers/input/touchscreen/ti_tscadc.c
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * TI Touch Screen driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/input/ti_tscadc.h>
-#include <linux/delay.h>
-
-#define REG_IRQEOI 0x020
-#define REG_RAWIRQSTATUS 0x024
-#define REG_IRQSTATUS 0x028
-#define REG_IRQENABLE 0x02C
-#define REG_IRQWAKEUP 0x034
-#define REG_CTRL 0x040
-#define REG_ADCFSM 0x044
-#define REG_CLKDIV 0x04C
-#define REG_SE 0x054
-#define REG_IDLECONFIG 0x058
-#define REG_CHARGECONFIG 0x05C
-#define REG_CHARGEDELAY 0x060
-#define REG_STEPCONFIG(n) (0x64 + ((n - 1) * 8))
-#define REG_STEPDELAY(n) (0x68 + ((n - 1) * 8))
-#define REG_STEPCONFIG13 0x0C4
-#define REG_STEPDELAY13 0x0C8
-#define REG_STEPCONFIG14 0x0CC
-#define REG_STEPDELAY14 0x0D0
-#define REG_FIFO0CNT 0xE4
-#define REG_FIFO1THR 0xF4
-#define REG_FIFO0 0x100
-#define REG_FIFO1 0x200
-
-/* Register Bitfields */
-#define IRQWKUP_ENB BIT(0)
-#define STPENB_STEPENB 0x7FFF
-#define IRQENB_FIFO1THRES BIT(5)
-#define IRQENB_PENUP BIT(9)
-#define STEPCONFIG_MODE_HWSYNC 0x2
-#define STEPCONFIG_SAMPLES_AVG (1 << 4)
-#define STEPCONFIG_XPP (1 << 5)
-#define STEPCONFIG_XNN (1 << 6)
-#define STEPCONFIG_YPP (1 << 7)
-#define STEPCONFIG_YNN (1 << 8)
-#define STEPCONFIG_XNP (1 << 9)
-#define STEPCONFIG_YPN (1 << 10)
-#define STEPCONFIG_INM (1 << 18)
-#define STEPCONFIG_INP (1 << 20)
-#define STEPCONFIG_INP_5 (1 << 21)
-#define STEPCONFIG_FIFO1 (1 << 26)
-#define STEPCONFIG_OPENDLY 0xff
-#define STEPCONFIG_Z1 (3 << 19)
-#define STEPIDLE_INP (1 << 22)
-#define STEPCHARGE_RFP (1 << 12)
-#define STEPCHARGE_INM (1 << 15)
-#define STEPCHARGE_INP (1 << 19)
-#define STEPCHARGE_RFM (1 << 23)
-#define STEPCHARGE_DELAY 0x1
-#define CNTRLREG_TSCSSENB (1 << 0)
-#define CNTRLREG_STEPID (1 << 1)
-#define CNTRLREG_STEPCONFIGWRT (1 << 2)
-#define CNTRLREG_4WIRE (1 << 5)
-#define CNTRLREG_5WIRE (1 << 6)
-#define CNTRLREG_8WIRE (3 << 5)
-#define CNTRLREG_TSCENB (1 << 7)
-#define ADCFSM_STEPID 0x10
-
-#define SEQ_SETTLE 275
-#define ADC_CLK 3000000
-#define MAX_12BIT ((1 << 12) - 1)
-#define TSCADC_DELTA_X 15
-#define TSCADC_DELTA_Y 15
-
-struct tscadc {
- struct input_dev *input;
- struct clk *tsc_ick;
- void __iomem *tsc_base;
- unsigned int irq;
- unsigned int wires;
- unsigned int x_plate_resistance;
- bool pen_down;
-};
-
-static unsigned int tscadc_readl(struct tscadc *ts, unsigned int reg)
-{
- return readl(ts->tsc_base + reg);
-}
-
-static void tscadc_writel(struct tscadc *tsc, unsigned int reg,
- unsigned int val)
-{
- writel(val, tsc->tsc_base + reg);
-}
-
-static void tscadc_step_config(struct tscadc *ts_dev)
-{
- unsigned int config;
- int i;
-
- /* Configure the Step registers */
-
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_XPP;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- case 5:
- config |= STEPCONFIG_YNN |
- STEPCONFIG_INP_5 | STEPCONFIG_XNN |
- STEPCONFIG_YPP;
- break;
- case 8:
- config |= STEPCONFIG_INP | STEPCONFIG_XNN;
- break;
- }
-
- for (i = 1; i < 7; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YNN |
- STEPCONFIG_INM | STEPCONFIG_FIFO1;
- switch (ts_dev->wires) {
- case 4:
- config |= STEPCONFIG_YPP;
- break;
- case 5:
- config |= STEPCONFIG_XPP | STEPCONFIG_INP_5 |
- STEPCONFIG_XNP | STEPCONFIG_YPN;
- break;
- case 8:
- config |= STEPCONFIG_YPP;
- break;
- }
-
- for (i = 7; i < 13; i++) {
- tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
- tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
- }
-
- config = 0;
- /* Charge step configuration */
- config = STEPCONFIG_XPP | STEPCONFIG_YNN |
- STEPCHARGE_RFP | STEPCHARGE_RFM |
- STEPCHARGE_INM | STEPCHARGE_INP;
-
- tscadc_writel(ts_dev, REG_CHARGECONFIG, config);
- tscadc_writel(ts_dev, REG_CHARGEDELAY, STEPCHARGE_DELAY);
-
- config = 0;
- /* Configure to calculate pressure */
- config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YPP |
- STEPCONFIG_XNN | STEPCONFIG_INM;
- tscadc_writel(ts_dev, REG_STEPCONFIG13, config);
- tscadc_writel(ts_dev, REG_STEPDELAY13, STEPCONFIG_OPENDLY);
-
- config |= STEPCONFIG_Z1 | STEPCONFIG_FIFO1;
- tscadc_writel(ts_dev, REG_STEPCONFIG14, config);
- tscadc_writel(ts_dev, REG_STEPDELAY14, STEPCONFIG_OPENDLY);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
-}
-
-static void tscadc_idle_config(struct tscadc *ts_config)
-{
- unsigned int idleconfig;
-
- idleconfig = STEPCONFIG_YNN |
- STEPCONFIG_INM |
- STEPCONFIG_YPN | STEPIDLE_INP;
- tscadc_writel(ts_config, REG_IDLECONFIG, idleconfig);
-}
-
-static void tscadc_read_coordinates(struct tscadc *ts_dev,
- unsigned int *x, unsigned int *y)
-{
- unsigned int fifocount = tscadc_readl(ts_dev, REG_FIFO0CNT);
- unsigned int prev_val_x = ~0, prev_val_y = ~0;
- unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
- unsigned int read, diff;
- unsigned int i;
-
- /*
- * Delta filter is used to remove large variations in sampled
- * values from ADC. The filter tries to predict where the next
- * coordinate could be. This is done by taking the previous
- * coordinate and subtracting it from the current one. The
- * algorithm then compares this difference with the previous one
- * and, if it is smaller, reports the value to the input subsystem.
- */
- for (i = 0; i < fifocount - 1; i++) {
- read = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- diff = abs(read - prev_val_x);
- if (diff < prev_diff_x) {
- prev_diff_x = diff;
- *x = read;
- }
- prev_val_x = read;
-
- read = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
- diff = abs(read - prev_val_y);
- if (diff < prev_diff_y) {
- prev_diff_y = diff;
- *y = read;
- }
- prev_val_y = read;
- }
-}
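
A self-contained sketch of the delta filter the comment above describes, reduced to one channel and a plain array of samples (helper name and calling convention are illustrative, not taken from the driver):

static unsigned int delta_filter(const unsigned int *samples, int count)
{
	unsigned int best, prev_val, prev_diff = ~0U;
	int i;

	if (count < 1)
		return 0;

	best = prev_val = samples[0];

	for (i = 1; i < count; i++) {
		unsigned int diff = samples[i] > prev_val ?
				samples[i] - prev_val : prev_val - samples[i];

		/* keep the sample with the smallest step from its predecessor */
		if (diff < prev_diff) {
			prev_diff = diff;
			best = samples[i];
		}
		prev_val = samples[i];
	}

	return best;
}
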
-
-static irqreturn_t tscadc_irq(int irq, void *dev)
-{
- struct tscadc *ts_dev = dev;
- struct input_dev *input_dev = ts_dev->input;
- unsigned int status, irqclr = 0;
- unsigned int x = 0, y = 0;
- unsigned int z1, z2, z;
- unsigned int fsm;
-
- status = tscadc_readl(ts_dev, REG_IRQSTATUS);
- if (status & IRQENB_FIFO1THRES) {
- tscadc_read_coordinates(ts_dev, &x, &y);
-
- z1 = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
- z2 = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
-
- if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
- /*
- * Calculate pressure using formula
- * Resistance(touch) = x plate resistance *
- * x position / 4096 * ((z2 / z1) - 1)
- */
- z = z2 - z1;
- z *= x;
- z *= ts_dev->x_plate_resistance;
- z /= z1;
- z = (z + 2047) >> 12;
-
- if (z <= MAX_12BIT) {
- input_report_abs(input_dev, ABS_X, x);
- input_report_abs(input_dev, ABS_Y, y);
- input_report_abs(input_dev, ABS_PRESSURE, z);
- input_report_key(input_dev, BTN_TOUCH, 1);
- input_sync(input_dev);
- }
- }
- irqclr |= IRQENB_FIFO1THRES;
- }
-
- /*
- * Wait for the sequencer to settle so that its
- * state can be read back correctly.
- */
- udelay(SEQ_SETTLE);
-
- status = tscadc_readl(ts_dev, REG_RAWIRQSTATUS);
- if (status & IRQENB_PENUP) {
- /* Pen up event */
- fsm = tscadc_readl(ts_dev, REG_ADCFSM);
- if (fsm == ADCFSM_STEPID) {
- ts_dev->pen_down = false;
- input_report_key(input_dev, BTN_TOUCH, 0);
- input_report_abs(input_dev, ABS_PRESSURE, 0);
- input_sync(input_dev);
- } else {
- ts_dev->pen_down = true;
- }
- irqclr |= IRQENB_PENUP;
- }
-
- tscadc_writel(ts_dev, REG_IRQSTATUS, irqclr);
- /* check pending interrupts */
- tscadc_writel(ts_dev, REG_IRQEOI, 0x0);
-
- tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
- return IRQ_HANDLED;
-}
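
The pressure branch above is a fixed-point evaluation of the formula in its comment; pulled out into a sketch helper (not part of the driver), with one worked case: x = 2048, z1 = 400, z2 = 800 and a 200 ohm X-plate give (400 * 2048 * 200 / 400 + 2047) >> 12 = 100, comfortably below MAX_12BIT, so that sample would be reported.

static unsigned int tsc_pressure(unsigned int x, unsigned int z1,
				 unsigned int z2,
				 unsigned int x_plate_resistance)
{
	unsigned int z;

	if (!z1)
		return 0;

	/* Rtouch = Rx-plate * (x / 4096) * ((z2 / z1) - 1), kept scaled by 4096 */
	z = z2 - z1;
	z *= x;
	z *= x_plate_resistance;
	z /= z1;

	return (z + 2047) >> 12;	/* round and drop the 4096 scaling */
}
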
-
-/*
- * The functions for inserting/removing driver as a module.
- */
-
-static int __devinit tscadc_probe(struct platform_device *pdev)
-{
- const struct tsc_data *pdata = pdev->dev.platform_data;
- struct resource *res;
- struct tscadc *ts_dev;
- struct input_dev *input_dev;
- struct clk *clk;
- int err;
- int clk_value, ctrl, irq;
-
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform data.\n");
- return -EINVAL;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no memory resource defined.\n");
- return -EINVAL;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq ID is specified.\n");
- return -EINVAL;
- }
-
- /* Allocate memory for device */
- ts_dev = kzalloc(sizeof(struct tscadc), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts_dev || !input_dev) {
- dev_err(&pdev->dev, "failed to allocate memory.\n");
- err = -ENOMEM;
- goto err_free_mem;
- }
-
- ts_dev->input = input_dev;
- ts_dev->irq = irq;
- ts_dev->wires = pdata->wires;
- ts_dev->x_plate_resistance = pdata->x_plate_resistance;
-
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to reserve registers.\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- ts_dev->tsc_base = ioremap(res->start, resource_size(res));
- if (!ts_dev->tsc_base) {
- dev_err(&pdev->dev, "failed to map registers.\n");
- err = -ENOMEM;
- goto err_release_mem_region;
- }
-
- err = request_irq(ts_dev->irq, tscadc_irq,
- 0, pdev->dev.driver->name, ts_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to allocate irq.\n");
- goto err_unmap_regs;
- }
-
- ts_dev->tsc_ick = clk_get(&pdev->dev, "adc_tsc_ick");
- if (IS_ERR(ts_dev->tsc_ick)) {
- dev_err(&pdev->dev, "failed to get TSC ick\n");
- goto err_free_irq;
- }
- clk_enable(ts_dev->tsc_ick);
-
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get TSC fck\n");
- err = PTR_ERR(clk);
- goto err_disable_clk;
- }
-
- clk_value = clk_get_rate(clk) / ADC_CLK;
- clk_put(clk);
-
- if (clk_value < 7) {
- dev_err(&pdev->dev, "clock input less than min clock requirement\n");
- goto err_disable_clk;
- }
- /* CLKDIV needs to be configured to the value minus 1 */
- tscadc_writel(ts_dev, REG_CLKDIV, clk_value - 1);
-
- /* Enable wake-up of the SoC using touchscreen */
- tscadc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
-
- ctrl = CNTRLREG_STEPCONFIGWRT |
- CNTRLREG_TSCENB |
- CNTRLREG_STEPID;
- switch (ts_dev->wires) {
- case 4:
- ctrl |= CNTRLREG_4WIRE;
- break;
- case 5:
- ctrl |= CNTRLREG_5WIRE;
- break;
- case 8:
- ctrl |= CNTRLREG_8WIRE;
- break;
- }
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- tscadc_idle_config(ts_dev);
- tscadc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
- tscadc_step_config(ts_dev);
- tscadc_writel(ts_dev, REG_FIFO1THR, 6);
-
- ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
- input_dev->name = "ti-tsc-adc";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
-
- /* register to the input system */
- err = input_register_device(input_dev);
- if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ts_dev);
- return 0;
-
-err_disable_clk:
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-err_free_irq:
- free_irq(ts_dev->irq, ts_dev);
-err_unmap_regs:
- iounmap(ts_dev->tsc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts_dev);
- return err;
-}
-
-static int __devexit tscadc_remove(struct platform_device *pdev)
-{
- struct tscadc *ts_dev = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(ts_dev->irq, ts_dev);
-
- input_unregister_device(ts_dev->input);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(ts_dev->tsc_base);
- release_mem_region(res->start, resource_size(res));
-
- clk_disable(ts_dev->tsc_ick);
- clk_put(ts_dev->tsc_ick);
-
- kfree(ts_dev);
-
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-static struct platform_driver ti_tsc_driver = {
- .probe = tscadc_probe,
- .remove = __devexit_p(tscadc_remove),
- .driver = {
- .name = "tsc",
- .owner = THIS_MODULE,
- },
-};
-module_platform_driver(ti_tsc_driver);
-
-MODULE_DESCRIPTION("TI touchscreen controller driver");
-MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 368d2c6cf780..acfb87607b87 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -243,7 +243,7 @@ static void tsc_stop(struct input_dev *dev)
clk_disable(ts->clk);
}
-static int __devinit tsc_probe(struct platform_device *pdev)
+static int tsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tsc_data *ts;
@@ -357,7 +357,7 @@ error_res:
return error;
}
-static int __devexit tsc_remove(struct platform_device *pdev)
+static int tsc_remove(struct platform_device *pdev)
{
struct tsc_data *ts = platform_get_drvdata(pdev);
@@ -374,7 +374,7 @@ static int __devexit tsc_remove(struct platform_device *pdev)
static struct platform_driver tsc_driver = {
.probe = tsc_probe,
- .remove = __devexit_p(tsc_remove),
+ .remove = tsc_remove,
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index f7eda3d00fad..820a066c3b8a 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -345,7 +345,7 @@ err0:
return error;
}
-static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+static int tps6507x_ts_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
struct tps6507x_ts *tsc = tps6507x_dev->ts;
@@ -367,7 +367,7 @@ static struct platform_driver tps6507x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = tps6507x_ts_probe,
- .remove = __devexit_p(tps6507x_ts_remove),
+ .remove = tps6507x_ts_remove,
};
module_platform_driver(tps6507x_ts_driver);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 5ce3fa8ce646..9c0cdc7ea449 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -555,7 +555,7 @@ static void tsc2005_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
+static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
{
tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
@@ -569,7 +569,7 @@ static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
}
-static int __devinit tsc2005_probe(struct spi_device *spi)
+static int tsc2005_probe(struct spi_device *spi)
{
const struct tsc2005_platform_data *pdata = spi->dev.platform_data;
struct tsc2005 *ts;
@@ -686,7 +686,7 @@ err_free_mem:
return error;
}
-static int __devexit tsc2005_remove(struct spi_device *spi)
+static int tsc2005_remove(struct spi_device *spi)
{
struct tsc2005 *ts = spi_get_drvdata(spi);
@@ -745,7 +745,7 @@ static struct spi_driver tsc2005_driver = {
.pm = &tsc2005_pm_ops,
},
.probe = tsc2005_probe,
- .remove = __devexit_p(tsc2005_remove),
+ .remove = tsc2005_remove,
};
module_spi_driver(tsc2005_driver);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 1473d2382afd..0b67ba476b4c 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -273,7 +273,7 @@ static void tsc2007_close(struct input_dev *input_dev)
tsc2007_stop(ts);
}
-static int __devinit tsc2007_probe(struct i2c_client *client,
+static int tsc2007_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tsc2007 *ts;
@@ -366,7 +366,7 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
return err;
}
-static int __devexit tsc2007_remove(struct i2c_client *client)
+static int tsc2007_remove(struct i2c_client *client)
{
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
@@ -396,7 +396,7 @@ static struct i2c_driver tsc2007_driver = {
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
- .remove = __devexit_p(tsc2007_remove),
+ .remove = tsc2007_remove,
};
module_i2c_driver(tsc2007_driver);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 46e83ad53f43..1271f97b4079 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -274,7 +274,7 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -318,7 +318,7 @@ static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
return 0;
}
-static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
+static int ucb1400_ts_probe(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
@@ -397,7 +397,7 @@ err:
return error;
}
-static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
+static int ucb1400_ts_remove(struct platform_device *pdev)
{
struct ucb1400_ts *ucb = pdev->dev.platform_data;
@@ -442,7 +442,7 @@ static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = __devexit_p(ucb1400_ts_remove),
+ .remove = ucb1400_ts_remove,
.driver = {
.name = "ucb1400_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 9396b21d0e8f..d2ef8f05c66e 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -215,7 +215,7 @@ static void w90p910_close(struct input_dev *dev)
clk_disable(w90p910_ts->clk);
}
-static int __devinit w90x900ts_probe(struct platform_device *pdev)
+static int w90x900ts_probe(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts;
struct input_dev *input_dev;
@@ -301,7 +301,7 @@ fail1: input_free_device(input_dev);
return err;
}
-static int __devexit w90x900ts_remove(struct platform_device *pdev)
+static int w90x900ts_remove(struct platform_device *pdev)
{
struct w90p910_ts *w90p910_ts = platform_get_drvdata(pdev);
struct resource *res;
@@ -325,7 +325,7 @@ static int __devexit w90x900ts_remove(struct platform_device *pdev)
static struct platform_driver w90x900ts_driver = {
.probe = w90x900ts_probe,
- .remove = __devexit_p(w90x900ts_remove),
+ .remove = w90x900ts_remove,
.driver = {
.name = "nuc900-ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 0c01657132fd..bf0d07620bac 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -144,7 +144,7 @@ static void wacom_i2c_close(struct input_dev *dev)
disable_irq(client->irq);
}
-static int __devinit wacom_i2c_probe(struct i2c_client *client,
+static int wacom_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wacom_i2c *wac_i2c;
@@ -225,7 +225,7 @@ err_free_mem:
return error;
}
-static int __devexit wacom_i2c_remove(struct i2c_client *client)
+static int wacom_i2c_remove(struct i2c_client *client)
{
struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
@@ -272,7 +272,7 @@ static struct i2c_driver wacom_i2c_driver = {
},
.probe = wacom_i2c_probe,
- .remove = __devexit_p(wacom_i2c_remove),
+ .remove = wacom_i2c_remove,
.id_table = wacom_i2c_id,
};
module_i2c_driver(wacom_i2c_driver);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 52abb98a8ae5..f88fab56178c 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -233,7 +233,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
}
}
-static __devinit int wm831x_ts_probe(struct platform_device *pdev)
+static int wm831x_ts_probe(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts;
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -245,7 +245,8 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
if (core_pdata)
pdata = core_pdata->touch;
- wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
+ wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
+ GFP_KERNEL);
input_dev = input_allocate_device();
if (!wm831x_ts || !input_dev) {
error = -ENOMEM;
@@ -376,21 +377,18 @@ err_data_irq:
free_irq(wm831x_ts->data_irq, wm831x_ts);
err_alloc:
input_free_device(input_dev);
- kfree(wm831x_ts);
return error;
}
-static __devexit int wm831x_ts_remove(struct platform_device *pdev)
+static int wm831x_ts_remove(struct platform_device *pdev)
{
struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
free_irq(wm831x_ts->pd_irq, wm831x_ts);
free_irq(wm831x_ts->data_irq, wm831x_ts);
input_unregister_device(wm831x_ts->input_dev);
- kfree(wm831x_ts);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -400,7 +398,7 @@ static struct platform_driver wm831x_ts_driver = {
.owner = THIS_MODULE,
},
.probe = wm831x_ts_probe,
- .remove = __devexit_p(wm831x_ts_remove),
+ .remove = wm831x_ts_remove,
};
module_platform_driver(wm831x_ts_driver);
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14a4d5fc94fa..f66b816d455c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 55074cba20eb..c1c74e030a58 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -57,17 +57,9 @@
* physically contiguous memory regions it is mapping into page sizes
* that we support.
*
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512 GB pages are not supported due to a hardware bug
*/
-#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
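
Editorial aside on the new mask (the bit arithmetic, not part of the patch itself):

/* pgsize_bitmap semantics: bit n set => pages of 2^n bytes are supported.
 *   ~0xFFFUL    sets bits 12..63, i.e. 4 KiB and every larger power of two;
 *   2ULL << 38  is 1ULL << 39, and 2^39 bytes is exactly 512 GiB;
 * so the new value advertises everything from 4 KiB upwards except 512 GiB.
 */
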
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
list_del(&dev_data->dev_data_list);
spin_unlock_irqrestore(&dev_data_list_lock, flags);
+ if (dev_data->group)
+ iommu_group_put(dev_data->group);
+
kfree(dev_data);
}
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
*from = to;
}
-#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
{
- struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
- struct iommu_dev_data *dev_data;
- struct iommu_group *group;
- u16 alias;
- int ret;
-
- if (dev->archdata.iommu)
- return 0;
-
- dev_data = find_dev_data(get_device_id(dev));
- if (!dev_data)
- return -ENOMEM;
-
- alias = amd_iommu_alias_table[dev_data->devid];
- if (alias != dev_data->devid) {
- struct iommu_dev_data *alias_data;
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ return ERR_PTR(-ENODEV);
+ }
- alias_data = find_dev_data(alias);
- if (alias_data == NULL) {
- pr_err("AMD-Vi: Warning: Unhandled device %s\n",
- dev_name(dev));
- free_dev_data(dev_data);
- return -ENOTSUPP;
- }
- dev_data->alias_data = alias_data;
+ return bus;
+}
- dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
- }
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
- if (dma_pdev == NULL)
- dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+ struct pci_dev *dma_pdev = pdev;
/* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
* Finding the next device may require skipping virtual buses.
*/
while (!pci_is_root_bus(dma_pdev->bus)) {
- struct pci_bus *bus = dma_pdev->bus;
-
- while (!bus->self) {
- if (!pci_is_root_bus(bus))
- bus = bus->parent;
- else
- goto root_bus;
- }
+ struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+ if (IS_ERR(bus))
+ break;
if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
-root_bus:
- group = iommu_group_get(&dma_pdev->dev);
- pci_dev_put(dma_pdev);
+ return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+ struct iommu_group *group = iommu_group_get(&pdev->dev);
+ int ret;
+
if (!group) {
group = iommu_group_alloc();
if (IS_ERR(group))
return PTR_ERR(group);
+
+ WARN_ON(&pdev->dev != dev);
}
ret = iommu_group_add_device(group, dev);
-
iommu_group_put(group);
+ return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+ struct device *dev)
+{
+ if (!dev_data->group) {
+ struct iommu_group *group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ dev_data->group = group;
+ }
+
+ return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_group *group;
+ struct pci_dev *dma_pdev;
+ int ret;
+
+ group = iommu_group_get(dev);
+ if (group) {
+ iommu_group_put(group);
+ return 0;
+ }
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ if (dev_data->alias_data) {
+ u16 alias;
+ struct pci_bus *bus;
+
+ if (dev_data->alias_data->group)
+ goto use_group;
+
+ /*
+ * If the alias device exists, it's effectively just a first
+ * level quirk for finding the DMA source.
+ */
+ alias = amd_iommu_alias_table[dev_data->devid];
+ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+ if (dma_pdev) {
+ dma_pdev = get_isolation_root(dma_pdev);
+ goto use_pdev;
+ }
+
+ /*
+ * If the alias is virtual, try to find a parent device
+ * and test whether the IOMMU group is actualy rooted above
+ * the alias. Be careful to also test the parent device if
+ * we think the alias is the root of the group.
+ */
+ bus = pci_find_bus(0, alias >> 8);
+ if (!bus)
+ goto use_group;
+
+ bus = find_hosted_bus(bus);
+ if (IS_ERR(bus) || !bus->self)
+ goto use_group;
+
+ dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+ if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+ goto use_pdev;
+
+ pci_dev_put(dma_pdev);
+ goto use_group;
+ }
+
+ dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+ ret = use_pdev_iommu_group(dma_pdev, dev);
+ pci_dev_put(dma_pdev);
+ return ret;
+use_group:
+ return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
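
For context, a hedged sketch of how generic code can consume the group set up by init_iommu_group() above; iommu_group_get(), iommu_group_put() and iommu_group_for_each_dev() are existing IOMMU-core APIs, while the callback and the helper name are illustrative:

static int show_group_member(struct device *dev, void *data)
{
	dev_info(dev, "member of IOMMU group %p\n", data);
	return 0;
}

static void dump_group_of(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);	/* takes a reference */

	if (!group)
		return;

	iommu_group_for_each_dev(group, group, show_group_member);
	iommu_group_put(group);					/* drop the reference */
}
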
+
+static int iommu_init_device(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iommu_dev_data *dev_data;
+ u16 alias;
+ int ret;
+
+ if (dev->archdata.iommu)
+ return 0;
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ struct iommu_dev_data *alias_data;
+
+ alias_data = find_dev_data(alias);
+ if (alias_data == NULL) {
+ pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+ dev_name(dev));
+ free_dev_data(dev_data);
+ return -ENOTSUPP;
+ }
+ dev_data->alias_data = alias_data;
+ }
+ ret = init_iommu_group(dev);
if (ret)
return ret;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 81837b0710a9..faf10ba1ed9a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -975,6 +975,38 @@ static void __init free_iommu_all(void)
}
/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ * BIOS should disable L2B miscellaneous clock gating by setting
+ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x10) ||
+ (boot_cpu_data.x86_model > 0x1f))
+ return;
+
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+ pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+ if (value & BIT(2))
+ return;
+
+ /* Select NB indirect register 0x90 and enable writing */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+
+ /* Clear the enable writing bit */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
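
The workaround above uses the D0F2xF0/D0F2xF4 indirect register pair; a hedged read-side sketch of the same access pattern (offsets as used above, the helper name is made up):

static u32 iommu_read_l2b_indirect(struct pci_dev *pdev, u32 reg)
{
	u32 val;

	pci_write_config_dword(pdev, 0xf0, reg);	/* select the L2 register */
	pci_read_config_dword(pdev, 0xf4, &val);	/* read its contents */

	return val;
}
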
+
+/*
 * This function glues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
+ amd_iommu_erratum_746_workaround(iommu);
+
return pci_enable_device(iommu->dev);
}
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9aa3d079ff0..e38ab438bb34 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -426,6 +426,7 @@ struct iommu_dev_data {
struct iommu_dev_data *alias_data;/* The alias dev_data */
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
+ struct iommu_group *group; /* IOMMU group for virtual aliases */
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Default for device is pt_domain */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0badfa48b32b..eca28014ef3e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1827,10 +1827,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (!pte)
return -ENOMEM;
 /* It is a large page */
- if (largepage_lvl > 1)
+ if (largepage_lvl > 1) {
pteval |= DMA_PTE_LARGE_PAGE;
- else
+ /* Ensure that old small page tables are removed to make room
+ for superpage, if they exist. */
+ dma_pte_clear_range(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ dma_pte_free_pagetable(domain, iov_pfn,
+ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ } else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ }
}
/* We don't need lock here, nobody else
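
Editorial note on scale (lvl_to_nr_pages() is the existing helper in this file, roughly 1 << (9 * (lvl - 1)) pages):

	largepage_lvl == 2  ->  512 pages of 4 KiB    = one 2 MiB superpage
	largepage_lvl == 3  ->  262144 pages of 4 KiB = one 1 GiB superpage

so the clear/free pair added above wipes exactly the range of stale small-page tables that the new large PTE is about to cover.
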
@@ -2320,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
return 0;
}
+static bool device_has_rmrr(struct pci_dev *dev)
+{
+ struct dmar_rmrr_unit *rmrr;
+ int i;
+
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ /*
+ * Return TRUE if this RMRR contains the device that
+ * is passed in.
+ */
+ if (rmrr->devices[i] == dev)
+ return true;
+ }
+ }
+ return false;
+}
+
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
+
+ /*
+ * We want to prevent any device associated with an RMRR from
+ * getting placed into the SI Domain. This is done because
+ * problems exist when devices are moved in and out of domains
+ * and their respective RMRR info is lost. We exempt USB devices
+ * from this process due to their usage of RMRRs that are known
+ * to not be needed after BIOS hand-off to OS.
+ */
+ if (device_has_rmrr(pdev) &&
+ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
+ return 0;
+
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return 1;
@@ -4196,7 +4234,22 @@ static struct iommu_ops intel_iommu_ops = {
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
-static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+{
+ /* G4x/GM45 integrated gfx dmar support is totally busted. */
+ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+ dmar_map_gfx = 0;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+
+static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
@@ -4204,12 +4257,6 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
-
- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
- if (dev->revision == 0x07) {
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
@@ -4224,7 +4271,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
-static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
unsigned short ggc;
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index f55fc5dfbadc..d97fbe4fb9b1 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -18,11 +18,11 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
+#include <linux/omap-iommu.h>
+#include <linux/platform_data/iommu-omap.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-
-#include <plat/iopgtable.h>
+#include "omap-iopgtable.h"
+#include "omap-iommu.h"
#define MAXCOLUMN 100 /* for short messages */
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d0b1234581be..d33c980e9c20 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -16,17 +16,20 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
+#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <asm/cacheflush.h>
-#include <plat/iommu.h>
+#include <linux/platform_data/iommu-omap.h>
-#include <plat/iopgtable.h>
+#include "omap-iopgtable.h"
+#include "omap-iommu.h"
#define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \
@@ -51,6 +54,21 @@ struct omap_iommu_domain {
spinlock_t lock;
};
+#define MMU_LOCK_BASE_SHIFT 10
+#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
+#define MMU_LOCK_BASE(x) \
+ ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
+
+#define MMU_LOCK_VICT_SHIFT 4
+#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
+#define MMU_LOCK_VICT(x) \
+ ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
+
+struct iotlb_lock {
+ short base;
+ short vict;
+};
+
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;
@@ -125,31 +143,44 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (!obj)
+ if (!obj || !pdata)
return -EINVAL;
if (!arch_iommu)
return -ENODEV;
- clk_enable(obj->clk);
+ if (pdata->deassert_reset) {
+ err = pdata->deassert_reset(pdev, pdata->reset_name);
+ if (err) {
+ dev_err(obj->dev, "deassert_reset failed: %d\n", err);
+ return err;
+ }
+ }
+
+ pm_runtime_get_sync(obj->dev);
err = arch_iommu->enable(obj);
- clk_disable(obj->clk);
return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
- if (!obj)
- return;
+ struct platform_device *pdev = to_platform_device(obj->dev);
+ struct iommu_platform_data *pdata = pdev->dev.platform_data;
- clk_enable(obj->clk);
+ if (!obj || !pdata)
+ return;
arch_iommu->disable(obj);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
+
+ if (pdata->assert_reset)
+ pdata->assert_reset(pdev, pdata->reset_name);
}
/*
@@ -272,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
if (!obj || !obj->nr_tlb_entries || !e)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &l);
if (l.base == obj->nr_tlb_entries) {
@@ -302,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
cr = iotlb_alloc_cr(obj, e);
if (IS_ERR(cr)) {
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return PTR_ERR(cr);
}
@@ -316,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return err;
}
@@ -346,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
int i;
struct cr_regs cr;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
@@ -365,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
@@ -379,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
{
struct iotlb_lock l;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
l.base = 0;
l.vict = 0;
@@ -387,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj)
iommu_write_reg(obj, 1, MMU_GFLUSH);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
@@ -397,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
if (!obj || !buf)
return -EINVAL;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
bytes = arch_iommu->dump_ctx(obj, buf, bytes);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return bytes;
}
@@ -415,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
struct cr_regs tmp;
struct cr_regs *p = crs;
- clk_enable(obj->clk);
+ pm_runtime_get_sync(obj->dev);
iotlb_lock_get(obj, &saved);
for_each_iotlb_cr(obj, num, i, tmp) {
@@ -425,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
iotlb_lock_set(obj, &saved);
- clk_disable(obj->clk);
+ pm_runtime_put_sync(obj->dev);
return p - crs;
}
@@ -789,9 +820,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!obj->refcount)
return IRQ_NONE;
- clk_enable(obj->clk);
errs = iommu_report_fault(obj, &da);
- clk_disable(obj->clk);
if (errs == 0)
return IRQ_HANDLED;
@@ -905,7 +934,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
/*
* OMAP Device MMU(IOMMU) detection
*/
-static int __devinit omap_iommu_probe(struct platform_device *pdev)
+static int omap_iommu_probe(struct platform_device *pdev)
{
int err = -ENODEV;
int irq;
@@ -913,17 +942,10 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data;
- if (pdev->num_resources != 2)
- return -EINVAL;
-
obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- obj->clk = clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(obj->clk))
- goto err_clk;
-
obj->nr_tlb_entries = pdata->nr_tlb_entries;
obj->name = pdata->name;
obj->dev = &pdev->dev;
@@ -966,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
goto err_irq;
platform_set_drvdata(pdev, obj);
+ pm_runtime_irq_safe(obj->dev);
+ pm_runtime_enable(obj->dev);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
@@ -974,13 +999,11 @@ err_irq:
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_mem:
- clk_put(obj->clk);
-err_clk:
kfree(obj);
return err;
}
-static int __devexit omap_iommu_remove(struct platform_device *pdev)
+static int omap_iommu_remove(struct platform_device *pdev)
{
int irq;
struct resource *res;
@@ -996,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
iounmap(obj->regbase);
- clk_put(obj->clk);
+ pm_runtime_disable(obj->dev);
+
dev_info(&pdev->dev, "%s removed\n", obj->name);
kfree(obj);
return 0;
@@ -1004,7 +1028,7 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
- .remove = __devexit_p(omap_iommu_remove),
+ .remove = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
},
@@ -1015,6 +1039,23 @@ static void iopte_cachep_ctor(void *iopte)
clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
+static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
+ u32 flags)
+{
+ memset(e, 0, sizeof(*e));
+
+ e->da = da;
+ e->pa = pa;
+ e->valid = 1;
+ /* FIXME: add OMAP1 support */
+ e->pgsz = flags & MMU_CAM_PGSZ_MASK;
+ e->endian = flags & MMU_RAM_ENDIAN_MASK;
+ e->elsz = flags & MMU_RAM_ELSZ_MASK;
+ e->mixed = flags & MMU_RAM_MIXED_MASK;
+
+ return iopgsz_to_bytes(e->pgsz);
+}
+
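
A hedged sketch of how the new iotlb_init_entry() helper is meant to be used by the mapping path that follows; the wrapper function and its flags composition are illustrative, and the prot-to-flags translation is omitted:

static int map_one_4k_page(struct omap_iommu *obj, u32 da, u32 pa)
{
	struct iotlb_entry e;

	/* build a 4 KiB, 8-bit element-size entry and hand it to the
	 * page-table code; iotlb_init_entry() returns the page size in
	 * bytes, or 0 for an invalid size */
	if (!iotlb_init_entry(&e, da, pa, MMU_CAM_PGSZ_4K | MMU_RAM_ELSZ_8))
		return -EINVAL;

	return omap_iopgtable_store_entry(obj, &e);
}
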
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
new file mode 100644
index 000000000000..120084206602
--- /dev/null
+++ b/drivers/iommu/omap-iommu.h
@@ -0,0 +1,225 @@
+/*
+ * omap iommu: main structures
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if defined(CONFIG_ARCH_OMAP1)
+#error "iommu for this processor not implemented yet"
+#endif
+
+struct iotlb_entry {
+ u32 da;
+ u32 pa;
+ u32 pgsz, prsvd, valid;
+ union {
+ u16 ap;
+ struct {
+ u32 endian, elsz, mixed;
+ };
+ };
+};
+
+struct omap_iommu {
+ const char *name;
+ struct module *owner;
+ void __iomem *regbase;
+ struct device *dev;
+ void *isr_priv;
+ struct iommu_domain *domain;
+
+ unsigned int refcount;
+ spinlock_t iommu_lock; /* global for this whole object */
+
+ /*
+ * We don't change iopgd for a situation like pgd for a task,
+ * but share it globally for each iommu.
+ */
+ u32 *iopgd;
+ spinlock_t page_table_lock; /* protect iopgd */
+
+ int nr_tlb_entries;
+
+ struct list_head mmap;
+ struct mutex mmap_lock; /* protect mmap */
+
+	void *ctx; /* iommu context: registers saved area */
+ u32 da_start;
+ u32 da_end;
+};
+
+struct cr_regs {
+ union {
+ struct {
+ u16 cam_l;
+ u16 cam_h;
+ };
+ u32 cam;
+ };
+ union {
+ struct {
+ u16 ram_l;
+ u16 ram_h;
+ };
+ u32 ram;
+ };
+};
+
+/* architecture specific functions */
+struct iommu_functions {
+ unsigned long version;
+
+ int (*enable)(struct omap_iommu *obj);
+ void (*disable)(struct omap_iommu *obj);
+ void (*set_twl)(struct omap_iommu *obj, bool on);
+ u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
+
+ void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+ void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+
+ struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
+ struct iotlb_entry *e);
+ int (*cr_valid)(struct cr_regs *cr);
+ u32 (*cr_to_virt)(struct cr_regs *cr);
+ void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
+ ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
+ char *buf);
+
+ u32 (*get_pte_attr)(struct iotlb_entry *e);
+
+ void (*save_ctx)(struct omap_iommu *obj);
+ void (*restore_ctx)(struct omap_iommu *obj);
+ ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
+};
+
+#ifdef CONFIG_IOMMU_API
+/**
+ * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
+ * @dev: iommu client device
+ */
+static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+ return arch_data->iommu_dev;
+}
+#endif
+
+/*
+ * MMU Register offsets
+ */
+#define MMU_REVISION 0x00
+#define MMU_IRQSTATUS 0x18
+#define MMU_IRQENABLE 0x1c
+#define MMU_WALKING_ST 0x40
+#define MMU_CNTL 0x44
+#define MMU_FAULT_AD 0x48
+#define MMU_TTB 0x4c
+#define MMU_LOCK 0x50
+#define MMU_LD_TLB 0x54
+#define MMU_CAM 0x58
+#define MMU_RAM 0x5c
+#define MMU_GFLUSH 0x60
+#define MMU_FLUSH_ENTRY 0x64
+#define MMU_READ_CAM 0x68
+#define MMU_READ_RAM 0x6c
+#define MMU_EMU_FAULT_AD 0x70
+
+#define MMU_REG_SIZE 256
+
+/*
+ * MMU Register bit definitions
+ */
+#define MMU_CAM_VATAG_SHIFT 12
+#define MMU_CAM_VATAG_MASK \
+ ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
+#define MMU_CAM_P (1 << 3)
+#define MMU_CAM_V (1 << 2)
+#define MMU_CAM_PGSZ_MASK 3
+#define MMU_CAM_PGSZ_1M (0 << 0)
+#define MMU_CAM_PGSZ_64K (1 << 0)
+#define MMU_CAM_PGSZ_4K (2 << 0)
+#define MMU_CAM_PGSZ_16M (3 << 0)
+
+#define MMU_RAM_PADDR_SHIFT 12
+#define MMU_RAM_PADDR_MASK \
+ ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
+
+#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT)
+
+#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_MIXED_SHIFT 6
+#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
+
+/*
+ * utilities for super page(16MB, 1MB, 64KB and 4KB)
+ */
+
+#define iopgsz_max(bytes) \
+ (((bytes) >= SZ_16M) ? SZ_16M : \
+ ((bytes) >= SZ_1M) ? SZ_1M : \
+ ((bytes) >= SZ_64K) ? SZ_64K : \
+ ((bytes) >= SZ_4K) ? SZ_4K : 0)
+
+#define bytes_to_iopgsz(bytes) \
+ (((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M : \
+ ((bytes) == SZ_1M) ? MMU_CAM_PGSZ_1M : \
+ ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K : \
+ ((bytes) == SZ_4K) ? MMU_CAM_PGSZ_4K : -1)
+
+#define iopgsz_to_bytes(iopgsz) \
+ (((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M : \
+ ((iopgsz) == MMU_CAM_PGSZ_1M) ? SZ_1M : \
+ ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K : \
+ ((iopgsz) == MMU_CAM_PGSZ_4K) ? SZ_4K : 0)
+
+#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
+
+/*
+ * global functions
+ */
+extern u32 omap_iommu_arch_version(void);
+
+extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
+
+extern int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
+
+extern void omap_iommu_save_ctx(struct device *dev);
+extern void omap_iommu_restore_ctx(struct device *dev);
+
+extern int omap_foreach_iommu_device(void *data,
+ int (*fn)(struct device *, void *));
+
+extern int omap_install_iommu_arch(const struct iommu_functions *ops);
+extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
+
+extern ssize_t
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
+extern size_t
+omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+
+/*
+ * register accessors
+ */
+static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
+{
+ return __raw_readl(obj->regbase + offs);
+}
+
+static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
+{
+ __raw_writel(val, obj->regbase + offs);
+}
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
new file mode 100644
index 000000000000..d745094a69dd
--- /dev/null
+++ b/drivers/iommu/omap-iommu2.c
@@ -0,0 +1,334 @@
+/*
+ * omap iommu: omap2/3 architecture specific functions
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
+ * Paul Mundt and Toshihiro Kobayashi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/omap-iommu.h>
+#include <linux/slab.h>
+#include <linux/stringify.h>
+#include <linux/platform_data/iommu-omap.h>
+
+#include "omap-iommu.h"
+
+/*
+ * omap2 architecture specific register bit definitions
+ */
+#define IOMMU_ARCH_VERSION 0x00000011
+
+/* IRQSTATUS & IRQENABLE */
+#define MMU_IRQ_MULTIHITFAULT (1 << 4)
+#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
+#define MMU_IRQ_EMUMISS (1 << 2)
+#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
+#define MMU_IRQ_TLBMISS (1 << 0)
+
+#define __MMU_IRQ_FAULT \
+ (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
+#define MMU_IRQ_MASK \
+ (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
+#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
+#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
+
+/* MMU_CNTL */
+#define MMU_CNTL_SHIFT 1
+#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
+#define MMU_CNTL_EML_TLB (1 << 3)
+#define MMU_CNTL_TWL_EN (1 << 2)
+#define MMU_CNTL_MMU_EN (1 << 1)
+
+#define get_cam_va_mask(pgsz) \
+ (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
+ ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
+ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
+ ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
+
+/* IOMMU errors */
+#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
+#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
+#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
+#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
+#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
+
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
+{
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ if (on)
+ iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
+ else
+ iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
+
+ l &= ~MMU_CNTL_MASK;
+ if (on)
+ l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+ else
+ l |= (MMU_CNTL_MMU_EN);
+
+ iommu_write_reg(obj, l, MMU_CNTL);
+}
+
+
+static int omap2_iommu_enable(struct omap_iommu *obj)
+{
+ u32 l, pa;
+
+ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ return -EINVAL;
+
+ pa = virt_to_phys(obj->iopgd);
+ if (!IS_ALIGNED(pa, SZ_16K))
+ return -EINVAL;
+
+ l = iommu_read_reg(obj, MMU_REVISION);
+ dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+ (l >> 4) & 0xf, l & 0xf);
+
+ iommu_write_reg(obj, pa, MMU_TTB);
+
+ __iommu_set_twl(obj, true);
+
+ return 0;
+}
+
+static void omap2_iommu_disable(struct omap_iommu *obj)
+{
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ l &= ~MMU_CNTL_MASK;
+ iommu_write_reg(obj, l, MMU_CNTL);
+
+ dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
+}
+
+static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
+{
+ __iommu_set_twl(obj, false);
+}
+
+static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
+{
+ u32 stat, da;
+ u32 errs = 0;
+
+ stat = iommu_read_reg(obj, MMU_IRQSTATUS);
+ stat &= MMU_IRQ_MASK;
+ if (!stat) {
+ *ra = 0;
+ return 0;
+ }
+
+ da = iommu_read_reg(obj, MMU_FAULT_AD);
+ *ra = da;
+
+ if (stat & MMU_IRQ_TLBMISS)
+ errs |= OMAP_IOMMU_ERR_TLB_MISS;
+ if (stat & MMU_IRQ_TRANSLATIONFAULT)
+ errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
+ if (stat & MMU_IRQ_EMUMISS)
+ errs |= OMAP_IOMMU_ERR_EMU_MISS;
+ if (stat & MMU_IRQ_TABLEWALKFAULT)
+ errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
+ if (stat & MMU_IRQ_MULTIHITFAULT)
+ errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
+ iommu_write_reg(obj, stat, MMU_IRQSTATUS);
+
+ return errs;
+}
+
+static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
+{
+ cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+ cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
+}
+
+static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
+{
+ iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+ iommu_write_reg(obj, cr->ram, MMU_RAM);
+}
+
+static u32 omap2_cr_to_virt(struct cr_regs *cr)
+{
+ u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+ u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+ return cr->cam & mask;
+}
+
+static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
+ struct iotlb_entry *e)
+{
+ struct cr_regs *cr;
+
+ if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+ dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+ e->da);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return ERR_PTR(-ENOMEM);
+
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
+ cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+ return cr;
+}
+
+static inline int omap2_cr_valid(struct cr_regs *cr)
+{
+ return cr->cam & MMU_CAM_V;
+}
+
+static u32 omap2_get_pte_attr(struct iotlb_entry *e)
+{
+ u32 attr;
+
+ attr = e->mixed << 5;
+ attr |= e->endian;
+ attr |= e->elsz >> 3;
+ attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+ (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
+ return attr;
+}
+
+static ssize_t
+omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
+{
+ char *p = buf;
+
+	/* FIXME: Need more detailed analysis of cam/ram */
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+ return p - buf;
+}
+
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
+
+static void omap2_iommu_save_ctx(struct omap_iommu *obj)
+{
+ int i;
+ u32 *p = obj->ctx;
+
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
+
+ BUG_ON(p[0] != IOMMU_ARCH_VERSION);
+}
+
+static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
+{
+ int i;
+ u32 *p = obj->ctx;
+
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
+
+ BUG_ON(p[0] != IOMMU_ARCH_VERSION);
+}
+
+static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
+{
+ e->da = cr->cam & MMU_CAM_VATAG_MASK;
+ e->pa = cr->ram & MMU_RAM_PADDR_MASK;
+ e->valid = cr->cam & MMU_CAM_V;
+ e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
+ e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
+ e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
+ e->mixed = cr->ram & MMU_RAM_MIXED;
+}
+
+static const struct iommu_functions omap2_iommu_ops = {
+ .version = IOMMU_ARCH_VERSION,
+
+ .enable = omap2_iommu_enable,
+ .disable = omap2_iommu_disable,
+ .set_twl = omap2_iommu_set_twl,
+ .fault_isr = omap2_iommu_fault_isr,
+
+ .tlb_read_cr = omap2_tlb_read_cr,
+ .tlb_load_cr = omap2_tlb_load_cr,
+
+ .cr_to_e = omap2_cr_to_e,
+ .cr_to_virt = omap2_cr_to_virt,
+ .alloc_cr = omap2_alloc_cr,
+ .cr_valid = omap2_cr_valid,
+ .dump_cr = omap2_dump_cr,
+
+ .get_pte_attr = omap2_get_pte_attr,
+
+ .save_ctx = omap2_iommu_save_ctx,
+ .restore_ctx = omap2_iommu_restore_ctx,
+ .dump_ctx = omap2_iommu_dump_ctx,
+};
+
+static int __init omap2_iommu_init(void)
+{
+ return omap_install_iommu_arch(&omap2_iommu_ops);
+}
+module_init(omap2_iommu_init);
+
+static void __exit omap2_iommu_exit(void)
+{
+ omap_uninstall_iommu_arch(&omap2_iommu_ops);
+}
+module_exit(omap2_iommu_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
+MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
new file mode 100644
index 000000000000..cd4ae9e5b0c6
--- /dev/null
+++ b/drivers/iommu/omap-iopgtable.h
@@ -0,0 +1,98 @@
+/*
+ * omap iommu: pagetable definitions
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * "L2 table" address mask and size definitions.
+ */
+#define IOPGD_SHIFT 20
+#define IOPGD_SIZE (1UL << IOPGD_SHIFT)
+#define IOPGD_MASK (~(IOPGD_SIZE - 1))
+
+/*
+ * "section" address mask and size definitions.
+ */
+#define IOSECTION_SHIFT 20
+#define IOSECTION_SIZE (1UL << IOSECTION_SHIFT)
+#define IOSECTION_MASK (~(IOSECTION_SIZE - 1))
+
+/*
+ * "supersection" address mask and size definitions.
+ */
+#define IOSUPER_SHIFT 24
+#define IOSUPER_SIZE (1UL << IOSUPER_SHIFT)
+#define IOSUPER_MASK (~(IOSUPER_SIZE - 1))
+
+#define PTRS_PER_IOPGD (1UL << (32 - IOPGD_SHIFT))
+#define IOPGD_TABLE_SIZE (PTRS_PER_IOPGD * sizeof(u32))
+
+/*
+ * "small page" address mask and size definitions.
+ */
+#define IOPTE_SHIFT 12
+#define IOPTE_SIZE (1UL << IOPTE_SHIFT)
+#define IOPTE_MASK (~(IOPTE_SIZE - 1))
+
+/*
+ * "large page" address mask and size definitions.
+ */
+#define IOLARGE_SHIFT 16
+#define IOLARGE_SIZE (1UL << IOLARGE_SHIFT)
+#define IOLARGE_MASK (~(IOLARGE_SIZE - 1))
+
+#define PTRS_PER_IOPTE (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
+#define IOPTE_TABLE_SIZE (PTRS_PER_IOPTE * sizeof(u32))
+
+#define IOPAGE_MASK IOPTE_MASK
+
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d: omap iommu descriptor
+ * @va: virtual address
+ * @mask: omap iommu descriptor mask
+ *
+ * Returns the physical address @d maps @va to, using @mask to pick the descriptor bits.
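+ *
+ * For example, with a 1 MiB section (@mask = IOSECTION_MASK) whose
+ * descriptor holds pa 0x80000000, va 0x00012345 translates to
+ * 0x80012345: @d supplies the upper bits, @va the in-page offset.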
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+ return (d & mask) | (va & (~mask));
+}
+
+/*
+ * some descriptor attributes.
+ */
+#define IOPGD_TABLE (1 << 0)
+#define IOPGD_SECTION (2 << 0)
+#define IOPGD_SUPER (1 << 18 | 2 << 0)
+
+#define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
+
+#define IOPTE_SMALL (2 << 0)
+#define IOPTE_LARGE (1 << 0)
+
+#define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE)
+
+/* to find an entry in a page-table-directory */
+#define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
+#define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da))
+
+#define iopgd_page_paddr(iopgd) (*iopgd & ~((1 << 10) - 1))
+#define iopgd_page_vaddr(iopgd) ((u32 *)phys_to_virt(iopgd_page_paddr(iopgd)))
+
+/* to find an entry in the second-level page table. */
+#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
+#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#define to_iommu(dev) \
+ (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 2e10c3e0a7ae..46d875690739 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -17,14 +17,58 @@
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
+#include <linux/omap-iommu.h>
+#include <linux/platform_data/iommu-omap.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
+#include "omap-iopgtable.h"
+#include "omap-iommu.h"
-#include <plat/iopgtable.h>
+/*
+ * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
+ *
+ * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
+ */
+#define IOVMF_SW_SHIFT 16
+
+/*
+ * iovma: h/w flags derived from the cam and ram attributes
+ */
+#define IOVMF_CAM_MASK (~((1 << 10) - 1))
+#define IOVMF_RAM_MASK (~IOVMF_CAM_MASK)
+
+#define IOVMF_PGSZ_MASK (3 << 0)
+#define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M
+#define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K
+#define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K
+#define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M
+
+#define IOVMF_ENDIAN_MASK (1 << 9)
+#define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG
+
+#define IOVMF_ELSZ_MASK (3 << 7)
+#define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16
+#define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32
+#define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE
+
+#define IOVMF_MIXED_MASK (1 << 6)
+#define IOVMF_MIXED MMU_RAM_MIXED
+
+/*
+ * iovma: s/w flags, used internally for mapping and unmapping.
+ */
+#define IOVMF_MMIO (1 << IOVMF_SW_SHIFT)
+#define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT)
+#define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT)
+
+/* "superpages" is supported just with physically linear pages */
+#define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT))
+#define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT))
+#define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT))
+
+#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
static struct kmem_cache *iovm_area_cachep;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index c16e8fc8a4bd..8219f1d596ee 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -398,6 +398,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
+ bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return 0;
fail:
@@ -430,7 +431,7 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
};
#ifdef CONFIG_OF
-static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+static struct of_device_id tegra_gart_of_match[] = {
{ .compatible = "nvidia,tegra20-gart", },
{ },
};
@@ -448,9 +449,8 @@ static struct platform_driver tegra_gart_driver = {
},
};
-static int __devinit tegra_gart_init(void)
+static int tegra_gart_init(void)
{
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
return platform_driver_register(&tegra_gart_driver);
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index c0f7a4266263..fc178893789a 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -34,13 +34,11 @@
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/tegra-ahb.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
-#include <mach/iomap.h>
-#include <mach/tegra-ahb.h>
-
enum smmu_hwgrp {
HWGRP_AFI,
HWGRP_AVPC,
@@ -696,10 +694,8 @@ static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
*pte = _PTE_VACANT(iova);
FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
- if (!--(*count)) {
+ if (!--(*count))
free_ptbl(as, iova);
- smmu_flush_regs(as->smmu, 0);
- }
}
static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
@@ -1234,6 +1230,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
smmu_debugfs_create(smmu);
smmu_handle = smmu;
+ bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return 0;
}
@@ -1258,7 +1255,7 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
};
#ifdef CONFIG_OF
-static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
+static struct of_device_id tegra_smmu_of_match[] = {
{ .compatible = "nvidia,tegra30-smmu", },
{ },
};
@@ -1276,9 +1273,8 @@ static struct platform_driver tegra_smmu_driver = {
},
};
-static int __devinit tegra_smmu_init(void)
+static int tegra_smmu_init(void)
{
- bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
return platform_driver_register(&tegra_smmu_driver);
}
diff --git a/drivers/ipack/Kconfig b/drivers/ipack/Kconfig
new file mode 100644
index 000000000000..3949e5589560
--- /dev/null
+++ b/drivers/ipack/Kconfig
@@ -0,0 +1,24 @@
+#
+# IPACK configuration.
+#
+
+menuconfig IPACK_BUS
+ tristate "IndustryPack bus support"
+ depends on HAS_IOMEM
+ ---help---
+ This option provides support for the IndustryPack framework. There
+ are IndustryPack carrier boards, which interface another bus (such as
 PCI) to an IndustryPack bus, and IndustryPack modules that are
+ hosted on these buses. While IndustryPack modules can provide a
+ large variety of functionality, they are most often found in
+ industrial control applications.
+
+ Say N if unsure.
+
+if IPACK_BUS
+
+source "drivers/ipack/carriers/Kconfig"
+
+source "drivers/ipack/devices/Kconfig"
+
+endif # IPACK_BUS
diff --git a/drivers/staging/ipack/Makefile b/drivers/ipack/Makefile
index 85ff223616fd..6f14ade0f8f3 100644
--- a/drivers/staging/ipack/Makefile
+++ b/drivers/ipack/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_IPACK_BUS) += ipack.o
obj-y += devices/
-obj-y += bridges/
+obj-y += carriers/
diff --git a/drivers/ipack/carriers/Kconfig b/drivers/ipack/carriers/Kconfig
new file mode 100644
index 000000000000..922ff5c35acc
--- /dev/null
+++ b/drivers/ipack/carriers/Kconfig
@@ -0,0 +1,7 @@
+config BOARD_TPCI200
+ tristate "Support for the TEWS TPCI-200 IndustryPack carrier board"
+ depends on IPACK_BUS
+ depends on PCI
+ help
+ This driver adds support for the TEWS TPCI200 IndustryPack carrier board.
+ default n
diff --git a/drivers/staging/ipack/bridges/Makefile b/drivers/ipack/carriers/Makefile
index d8b76459300f..d8b76459300f 100644
--- a/drivers/staging/ipack/bridges/Makefile
+++ b/drivers/ipack/carriers/Makefile
diff --git a/drivers/staging/ipack/bridges/tpci200.c b/drivers/ipack/carriers/tpci200.c
index 46d6657280b8..0246b1fddffe 100644
--- a/drivers/staging/ipack/bridges/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -2,9 +2,10 @@
* tpci200.c
*
* driver for the TEWS TPCI-200 device
- * Copyright (c) 2009 Nicolas Serafini, EIC2 SA
- * Copyright (c) 2010,2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * Copyright (c) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ *
+ * Copyright (C) 2009-2012 CERN (www.cern.ch)
+ * Author: Nicolas Serafini, EIC2 SA
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -15,20 +16,36 @@
#include <linux/slab.h>
#include "tpci200.h"
-static u16 tpci200_status_timeout[] = {
+static const u16 tpci200_status_timeout[] = {
TPCI200_A_TIMEOUT,
TPCI200_B_TIMEOUT,
TPCI200_C_TIMEOUT,
TPCI200_D_TIMEOUT,
};
-static u16 tpci200_status_error[] = {
+static const u16 tpci200_status_error[] = {
TPCI200_A_ERROR,
TPCI200_B_ERROR,
TPCI200_C_ERROR,
TPCI200_D_ERROR,
};
+static const size_t tpci200_space_size[IPACK_SPACE_COUNT] = {
+ [IPACK_IO_SPACE] = TPCI200_IO_SPACE_SIZE,
+ [IPACK_ID_SPACE] = TPCI200_ID_SPACE_SIZE,
+ [IPACK_INT_SPACE] = TPCI200_INT_SPACE_SIZE,
+ [IPACK_MEM8_SPACE] = TPCI200_MEM8_SPACE_SIZE,
+ [IPACK_MEM16_SPACE] = TPCI200_MEM16_SPACE_SIZE,
+};
+
+static const size_t tpci200_space_interval[IPACK_SPACE_COUNT] = {
+ [IPACK_IO_SPACE] = TPCI200_IO_SPACE_INTERVAL,
+ [IPACK_ID_SPACE] = TPCI200_ID_SPACE_INTERVAL,
+ [IPACK_INT_SPACE] = TPCI200_INT_SPACE_INTERVAL,
+ [IPACK_MEM8_SPACE] = TPCI200_MEM8_SPACE_INTERVAL,
+ [IPACK_MEM16_SPACE] = TPCI200_MEM16_SPACE_INTERVAL,
+};
+
static struct tpci200_board *check_slot(struct ipack_device *dev)
{
struct tpci200_board *tpci200;
@@ -47,7 +64,7 @@ static struct tpci200_board *check_slot(struct ipack_device *dev)
if (dev->slot >= TPCI200_NB_SLOT) {
dev_info(&dev->dev,
"Slot [%d:%d] doesn't exist! Last tpci200 slot is %d.\n",
- dev->bus_nr, dev->slot, TPCI200_NB_SLOT-1);
+ dev->bus->bus_nr, dev->slot, TPCI200_NB_SLOT-1);
return NULL;
}
@@ -74,33 +91,19 @@ static void tpci200_set_mask(struct tpci200_board *tpci200,
static void tpci200_unregister(struct tpci200_board *tpci200)
{
- int i;
-
free_irq(tpci200->info->pdev->irq, (void *) tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
- pci_iounmap(tpci200->info->pdev, tpci200->info->ioidint_space);
- pci_iounmap(tpci200->info->pdev, tpci200->info->mem8_space);
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
+ pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_disable_device(tpci200->info->pdev);
pci_dev_put(tpci200->info->pdev);
-
- for (i = 0; i < TPCI200_NB_SLOT; i++) {
- tpci200->slots[i].io_phys.address = NULL;
- tpci200->slots[i].io_phys.size = 0;
- tpci200->slots[i].id_phys.address = NULL;
- tpci200->slots[i].id_phys.size = 0;
- tpci200->slots[i].int_phys.address = NULL;
- tpci200->slots[i].int_phys.size = 0;
- tpci200->slots[i].mem_phys.address = NULL;
- tpci200->slots[i].mem_phys.size = 0;
- }
}
static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -207,7 +210,8 @@ static int tpci200_request_irq(struct ipack_device *dev,
if (tpci200->slots[dev->slot].irq != NULL) {
dev_err(&dev->dev,
- "Slot [%d:%d] IRQ already registered !\n", dev->bus_nr,
+ "Slot [%d:%d] IRQ already registered !\n",
+ dev->bus->bus_nr,
dev->slot);
res = -EINVAL;
goto out_unlock;
@@ -217,7 +221,7 @@ static int tpci200_request_irq(struct ipack_device *dev,
if (slot_irq == NULL) {
dev_err(&dev->dev,
"Slot [%d:%d] unable to allocate memory for IRQ !\n",
- dev->bus_nr, dev->slot);
+ dev->bus->bus_nr, dev->slot);
res = -ENOMEM;
goto out_unlock;
}
@@ -244,8 +248,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
{
int i;
int res;
- unsigned long ioidint_base;
- unsigned long mem_base;
+ phys_addr_t ioidint_base;
unsigned short slot_ctrl;
if (pci_enable_device(tpci200->info->pdev) < 0)
@@ -274,38 +277,49 @@ static int tpci200_register(struct tpci200_board *tpci200)
goto out_release_ip_space;
}
- /* Request MEM space (Bar 4) */
+ /* Request MEM8 space (Bar 5) */
res = pci_request_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR,
- "Carrier MEM space");
+ "Carrier MEM8 space");
if (res) {
dev_err(&tpci200->info->pdev->dev,
- "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
+ "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ioid_int_space;
}
+ /* Request MEM16 space (Bar 4) */
+ res = pci_request_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR,
+ "Carrier MEM16 space");
+ if (res) {
+ dev_err(&tpci200->info->pdev->dev,
+ "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
+ tpci200->info->pdev->bus->number,
+ tpci200->info->pdev->devfn);
+ goto out_release_mem8_space;
+ }
+
/* Map internal tpci200 driver user space */
tpci200->info->interface_regs =
ioremap_nocache(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
- tpci200->info->ioidint_space =
- ioremap_nocache(pci_resource_start(tpci200->info->pdev,
- TPCI200_IO_ID_INT_SPACES_BAR),
- TPCI200_IOIDINT_SIZE);
- tpci200->info->mem8_space =
- ioremap_nocache(pci_resource_start(tpci200->info->pdev,
- TPCI200_MEM8_SPACE_BAR),
- TPCI200_MEM8_SIZE);
/* Initialize lock that protects interface_regs */
spin_lock_init(&tpci200->regs_lock);
ioidint_base = pci_resource_start(tpci200->info->pdev,
TPCI200_IO_ID_INT_SPACES_BAR);
- mem_base = pci_resource_start(tpci200->info->pdev,
- TPCI200_MEM8_SPACE_BAR);
+ tpci200->mod_mem[IPACK_IO_SPACE] = ioidint_base + TPCI200_IO_SPACE_OFF;
+ tpci200->mod_mem[IPACK_ID_SPACE] = ioidint_base + TPCI200_ID_SPACE_OFF;
+ tpci200->mod_mem[IPACK_INT_SPACE] =
+ ioidint_base + TPCI200_INT_SPACE_OFF;
+ tpci200->mod_mem[IPACK_MEM8_SPACE] =
+ pci_resource_start(tpci200->info->pdev,
+ TPCI200_MEM8_SPACE_BAR);
+ tpci200->mod_mem[IPACK_MEM16_SPACE] =
+ pci_resource_start(tpci200->info->pdev,
+ TPCI200_MEM16_SPACE_BAR);
/* Set the default parameters of the slot
* INT0 disabled, level sensitive
@@ -316,30 +330,8 @@ static int tpci200_register(struct tpci200_board *tpci200)
* clock rate 8 MHz
*/
slot_ctrl = 0;
-
- /* Set all slot physical address space */
- for (i = 0; i < TPCI200_NB_SLOT; i++) {
- tpci200->slots[i].io_phys.address =
- (void __iomem *)ioidint_base +
- TPCI200_IO_SPACE_OFF + TPCI200_IO_SPACE_GAP*i;
- tpci200->slots[i].io_phys.size = TPCI200_IO_SPACE_SIZE;
-
- tpci200->slots[i].id_phys.address =
- (void __iomem *)ioidint_base +
- TPCI200_ID_SPACE_OFF + TPCI200_ID_SPACE_GAP*i;
- tpci200->slots[i].id_phys.size = TPCI200_ID_SPACE_SIZE;
-
- tpci200->slots[i].int_phys.address =
- (void __iomem *)ioidint_base +
- TPCI200_INT_SPACE_OFF + TPCI200_INT_SPACE_GAP * i;
- tpci200->slots[i].int_phys.size = TPCI200_INT_SPACE_SIZE;
-
- tpci200->slots[i].mem_phys.address =
- (void __iomem *)mem_base + TPCI200_MEM8_GAP*i;
- tpci200->slots[i].mem_phys.size = TPCI200_MEM8_SIZE;
-
+ for (i = 0; i < TPCI200_NB_SLOT; i++)
writew(slot_ctrl, &tpci200->info->interface_regs->control[i]);
- }
res = request_irq(tpci200->info->pdev->irq,
tpci200_interrupt, IRQF_SHARED,
@@ -354,6 +346,8 @@ static int tpci200_register(struct tpci200_board *tpci200)
return 0;
+out_release_mem8_space:
+ pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
out_release_ioid_int_space:
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
out_release_ip_space:
@@ -363,166 +357,6 @@ out_disable_pci:
return res;
}
-static int tpci200_slot_unmap_space(struct ipack_device *dev, int space)
-{
- struct ipack_addr_space *virt_addr_space;
- struct tpci200_board *tpci200;
-
- tpci200 = check_slot(dev);
- if (tpci200 == NULL)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&tpci200->mutex))
- return -ERESTARTSYS;
-
- switch (space) {
- case IPACK_IO_SPACE:
- if (dev->io_space.address == NULL) {
- dev_info(&dev->dev,
- "Slot [%d:%d] IO space not mapped !\n",
- dev->bus_nr, dev->slot);
- goto out_unlock;
- }
- virt_addr_space = &dev->io_space;
- break;
- case IPACK_ID_SPACE:
- if (dev->id_space.address == NULL) {
- dev_info(&dev->dev,
- "Slot [%d:%d] ID space not mapped !\n",
- dev->bus_nr, dev->slot);
- goto out_unlock;
- }
- virt_addr_space = &dev->id_space;
- break;
- case IPACK_INT_SPACE:
- if (dev->int_space.address == NULL) {
- dev_info(&dev->dev,
- "Slot [%d:%d] INT space not mapped !\n",
- dev->bus_nr, dev->slot);
- goto out_unlock;
- }
- virt_addr_space = &dev->int_space;
- break;
- case IPACK_MEM_SPACE:
- if (dev->mem_space.address == NULL) {
- dev_info(&dev->dev,
- "Slot [%d:%d] MEM space not mapped !\n",
- dev->bus_nr, dev->slot);
- goto out_unlock;
- }
- virt_addr_space = &dev->mem_space;
- break;
- default:
- dev_err(&dev->dev,
- "Slot [%d:%d] space number %d doesn't exist !\n",
- dev->bus_nr, dev->slot, space);
- mutex_unlock(&tpci200->mutex);
- return -EINVAL;
- }
-
- iounmap(virt_addr_space->address);
-
- virt_addr_space->address = NULL;
- virt_addr_space->size = 0;
-out_unlock:
- mutex_unlock(&tpci200->mutex);
- return 0;
-}
-
-static int tpci200_slot_map_space(struct ipack_device *dev,
- unsigned int memory_size, int space)
-{
- int res = 0;
- unsigned int size_to_map;
- void __iomem *phys_address;
- struct ipack_addr_space *virt_addr_space;
- struct tpci200_board *tpci200;
-
- tpci200 = check_slot(dev);
- if (tpci200 == NULL)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&tpci200->mutex))
- return -ERESTARTSYS;
-
- switch (space) {
- case IPACK_IO_SPACE:
- if (dev->io_space.address != NULL) {
- dev_err(&dev->dev,
- "Slot [%d:%d] IO space already mapped !\n",
- tpci200->number, dev->slot);
- res = -EINVAL;
- goto out_unlock;
- }
- virt_addr_space = &dev->io_space;
-
- phys_address = tpci200->slots[dev->slot].io_phys.address;
- size_to_map = tpci200->slots[dev->slot].io_phys.size;
- break;
- case IPACK_ID_SPACE:
- if (dev->id_space.address != NULL) {
- dev_err(&dev->dev,
- "Slot [%d:%d] ID space already mapped !\n",
- tpci200->number, dev->slot);
- res = -EINVAL;
- goto out_unlock;
- }
- virt_addr_space = &dev->id_space;
-
- phys_address = tpci200->slots[dev->slot].id_phys.address;
- size_to_map = tpci200->slots[dev->slot].id_phys.size;
- break;
- case IPACK_INT_SPACE:
- if (dev->int_space.address != NULL) {
- dev_err(&dev->dev,
- "Slot [%d:%d] INT space already mapped !\n",
- tpci200->number, dev->slot);
- res = -EINVAL;
- goto out_unlock;
- }
- virt_addr_space = &dev->int_space;
-
- phys_address = tpci200->slots[dev->slot].int_phys.address;
- size_to_map = tpci200->slots[dev->slot].int_phys.size;
- break;
- case IPACK_MEM_SPACE:
- if (dev->mem_space.address != NULL) {
- dev_err(&dev->dev,
- "Slot [%d:%d] MEM space already mapped !\n",
- tpci200->number, dev->slot);
- res = -EINVAL;
- goto out_unlock;
- }
- virt_addr_space = &dev->mem_space;
-
- if (memory_size > tpci200->slots[dev->slot].mem_phys.size) {
- dev_err(&dev->dev,
- "Slot [%d:%d] request is 0x%X memory, only 0x%X available !\n",
- dev->bus_nr, dev->slot, memory_size,
- tpci200->slots[dev->slot].mem_phys.size);
- res = -EINVAL;
- goto out_unlock;
- }
-
- phys_address = tpci200->slots[dev->slot].mem_phys.address;
- size_to_map = memory_size;
- break;
- default:
- dev_err(&dev->dev, "Slot [%d:%d] space %d doesn't exist !\n",
- tpci200->number, dev->slot, space);
- res = -EINVAL;
- goto out_unlock;
- }
-
- virt_addr_space->size = size_to_map;
- virt_addr_space->address =
- ioremap_nocache((unsigned long)phys_address, size_to_map);
-
-out_unlock:
- mutex_unlock(&tpci200->mutex);
- return res;
-}
-
static int tpci200_get_clockrate(struct ipack_device *dev)
{
struct tpci200_board *tpci200 = check_slot(dev);
@@ -610,8 +444,6 @@ static void tpci200_uninstall(struct tpci200_board *tpci200)
}
static const struct ipack_bus_ops tpci200_bus_ops = {
- .map_space = tpci200_slot_map_space,
- .unmap_space = tpci200_slot_unmap_space,
.request_irq = tpci200_request_irq,
.free_irq = tpci200_free_irq,
.get_clockrate = tpci200_get_clockrate,
@@ -641,6 +473,31 @@ static int tpci200_install(struct tpci200_board *tpci200)
return 0;
}
+static void tpci200_release_device(struct ipack_device *dev)
+{
+ kfree(dev);
+}
+
+static int tpci200_create_device(struct tpci200_board *tpci200, int i)
+{
+ enum ipack_space space;
+ struct ipack_device *dev =
+ kzalloc(sizeof(struct ipack_device), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->slot = i;
+ dev->bus = tpci200->info->ipack_bus;
+ dev->release = tpci200_release_device;
+
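+ /* each IPack space window sits at (module base + interval * slot) */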
+ for (space = 0; space < IPACK_SPACE_COUNT; space++) {
+ dev->region[space].start =
+ tpci200->mod_mem[space]
+ + tpci200_space_interval[space] * i;
+ dev->region[space].size = tpci200_space_size[space];
+ }
+ return ipack_device_register(dev);
+}
+
static int tpci200_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -716,7 +573,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
dev_set_drvdata(&pdev->dev, tpci200);
for (i = 0; i < TPCI200_NB_SLOT; i++)
- ipack_device_register(tpci200->info->ipack_bus, i);
+ tpci200_create_device(tpci200, i);
return 0;
out_err_bus_register:
@@ -742,7 +599,7 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
kfree(tpci200);
}
-static void __devexit tpci200_pci_remove(struct pci_dev *dev)
+static void tpci200_pci_remove(struct pci_dev *dev)
{
struct tpci200_board *tpci200 = pci_get_drvdata(dev);
@@ -761,20 +618,10 @@ static struct pci_driver tpci200_pci_drv = {
.name = "tpci200",
.id_table = tpci200_idtable,
.probe = tpci200_pci_probe,
- .remove = __devexit_p(tpci200_pci_remove),
+ .remove = tpci200_pci_remove,
};
-static int __init tpci200_drvr_init_module(void)
-{
- return pci_register_driver(&tpci200_pci_drv);
-}
-
-static void __exit tpci200_drvr_exit_module(void)
-{
- pci_unregister_driver(&tpci200_pci_drv);
-}
+module_pci_driver(tpci200_pci_drv);
MODULE_DESCRIPTION("TEWS TPCI-200 device driver");
MODULE_LICENSE("GPL");
-module_init(tpci200_drvr_init_module);
-module_exit(tpci200_drvr_exit_module);
diff --git a/drivers/staging/ipack/bridges/tpci200.h b/drivers/ipack/carriers/tpci200.h
index 235d1fe4f48c..a7a151dab83c 100644
--- a/drivers/staging/ipack/bridges/tpci200.h
+++ b/drivers/ipack/carriers/tpci200.h
@@ -2,9 +2,10 @@
* tpci200.h
*
* driver for the carrier TEWS TPCI-200
- * Copyright (c) 2009 Nicolas Serafini, EIC2 SA
- * Copyright (c) 2010,2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * Copyright (c) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ *
+ * Copyright (C) 2009-2012 CERN (www.cern.ch)
+ * Author: Nicolas Serafini, EIC2 SA
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -19,8 +20,7 @@
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <linux/io.h>
-
-#include "../ipack.h"
+#include <linux/ipack.h>
#define TPCI200_NB_SLOT 0x4
#define TPCI200_NB_BAR 0x6
@@ -49,20 +49,20 @@ struct tpci200_regs {
#define TPCI200_IFACE_SIZE 0x100
#define TPCI200_IO_SPACE_OFF 0x0000
-#define TPCI200_IO_SPACE_GAP 0x0100
+#define TPCI200_IO_SPACE_INTERVAL 0x0100
#define TPCI200_IO_SPACE_SIZE 0x0080
#define TPCI200_ID_SPACE_OFF 0x0080
-#define TPCI200_ID_SPACE_GAP 0x0100
+#define TPCI200_ID_SPACE_INTERVAL 0x0100
#define TPCI200_ID_SPACE_SIZE 0x0040
#define TPCI200_INT_SPACE_OFF 0x00C0
-#define TPCI200_INT_SPACE_GAP 0x0100
+#define TPCI200_INT_SPACE_INTERVAL 0x0100
#define TPCI200_INT_SPACE_SIZE 0x0040
#define TPCI200_IOIDINT_SIZE 0x0400
-#define TPCI200_MEM8_GAP 0x00400000
-#define TPCI200_MEM8_SIZE 0x00400000
-#define TPCI200_MEM16_GAP 0x00800000
-#define TPCI200_MEM16_SIZE 0x00800000
+#define TPCI200_MEM8_SPACE_INTERVAL 0x00400000
+#define TPCI200_MEM8_SPACE_SIZE 0x00400000
+#define TPCI200_MEM16_SPACE_INTERVAL 0x00800000
+#define TPCI200_MEM16_SPACE_SIZE 0x00800000
/* control field in tpci200_regs */
#define TPCI200_INT0_EN 0x0040
@@ -137,11 +137,7 @@ struct slot_irq {
*
*/
struct tpci200_slot {
- struct slot_irq *irq;
- struct ipack_addr_space io_phys;
- struct ipack_addr_space id_phys;
- struct ipack_addr_space int_phys;
- struct ipack_addr_space mem_phys;
+ struct slot_irq *irq;
};
/**
@@ -156,8 +152,6 @@ struct tpci200_infos {
struct pci_dev *pdev;
struct pci_device_id *id_table;
struct tpci200_regs __iomem *interface_regs;
- void __iomem *ioidint_space;
- void __iomem *mem8_space;
void __iomem *cfg_regs;
struct ipack_bus_device *ipack_bus;
};
@@ -167,6 +161,7 @@ struct tpci200_board {
spinlock_t regs_lock;
struct tpci200_slot *slots;
struct tpci200_infos *info;
+ phys_addr_t mod_mem[IPACK_SPACE_COUNT];
};
#endif /* _TPCI200_H_ */
diff --git a/drivers/staging/ipack/devices/Kconfig b/drivers/ipack/devices/Kconfig
index 39f71888a584..0b82fdc198c0 100644
--- a/drivers/staging/ipack/devices/Kconfig
+++ b/drivers/ipack/devices/Kconfig
@@ -4,4 +4,3 @@ config SERIAL_IPOCTAL
help
This driver supports the IPOCTAL serial port device for the IndustryPack bus.
default n
-
diff --git a/drivers/staging/ipack/devices/Makefile b/drivers/ipack/devices/Makefile
index 6de18bda4a9a..6de18bda4a9a 100644
--- a/drivers/staging/ipack/devices/Makefile
+++ b/drivers/ipack/devices/Makefile
diff --git a/drivers/staging/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index d751edfda839..576d53d92677 100644
--- a/drivers/staging/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -2,9 +2,10 @@
* ipoctal.c
*
* driver for the GE IP-OCTAL boards
- * Copyright (c) 2009 Nicolas Serafini, EIC2 SA
- * Copyright (c) 2010,2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * Copyright (c) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ *
+ * Copyright (C) 2009-2012 CERN (www.cern.ch)
+ * Author: Nicolas Serafini, EIC2 SA
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -21,7 +22,7 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/io.h>
-#include "../ipack.h"
+#include <linux/ipack.h>
#include "ipoctal.h"
#include "scc2698.h"
@@ -53,6 +54,8 @@ struct ipoctal {
struct ipoctal_channel channel[NR_CHANNELS];
unsigned char write;
struct tty_driver *tty_drv;
+ u8 __iomem *mem8_space;
+ u8 __iomem *int_space;
};
static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -252,35 +255,12 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
ipoctal_irq_channel(&ipoctal->channel[i]);
/* Clear the IPack device interrupt */
- readw(ipoctal->dev->int_space.address + ACK_INT_REQ0);
- readw(ipoctal->dev->int_space.address + ACK_INT_REQ1);
+ readw(ipoctal->int_space + ACK_INT_REQ0);
+ readw(ipoctal->int_space + ACK_INT_REQ1);
return IRQ_HANDLED;
}
-static int ipoctal_check_model(struct ipack_device *dev, unsigned char *id)
-{
- unsigned char manufacturerID;
- unsigned char board_id;
-
-
- manufacturerID = ioread8(dev->id_space.address + IPACK_IDPROM_OFFSET_MANUFACTURER_ID);
- if (manufacturerID != IPACK1_VENDOR_ID_SBS)
- return -ENODEV;
- board_id = ioread8(dev->id_space.address + IPACK_IDPROM_OFFSET_MODEL);
- switch (board_id) {
- case IPACK1_DEVICE_ID_SBS_OCTAL_232:
- case IPACK1_DEVICE_ID_SBS_OCTAL_422:
- case IPACK1_DEVICE_ID_SBS_OCTAL_485:
- *id = board_id;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
static const struct tty_port_operations ipoctal_tty_port_ops = {
.dtr_rts = NULL,
.activate = ipoctal_port_activate,
@@ -289,64 +269,55 @@ static const struct tty_port_operations ipoctal_tty_port_ops = {
static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
unsigned int slot)
{
- int res = 0;
+ int res;
int i;
struct tty_driver *tty;
char name[20];
- unsigned char board_id;
struct ipoctal_channel *channel;
+ struct ipack_region *region;
+ void __iomem *addr;
union scc2698_channel __iomem *chan_regs;
union scc2698_block __iomem *block_regs;
- res = ipoctal->dev->bus->ops->map_space(ipoctal->dev, 0,
- IPACK_ID_SPACE);
- if (res) {
- dev_err(&ipoctal->dev->dev,
- "Unable to map slot [%d:%d] ID space!\n",
- bus_nr, slot);
- return res;
- }
-
- res = ipoctal_check_model(ipoctal->dev, &board_id);
- if (res) {
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev,
- IPACK_ID_SPACE);
- goto out_unregister_id_space;
- }
- ipoctal->board_id = board_id;
+ ipoctal->board_id = ipoctal->dev->id_device;
- res = ipoctal->dev->bus->ops->map_space(ipoctal->dev, 0,
- IPACK_IO_SPACE);
- if (res) {
+ region = &ipoctal->dev->region[IPACK_IO_SPACE];
+ addr = devm_ioremap_nocache(&ipoctal->dev->dev,
+ region->start, region->size);
+ if (!addr) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] IO space!\n",
bus_nr, slot);
- goto out_unregister_id_space;
+ return -EADDRNOTAVAIL;
}
+ /* Save the virtual address to access the registers easily */
+ chan_regs =
+ (union scc2698_channel __iomem *) addr;
+ block_regs =
+ (union scc2698_block __iomem *) addr;
- res = ipoctal->dev->bus->ops->map_space(ipoctal->dev, 0,
- IPACK_INT_SPACE);
- if (res) {
+ region = &ipoctal->dev->region[IPACK_INT_SPACE];
+ ipoctal->int_space =
+ devm_ioremap_nocache(&ipoctal->dev->dev,
+ region->start, region->size);
+ if (!ipoctal->int_space) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] INT space!\n",
bus_nr, slot);
- goto out_unregister_io_space;
+ return -EADDRNOTAVAIL;
}
- res = ipoctal->dev->bus->ops->map_space(ipoctal->dev,
- 0x8000, IPACK_MEM_SPACE);
- if (res) {
+ region = &ipoctal->dev->region[IPACK_MEM8_SPACE];
+ ipoctal->mem8_space =
+ devm_ioremap_nocache(&ipoctal->dev->dev,
+ region->start, 0x8000);
+ if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
- "Unable to map slot [%d:%d] MEM space!\n",
+ "Unable to map slot [%d:%d] MEM8 space!\n",
bus_nr, slot);
- goto out_unregister_int_space;
+ return -EADDRNOTAVAIL;
}
- /* Save the virtual address to access the registers easily */
- chan_regs =
- (union scc2698_channel __iomem *) ipoctal->dev->io_space.address;
- block_regs =
- (union scc2698_block __iomem *) ipoctal->dev->io_space.address;
/* Disable RX and TX before touching anything */
for (i = 0; i < NR_CHANNELS ; i++) {
@@ -389,17 +360,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->dev->bus->ops->request_irq(ipoctal->dev,
ipoctal_irq_handler, ipoctal);
/* Dummy write */
- iowrite8(1, ipoctal->dev->mem_space.address + 1);
+ iowrite8(1, ipoctal->mem8_space + 1);
/* Register the TTY device */
/* Each IP-OCTAL channel is a TTY port */
tty = alloc_tty_driver(NR_CHANNELS);
- if (!tty) {
- res = -ENOMEM;
- goto out_unregister_slot_unmap;
- }
+ if (!tty)
+ return -ENOMEM;
/* Fill struct tty_driver with ipoctal data */
tty->owner = THIS_MODULE;
@@ -422,7 +391,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
if (res) {
dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
put_tty_driver(tty);
- goto out_unregister_slot_unmap;
+ return res;
}
/* Save struct tty_driver for use it when uninstalling the device */
@@ -446,6 +415,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
if (IS_ERR(tty_dev)) {
dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+ tty_port_destroy(&channel->tty_port);
continue;
}
dev_set_drvdata(tty_dev, channel);
@@ -458,16 +428,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
}
return 0;
-
-out_unregister_slot_unmap:
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_ID_SPACE);
-out_unregister_int_space:
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_INT_SPACE);
-out_unregister_io_space:
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_IO_SPACE);
-out_unregister_id_space:
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_MEM_SPACE);
- return res;
}
static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -719,7 +679,7 @@ static int ipoctal_probe(struct ipack_device *dev)
return -ENOMEM;
ipoctal->dev = dev;
- res = ipoctal_inst_slot(ipoctal, dev->bus_nr, dev->slot);
+ res = ipoctal_inst_slot(ipoctal, dev->bus->bus_nr, dev->slot);
if (res)
goto out_uninst;
@@ -741,14 +701,11 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
struct ipoctal_channel *channel = &ipoctal->channel[i];
tty_unregister_device(ipoctal->tty_drv, i);
tty_port_free_xmit_buf(&channel->tty_port);
+ tty_port_destroy(&channel->tty_port);
}
tty_unregister_driver(ipoctal->tty_drv);
put_tty_driver(ipoctal->tty_drv);
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_MEM_SPACE);
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_INT_SPACE);
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_IO_SPACE);
- ipoctal->dev->bus->ops->unmap_space(ipoctal->dev, IPACK_ID_SPACE);
kfree(ipoctal);
}
diff --git a/drivers/staging/ipack/devices/ipoctal.h b/drivers/ipack/devices/ipoctal.h
index c5b4ed46516f..28f1c4233154 100644
--- a/drivers/staging/ipack/devices/ipoctal.h
+++ b/drivers/ipack/devices/ipoctal.h
@@ -2,9 +2,10 @@
* ipoctal.h
*
* driver for the IPOCTAL boards
- * Copyright (c) 2009 Nicolas Serafini, EIC2 SA
- * Copyright (c) 2010,2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * Copyright (c) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ *
+ * Copyright (C) 2009-2012 CERN (www.cern.ch)
+ * Author: Nicolas Serafini, EIC2 SA
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/staging/ipack/devices/scc2698.h b/drivers/ipack/devices/scc2698.h
index 96e8d8c30e14..2ad6acd513fa 100644
--- a/drivers/staging/ipack/devices/scc2698.h
+++ b/drivers/ipack/devices/scc2698.h
@@ -2,9 +2,10 @@
* scc2698.h
*
* driver for the IPOCTAL boards
- * Copyright (c) 2009 Nicolas Serafini, EIC2 SA
- * Copyright (c) 2010,2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * Copyright (c) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ *
+ * Copyright (C) 2009-2012 CERN (www.cern.ch)
+ * Author: Nicolas Serafini, EIC2 SA
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/staging/ipack/ipack.c b/drivers/ipack/ipack.c
index d1e0651592a2..7ec6b208b1cb 100644
--- a/drivers/staging/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -1,8 +1,8 @@
/*
* Industry-pack bus support functions.
*
- * (C) 2011 Samuel Iglesias Gonsalvez <siglesia@cern.ch>, CERN
- * (C) 2012 Samuel Iglesias Gonsalvez <siglesias@igalia.com>, Igalia
+ * Copyright (C) 2011-2012 CERN (www.cern.ch)
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include <asm/io.h>
-#include "ipack.h"
+#include <linux/io.h>
+#include <linux/ipack.h>
#define to_ipack_dev(device) container_of(device, struct ipack_device, dev)
#define to_ipack_driver(drv) container_of(drv, struct ipack_driver, driver)
@@ -24,7 +24,7 @@ static void ipack_device_release(struct device *dev)
{
struct ipack_device *device = to_ipack_dev(dev);
kfree(device->id);
- kfree(device);
+ device->release(device);
}
static inline const struct ipack_device_id *
@@ -85,8 +85,6 @@ static int ipack_bus_remove(struct device *device)
return 0;
}
-#ifdef CONFIG_HOTPLUG
-
static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct ipack_device *idev;
@@ -104,12 +102,6 @@ static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#else /* !CONFIG_HOTPLUG */
-
-#define ipack_uevent NULL
-
-#endif /* !CONFIG_HOTPLUG */
-
#define ipack_device_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
@@ -234,7 +226,7 @@ static int ipack_unregister_bus_member(struct device *dev, void *data)
struct ipack_device *idev = to_ipack_dev(dev);
struct ipack_bus_device *bus = data;
- if (idev->bus_nr == bus->bus_nr)
+ if (idev->bus == bus)
ipack_device_unregister(idev);
return 1;
@@ -242,7 +234,8 @@ static int ipack_unregister_bus_member(struct device *dev, void *data)
int ipack_bus_unregister(struct ipack_bus_device *bus)
{
- bus_for_each_dev(&ipack_bus_type, NULL, bus, ipack_unregister_bus_member);
+ bus_for_each_dev(&ipack_bus_type, NULL, bus,
+ ipack_unregister_bus_member);
ida_simple_remove(&ipack_ida, bus->bus_nr);
kfree(bus);
return 0;
@@ -351,12 +344,12 @@ static int ipack_device_read_id(struct ipack_device *dev)
int i;
int ret = 0;
- ret = dev->bus->ops->map_space(dev, 0, IPACK_ID_SPACE);
- if (ret) {
+ idmem = ioremap(dev->region[IPACK_ID_SPACE].start,
+ dev->region[IPACK_ID_SPACE].size);
+ if (!idmem) {
dev_err(&dev->dev, "error mapping memory\n");
- return ret;
+ return -ENOMEM;
}
- idmem = dev->id_space.address;
/* Determine ID PROM Data Format. If we find the ids "IPAC" or "IPAH"
* we are dealing with a IndustryPack format 1 device. If we detect
@@ -421,57 +414,44 @@ static int ipack_device_read_id(struct ipack_device *dev)
}
out:
- dev->bus->ops->unmap_space(dev, IPACK_ID_SPACE);
+ iounmap(idmem);
return ret;
}
-struct ipack_device *ipack_device_register(struct ipack_bus_device *bus,
- int slot)
+int ipack_device_register(struct ipack_device *dev)
{
int ret;
- struct ipack_device *dev;
-
- dev = kzalloc(sizeof(struct ipack_device), GFP_KERNEL);
- if (!dev)
- return NULL;
dev->dev.bus = &ipack_bus_type;
dev->dev.release = ipack_device_release;
- dev->dev.parent = bus->parent;
- dev->slot = slot;
- dev->bus_nr = bus->bus_nr;
- dev->bus = bus;
+ dev->dev.parent = dev->bus->parent;
dev_set_name(&dev->dev,
- "ipack-dev.%u.%u", dev->bus_nr, dev->slot);
+ "ipack-dev.%u.%u", dev->bus->bus_nr, dev->slot);
- if (bus->ops->set_clockrate(dev, 8))
+ if (dev->bus->ops->set_clockrate(dev, 8))
dev_warn(&dev->dev, "failed to switch to 8 MHz operation for reading of device ID.\n");
- if (bus->ops->reset_timeout(dev))
+ if (dev->bus->ops->reset_timeout(dev))
dev_warn(&dev->dev, "failed to reset potential timeout.");
ret = ipack_device_read_id(dev);
if (ret < 0) {
dev_err(&dev->dev, "error reading device id section.\n");
- kfree(dev);
- return NULL;
+ return ret;
}
/* if the device supports 32 MHz operation, use it. */
if (dev->speed_32mhz) {
- ret = bus->ops->set_clockrate(dev, 32);
+ ret = dev->bus->ops->set_clockrate(dev, 32);
if (ret < 0)
dev_err(&dev->dev, "failed to switch to 32 MHz operation.\n");
}
ret = device_register(&dev->dev);
- if (ret < 0) {
+ if (ret < 0)
kfree(dev->id);
- kfree(dev);
- return NULL;
- }
- return dev;
+ return ret;
}
EXPORT_SYMBOL_GPL(ipack_device_register);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 1bb8bf6d7fd4..62ca575701d3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1 +1,8 @@
-# empty
+config VERSATILE_FPGA_IRQ
+ bool
+ select IRQ_DOMAIN
+
+config VERSATILE_FPGA_IRQ_NR
+ int
+ default 4
+ depends on VERSATILE_FPGA_IRQ
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 054321db4350..bf4609a5bd9d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1 +1,4 @@
-obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
diff --git a/drivers/irqchip/irq-sunxi.c b/drivers/irqchip/irq-sunxi.c
new file mode 100644
index 000000000000..10974fa42653
--- /dev/null
+++ b/drivers/irqchip/irq-sunxi.c
@@ -0,0 +1,151 @@
+/*
+ * Allwinner A1X SoCs IRQ chip driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * Benn Huang <benn@allwinnertech.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/sunxi.h>
+
+#define SUNXI_IRQ_VECTOR_REG 0x00
+#define SUNXI_IRQ_PROTECTION_REG 0x08
+#define SUNXI_IRQ_NMI_CTRL_REG 0x0c
+#define SUNXI_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
+#define SUNXI_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
+#define SUNXI_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x)
+#define SUNXI_IRQ_MASK_REG(x) (0x50 + 0x4 * x)
+
+static void __iomem *sunxi_irq_base;
+static struct irq_domain *sunxi_irq_domain;
+
+void sunxi_irq_ack(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg));
+ writel(val | (1 << irq_off),
+ sunxi_irq_base + SUNXI_IRQ_PENDING_REG(reg));
+}
+
+static void sunxi_irq_mask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
+ writel(val & ~(1 << irq_off),
+ sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
+}
+
+static void sunxi_irq_unmask(struct irq_data *irqd)
+{
+ unsigned int irq = irqd_to_hwirq(irqd);
+ unsigned int irq_off = irq % 32;
+ int reg = irq / 32;
+ u32 val;
+
+ val = readl(sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
+ writel(val | (1 << irq_off),
+ sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(reg));
+}
+
+static struct irq_chip sunxi_irq_chip = {
+ .name = "sunxi_irq",
+ .irq_ack = sunxi_irq_ack,
+ .irq_mask = sunxi_irq_mask,
+ .irq_unmask = sunxi_irq_unmask,
+};
+
+static int sunxi_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &sunxi_irq_chip,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops sunxi_irq_ops = {
+ .map = sunxi_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init sunxi_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ sunxi_irq_base = of_iomap(node, 0);
+ if (!sunxi_irq_base)
+ panic("%s: unable to map IC registers\n",
+ node->full_name);
+
+ /* Disable all interrupts */
+ writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(0));
+ writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(1));
+ writel(0, sunxi_irq_base + SUNXI_IRQ_ENABLE_REG(2));
+
+ /* Mask all the interrupts */
+ writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(0));
+ writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(1));
+ writel(0, sunxi_irq_base + SUNXI_IRQ_MASK_REG(2));
+
+ /* Clear all the pending interrupts */
+ writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(0));
+ writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(1));
+ writel(0xffffffff, sunxi_irq_base + SUNXI_IRQ_PENDING_REG(2));
+
+ /* Enable protection mode */
+ writel(0x01, sunxi_irq_base + SUNXI_IRQ_PROTECTION_REG);
+
+ /* Configure the external interrupt source type */
+ writel(0x00, sunxi_irq_base + SUNXI_IRQ_NMI_CTRL_REG);
+
+ sunxi_irq_domain = irq_domain_add_linear(node, 3 * 32,
+ &sunxi_irq_ops, NULL);
+ if (!sunxi_irq_domain)
+ panic("%s: unable to create IRQ domain\n", node->full_name);
+
+ return 0;
+}
+
+static struct of_device_id sunxi_irq_dt_ids[] __initconst = {
+ { .compatible = "allwinner,sunxi-ic", .data = sunxi_of_init },
+ { }
+};
+
+void __init sunxi_init_irq(void)
+{
+ of_irq_init(sunxi_irq_dt_ids);
+}
+
+asmlinkage void __exception_irq_entry sunxi_handle_irq(struct pt_regs *regs)
+{
+ u32 irq, hwirq;
+
+ hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2;
+ while (hwirq != 0) {
+ irq = irq_find_mapping(sunxi_irq_domain, hwirq);
+ handle_IRQ(irq, regs);
+ hwirq = readl(sunxi_irq_base + SUNXI_IRQ_VECTOR_REG) >> 2;
+ }
+}
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
new file mode 100644
index 000000000000..9dbd82b716d3
--- /dev/null
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -0,0 +1,203 @@
+/*
+ * Support for Versatile FPGA-based IRQ controllers
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/versatile-fpga.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IRQ_STATUS 0x00
+#define IRQ_RAW_STATUS 0x04
+#define IRQ_ENABLE_SET 0x08
+#define IRQ_ENABLE_CLEAR 0x0c
+#define INT_SOFT_SET 0x10
+#define INT_SOFT_CLEAR 0x14
+#define FIQ_STATUS 0x20
+#define FIQ_RAW_STATUS 0x24
+#define FIQ_ENABLE 0x28
+#define FIQ_ENABLE_SET 0x28
+#define FIQ_ENABLE_CLEAR 0x2C
+
+/**
+ * struct fpga_irq_data - irq data container for the FPGA IRQ controller
+ * @base: register base address in virtual memory
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ * @valid: mask for valid IRQs on this controller
+ * @used_irqs: number of active IRQs on this controller
+ */
+struct fpga_irq_data {
+ void __iomem *base;
+ struct irq_chip chip;
+ u32 valid;
+ struct irq_domain *domain;
+ u8 used_irqs;
+};
+
+/* we cannot allocate memory when the controllers are initially registered */
+static struct fpga_irq_data fpga_irq_devices[CONFIG_VERSATILE_FPGA_IRQ_NR];
+static int fpga_irq_id;
+
+static void fpga_irq_mask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << d->hwirq;
+
+ writel(mask, f->base + IRQ_ENABLE_CLEAR);
+}
+
+static void fpga_irq_unmask(struct irq_data *d)
+{
+ struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << d->hwirq;
+
+ writel(mask, f->base + IRQ_ENABLE_SET);
+}
+
+static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
+{
+ struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+ u32 status = readl(f->base + IRQ_STATUS);
+
+ if (status == 0) {
+ do_bad_IRQ(irq, desc);
+ return;
+ }
+
+ do {
+ irq = ffs(status) - 1;
+ status &= ~(1 << irq);
+ generic_handle_irq(irq_find_mapping(f->domain, irq));
+ } while (status);
+}
+
+/*
+ * Handle each interrupt in a single FPGA IRQ controller. Returns non-zero
+ * if we've handled at least one interrupt. This does a single read of the
+ * status register and handles all interrupts in order from LSB first.
+ */
+static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
+{
+ int handled = 0;
+ int irq;
+ u32 status;
+
+ while ((status = readl(f->base + IRQ_STATUS))) {
+ irq = ffs(status) - 1;
+ handle_IRQ(irq_find_mapping(f->domain, irq), regs);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered FPGA IRQ controllers until there are
+ * no pending interrupts.
+ */
+asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < fpga_irq_id; ++i)
+ handled |= handle_one_fpga(&fpga_irq_devices[i], regs);
+ } while (handled);
+}
+
+static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct fpga_irq_data *f = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(f->valid & BIT(hwirq)))
+ return -ENOTSUPP;
+ irq_set_chip_data(irq, f);
+ irq_set_chip_and_handler(irq, &f->chip,
+ handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ return 0;
+}
+
+static struct irq_domain_ops fpga_irqdomain_ops = {
+ .map = fpga_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
+ int parent_irq, u32 valid, struct device_node *node)
+{
+ struct fpga_irq_data *f;
+ int i;
+
+ if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
+ pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_PLAT_VERSATILE_FPGA_IRQ_NR\n", __func__);
+ return;
+ }
+ f = &fpga_irq_devices[fpga_irq_id];
+ f->base = base;
+ f->chip.name = name;
+ f->chip.irq_ack = fpga_irq_mask;
+ f->chip.irq_mask = fpga_irq_mask;
+ f->chip.irq_unmask = fpga_irq_unmask;
+ f->valid = valid;
+
+ if (parent_irq != -1) {
+ irq_set_handler_data(parent_irq, f);
+ irq_set_chained_handler(parent_irq, fpga_irq_handle);
+ }
+
+ /* This will also allocate irq descriptors */
+ f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
+ &fpga_irqdomain_ops, f);
+
+ /* This will allocate all valid descriptors in the linear case */
+ for (i = 0; i < fls(valid); i++)
+ if (valid & BIT(i)) {
+ if (!irq_start)
+ irq_create_mapping(f->domain, i);
+ f->used_irqs++;
+ }
+
+ pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs\n",
+ fpga_irq_id, name, base, f->used_irqs);
+
+ fpga_irq_id++;
+}
+
+#ifdef CONFIG_OF
+int __init fpga_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *base;
+ u32 clear_mask;
+ u32 valid_mask;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ base = of_iomap(node, 0);
+ WARN(!base, "unable to map fpga irq registers\n");
+
+ if (of_property_read_u32(node, "clear-mask", &clear_mask))
+ clear_mask = 0;
+
+ if (of_property_read_u32(node, "valid-mask", &valid_mask))
+ valid_mask = 0;
+
+ fpga_irq_init(base, node->name, 0, -1, valid_mask, node);
+
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
+ return 0;
+}
+#endif
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
new file mode 100644
index 000000000000..80e1d2fd9d4c
--- /dev/null
+++ b/drivers/irqchip/spear-shirq.c
@@ -0,0 +1,316 @@
+/*
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009-2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/spear-shirq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(lock);
+
+/* spear300 shared irq registers offsets and masks */
+#define SPEAR300_INT_ENB_MASK_REG 0x54
+#define SPEAR300_INT_STS_MASK_REG 0x58
+
+static struct spear_shirq spear300_shirq_ras1 = {
+ .irq_nr = 9,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = SPEAR300_INT_ENB_MASK_REG,
+ .status_reg = SPEAR300_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear300_shirq_blocks[] = {
+ &spear300_shirq_ras1,
+};
+
+/* spear310 shared irq registers offsets and masks */
+#define SPEAR310_INT_STS_MASK_REG 0x04
+
+static struct spear_shirq spear310_shirq_ras1 = {
+ .irq_nr = 8,
+ .irq_bit_off = 0,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras2 = {
+ .irq_nr = 5,
+ .irq_bit_off = 8,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_ras3 = {
+ .irq_nr = 1,
+ .irq_bit_off = 13,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq spear310_shirq_intrcomm_ras = {
+ .irq_nr = 3,
+ .irq_bit_off = 14,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR310_INT_STS_MASK_REG,
+ .clear_reg = -1,
+ },
+};
+
+static struct spear_shirq *spear310_shirq_blocks[] = {
+ &spear310_shirq_ras1,
+ &spear310_shirq_ras2,
+ &spear310_shirq_ras3,
+ &spear310_shirq_intrcomm_ras,
+};
+
+/* spear320 shared irq registers offsets and masks */
+#define SPEAR320_INT_STS_MASK_REG 0x04
+#define SPEAR320_INT_CLR_MASK_REG 0x04
+#define SPEAR320_INT_ENB_MASK_REG 0x08
+
+static struct spear_shirq spear320_shirq_ras1 = {
+ .irq_nr = 3,
+ .irq_bit_off = 7,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras2 = {
+ .irq_nr = 1,
+ .irq_bit_off = 10,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_ras3 = {
+ .irq_nr = 3,
+ .irq_bit_off = 0,
+ .invalid_irq = 1,
+ .regs = {
+ .enb_reg = SPEAR320_INT_ENB_MASK_REG,
+ .reset_to_enb = 1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq spear320_shirq_intrcomm_ras = {
+ .irq_nr = 11,
+ .irq_bit_off = 11,
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
+ .clear_reg = SPEAR320_INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+static struct spear_shirq *spear320_shirq_blocks[] = {
+ &spear320_shirq_ras3,
+ &spear320_shirq_ras1,
+ &spear320_shirq_ras2,
+ &spear320_shirq_intrcomm_ras,
+};
+
+static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+ struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
+ u32 val, offset = d->irq - shirq->irq_base;
+ unsigned long flags;
+
+ if (shirq->regs.enb_reg == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->base + shirq->regs.enb_reg);
+
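+ /* blocks with reset_to_enb use an inverted enable polarity, hence the XOR */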
+ if (mask ^ shirq->regs.reset_to_enb)
+ val &= ~(0x1 << shirq->irq_bit_off << offset);
+ else
+ val |= 0x1 << shirq->irq_bit_off << offset;
+
+ writel(val, shirq->base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+
+}
+
+static void shirq_irq_mask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 1);
+}
+
+static void shirq_irq_unmask(struct irq_data *d)
+{
+ shirq_irq_mask_unmask(d, 0);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear-shirq",
+ .irq_ack = shirq_irq_mask,
+ .irq_mask = shirq_irq_mask,
+ .irq_unmask = shirq_irq_unmask,
+};
+
+static void shirq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 i, j, val, mask, tmp;
+ struct irq_chip *chip;
+ struct spear_shirq *shirq = irq_get_handler_data(irq);
+
+ chip = irq_get_chip(irq);
+ chip->irq_ack(&desc->irq_data);
+
+ mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
+ while ((val = readl(shirq->base + shirq->regs.status_reg) &
+ mask)) {
+
+ val >>= shirq->irq_bit_off;
+ for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
+
+ if (!(j & val))
+ continue;
+
+ generic_handle_irq(shirq->irq_base + i);
+
+ /* clear interrupt */
+ if (shirq->regs.clear_reg == -1)
+ continue;
+
+ tmp = readl(shirq->base + shirq->regs.clear_reg);
+ if (shirq->regs.reset_to_clear)
+ tmp &= ~(j << shirq->irq_bit_off);
+ else
+ tmp |= (j << shirq->irq_bit_off);
+ writel(tmp, shirq->base + shirq->regs.clear_reg);
+ }
+ }
+ chip->irq_unmask(&desc->irq_data);
+}
+
+static void __init spear_shirq_register(struct spear_shirq *shirq)
+{
+ int i;
+
+ if (shirq->invalid_irq)
+ return;
+
+ irq_set_chained_handler(shirq->irq, shirq_handler);
+ for (i = 0; i < shirq->irq_nr; i++) {
+ irq_set_chip_and_handler(shirq->irq_base + i,
+ &shirq_chip, handle_simple_irq);
+ set_irq_flags(shirq->irq_base + i, IRQF_VALID);
+ irq_set_chip_data(shirq->irq_base + i, shirq);
+ }
+
+ irq_set_handler_data(shirq->irq, shirq);
+}
+
+static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
+ struct device_node *np)
+{
+ int i, irq_base, hwirq = 0, irq_nr = 0;
+ static struct irq_domain *shirq_domain;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map shirq registers\n", __func__);
+ return -ENXIO;
+ }
+
+ for (i = 0; i < block_nr; i++)
+ irq_nr += shirq_blocks[i]->irq_nr;
+
+ irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: irq desc alloc failed\n", __func__);
+ goto err_unmap;
+ }
+
+ shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (WARN_ON(!shirq_domain)) {
+ pr_warn("%s: irq domain init failed\n", __func__);
+ goto err_free_desc;
+ }
+
+ for (i = 0; i < block_nr; i++) {
+ shirq_blocks[i]->base = base;
+ shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
+ hwirq);
+ shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
+
+ spear_shirq_register(shirq_blocks[i]);
+ hwirq += shirq_blocks[i]->irq_nr;
+ }
+
+ return 0;
+
+err_free_desc:
+ irq_free_descs(irq_base, irq_nr);
+err_unmap:
+ iounmap(base);
+ return -ENXIO;
+}
+
+int __init spear300_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear300_shirq_blocks,
+ ARRAY_SIZE(spear300_shirq_blocks), np);
+}
+
+int __init spear310_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear310_shirq_blocks,
+ ARRAY_SIZE(spear310_shirq_blocks), np);
+}
+
+int __init spear320_shirq_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return shirq_init(spear320_shirq_blocks,
+ ARRAY_SIZE(spear320_shirq_blocks), np);
+}
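For reference, a standalone userspace sketch (not part of the patch) of the status-decode loop that shirq_handler implements above: mask the shared status register to the block's bit window, shift it down, and dispatch one virtual interrupt per set bit. The block parameters mirror spear310_shirq_ras2; the status value 0x0900 and the irq_base of 64 are invented purely for illustration.

#include <stdio.h>

struct shirq_block {
	unsigned int irq_nr;      /* number of interrupts in the block */
	unsigned int irq_bit_off; /* first bit of the block in the status register */
	unsigned int irq_base;    /* first virtual irq number of the block */
};

static void decode_status(const struct shirq_block *b, unsigned int status)
{
	unsigned int mask = ((1u << b->irq_nr) - 1) << b->irq_bit_off;
	unsigned int val = (status & mask) >> b->irq_bit_off;
	unsigned int i;

	for (i = 0; i < b->irq_nr; i++)
		if (val & (1u << i))
			printf("dispatch virq %u (status bit %u)\n",
			       b->irq_base + i, b->irq_bit_off + i);
}

int main(void)
{
	/* mimics spear310_shirq_ras2: 5 interrupts starting at status bit 8 */
	struct shirq_block ras2 = { .irq_nr = 5, .irq_bit_off = 8, .irq_base = 64 };

	decode_status(&ras2, 0x0900);	/* bits 8 and 11 pending -> virqs 64 and 67 */
	return 0;
}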
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index c679867c2ccd..89562a845f6a 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -77,8 +77,6 @@ struct ackqueue_entry {
};
struct capiminor {
- struct kref kref;
-
unsigned int minor;
struct capi20_appl *ap;
@@ -190,7 +188,20 @@ static void capiminor_del_all_ack(struct capiminor *mp)
/* -------- struct capiminor ---------------------------------------- */
-static const struct tty_port_operations capiminor_port_ops; /* we have none */
+static void capiminor_destroy(struct tty_port *port)
+{
+ struct capiminor *mp = container_of(port, struct capiminor, port);
+
+ kfree_skb(mp->outskb);
+ skb_queue_purge(&mp->inqueue);
+ skb_queue_purge(&mp->outqueue);
+ capiminor_del_all_ack(mp);
+ kfree(mp);
+}
+
+static const struct tty_port_operations capiminor_port_ops = {
+ .destruct = capiminor_destroy,
+};
static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
{
@@ -204,8 +215,6 @@ static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
return NULL;
}
- kref_init(&mp->kref);
-
mp->ap = ap;
mp->ncci = ncci;
INIT_LIST_HEAD(&mp->ackqueue);
@@ -247,21 +256,10 @@ err_out2:
spin_unlock(&capiminors_lock);
err_out1:
- kfree(mp);
+ tty_port_put(&mp->port);
return NULL;
}
-static void capiminor_destroy(struct kref *kref)
-{
- struct capiminor *mp = container_of(kref, struct capiminor, kref);
-
- kfree_skb(mp->outskb);
- skb_queue_purge(&mp->inqueue);
- skb_queue_purge(&mp->outqueue);
- capiminor_del_all_ack(mp);
- kfree(mp);
-}
-
static struct capiminor *capiminor_get(unsigned int minor)
{
struct capiminor *mp;
@@ -269,7 +267,7 @@ static struct capiminor *capiminor_get(unsigned int minor)
spin_lock(&capiminors_lock);
mp = capiminors[minor];
if (mp)
- kref_get(&mp->kref);
+ tty_port_get(&mp->port);
spin_unlock(&capiminors_lock);
return mp;
@@ -277,7 +275,7 @@ static struct capiminor *capiminor_get(unsigned int minor)
static inline void capiminor_put(struct capiminor *mp)
{
- kref_put(&mp->kref, capiminor_destroy);
+ tty_port_put(&mp->port);
}
static void capiminor_free(struct capiminor *mp)
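For reference, a userspace analogue (not kernel code) of the refcount-plus-destructor pattern the capi.c hunks above switch to: capiminor drops its private kref and instead takes references on its embedded tty_port with tty_port_get()/tty_port_put(), letting the port's .destruct operation free the object on the final put. All names below are invented for illustration; the real driver uses the tty_port API.

#include <stdio.h>
#include <stdlib.h>

struct port {
	int count;                        /* reference count */
	void (*destruct)(struct port *);  /* called when count reaches zero */
};

static void port_get(struct port *p)
{
	p->count++;
}

static void port_put(struct port *p)
{
	if (--p->count == 0)
		p->destruct(p);
}

struct minor {
	struct port port;   /* embedded, like capiminor.port; placed first so the cast below works */
	int id;
};

static void minor_destruct(struct port *p)
{
	/* hand-rolled container_of: port is the first member of struct minor */
	struct minor *m = (struct minor *)p;

	printf("freeing minor %d\n", m->id);
	free(m);
}

int main(void)
{
	struct minor *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->id = 0;
	m->port.count = 1;                /* initial reference held by the allocator */
	m->port.destruct = minor_destruct;

	port_get(&m->port);               /* a user grabs a reference */
	port_put(&m->port);               /* and drops it again */
	port_put(&m->port);               /* final put runs the destructor */
	return 0;
}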
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 68452b768da2..03a0a01a4054 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
CAPIMSG_CONTROL(data));
l -= 12;
+ if (l <= 0)
+ return;
dbgline = kmalloc(3 * l, GFP_ATOMIC);
if (!dbgline)
return;
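For reference, a small userspace sketch of why the added length check matters: after the 12 bytes of header fields already printed are subtracted, a header-only or truncated message leaves l at zero or below, and 3 * l would otherwise reach the allocator as a zero or (once converted to size_t) enormous size. The message lengths below are invented for illustration.

#include <stdio.h>
#include <stddef.h>

static void show_dump_alloc(int msg_len)
{
	int l = msg_len - 12;	/* payload left after the header fields */

	if (l <= 0) {		/* the guard added by the patch */
		printf("msg_len %d: header only, nothing to dump\n", msg_len);
		return;
	}
	printf("msg_len %d: would allocate %zu bytes\n", msg_len, (size_t)(3 * l));
}

int main(void)
{
	show_dump_alloc(20);	/* normal message */
	show_dump_alloc(12);	/* exactly the header: skipped */
	show_dump_alloc(8);	/* truncated: also skipped */
	return 0;
}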
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 30a6b174fbb0..6849a11a1b24 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -507,6 +507,7 @@ void gigaset_freecs(struct cardstate *cs)
gig_dbg(DEBUG_INIT, "clearing at_state");
clear_at_state(&cs->at_state);
dealloc_temp_at_states(cs);
+ tty_port_destroy(&cs->port);
/* fall through */
case 0: /* error in basic setup */
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index b305e6b2b8ee..ac4863c2ecbc 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -299,8 +299,8 @@ static void b1pciv4_remove(struct pci_dev *pdev)
#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
-static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int b1pci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
@@ -344,7 +344,7 @@ static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
return retval;
}
-static void __devexit b1pci_pci_remove(struct pci_dev *pdev)
+static void b1pci_pci_remove(struct pci_dev *pdev)
{
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
avmcard *card = pci_get_drvdata(pdev);
@@ -362,7 +362,7 @@ static struct pci_driver b1pci_pci_driver = {
.name = "b1pci",
.id_table = b1pci_pci_tbl,
.probe = b1pci_pci_probe,
- .remove = __devexit_p(b1pci_pci_remove),
+ .remove = b1pci_pci_remove,
};
static struct capi_driver capi_driver_b1pci = {
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 98f18812441d..1d7fc44e3eef 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1249,8 +1249,7 @@ err:
/* ------------------------------------------------------------- */
-static int __devinit c4_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
int nr = ent->driver_data;
int retval = 0;
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index cb9a30427bd2..2180b1685691 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -187,8 +187,7 @@ static char *t1pci_procinfo(struct capi_ctr *ctrl)
/* ------------------------------------------------------------- */
-static int __devinit t1pci_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
+static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index ca6d276bb256..52377b4bf039 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -150,12 +150,12 @@ MODULE_DEVICE_TABLE(pci, divas_pci_tbl);
static int divas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static void __devexit divas_remove_one(struct pci_dev *pdev);
+static void divas_remove_one(struct pci_dev *pdev);
static struct pci_driver diva_pci_driver = {
.name = "divas",
.probe = divas_init_one,
- .remove = __devexit_p(divas_remove_one),
+ .remove = divas_remove_one,
.id_table = divas_pci_tbl,
};
@@ -688,8 +688,7 @@ static int __init divas_register_chrdev(void)
/* --------------------------------------------------------------------------
PCI driver section
-------------------------------------------------------------------------- */
-static int __devinit divas_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int divas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
void *pdiva = NULL;
u8 pci_latency;
@@ -749,7 +748,7 @@ static int __devinit divas_init_one(struct pci_dev *pdev,
return (0);
}
-static void __devexit divas_remove_one(struct pci_dev *pdev)
+static void divas_remove_one(struct pci_dev *pdev)
{
void *pdiva = pci_get_drvdata(pdev);
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index dceaec821b0e..292991c90c02 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1034,7 +1034,7 @@ release_card(struct fritzcard *card)
AVM_cnt--;
}
-static int __devinit
+static int
setup_instance(struct fritzcard *card)
{
int i, err;
@@ -1096,7 +1096,7 @@ error:
return err;
}
-static int __devinit
+static int
fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1130,7 +1130,7 @@ fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
fritz_remove_pci(struct pci_dev *pdev)
{
struct fritzcard *card = pci_get_drvdata(pdev);
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
pr_info("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id fcpci_ids[] __devinitdata = {
+static struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
@@ -1154,7 +1154,7 @@ MODULE_DEVICE_TABLE(pci, fcpci_ids);
static struct pci_driver fcpci_driver = {
.name = "fcpci",
.probe = fritzpci_probe,
- .remove = __devexit_p(fritz_remove_pci),
+ .remove = fritz_remove_pci,
.id_table = fcpci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index f02794203bb1..28543d795188 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5274,7 +5274,7 @@ free_card:
return ret_err;
}
-static void __devexit hfc_remove_pci(struct pci_dev *pdev)
+static void hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_multi *card = pci_get_drvdata(pdev);
u_long flags;
@@ -5351,7 +5351,7 @@ static const struct hm_map hfcm_map[] = {
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] __devinitdata = {
+static struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
@@ -5472,7 +5472,7 @@ hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static struct pci_driver hfcmultipci_driver = {
.name = "hfc_multi",
.probe = hfcmulti_probe,
- .remove = __devexit_p(hfc_remove_pci),
+ .remove = hfc_remove_pci,
.id_table = hfmultipci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 81363ffa5357..a7e4939787c9 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -490,7 +490,7 @@ receive_dmsg(struct hfc_pci *hc)
(df->data[le16_to_cpu(zp->z1)])) {
if (dch->debug & DEBUG_HW)
printk(KERN_DEBUG
- "empty_fifo hfcpci paket inv. len "
+ "empty_fifo hfcpci packet inv. len "
"%d or crc %d\n",
rcnt,
df->data[le16_to_cpu(zp->z1)]);
@@ -2215,7 +2215,7 @@ static struct pci_device_id hfc_ids[] =
{},
};
-static int __devinit
+static int
hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -2246,7 +2246,7 @@ hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
hfc_remove_pci(struct pci_dev *pdev)
{
struct hfc_pci *card = pci_get_drvdata(pdev);
@@ -2263,7 +2263,7 @@ hfc_remove_pci(struct pci_dev *pdev)
static struct pci_driver hfc_driver = {
.name = "hfcpci",
.probe = hfc_probe,
- .remove = __devexit_p(hfc_remove_pci),
+ .remove = hfc_remove_pci,
.id_table = hfc_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 631eb3fa63cf..c1493f4162fb 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -125,7 +125,7 @@ struct inf_hw {
#define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53
#define PCI_SUB_ID_SEDLBAUER 0x01
-static struct pci_device_id infineon_ids[] __devinitdata = {
+static struct pci_device_id infineon_ids[] = {
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20), INF_DIVA20 },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA20_U), INF_DIVA20U },
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_DIVA201), INF_DIVA201 },
@@ -603,7 +603,7 @@ inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg)
return ret;
}
-static int __devinit
+static int
init_irq(struct inf_hw *hw)
{
int ret, cnt = 3;
@@ -662,7 +662,7 @@ release_io(struct inf_hw *hw)
}
}
-static int __devinit
+static int
setup_io(struct inf_hw *hw)
{
int err = 0;
@@ -896,7 +896,7 @@ release_card(struct inf_hw *card) {
inf_cnt--;
}
-static int __devinit
+static int
setup_instance(struct inf_hw *card)
{
int err;
@@ -1060,7 +1060,7 @@ static const struct inf_cinfo inf_card_info[] = {
}
};
-static const struct inf_cinfo * __devinit
+static const struct inf_cinfo *
get_card_info(enum inf_types typ)
{
const struct inf_cinfo *ci = inf_card_info;
@@ -1073,7 +1073,7 @@ get_card_info(enum inf_types typ)
return NULL;
}
-static int __devinit
+static int
inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1135,7 +1135,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
inf_remove(struct pci_dev *pdev)
{
struct inf_hw *card = pci_get_drvdata(pdev);
@@ -1149,7 +1149,7 @@ inf_remove(struct pci_dev *pdev)
static struct pci_driver infineon_driver = {
.name = "ISDN Infineon pci",
.probe = inf_probe,
- .remove = __devexit_p(inf_remove),
+ .remove = inf_remove,
.id_table = infineon_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 182ecf0626c2..feafa91c2ed9 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1302,7 +1302,7 @@ modeisar(struct isar_ch *ch, u32 bprotocol)
&ch->is->Flags))
ch->dpath = 1;
else {
- pr_info("modeisar both pathes in use\n");
+ pr_info("modeisar both paths in use\n");
return -EBUSY;
}
if (bprotocol == ISDN_P_B_HDLC)
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 9bcade59eb73..8e2944784e00 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1008,7 +1008,7 @@ nj_setup(struct tiger_hw *card)
}
-static int __devinit
+static int
setup_instance(struct tiger_hw *card)
{
int i, err;
@@ -1059,7 +1059,7 @@ error:
return err;
}
-static int __devinit
+static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1124,7 +1124,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
-static void __devexit nj_remove(struct pci_dev *pdev)
+static void nj_remove(struct pci_dev *pdev)
{
struct tiger_hw *card = pci_get_drvdata(pdev);
@@ -1137,7 +1137,7 @@ static void __devexit nj_remove(struct pci_dev *pdev)
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
 * known other cards which do not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] __devinitdata = {
+static struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
@@ -1147,7 +1147,7 @@ MODULE_DEVICE_TABLE(pci, nj_pci_ids);
static struct pci_driver nj_driver = {
.name = "netjet",
.probe = nj_probe,
- .remove = __devexit_p(nj_remove),
+ .remove = nj_remove,
.id_table = nj_pci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 93f344d74e54..9815bb4eec9c 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -282,7 +282,7 @@ sfax_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
return err;
}
-static int __devinit
+static int
init_card(struct sfax_hw *sf)
{
int ret, cnt = 3;
@@ -321,7 +321,7 @@ init_card(struct sfax_hw *sf)
}
-static int __devinit
+static int
setup_speedfax(struct sfax_hw *sf)
{
u_long flags;
@@ -371,7 +371,7 @@ release_card(struct sfax_hw *card) {
sfax_cnt--;
}
-static int __devinit
+static int
setup_instance(struct sfax_hw *card)
{
const struct firmware *firmware;
@@ -451,7 +451,7 @@ error_fw:
return err;
}
-static int __devinit
+static int
sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -480,7 +480,7 @@ sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
sfax_remove_pci(struct pci_dev *pdev)
{
struct sfax_hw *card = pci_get_drvdata(pdev);
@@ -491,7 +491,7 @@ sfax_remove_pci(struct pci_dev *pdev)
pr_debug("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id sfaxpci_ids[] __devinitdata = {
+static struct pci_device_id sfaxpci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100,
PCI_SUBVENDOR_SPEEDFAX_PYRAMID, PCI_SUB_ID_SEDLBAUER,
0, 0, (unsigned long) "Pyramid Speedfax + PCI"
@@ -507,7 +507,7 @@ MODULE_DEVICE_TABLE(pci, sfaxpci_ids);
static struct pci_driver sfaxpci_driver = {
.name = "speedfax+ pci",
.probe = sfaxpci_probe,
- .remove = __devexit_p(sfax_remove_pci),
+ .remove = sfax_remove_pci,
.id_table = sfaxpci_ids,
};
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 335fe6455002..de69f6828c76 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1355,7 +1355,7 @@ error_setup:
return err;
}
-static int __devinit
+static int
w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1387,7 +1387,7 @@ w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit
+static void
w6692_remove_pci(struct pci_dev *pdev)
{
struct w6692_hw *card = pci_get_drvdata(pdev);
@@ -1414,7 +1414,7 @@ MODULE_DEVICE_TABLE(pci, w6692_ids);
static struct pci_driver w6692_driver = {
.name = "w6692",
.probe = w6692_probe,
- .remove = __devexit_p(w6692_remove_pci),
+ .remove = w6692_remove_pci,
.id_table = w6692_ids,
};
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 89342f7e0c5b..1063babe1d3a 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -628,7 +628,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg)
if ((cs->dc.amd7930.ph_state == 8)) {
/* b-channels off, PH-AR cleared
* change to F3 */
- Amd7930_ph_command(cs, 0x20, "HW_RESET REQEST"); //LMR1 bit 5
+ Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5
spin_unlock_irqrestore(&cs->lock, flags);
} else {
Amd7930_ph_command(cs, 0x40, "HW_RESET REQUEST");
@@ -786,8 +786,7 @@ void Amd7930_init(struct IsdnCardState *cs)
}
}
-void __devinit
-setup_Amd7930(struct IsdnCardState *cs)
+void setup_Amd7930(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, Amd7930_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
index 2b74a40ad2a0..62f9c43e2377 100644
--- a/drivers/isdn/hisax/asuscom.c
+++ b/drivers/isdn/hisax/asuscom.c
@@ -295,7 +295,7 @@ Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id asus_ids[] __devinitdata = {
+static struct isapnp_device_id asus_ids[] = {
{ ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
(unsigned long) "Asus1688 PnP" },
@@ -311,12 +311,11 @@ static struct isapnp_device_id asus_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &asus_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &asus_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_asuscom(struct IsdnCard *card)
+int setup_asuscom(struct IsdnCard *card)
{
int bytecnt;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_a1.c b/drivers/isdn/hisax/avm_a1.c
index 402d489cbbf1..7dd74087ad72 100644
--- a/drivers/isdn/hisax/avm_a1.c
+++ b/drivers/isdn/hisax/avm_a1.c
@@ -177,8 +177,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_avm_a1(struct IsdnCard *card)
+int setup_avm_a1(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_a1p.c b/drivers/isdn/hisax/avm_a1p.c
index 39347198d643..bc52d54ff5e1 100644
--- a/drivers/isdn/hisax/avm_a1p.c
+++ b/drivers/isdn/hisax/avm_a1p.c
@@ -213,7 +213,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return 0;
}
-int __devinit setup_avm_a1_pcmcia(struct IsdnCard *card)
+int setup_avm_a1_pcmcia(struct IsdnCard *card)
{
u_char model, vers;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 979492d69dae..ee9b9a03cffa 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -718,7 +718,7 @@ AVM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit avm_setup_rest(struct IsdnCardState *cs)
+static int avm_setup_rest(struct IsdnCardState *cs)
{
u_int val, ver;
@@ -770,16 +770,16 @@ static int __devinit avm_setup_rest(struct IsdnCardState *cs)
#ifndef __ISAPNP__
-static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
+static int avm_pnp_setup(struct IsdnCardState *cs)
{
return (1); /* no-op: success */
}
#else
-static struct pnp_card *pnp_avm_c __devinitdata = NULL;
+static struct pnp_card *pnp_avm_c = NULL;
-static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
+static int avm_pnp_setup(struct IsdnCardState *cs)
{
struct pnp_dev *pnp_avm_d = NULL;
@@ -825,16 +825,16 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
#ifndef CONFIG_PCI
-static int __devinit avm_pci_setup(struct IsdnCardState *cs)
+static int avm_pci_setup(struct IsdnCardState *cs)
{
return (1); /* no-op: success */
}
#else
-static struct pci_dev *dev_avm __devinitdata = NULL;
+static struct pci_dev *dev_avm = NULL;
-static int __devinit avm_pci_setup(struct IsdnCardState *cs)
+static int avm_pci_setup(struct IsdnCardState *cs)
{
if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
@@ -867,8 +867,7 @@ static int __devinit avm_pci_setup(struct IsdnCardState *cs)
#endif /* CONFIG_PCI */
-int __devinit
-setup_avm_pcipnp(struct IsdnCard *card)
+int setup_avm_pcipnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index c644557ae614..4e676bcf8506 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -38,11 +38,11 @@ module_param(isdnprot, int, 0);
/*====================================================================*/
-static int avma1cs_config(struct pcmcia_device *link) __devinit;
+static int avma1cs_config(struct pcmcia_device *link);
static void avma1cs_release(struct pcmcia_device *link);
-static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit;
+static void avma1cs_detach(struct pcmcia_device *p_dev);
-static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
+static int avma1cs_probe(struct pcmcia_device *p_dev)
{
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
@@ -54,7 +54,7 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
return avma1cs_config(p_dev);
} /* avma1cs_attach */
-static void __devexit avma1cs_detach(struct pcmcia_device *link)
+static void avma1cs_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
avma1cs_release(link);
@@ -72,7 +72,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
}
-static int __devinit avma1cs_config(struct pcmcia_device *link)
+static int avma1cs_config(struct pcmcia_device *link)
{
int i = -1;
char devname[128];
@@ -156,7 +156,7 @@ static struct pcmcia_driver avma1cs_driver = {
.owner = THIS_MODULE,
.name = "avma1_cs",
.probe = avma1cs_probe,
- .remove = __devexit_p(avma1cs_detach),
+ .remove = avma1cs_detach,
.id_table = avma1cs_ids,
};
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index f6bf9c68892e..c360164bde1b 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -253,10 +253,8 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
- struct IsdnCardState *cs,
- u_int *found,
- u_int *pci_memaddr)
+static int a4t_pci_probe(struct pci_dev *dev_a4t, struct IsdnCardState *cs,
+ u_int *found, u_int *pci_memaddr)
{
u16 sub_sys;
u16 sub_vendor;
@@ -275,9 +273,8 @@ static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
return (-1); /* continue looping */
}
-static int __devinit a4t_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs,
- u_int pci_memaddr)
+static int a4t_cs_init(struct IsdnCard *card, struct IsdnCardState *cs,
+ u_int pci_memaddr)
{
I20_REGISTER_FILE *pI20_Regs;
@@ -323,10 +320,9 @@ static int __devinit a4t_cs_init(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_a4t __devinitdata = NULL;
+static struct pci_dev *dev_a4t = NULL;
-int __devinit
-setup_bkm_a4t(struct IsdnCard *card)
+int setup_bkm_a4t(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
index c9c98f071af6..dd663ea57ec6 100644
--- a/drivers/isdn/hisax/bkm_a8.c
+++ b/drivers/isdn/hisax/bkm_a8.c
@@ -255,8 +255,7 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit
-sct_alloc_io(u_int adr, u_int len)
+static int sct_alloc_io(u_int adr, u_int len)
{
if (!request_region(adr, len, "scitel")) {
printk(KERN_WARNING
@@ -267,15 +266,14 @@ sct_alloc_io(u_int adr, u_int len)
return (0);
}
-static struct pci_dev *dev_a8 __devinitdata = NULL;
-static u16 sub_vendor_id __devinitdata = 0;
-static u16 sub_sys_id __devinitdata = 0;
-static u_char pci_bus __devinitdata = 0;
-static u_char pci_device_fn __devinitdata = 0;
-static u_char pci_irq __devinitdata = 0;
+static struct pci_dev *dev_a8 = NULL;
+static u16 sub_vendor_id = 0;
+static u16 sub_sys_id = 0;
+static u_char pci_bus = 0;
+static u_char pci_device_fn = 0;
+static u_char pci_irq = 0;
-int __devinit
-setup_sct_quadro(struct IsdnCard *card)
+int setup_sct_quadro(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index a47637be0cc5..ddec47a911a0 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -35,7 +35,7 @@ static int chancount;
/* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */
#define ALERT_REJECT 0
-/* Value to delay the sending of the first B-channel paket after CONNECT
+/* Value to delay the sending of the first B-channel packet after CONNECT
 * there is no value given by ITU, but experience shows that 300 ms will
 * work on many networks, if you or your other side is behind local exchanges
 * a greater value may be recommended. If the delay is too short the first packet
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b5edc0eeec06..bf04d2a3cf4a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -338,11 +338,11 @@ static int io[HISAX_MAX_CARDS] = { 0, };
#define IO0_IO1
#endif
#ifdef IO0_IO1
-static int io0[HISAX_MAX_CARDS] __devinitdata = { 0, };
-static int io1[HISAX_MAX_CARDS] __devinitdata = { 0, };
+static int io0[HISAX_MAX_CARDS] = { 0, };
+static int io1[HISAX_MAX_CARDS] = { 0, };
#endif
-static int irq[HISAX_MAX_CARDS] __devinitdata = { 0, };
-static int mem[HISAX_MAX_CARDS] __devinitdata = { 0, };
+static int irq[HISAX_MAX_CARDS] = { 0, };
+static int mem[HISAX_MAX_CARDS] = { 0, };
static char *id = HiSaxID;
MODULE_DESCRIPTION("ISDN4Linux: Driver for passive ISDN cards");
@@ -852,7 +852,7 @@ static int init_card(struct IsdnCardState *cs)
return 3;
}
-static int __devinit hisax_cs_setup_card(struct IsdnCard *card)
+static int hisax_cs_setup_card(struct IsdnCard *card)
{
int ret;
@@ -1171,12 +1171,8 @@ outf_cs:
return 0;
}
-/* Used from an exported function but calls __devinit functions.
- * Tell modpost not to warn (__ref)
- */
-static int __ref checkcard(int cardnr, char *id, int *busy_flag,
- struct module *lockowner,
- hisax_setup_func_t card_setup)
+static int checkcard(int cardnr, char *id, int *busy_flag,
+ struct module *lockowner, hisax_setup_func_t card_setup)
{
int ret;
struct IsdnCard *card = cards + cardnr;
@@ -1547,9 +1543,7 @@ static void __exit HiSax_exit(void)
printk(KERN_INFO "HiSax module removed\n");
}
-#ifdef CONFIG_HOTPLUG
-
-int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
+int hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *card)
{
u_char ids[16];
int ret = -1;
@@ -1568,9 +1562,7 @@ int __devinit hisax_init_pcmcia(void *pcm_iob, int *busy_flag, struct IsdnCard *
error:
return ret;
}
-
EXPORT_SYMBOL(hisax_init_pcmcia);
-#endif
EXPORT_SYMBOL(HiSax_closecard);
@@ -1917,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
-static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
+static struct pci_device_id hisax_pci_tbl[] __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 62a2945fa7f2..8d0cf6e4dc00 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -904,7 +904,7 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit setup_diva_common(struct IsdnCardState *cs)
+static int setup_diva_common(struct IsdnCardState *cs)
{
int bytecnt;
u_char val;
@@ -997,7 +997,7 @@ static int __devinit setup_diva_common(struct IsdnCardState *cs)
#ifdef CONFIG_ISA
-static int __devinit setup_diva_isa(struct IsdnCard *card)
+static int setup_diva_isa(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1033,7 +1033,7 @@ static int __devinit setup_diva_isa(struct IsdnCard *card)
#else /* if !CONFIG_ISA */
-static int __devinit setup_diva_isa(struct IsdnCard *card)
+static int setup_diva_isa(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
@@ -1041,7 +1041,7 @@ static int __devinit setup_diva_isa(struct IsdnCard *card)
#endif /* CONFIG_ISA */
#ifdef __ISAPNP__
-static struct isapnp_device_id diva_ids[] __devinitdata = {
+static struct isapnp_device_id diva_ids[] = {
{ ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
ISAPNP_VENDOR('G', 'D', 'I'), ISAPNP_FUNCTION(0x51),
(unsigned long) "Diva picola" },
@@ -1063,10 +1063,10 @@ static struct isapnp_device_id diva_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &diva_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &diva_ids[0];
+static struct pnp_card *pnp_c = NULL;
-static int __devinit setup_diva_isapnp(struct IsdnCard *card)
+static int setup_diva_isapnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
struct pnp_dev *pnp_d;
@@ -1141,7 +1141,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
#else /* if !ISAPNP */
-static int __devinit setup_diva_isapnp(struct IsdnCard *card)
+static int setup_diva_isapnp(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
@@ -1149,12 +1149,12 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
#endif /* ISAPNP */
#ifdef CONFIG_PCI
-static struct pci_dev *dev_diva __devinitdata = NULL;
-static struct pci_dev *dev_diva_u __devinitdata = NULL;
-static struct pci_dev *dev_diva201 __devinitdata = NULL;
-static struct pci_dev *dev_diva202 __devinitdata = NULL;
+static struct pci_dev *dev_diva = NULL;
+static struct pci_dev *dev_diva_u = NULL;
+static struct pci_dev *dev_diva201 = NULL;
+static struct pci_dev *dev_diva202 = NULL;
-static int __devinit setup_diva_pci(struct IsdnCard *card)
+static int setup_diva_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -1231,15 +1231,14 @@ static int __devinit setup_diva_pci(struct IsdnCard *card)
#else /* if !CONFIG_PCI */
-static int __devinit setup_diva_pci(struct IsdnCard *card)
+static int setup_diva_pci(struct IsdnCard *card)
{
return (-1); /* card not found; continue search */
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_diva(struct IsdnCard *card)
+int setup_diva(struct IsdnCard *card)
{
int rc, have_card = 0;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 64ba26a4afe6..1df6f9a56ca2 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -831,8 +831,7 @@ probe_elsa(struct IsdnCardState *cs)
return (CARD_portlist[i]);
}
-static int __devinit
-setup_elsa_isa(struct IsdnCard *card)
+static int setup_elsa_isa(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -902,7 +901,7 @@ setup_elsa_isa(struct IsdnCard *card)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id elsa_ids[] __devinitdata = {
+static struct isapnp_device_id elsa_ids[] = {
{ ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
ISAPNP_VENDOR('E', 'L', 'S'), ISAPNP_FUNCTION(0x0133),
(unsigned long) "Elsa QS1000" },
@@ -912,12 +911,11 @@ static struct isapnp_device_id elsa_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &elsa_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &elsa_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif /* __ISAPNP__ */
-static int __devinit
-setup_elsa_isapnp(struct IsdnCard *card)
+static int setup_elsa_isapnp(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -994,8 +992,7 @@ setup_elsa_isapnp(struct IsdnCard *card)
return (1);
}
-static void __devinit
-setup_elsa_pcmcia(struct IsdnCard *card)
+static void setup_elsa_pcmcia(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1027,11 +1024,10 @@ setup_elsa_pcmcia(struct IsdnCard *card)
}
#ifdef CONFIG_PCI
-static struct pci_dev *dev_qs1000 __devinitdata = NULL;
-static struct pci_dev *dev_qs3000 __devinitdata = NULL;
+static struct pci_dev *dev_qs1000 = NULL;
+static struct pci_dev *dev_qs3000 = NULL;
-static int __devinit
-setup_elsa_pci(struct IsdnCard *card)
+static int setup_elsa_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
@@ -1089,15 +1085,13 @@ setup_elsa_pci(struct IsdnCard *card)
#else
-static int __devinit
-setup_elsa_pci(struct IsdnCard *card)
+static int setup_elsa_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
-static int __devinit
-setup_elsa_common(struct IsdnCard *card)
+static int setup_elsa_common(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u_char val;
@@ -1212,8 +1206,7 @@ setup_elsa_common(struct IsdnCard *card)
return (1);
}
-int __devinit
-setup_elsa(struct IsdnCard *card)
+int setup_elsa(struct IsdnCard *card)
{
int rc;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index a8c4d3fc9a6d..ebe56918f6fc 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -62,9 +62,9 @@ MODULE_LICENSE("Dual MPL/GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int elsa_cs_config(struct pcmcia_device *link) __devinit;
+static int elsa_cs_config(struct pcmcia_device *link);
static void elsa_cs_release(struct pcmcia_device *link);
-static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
+static void elsa_cs_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -72,7 +72,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit elsa_cs_probe(struct pcmcia_device *link)
+static int elsa_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -90,7 +90,7 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
return elsa_cs_config(link);
} /* elsa_cs_attach */
-static void __devexit elsa_cs_detach(struct pcmcia_device *link)
+static void elsa_cs_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -126,7 +126,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
return -ENODEV;
}
-static int __devinit elsa_cs_config(struct pcmcia_device *link)
+static int elsa_cs_config(struct pcmcia_device *link)
{
int i;
IsdnCard_t icard;
@@ -210,7 +210,7 @@ static struct pcmcia_driver elsa_cs_driver = {
.owner = THIS_MODULE,
.name = "elsa_cs",
.probe = elsa_cs_probe,
- .remove = __devexit_p(elsa_cs_detach),
+ .remove = elsa_cs_detach,
.id_table = elsa_ids,
.suspend = elsa_suspend,
.resume = elsa_resume,
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b1e38b54ebac..e8d431a8302d 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,8 +300,7 @@ enpci_interrupt(int intno, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int en_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
if (pci_enable_device(dev_netjet))
return (0);
@@ -326,8 +325,7 @@ static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static void __devinit en_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static void en_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // window to the AMD
@@ -350,8 +348,7 @@ static void __devinit en_cs_init(struct IsdnCard *card,
outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
}
-static int __devinit en_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int en_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -384,11 +381,10 @@ static int __devinit en_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
/* called by config.c */
-int __devinit
-setup_enternow_pci(struct IsdnCard *card)
+int setup_enternow_pci(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/gazel.c b/drivers/isdn/hisax/gazel.c
index 4fef77562554..35c6df6534ec 100644
--- a/drivers/isdn/hisax/gazel.c
+++ b/drivers/isdn/hisax/gazel.c
@@ -483,8 +483,7 @@ error:
return 1;
}
-static int __devinit
-setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
+static int setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
{
printk(KERN_INFO "Gazel: ISA PnP card automatic recognition\n");
// we got an irq parameter, assume it is an ISA card
@@ -532,10 +531,9 @@ setup_gazelisa(struct IsdnCard *card, struct IsdnCardState *cs)
}
#ifdef CONFIG_PCI
-static struct pci_dev *dev_tel __devinitdata = NULL;
+static struct pci_dev *dev_tel = NULL;
-static int __devinit
-setup_gazelpci(struct IsdnCardState *cs)
+static int setup_gazelpci(struct IsdnCardState *cs)
{
u_int pci_ioaddr0 = 0, pci_ioaddr1 = 0;
u_char pci_irq = 0, found;
@@ -622,8 +620,7 @@ setup_gazelpci(struct IsdnCardState *cs)
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_gazel(struct IsdnCard *card)
+int setup_gazel(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index dea04de8e7ca..c49c294fc81e 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1497,7 +1497,7 @@ enable_pci_ports(hfc4s8s_hw *hw)
/* initialise the HFC-4s/8s hardware */
/* return 0 on success. */
/*************************************/
-static int __devinit
+static int
setup_instance(hfc4s8s_hw *hw)
{
int err = -EIO;
@@ -1585,7 +1585,7 @@ out:
/*****************************************/
/* PCI hotplug interface: probe new card */
/*****************************************/
-static int __devinit
+static int
hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -ENOMEM;
@@ -1640,7 +1640,7 @@ out:
/**************************************/
/* PCI hotplug interface: remove card */
/**************************************/
-static void __devexit
+static void
hfc4s8s_remove(struct pci_dev *pdev)
{
hfc4s8s_hw *hw = pci_get_drvdata(pdev);
@@ -1662,7 +1662,7 @@ hfc4s8s_remove(struct pci_dev *pdev)
static struct pci_driver hfc4s8s_driver = {
.name = "hfc4s8s_l1",
.probe = hfc4s8s_probe,
- .remove = __devexit_p(hfc4s8s_remove),
+ .remove = hfc4s8s_remove,
.id_table = hfc4s8s_ids,
};
@@ -1688,14 +1688,6 @@ hfc4s8s_module_init(void)
}
printk(KERN_INFO "HFC-4S/8S: found %d cards\n", card_cnt);
-#if !defined(CONFIG_HOTPLUG)
- if (err == 0) {
- err = -ENODEV;
- pci_unregister_driver(&hfc4s8s_driver);
- goto out;
- }
-#endif
-
return 0;
out:
return (err);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 334fa90bed8e..3ccd724ff8c2 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -354,7 +354,7 @@ receive_dmsg(struct IsdnCardState *cs)
if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
(df->data[zp->z1])) {
if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
+ debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
#ifdef ERROR_STATISTIC
cs->err_rx++;
#endif
@@ -1632,9 +1632,9 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
/* this variable is used as card index when more than one cards are present */
-static struct pci_dev *dev_hfcpci __devinitdata = NULL;
+static struct pci_dev *dev_hfcpci = NULL;
-int __devinit
+int
setup_hfcpci(struct IsdnCard *card)
{
u_long flags;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4db846be4369..90f34ae2b80f 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -270,7 +270,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
if ((count > fifo_size) || (count < 4)) {
if (cs->debug & L1_DEB_WARN)
- debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count);
+ debugl1(cs, "hfcsx_read_fifo %d packet inv. len %d ", fifo , count);
while (count) {
count--; /* empty fifo */
Read_hfc(cs, HFCSX_FIF_DRD);
@@ -1381,19 +1381,18 @@ hfcsx_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] __devinitdata = {
+static struct isapnp_device_id hfc_ids[] = {
{ ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2620),
(unsigned long) "Teles 16.3c2" },
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &hfc_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_hfcsx(struct IsdnCard *card)
+int setup_hfcsx(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index a5f048bd2bb3..394da646e97b 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -136,7 +136,7 @@ hfcs_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id hfc_ids[] __devinitdata = {
+static struct isapnp_device_id hfc_ids[] = {
{ ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
ISAPNP_VENDOR('A', 'N', 'X'), ISAPNP_FUNCTION(0x1114),
(unsigned long) "Acer P10" },
@@ -161,12 +161,11 @@ static struct isapnp_device_id hfc_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &hfc_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &hfc_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_hfcs(struct IsdnCard *card)
+int setup_hfcs(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index e4f47fe3f7fd..5e8a5d967162 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -70,7 +70,7 @@ static struct pci_device_id fcpci_ids[] = {
MODULE_DEVICE_TABLE(pci, fcpci_ids);
#ifdef CONFIG_PNP
-static struct pnp_device_id fcpnp_ids[] __devinitdata = {
+static struct pnp_device_id fcpnp_ids[] = {
{
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
@@ -712,7 +712,7 @@ static inline void fcpci_init(struct fritz_adapter *adapter)
// ----------------------------------------------------------------------
-static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
+static int fcpcipnp_setup(struct fritz_adapter *adapter)
{
u32 val = 0;
int retval;
@@ -825,7 +825,7 @@ err:
return retval;
}
-static void __devexit fcpcipnp_release(struct fritz_adapter *adapter)
+static void fcpcipnp_release(struct fritz_adapter *adapter)
{
DBG(1, "");
@@ -836,8 +836,7 @@ static void __devexit fcpcipnp_release(struct fritz_adapter *adapter)
// ----------------------------------------------------------------------
-static struct fritz_adapter * __devinit
-new_adapter(void)
+static struct fritz_adapter *new_adapter(void)
{
struct fritz_adapter *adapter;
struct hisax_b_if *b_if[2];
@@ -876,8 +875,7 @@ static void delete_adapter(struct fritz_adapter *adapter)
kfree(adapter);
}
-static int __devinit fcpci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int fcpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct fritz_adapter *adapter;
int retval;
@@ -917,7 +915,7 @@ err:
}
#ifdef CONFIG_PNP
-static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
struct fritz_adapter *adapter;
int retval;
@@ -959,7 +957,7 @@ err:
return retval;
}
-static void __devexit fcpnp_remove(struct pnp_dev *pdev)
+static void fcpnp_remove(struct pnp_dev *pdev)
{
struct fritz_adapter *adapter = pnp_get_drvdata(pdev);
@@ -973,12 +971,12 @@ static void __devexit fcpnp_remove(struct pnp_dev *pdev)
static struct pnp_driver fcpnp_driver = {
.name = "fcpnp",
.probe = fcpnp_probe,
- .remove = __devexit_p(fcpnp_remove),
+ .remove = fcpnp_remove,
.id_table = fcpnp_ids,
};
#endif
-static void __devexit fcpci_remove(struct pci_dev *pdev)
+static void fcpci_remove(struct pci_dev *pdev)
{
struct fritz_adapter *adapter = pci_get_drvdata(pdev);
@@ -990,7 +988,7 @@ static void __devexit fcpci_remove(struct pci_dev *pdev)
static struct pci_driver fcpci_driver = {
.name = "fcpci",
.probe = fcpci_probe,
- .remove = __devexit_p(fcpci_remove),
+ .remove = fcpci_remove,
.id_table = fcpci_ids,
};
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 7be762b17c70..db5321f6379b 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -673,8 +673,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
cs->writeisac(cs, ICC_MASK, 0xFF);
}
-void __devinit
-setup_icc(struct IsdnCardState *cs)
+void setup_icc(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, icc_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index bcd70a387307..a365ccc1c99c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -24,11 +24,11 @@
#define DBUSY_TIMER_VALUE 80
#define ARCOFI_USE 1
-static char *ISACVer[] __devinitdata =
+static char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
-void __devinit ISACVersion(struct IsdnCardState *cs, char *s)
+void ISACVersion(struct IsdnCardState *cs, char *s)
{
int val;
@@ -669,8 +669,7 @@ void clear_pending_isac_ints(struct IsdnCardState *cs)
cs->writeisac(cs, ISAC_MASK, 0xFF);
}
-void __devinit
-setup_isac(struct IsdnCardState *cs)
+void setup_isac(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, isac_bh);
cs->dbusytimer.function = (void *) dbusy_timer_handler;
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index ff5e139f4850..7fdf34704fe5 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1417,7 +1417,7 @@ modeisar(struct BCState *bcs, int mode, int bc)
&bcs->hw.isar.reg->Flags))
bcs->hw.isar.dpath = 1;
else {
- printk(KERN_WARNING"isar modeisar both pathes in use\n");
+ printk(KERN_WARNING"isar modeisar both paths in use\n");
return (1);
}
break;
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index c1530fe248c2..1399ddd4f6cb 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -194,11 +194,10 @@ isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
}
#ifdef __ISAPNP__
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_isurf(struct IsdnCard *card)
+int setup_isurf(struct IsdnCard *card)
{
int ver;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
index 5f299f82b801..7ae39f5e865d 100644
--- a/drivers/isdn/hisax/ix1_micro.c
+++ b/drivers/isdn/hisax/ix1_micro.c
@@ -209,7 +209,7 @@ ix1_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id itk_ids[] __devinitdata = {
+static struct isapnp_device_id itk_ids[] = {
{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
(unsigned long) "ITK micro 2" },
@@ -219,13 +219,12 @@ static struct isapnp_device_id itk_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &itk_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &itk_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_ix1micro(struct IsdnCard *card)
+int setup_ix1micro(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/mic.c b/drivers/isdn/hisax/mic.c
index 08a6b7fb17f7..93398676f78f 100644
--- a/drivers/isdn/hisax/mic.c
+++ b/drivers/isdn/hisax/mic.c
@@ -187,8 +187,7 @@ mic_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_mic(struct IsdnCard *card)
+int setup_mic(struct IsdnCard *card)
{
int bytecnt;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index 6569e0315cca..e4c33cfe3ef4 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -223,10 +223,10 @@ static int niccy_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit setup_niccy(struct IsdnCard *card)
+int setup_niccy(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
@@ -298,7 +298,7 @@ int __devinit setup_niccy(struct IsdnCard *card)
}
} else {
#ifdef CONFIG_PCI
- static struct pci_dev *niccy_dev __devinitdata;
+ static struct pci_dev *niccy_dev;
u_int pci_ioaddr;
cs->subtyp = 0;
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index f36ff69c07e1..32b4bbd18eb9 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,8 +148,7 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int njs_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
u32 cfg;
@@ -187,8 +186,7 @@ static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static int __devinit njs_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int njs_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
@@ -225,8 +223,7 @@ static int __devinit njs_cs_init(struct IsdnCard *card,
return 1; /* end loop */
}
-static int __devinit njs_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int njs_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -256,10 +253,9 @@ static int __devinit njs_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
-int __devinit
-setup_netjet_s(struct IsdnCard *card)
+int setup_netjet_s(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 333484aef425..4e8adbede361 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,8 +128,7 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
- struct IsdnCardState *cs)
+static int nju_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
if (pci_enable_device(dev_netjet))
return (0);
@@ -148,8 +147,7 @@ static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
return (1);
}
-static int __devinit nju_cs_init(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int nju_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
@@ -187,8 +185,7 @@ static int __devinit nju_cs_init(struct IsdnCard *card,
return 1; /* end loop */
}
-static int __devinit nju_cs_init_rest(struct IsdnCard *card,
- struct IsdnCardState *cs)
+static int nju_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
const int bytecnt = 256;
@@ -219,10 +216,9 @@ static int __devinit nju_cs_init_rest(struct IsdnCard *card,
return (1);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
+static struct pci_dev *dev_netjet = NULL;
-int __devinit
-setup_netjet_u(struct IsdnCard *card)
+int setup_netjet_u(struct IsdnCard *card)
{
int ret;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/s0box.c b/drivers/isdn/hisax/s0box.c
index 383c4e7ce50b..4e7d0aa227ad 100644
--- a/drivers/isdn/hisax/s0box.c
+++ b/drivers/isdn/hisax/s0box.c
@@ -210,8 +210,7 @@ S0Box_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_s0box(struct IsdnCard *card)
+int setup_s0box(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c
index 75dcae6d36e0..6b2d0eccdd56 100644
--- a/drivers/isdn/hisax/saphir.c
+++ b/drivers/isdn/hisax/saphir.c
@@ -240,8 +240,7 @@ saphir_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
-int __devinit
-setup_saphir(struct IsdnCard *card)
+int setup_saphir(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 1ee531b6be99..f16a47bcef48 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -517,7 +517,7 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
}
#ifdef __ISAPNP__
-static struct isapnp_device_id sedl_ids[] __devinitdata = {
+static struct isapnp_device_id sedl_ids[] = {
{ ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
ISAPNP_VENDOR('S', 'A', 'G'), ISAPNP_FUNCTION(0x01),
(unsigned long) "Speed win" },
@@ -527,11 +527,10 @@ static struct isapnp_device_id sedl_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &sedl_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &sedl_ids[0];
+static struct pnp_card *pnp_c = NULL;
-static int __devinit
-setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
+static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
struct IsdnCardState *cs = card->cs;
struct pnp_dev *pnp_d;
@@ -591,18 +590,16 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
}
#else
-static int __devinit
-setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
+static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
{
return -1;
}
#endif /* __ISAPNP__ */
#ifdef CONFIG_PCI
-static struct pci_dev *dev_sedl __devinitdata = NULL;
+static struct pci_dev *dev_sedl = NULL;
-static int __devinit
-setup_sedlbauer_pci(struct IsdnCard *card)
+static int setup_sedlbauer_pci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
u16 sub_vendor_id, sub_id;
@@ -667,16 +664,14 @@ setup_sedlbauer_pci(struct IsdnCard *card)
#else
-static int __devinit
-setup_sedlbauer_pci(struct IsdnCard *card)
+static int setup_sedlbauer_pci(struct IsdnCard *card)
{
return (1);
}
#endif /* CONFIG_PCI */
-int __devinit
-setup_sedlbauer(struct IsdnCard *card)
+int setup_sedlbauer(struct IsdnCard *card)
{
int bytecnt = 8, ver, val, rc;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index f0dfc0c976eb..90f81291641b 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -62,10 +62,10 @@ MODULE_LICENSE("Dual MPL/GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int sedlbauer_config(struct pcmcia_device *link) __devinit;
+static int sedlbauer_config(struct pcmcia_device *link);
static void sedlbauer_release(struct pcmcia_device *link);
-static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
+static void sedlbauer_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -73,7 +73,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit sedlbauer_probe(struct pcmcia_device *link)
+static int sedlbauer_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -90,7 +90,7 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
return sedlbauer_config(link);
} /* sedlbauer_attach */
-static void __devexit sedlbauer_detach(struct pcmcia_device *link)
+static void sedlbauer_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
@@ -110,7 +110,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev, void *priv_data)
return pcmcia_request_io(p_dev);
}
-static int __devinit sedlbauer_config(struct pcmcia_device *link)
+static int sedlbauer_config(struct pcmcia_device *link)
{
int ret;
IsdnCard_t icard;
@@ -201,7 +201,7 @@ static struct pcmcia_driver sedlbauer_driver = {
.owner = THIS_MODULE,
.name = "sedlbauer_cs",
.probe = sedlbauer_probe,
- .remove = __devexit_p(sedlbauer_detach),
+ .remove = sedlbauer_detach,
.id_table = sedlbauer_ids,
.suspend = sedlbauer_suspend,
.resume = sedlbauer_resume,
diff --git a/drivers/isdn/hisax/sportster.c b/drivers/isdn/hisax/sportster.c
index 1267298ef551..18cee6360d0a 100644
--- a/drivers/isdn/hisax/sportster.c
+++ b/drivers/isdn/hisax/sportster.c
@@ -183,8 +183,7 @@ Sportster_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static int __devinit
-get_io_range(struct IsdnCardState *cs)
+static int get_io_range(struct IsdnCardState *cs)
{
int i, j, adr;
@@ -208,8 +207,7 @@ get_io_range(struct IsdnCardState *cs)
}
}
-int __devinit
-setup_sportster(struct IsdnCard *card)
+int setup_sportster(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c
index fa329e27cc5b..bf647545c70c 100644
--- a/drivers/isdn/hisax/teleint.c
+++ b/drivers/isdn/hisax/teleint.c
@@ -259,8 +259,7 @@ TeleInt_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_TeleInt(struct IsdnCard *card)
+int setup_TeleInt(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/teles0.c b/drivers/isdn/hisax/teles0.c
index 49b4a26f91e0..ce9eabdd2f6e 100644
--- a/drivers/isdn/hisax/teles0.c
+++ b/drivers/isdn/hisax/teles0.c
@@ -263,8 +263,7 @@ Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-int __devinit
-setup_teles0(struct IsdnCard *card)
+int setup_teles0(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index 220b919fafc3..38fb2c1a3f0f 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -253,7 +253,7 @@ Teles_card_msg(struct IsdnCardState *cs, int mt, void *arg)
#ifdef __ISAPNP__
-static struct isapnp_device_id teles_ids[] __devinitdata = {
+static struct isapnp_device_id teles_ids[] = {
{ ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
ISAPNP_VENDOR('T', 'A', 'G'), ISAPNP_FUNCTION(0x2110),
(unsigned long) "Teles 16.3 PnP" },
@@ -266,12 +266,11 @@ static struct isapnp_device_id teles_ids[] __devinitdata = {
{ 0, }
};
-static struct isapnp_device_id *ipid __devinitdata = &teles_ids[0];
-static struct pnp_card *pnp_c __devinitdata = NULL;
+static struct isapnp_device_id *ipid = &teles_ids[0];
+static struct pnp_card *pnp_c = NULL;
#endif
-int __devinit
-setup_teles3(struct IsdnCard *card)
+int setup_teles3(struct IsdnCard *card)
{
u_char val;
struct IsdnCardState *cs = card->cs;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 4deac451807c..f2476ffb04fd 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -43,9 +43,9 @@ MODULE_LICENSE("GPL");
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
-static int teles_cs_config(struct pcmcia_device *link) __devinit;
+static int teles_cs_config(struct pcmcia_device *link);
static void teles_cs_release(struct pcmcia_device *link);
-static void teles_detach(struct pcmcia_device *p_dev) __devexit;
+static void teles_detach(struct pcmcia_device *p_dev);
typedef struct local_info_t {
struct pcmcia_device *p_dev;
@@ -53,7 +53,7 @@ typedef struct local_info_t {
int cardnr;
} local_info_t;
-static int __devinit teles_probe(struct pcmcia_device *link)
+static int teles_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -72,7 +72,7 @@ static int __devinit teles_probe(struct pcmcia_device *link)
return teles_cs_config(link);
} /* teles_attach */
-static void __devexit teles_detach(struct pcmcia_device *link)
+static void teles_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -108,7 +108,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
return -ENODEV;
}
-static int __devinit teles_cs_config(struct pcmcia_device *link)
+static int teles_cs_config(struct pcmcia_device *link)
{
int i;
IsdnCard_t icard;
@@ -192,7 +192,7 @@ static struct pcmcia_driver teles_cs_driver = {
.owner = THIS_MODULE,
.name = "teles_cs",
.probe = teles_probe,
- .remove = __devexit_p(teles_detach),
+ .remove = teles_detach,
.id_table = teles_ids,
.suspend = teles_suspend,
.resume = teles_resume,
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index 9c002c9dc771..f6ab63aa6995 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -283,10 +283,9 @@ TelesPCI_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static struct pci_dev *dev_tel __devinitdata = NULL;
+static struct pci_dev *dev_tel = NULL;
-int __devinit
-setup_telespci(struct IsdnCard *card)
+int setup_telespci(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 0f0d094af85b..d8cac6935818 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -991,10 +991,9 @@ w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg)
static int id_idx;
-static struct pci_dev *dev_w6692 __devinitdata = NULL;
+static struct pci_dev *dev_w6692 = NULL;
-int __devinit
-setup_w6692(struct IsdnCard *card)
+int setup_w6692(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b61bbb4bb52b..0db2f7506250 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -56,8 +56,8 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
/* is assumed and the module will not be kept in memory. */
/****************************************************************************/
-static int __devinit hysdn_pci_init_one(struct pci_dev *akt_pcidev,
- const struct pci_device_id *ent)
+static int hysdn_pci_init_one(struct pci_dev *akt_pcidev,
+ const struct pci_device_id *ent)
{
hysdn_card *card;
int rc;
@@ -109,7 +109,7 @@ err_out:
return rc;
}
-static void __devexit hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
+static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev)
{
hysdn_card *card = pci_get_drvdata(akt_pcidev);
@@ -147,7 +147,7 @@ static struct pci_driver hysdn_pci_driver = {
.name = "hysdn",
.id_table = hysdn_pci_tbl,
.probe = hysdn_pci_init_one,
- .remove = __devexit_p(hysdn_pci_remove_one),
+ .remove = hysdn_pci_remove_one,
};
static int hysdn_have_procfs;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index b817809f763c..e09dc8a5e743 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1849,6 +1849,8 @@ err_unregister:
kfree(info->fax);
#endif
kfree(info->port.xmit_buf - 4);
+ info->port.xmit_buf = NULL;
+ tty_port_destroy(&info->port);
}
tty_unregister_driver(m->tty_modem);
err:
@@ -1870,6 +1872,8 @@ isdn_tty_exit(void)
kfree(info->fax);
#endif
kfree(info->port.xmit_buf - 4);
+ info->port.xmit_buf = NULL;
+ tty_port_destroy(&info->port);
}
tty_unregister_driver(dev->mdm.tty_modem);
put_tty_driver(dev->mdm.tty_modem);
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index c401634c00ec..3e245712bba7 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -140,7 +140,6 @@ static struct device_attribute mISDN_dev_attrs[] = {
{}
};
-#ifdef CONFIG_HOTPLUG
static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -153,7 +152,6 @@ static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#endif
static void mISDN_class_release(struct class *cls)
{
@@ -163,9 +161,7 @@ static void mISDN_class_release(struct class *cls)
static struct class mISDN_class = {
.name = "mISDN",
.owner = THIS_MODULE,
-#ifdef CONFIG_HOTPLUG
.dev_uevent = mISDN_uevent,
-#endif
.dev_attrs = mISDN_dev_attrs,
.dev_release = mISDN_dev_release,
.class_release = mISDN_class_release,
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 28c99c623bcd..22b720ec80cb 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1217,8 +1217,7 @@ static void __exit dsp_cleanup(void)
{
mISDN_unregister_Bprotocol(&DSP);
- if (timer_pending(&dsp_spl_tl))
- del_timer(&dsp_spl_tl);
+ del_timer_sync(&dsp_spl_tl);
if (!list_empty(&dsp_ilist)) {
printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index db50f788855d..f8e405c383a0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -277,7 +277,6 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
- int multi = 0;
u8 frame[len + 32];
struct socket *socket = NULL;
@@ -317,9 +316,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
*p++ = hc->id >> 8;
*p++ = hc->id;
}
- *p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */
- if (multi == 1)
- *p++ = len; /* length */
+ *p++ = 0x00 + channel; /* m-flag, channel */
*p++ = timebase >> 8; /* time base */
*p++ = timebase;
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index be88728f1106..592f597d8951 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -250,7 +250,7 @@ tei_debug(struct FsmInst *fi, char *fmt, ...)
static int
get_free_id(struct manager *mgr)
{
- u64 ids = 0;
+ DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
int i;
struct layer2 *l2;
@@ -261,11 +261,11 @@ get_free_id(struct manager *mgr)
__func__);
return -EBUSY;
}
- test_and_set_bit(l2->ch.nr, (u_long *)&ids);
+ __set_bit(l2->ch.nr, ids);
}
- for (i = 1; i < 64; i++)
- if (!test_bit(i, (u_long *)&ids))
- return i;
+ i = find_next_zero_bit(ids, 64, 1);
+ if (i < 64)
+ return i;
printk(KERN_WARNING "%s: more as 63 layer2 for one device\n",
__func__);
return -EBUSY;
@@ -274,7 +274,7 @@ get_free_id(struct manager *mgr)
static int
get_free_tei(struct manager *mgr)
{
- u64 ids = 0;
+ DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
int i;
struct layer2 *l2;
@@ -288,11 +288,11 @@ get_free_tei(struct manager *mgr)
continue;
i -= 64;
- test_and_set_bit(i, (u_long *)&ids);
+ __set_bit(i, ids);
}
- for (i = 0; i < 64; i++)
- if (!test_bit(i, (u_long *)&ids))
- return i + 64;
+ i = find_first_zero_bit(ids, 64);
+ if (i < 64)
+ return i + 64;
printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n",
__func__);
return -1;
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index a18e639b40d7..42ecfef80132 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -508,7 +508,7 @@ pcbit_irq_handler(int interrupt, void *devptr)
return IRQ_NONE;
}
if (dev->interrupt) {
- printk(KERN_DEBUG "pcbit: reentering interrupt hander\n");
+ printk(KERN_DEBUG "pcbit: reentering interrupt handler\n");
return IRQ_HANDLED;
}
dev->interrupt = 1;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index f508defc0d96..b58bc8a14b9c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -379,7 +379,9 @@ config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
- MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2
+ MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2 || \
+ MACH_NETSPACE_V2_DT || MACH_INETSPACE_V2_DT || \
+ MACH_NETSPACE_MAX_V2_DT || MACH_NETSPACE_MINI_V2_DT
default y
help
This option enable support for the dual-GPIO LED found on the
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 48cce18e9d6d..a20752f562bc 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -211,7 +211,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
led_trigger_set_default(led_cdev);
#endif
- printk(KERN_DEBUG "Registered led device: %s\n",
+ dev_dbg(parent, "Registered led device: %s\n",
led_cdev->name);
return 0;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 262eb4193710..3c972b2f9893 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -166,6 +166,19 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig)
+{
+ /* new name must be on a temporary string to prevent races */
+ BUG_ON(name == trig->name);
+
+ down_write(&triggers_list_lock);
+ /* this assumes that trig->name was originally allocated to
+ * non-constant storage */
+ strcpy((char *)trig->name, name);
+ up_write(&triggers_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_trigger_rename_static);
+
/* LED Trigger Interface */
int led_trigger_register(struct led_trigger *trig)
@@ -300,13 +313,13 @@ void led_trigger_register_simple(const char *name, struct led_trigger **tp)
if (err < 0) {
kfree(trig);
trig = NULL;
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (%d)\n", name, err);
+ pr_warn("LED trigger %s failed to register (%d)\n",
+ name, err);
}
- } else
- printk(KERN_WARNING "LED trigger %s failed to register"
- " (no memory)\n", name);
-
+ } else {
+ pr_warn("LED trigger %s failed to register (no memory)\n",
+ name);
+ }
*tp = trig;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index b7e8cc0957fc..6be2edd41173 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -165,15 +165,13 @@ static int pm860x_led_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "control");
if (!res) {
dev_err(&pdev->dev, "No REG resource for control\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_control = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_REG, "blink");
if (!res) {
dev_err(&pdev->dev, "No REG resource for blink\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
data->reg_blink = res->start;
memset(data->name, 0, MFD_NAME_SIZE);
@@ -224,9 +222,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
}
pm860x_led_set(&data->cdev, 0);
return 0;
-out:
- devm_kfree(&pdev->dev, data);
- return ret;
}
static int pm860x_led_remove(struct platform_device *pdev)
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index aa56a867693a..e8072abe76e5 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -5,10 +5,10 @@
*
* Loosely derived from leds-da903x:
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* Licensed under the GPL-2 or later.
*/
@@ -85,7 +85,7 @@ static int adp5520_led_setup(struct adp5520_led *led)
return ret;
}
-static int __devinit adp5520_led_prepare(struct platform_device *pdev)
+static int adp5520_led_prepare(struct platform_device *pdev)
{
struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = pdev->dev.parent;
@@ -101,7 +101,7 @@ static int __devinit adp5520_led_prepare(struct platform_device *pdev)
return ret;
}
-static int __devinit adp5520_led_probe(struct platform_device *pdev)
+static int adp5520_led_probe(struct platform_device *pdev)
{
struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
struct adp5520_led *led, *led_dat;
@@ -183,7 +183,7 @@ err:
return ret;
}
-static int __devexit adp5520_led_remove(struct platform_device *pdev)
+static int adp5520_led_remove(struct platform_device *pdev)
{
struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
struct adp5520_led *led;
@@ -208,7 +208,7 @@ static struct platform_driver adp5520_led_driver = {
.owner = THIS_MODULE,
},
.probe = adp5520_led_probe,
- .remove = __devexit_p(adp5520_led_remove),
+ .remove = adp5520_led_remove,
};
module_platform_driver(adp5520_led_driver);
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 5de74ff90dcf..b474745e001b 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -92,7 +92,7 @@ static int blink_set(struct led_classdev *cdev,
return 0;
}
-static int __devinit asic3_led_probe(struct platform_device *pdev)
+static int asic3_led_probe(struct platform_device *pdev)
{
struct asic3_led *led = pdev->dev.platform_data;
int ret;
@@ -125,7 +125,7 @@ out:
return ret;
}
-static int __devexit asic3_led_remove(struct platform_device *pdev)
+static int asic3_led_remove(struct platform_device *pdev)
{
struct asic3_led *led = pdev->dev.platform_data;
@@ -167,7 +167,7 @@ static const struct dev_pm_ops asic3_led_pm_ops = {
static struct platform_driver asic3_led_driver = {
.probe = asic3_led_probe,
- .remove = __devexit_p(asic3_led_remove),
+ .remove = asic3_led_remove,
.driver = {
.name = "leds-asic3",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 45430632faab..386773532d95 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -35,7 +35,7 @@ static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
* NOTE: we reuse the platform_data structure of GPIO leds,
* but repurpose its "gpio" number as a PWM channel number.
*/
-static int __devinit pwmled_probe(struct platform_device *pdev)
+static int pwmled_probe(struct platform_device *pdev)
{
const struct gpio_led_platform_data *pdata;
struct pwmled *leds;
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 89ca6a2a19d1..851517030cc1 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -26,8 +26,8 @@
#define BD2802_LED_OFFSET 0xa
#define BD2802_COLOR_OFFSET 0x3
-#define BD2802_REG_CLKSETUP 0x00
-#define BD2802_REG_CONTROL 0x01
+#define BD2802_REG_CLKSETUP 0x00
+#define BD2802_REG_CONTROL 0x01
#define BD2802_REG_HOURSETUP 0x02
#define BD2802_REG_CURRENT1SETUP 0x03
#define BD2802_REG_CURRENT2SETUP 0x04
@@ -93,7 +93,7 @@ struct bd2802_led {
* In ADF mode, user can set registers of BD2802GU directly,
* therefore BD2802GU doesn't enter reset state.
*/
- int adf_on;
+ int adf_on;
enum led_ids led_id;
enum led_colors color;
@@ -328,7 +328,7 @@ static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
@@ -492,7 +492,7 @@ static ssize_t bd2802_store_##attr_name(struct device *dev, \
int ret; \
if (!count) \
return -EINVAL; \
- ret = strict_strtoul(buf, 16, &val); \
+ ret = kstrtoul(buf, 16, &val); \
if (ret) \
return ret; \
down_write(&led->rwsem); \
@@ -670,7 +670,7 @@ static void bd2802_unregister_led_classdev(struct bd2802_led *led)
led_classdev_unregister(&led->cdev_led1r);
}
-static int __devinit bd2802_probe(struct i2c_client *client,
+static int bd2802_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bd2802_led *led;
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index f7c3d7f1ec52..a502678cc7f5 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -632,7 +632,7 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int __devinit blinkm_probe(struct i2c_client *client,
+static int blinkm_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct blinkm_data *data;
@@ -743,7 +743,7 @@ exit:
return err;
}
-static int __devexit blinkm_remove(struct i2c_client *client)
+static int blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
int ret = 0;
@@ -801,7 +801,7 @@ static struct i2c_driver blinkm_driver = {
.name = "blinkm",
},
.probe = blinkm_probe,
- .remove = __devexit_p(blinkm_remove),
+ .remove = blinkm_remove,
.id_table = blinkm_id,
.detect = blinkm_detect,
.address_list = normal_i2c,
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index e024b0b1c3b1..6a8405df76a3 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@ -26,7 +27,7 @@ static struct platform_device *pdev;
static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
{
- printk(KERN_INFO KBUILD_MODNAME ": '%s' found\n", id->ident);
+ pr_info("'%s' found\n", id->ident);
return 1;
}
@@ -135,8 +136,7 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev,
status = 0;
} else {
- printk(KERN_DEBUG KBUILD_MODNAME
- ": clevo_mail_led_blink(..., %lu, %lu),"
+ pr_debug("clevo_mail_led_blink(..., %lu, %lu),"
" returning -EINVAL (unsupported)\n",
*delay_on, *delay_off);
}
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
.flags = LED_CORE_SUSPENDRESUME,
};
-static int __devinit clevo_mail_led_probe(struct platform_device *pdev)
+static int clevo_mail_led_probe(struct platform_device *pdev)
{
return led_classdev_register(&pdev->dev, &clevo_mail_led);
}
@@ -183,7 +183,7 @@ static int __init clevo_mail_led_init(void)
count = dmi_check_system(clevo_mail_led_dmi_table);
} else {
count = 1;
- printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
+ pr_err("Skipping DMI detection. "
"If the driver works on your hardware please "
"report model and the output of dmidecode in tracker "
"at http://sourceforge.net/projects/clevo-mailled/\n");
@@ -197,8 +197,7 @@ static int __init clevo_mail_led_init(void)
error = platform_driver_probe(&clevo_mail_led_driver,
clevo_mail_led_probe);
if (error) {
- printk(KERN_ERR KBUILD_MODNAME
- ": Can't probe platform driver\n");
+ pr_err("Can't probe platform driver\n");
platform_device_unregister(pdev);
}
} else
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 6a8725cc7b4d..8abcb66db01c 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -34,7 +34,7 @@ static struct led_classdev qube_front_led = {
.default_trigger = "default-on",
};
-static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
+static int cobalt_qube_led_probe(struct platform_device *pdev)
{
struct resource *res;
int retval;
@@ -43,7 +43,7 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
@@ -52,32 +52,29 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
retval = led_classdev_register(&pdev->dev, &qube_front_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
return 0;
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
}
-static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
+static int cobalt_qube_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&qube_front_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
static struct platform_driver cobalt_qube_led_driver = {
.probe = cobalt_qube_led_probe,
- .remove = __devexit_p(cobalt_qube_led_remove),
+ .remove = cobalt_qube_led_remove,
.driver = {
.name = "cobalt-qube-leds",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index aac1c073fe7b..001088b31373 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -76,7 +76,7 @@ static struct led_classdev raq_power_off_led = {
.default_trigger = "power-off",
};
-static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
+static int cobalt_raq_led_probe(struct platform_device *pdev)
{
struct resource *res;
int retval;
@@ -85,13 +85,13 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, resource_size(res));
+ led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
retval = led_classdev_register(&pdev->dev, &raq_power_off_led);
if (retval)
- goto err_iounmap;
+ goto err_null;
retval = led_classdev_register(&pdev->dev, &raq_web_led);
if (retval)
@@ -102,29 +102,26 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
err_unregister:
led_classdev_unregister(&raq_power_off_led);
-err_iounmap:
- iounmap(led_port);
+err_null:
led_port = NULL;
return retval;
}
-static int __devexit cobalt_raq_led_remove(struct platform_device *pdev)
+static int cobalt_raq_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&raq_power_off_led);
led_classdev_unregister(&raq_web_led);
- if (led_port) {
- iounmap(led_port);
+ if (led_port)
led_port = NULL;
- }
return 0;
}
static struct platform_driver cobalt_raq_led_driver = {
.probe = cobalt_raq_led_probe,
- .remove = __devexit_p(cobalt_raq_led_remove),
+ .remove = cobalt_raq_led_remove,
.driver = {
.name = "cobalt-raq-leds",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index cc77c9d92615..c263a21db829 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -2,10 +2,10 @@
* LEDs driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
+ * Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -85,13 +85,13 @@ static void da903x_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct da903x_led *led;
-
+
led = container_of(led_cdev, struct da903x_led, cdev);
led->new_brightness = value;
schedule_work(&led->work);
}
-static int __devinit da903x_led_probe(struct platform_device *pdev)
+static int da903x_led_probe(struct platform_device *pdev)
{
struct led_info *pdata = pdev->dev.platform_data;
struct da903x_led *led;
@@ -136,7 +136,7 @@ static int __devinit da903x_led_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit da903x_led_remove(struct platform_device *pdev)
+static int da903x_led_remove(struct platform_device *pdev)
{
struct da903x_led *led = platform_get_drvdata(pdev);
@@ -150,13 +150,13 @@ static struct platform_driver da903x_led_driver = {
.owner = THIS_MODULE,
},
.probe = da903x_led_probe,
- .remove = __devexit_p(da903x_led_remove),
+ .remove = da903x_led_remove,
};
module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
-MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
- "Mike Rapoport <mike@compulab.co.il>");
+MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-led");
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
index 58a5244c437e..efec43344e9f 100644
--- a/drivers/leds/leds-da9052.c
+++ b/drivers/leds/leds-da9052.c
@@ -102,7 +102,7 @@ static int da9052_configure_leds(struct da9052 *da9052)
return error;
}
-static int __devinit da9052_led_probe(struct platform_device *pdev)
+static int da9052_led_probe(struct platform_device *pdev)
{
struct da9052_pdata *pdata;
struct da9052 *da9052;
@@ -176,7 +176,7 @@ err:
return error;
}
-static int __devexit da9052_led_remove(struct platform_device *pdev)
+static int da9052_led_remove(struct platform_device *pdev)
{
struct da9052_led *led = platform_get_drvdata(pdev);
struct da9052_pdata *pdata;
@@ -204,7 +204,7 @@ static struct platform_driver da9052_led_driver = {
.owner = THIS_MODULE,
},
.probe = da9052_led_probe,
- .remove = __devexit_p(da9052_led_remove),
+ .remove = da9052_led_remove,
};
module_platform_driver(da9052_led_driver);
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index b9053fa6e253..b4d5a44cc41b 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -20,8 +20,8 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/io.h>
#define FSG_LED_WLAN_BIT 0
#define FSG_LED_WAN_BIT 1
@@ -149,11 +149,10 @@ static int fsg_led_probe(struct platform_device *pdev)
int ret;
/* Map the LED chip select address space */
- latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address) {
- ret = -ENOMEM;
- goto failremap;
- }
+ latch_address = (unsigned short *) devm_ioremap(&pdev->dev,
+ IXP4XX_EXP_BUS_BASE(2), 512);
+ if (!latch_address)
+ return -ENOMEM;
latch_value = 0xffff;
*latch_address = latch_value;
@@ -195,8 +194,6 @@ static int fsg_led_probe(struct platform_device *pdev)
failwan:
led_classdev_unregister(&fsg_wlan_led);
failwlan:
- iounmap(latch_address);
- failremap:
return ret;
}
@@ -210,8 +207,6 @@ static int fsg_led_remove(struct platform_device *pdev)
led_classdev_unregister(&fsg_sync_led);
led_classdev_unregister(&fsg_ring_led);
- iounmap(latch_address);
-
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 087d1e66f4f7..a0d931bcb37c 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/err.h>
struct gpio_led_data {
struct led_classdev cdev;
@@ -91,7 +92,7 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
delay_on, delay_off);
}
-static int __devinit create_gpio_led(const struct gpio_led *template,
+static int create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
{
@@ -101,15 +102,11 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
+ dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -129,20 +126,20 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ (led_dat->active_low ^ state) ?
+ GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ template->name);
if (ret < 0)
- goto err;
-
+ return ret;
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_gpio_led(struct gpio_led_data *led)
@@ -151,7 +148,6 @@ static void delete_gpio_led(struct gpio_led_data *led)
return;
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
struct gpio_leds_priv {
@@ -167,7 +163,7 @@ static inline int sizeof_gpio_leds_priv(int num_leds)
/* Code to create from OpenFirmware platform devices */
#ifdef CONFIG_OF_GPIO
-static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
+static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *child;
struct gpio_leds_priv *priv;
@@ -176,12 +172,16 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
/* count LEDs in this device, so we know how much to allocate */
count = of_get_child_count(np);
if (!count)
- return NULL;
+ return ERR_PTR(-ENODEV);
+
+ for_each_child_of_node(np, child)
+ if (of_get_gpio(child, 0) == -EPROBE_DEFER)
+ return ERR_PTR(-EPROBE_DEFER);
priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(count),
GFP_KERNEL);
if (!priv)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for_each_child_of_node(np, child) {
struct gpio_led led = {};
@@ -216,7 +216,7 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static const struct of_device_id of_gpio_leds_match[] = {
@@ -224,14 +224,14 @@ static const struct of_device_id of_gpio_leds_match[] = {
{},
};
#else /* CONFIG_OF_GPIO */
-static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
+static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF_GPIO */
-static int __devinit gpio_led_probe(struct platform_device *pdev)
+static int gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_leds_priv *priv;
@@ -264,8 +264,8 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
}
} else {
priv = gpio_leds_create_of(pdev);
- if (!priv)
- return -ENODEV;
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
}
platform_set_drvdata(pdev, priv);
@@ -273,7 +273,7 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit gpio_led_remove(struct platform_device *pdev)
+static int gpio_led_remove(struct platform_device *pdev)
{
struct gpio_leds_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -288,7 +288,7 @@ static int __devexit gpio_led_remove(struct platform_device *pdev)
static struct platform_driver gpio_led_driver = {
.probe = gpio_led_probe,
- .remove = __devexit_p(gpio_led_remove),
+ .remove = gpio_led_remove,
.driver = {
.name = "leds-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index b26306f6724d..214145483836 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -377,7 +377,7 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
-static int __devinit lm3530_probe(struct i2c_client *client,
+static int lm3530_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm3530_platform_data *pdata = client->dev.platform_data;
@@ -452,7 +452,7 @@ err_create_file:
return err;
}
-static int __devexit lm3530_remove(struct i2c_client *client)
+static int lm3530_remove(struct i2c_client *client)
{
struct lm3530_data *drvdata = i2c_get_clientdata(client);
@@ -472,7 +472,7 @@ MODULE_DEVICE_TABLE(i2c, lm3530_id);
static struct i2c_driver lm3530_i2c_driver = {
.probe = lm3530_probe,
- .remove = __devexit_p(lm3530_remove),
+ .remove = lm3530_remove,
.id_table = lm3530_id,
.driver = {
.name = LM3530_NAME,
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index f6837b99908c..bbf24d038a7f 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -646,7 +646,7 @@ static struct attribute_group lm3533_led_attribute_group = {
.attrs = lm3533_led_attributes
};
-static int __devinit lm3533_led_setup(struct lm3533_led *led,
+static int lm3533_led_setup(struct lm3533_led *led,
struct lm3533_led_platform_data *pdata)
{
int ret;
@@ -658,7 +658,7 @@ static int __devinit lm3533_led_setup(struct lm3533_led *led,
return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
}
-static int __devinit lm3533_led_probe(struct platform_device *pdev)
+static int lm3533_led_probe(struct platform_device *pdev)
{
struct lm3533 *lm3533;
struct lm3533_led_platform_data *pdata;
@@ -742,7 +742,7 @@ err_unregister:
return ret;
}
-static int __devexit lm3533_led_remove(struct platform_device *pdev)
+static int lm3533_led_remove(struct platform_device *pdev)
{
struct lm3533_led *led = platform_get_drvdata(pdev);
@@ -774,7 +774,7 @@ static struct platform_driver lm3533_led_driver = {
.owner = THIS_MODULE,
},
.probe = lm3533_led_probe,
- .remove = __devexit_p(lm3533_led_remove),
+ .remove = lm3533_led_remove,
.shutdown = lm3533_led_shutdown,
};
module_platform_driver(lm3533_led_driver);
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 065ec015d67a..65d79284c488 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -168,7 +168,7 @@ static char lm355x_name[][I2C_NAME_SIZE] = {
};
/* chip initialize */
-static int __devinit lm355x_chip_init(struct lm355x_chip_data *chip)
+static int lm355x_chip_init(struct lm355x_chip_data *chip)
{
int ret;
unsigned int reg_val;
@@ -408,10 +408,10 @@ static ssize_t lm3556_indicator_pattern_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
+static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
static const struct regmap_config lm355x_regmap = {
.reg_bits = 8,
@@ -420,7 +420,7 @@ static const struct regmap_config lm355x_regmap = {
};
/* module initialize */
-static int __devinit lm355x_probe(struct i2c_client *client,
+static int lm355x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm355x_platform_data *pdata = client->dev.platform_data;
@@ -526,7 +526,7 @@ err_out:
return err;
}
-static int __devexit lm355x_remove(struct i2c_client *client)
+static int lm355x_remove(struct i2c_client *client)
{
struct lm355x_chip_data *chip = i2c_get_clientdata(client);
struct lm355x_reg_data *preg = chip->regs;
@@ -560,7 +560,7 @@ static struct i2c_driver lm355x_i2c_driver = {
.pm = NULL,
},
.probe = lm355x_probe,
- .remove = __devexit_p(lm355x_remove),
+ .remove = lm355x_remove,
.id_table = lm355x_id,
};
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 3285006e9888..07b3dde90613 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -93,7 +93,7 @@ struct lm3642_chip_data {
};
/* chip initialize */
-static int __devinit lm3642_chip_init(struct lm3642_chip_data *chip)
+static int lm3642_chip_init(struct lm3642_chip_data *chip)
{
int ret;
struct lm3642_platform_data *pdata = chip->pdata;
@@ -201,13 +201,13 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(torch_pin, 0666, NULL, lm3642_torch_pin_store);
+static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
static void lm3642_deferred_torch_brightness_set(struct work_struct *work)
{
@@ -258,13 +258,13 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
return size;
out:
dev_err(chip->dev, "%s:i2c access fail to register\n", __func__);
- return size;
+ return ret;
out_strtoint:
dev_err(chip->dev, "%s: fail to change str to int\n", __func__);
- return size;
+ return ret;
}
-static DEVICE_ATTR(strobe_pin, 0666, NULL, lm3642_strobe_pin_store);
+static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
static void lm3642_deferred_strobe_brightness_set(struct work_struct *work)
{
@@ -313,7 +313,7 @@ static const struct regmap_config lm3642_regmap = {
.max_register = REG_MAX,
};
-static int __devinit lm3642_probe(struct i2c_client *client,
+static int lm3642_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm3642_platform_data *pdata = client->dev.platform_data;
@@ -420,7 +420,7 @@ err_out:
return err;
}
-static int __devexit lm3642_remove(struct i2c_client *client)
+static int lm3642_remove(struct i2c_client *client)
{
struct lm3642_chip_data *chip = i2c_get_clientdata(client);
@@ -450,7 +450,7 @@ static struct i2c_driver lm3642_i2c_driver = {
.pm = NULL,
},
.probe = lm3642_probe,
- .remove = __devexit_p(lm3642_remove),
+ .remove = lm3642_remove,
.id_table = lm3642_id,
};
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index c298f7d9f535..0c4386e656c1 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -86,7 +86,7 @@ static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
tmp = i2c_smbus_read_byte_data(client, reg);
if (tmp < 0)
- return -EINVAL;
+ return tmp;
*value = tmp;
@@ -374,7 +374,7 @@ exit:
return err;
}
-static int __devinit lp3944_probe(struct i2c_client *client,
+static int lp3944_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
@@ -411,7 +411,7 @@ static int __devinit lp3944_probe(struct i2c_client *client,
return 0;
}
-static int __devexit lp3944_remove(struct i2c_client *client)
+static int lp3944_remove(struct i2c_client *client)
{
struct lp3944_platform_data *pdata = client->dev.platform_data;
struct lp3944_data *data = i2c_get_clientdata(client);
@@ -446,7 +446,7 @@ static struct i2c_driver lp3944_driver = {
.name = "lp3944",
},
.probe = lp3944_probe,
- .remove = __devexit_p(lp3944_remove),
+ .remove = lp3944_remove,
.id_table = lp3944_id,
};
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 2064aefedc07..cb8a5220200b 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -152,7 +152,7 @@ static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -616,7 +616,7 @@ static ssize_t store_led_pattern(struct device *dev,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
@@ -687,7 +687,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
&lp5521_led_attribute_group);
}
-static int __devinit lp5521_init_led(struct lp5521_led *led,
+static int lp5521_init_led(struct lp5521_led *led,
struct i2c_client *client,
int chan, struct lp5521_platform_data *pdata)
{
@@ -736,7 +736,7 @@ static int __devinit lp5521_init_led(struct lp5521_led *led,
return 0;
}
-static int __devinit lp5521_probe(struct i2c_client *client,
+static int lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5521_chip *chip;
@@ -788,10 +788,17 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
+ if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ dev_err(&client->dev,
+ "unexpected data in register (expected 0x%x got 0x%x)\n",
+ LP5521_REG_R_CURR_DEFAULT, buf);
+ ret = -EINVAL;
+ goto fail2;
+ }
usleep_range(10000, 20000);
ret = lp5521_detect(client);
@@ -855,7 +862,7 @@ fail1:
return ret;
}
-static int __devexit lp5521_remove(struct i2c_client *client)
+static int lp5521_remove(struct i2c_client *client)
{
struct lp5521_chip *chip = i2c_get_clientdata(client);
int i;
@@ -886,7 +893,7 @@ static struct i2c_driver lp5521_driver = {
.name = "lp5521",
},
.probe = lp5521_probe,
- .remove = __devexit_p(lp5521_remove),
+ .remove = lp5521_remove,
.id_table = lp5521_id,
};
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 97994ffdc014..7f5be8948cde 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -171,7 +171,7 @@ static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
s32 ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- return -EIO;
+ return ret;
*buf = ret;
return 0;
@@ -248,7 +248,10 @@ static int lp5523_configure(struct i2c_client *client)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp5523_read(client, LP5523_REG_STATUS, &status);
+ ret = lp5523_read(client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
status &= LP5523_ENG_STATUS_MASK;
if (status == LP5523_ENG_STATUS_MASK) {
@@ -464,10 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev,
LP5523_EN_LEDTEST | 16);
usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000); /* Was not ready. Wait little bit */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+ if (ret < 0)
+ goto fail;
+
vdd--; /* There may be some fluctuation in measurement */
for (i = 0; i < LP5523_LEDS; i++) {
@@ -489,9 +498,14 @@ static ssize_t lp5523_selftest(struct device *dev,
/* ADC conversion time is 2.7 ms typically */
usleep_range(3000, 6000);
ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+ if (ret < 0)
+ goto fail;
+
if (!(status & LP5523_LEDTEST_DONE))
usleep_range(3000, 6000);/* Was not ready. Wait. */
- ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ ret = lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+ if (ret < 0)
+ goto fail;
if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
pos += sprintf(buf + pos, "LED %d FAIL\n", i);
@@ -696,7 +710,7 @@ static ssize_t store_current(struct device *dev,
ssize_t ret;
unsigned long curr;
- if (strict_strtoul(buf, 0, &curr))
+ if (kstrtoul(buf, 0, &curr))
return -EINVAL;
if (curr > led->max_current)
@@ -833,7 +847,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
return 0;
}
-static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int lp5523_init_led(struct lp5523_led *led, struct device *dev,
int chan, struct lp5523_platform_data *pdata,
const char *chip_name)
{
@@ -882,7 +896,7 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
return 0;
}
-static int __devinit lp5523_probe(struct i2c_client *client,
+static int lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5523_chip *chip;
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 64009a176651..4353942c5fd1 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -125,7 +125,7 @@ static void lp8788_brightness_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static __devinit int lp8788_led_probe(struct platform_device *pdev)
+static int lp8788_led_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
struct lp8788_led_platform_data *led_pdata;
@@ -167,7 +167,7 @@ static __devinit int lp8788_led_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit lp8788_led_remove(struct platform_device *pdev)
+static int lp8788_led_remove(struct platform_device *pdev)
{
struct lp8788_led *led = platform_get_drvdata(pdev);
@@ -179,7 +179,7 @@ static int __devexit lp8788_led_remove(struct platform_device *pdev)
static struct platform_driver lp8788_led_driver = {
.probe = lp8788_led_probe,
- .remove = __devexit_p(lp8788_led_remove),
+ .remove = lp8788_led_remove,
.driver = {
.name = LP8788_DEV_KEYLED,
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 09a732217f6d..c9b9e1fec587 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -82,22 +82,18 @@ static void lt3593_led_set(struct led_classdev *led_cdev,
schedule_work(&led_dat->work);
}
-static int __devinit create_lt3593_led(const struct gpio_led *template,
+static int create_lt3593_led(const struct gpio_led *template,
struct lt3593_led_data *led_dat, struct device *parent)
{
int ret, state;
/* skip leds on GPIOs that aren't available */
if (!gpio_is_valid(template->gpio)) {
- printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
+ dev_info(parent, "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
KBUILD_MODNAME, template->gpio, template->name);
return 0;
}
- ret = gpio_request(template->gpio, template->name);
- if (ret < 0)
- return ret;
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
@@ -110,24 +106,21 @@ static int __devinit create_lt3593_led(const struct gpio_led *template,
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, state);
+ ret = devm_gpio_request_one(parent, template->gpio,
+ GPIOF_DIR_OUT | state, template->name);
if (ret < 0)
- goto err;
+ return ret;
INIT_WORK(&led_dat->work, lt3593_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
- goto err;
+ return ret;
- printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
+ dev_info(parent, "%s: registered LT3593 LED '%s' at GPIO %d\n",
KBUILD_MODNAME, template->name, template->gpio);
return 0;
-
-err:
- gpio_free(led_dat->gpio);
- return ret;
}
static void delete_lt3593_led(struct lt3593_led_data *led)
@@ -137,10 +130,9 @@ static void delete_lt3593_led(struct lt3593_led_data *led)
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
- gpio_free(led->gpio);
}
-static int __devinit lt3593_led_probe(struct platform_device *pdev)
+static int lt3593_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct lt3593_led_data *leds_data;
@@ -173,7 +165,7 @@ err:
return ret;
}
-static int __devexit lt3593_led_remove(struct platform_device *pdev)
+static int lt3593_led_remove(struct platform_device *pdev)
{
int i;
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
@@ -189,7 +181,7 @@ static int __devexit lt3593_led_remove(struct platform_device *pdev)
static struct platform_driver lt3593_led_driver = {
.probe = lt3593_led_probe,
- .remove = __devexit_p(lt3593_led_remove),
+ .remove = lt3593_led_remove,
.driver = {
.name = "leds-lt3593",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 569e36de37df..f449a8bdddc7 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -229,7 +229,7 @@ static ssize_t max8997_led_store_mode(struct device *dev,
static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
-static int __devinit max8997_led_probe(struct platform_device *pdev)
+static int max8997_led_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
@@ -292,7 +292,7 @@ static int __devinit max8997_led_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit max8997_led_remove(struct platform_device *pdev)
+static int max8997_led_remove(struct platform_device *pdev)
{
struct max8997_led *led = platform_get_drvdata(pdev);
@@ -308,7 +308,7 @@ static struct platform_driver max8997_led_driver = {
.owner = THIS_MODULE,
},
.probe = max8997_led_probe,
- .remove = __devexit_p(max8997_led_remove),
+ .remove = max8997_led_remove,
};
module_platform_driver(max8997_led_driver);
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 2a5d43400677..e942adaa7504 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -128,7 +128,7 @@ static void mc13783_led_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
+static int mc13783_led_setup(struct mc13783_led *led, int max_current)
{
int shift = 0;
int mask = 0;
@@ -181,7 +181,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
return ret;
}
-static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
+static int mc13783_leds_prepare(struct platform_device *pdev)
{
struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mc13xxx *dev = dev_get_drvdata(pdev->dev.parent);
@@ -262,7 +262,7 @@ out:
return ret;
}
-static int __devinit mc13783_led_probe(struct platform_device *pdev)
+static int mc13783_led_probe(struct platform_device *pdev)
{
struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mc13xxx_led_platform_data *led_cur;
@@ -348,7 +348,7 @@ err_register:
return ret;
}
-static int __devexit mc13783_led_remove(struct platform_device *pdev)
+static int mc13783_led_remove(struct platform_device *pdev)
{
struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mc13783_led *led = platform_get_drvdata(pdev);
@@ -381,7 +381,7 @@ static struct platform_driver mc13783_led_driver = {
.owner = THIS_MODULE,
},
.probe = mc13783_led_probe,
- .remove = __devexit_p(mc13783_led_remove),
+ .remove = mc13783_led_remove,
};
module_platform_driver(mc13783_led_driver);
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index f117f7326c5b..27d06c528246 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/nsc_gpio.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 461bbf9b33fa..c61c5ebcc08e 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -71,7 +71,7 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
spin_unlock_irqrestore(&gpio_ext_lock, flags);
}
-static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
+static int gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
{
int err;
int i;
@@ -243,7 +243,7 @@ static ssize_t netxbig_led_sata_store(struct device *dev,
int mode_val;
int ret;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
@@ -301,7 +301,7 @@ static void delete_netxbig_led(struct netxbig_led_data *led_dat)
led_classdev_unregister(&led_dat->cdev);
}
-static int __devinit
+static int
create_netxbig_led(struct platform_device *pdev,
struct netxbig_led_data *led_dat,
const struct netxbig_led *template)
@@ -352,7 +352,7 @@ create_netxbig_led(struct platform_device *pdev,
return ret;
}
-static int __devinit netxbig_led_probe(struct platform_device *pdev)
+static int netxbig_led_probe(struct platform_device *pdev)
{
struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
struct netxbig_led_data *leds_data;
@@ -389,7 +389,7 @@ err_free_leds:
return ret;
}
-static int __devexit netxbig_led_remove(struct platform_device *pdev)
+static int netxbig_led_remove(struct platform_device *pdev)
{
struct netxbig_led_platform_data *pdata = pdev->dev.platform_data;
struct netxbig_led_data *leds_data;
@@ -407,7 +407,7 @@ static int __devexit netxbig_led_remove(struct platform_device *pdev)
static struct platform_driver netxbig_led_driver = {
.probe = netxbig_led_probe,
- .remove = __devexit_p(netxbig_led_remove),
+ .remove = netxbig_led_remove,
.driver = {
.name = "leds-netxbig",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index d176ec83f5d9..d978171c25b4 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -30,6 +30,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_data/leds-kirkwood-ns2.h>
+#include <linux/of_gpio.h>
/*
* The Network Space v2 dual-GPIO LED is wired to a CPLD and can blink in
@@ -149,7 +150,7 @@ static ssize_t ns2_led_sata_store(struct device *dev,
unsigned long enable;
enum ns2_led_modes mode;
- ret = strict_strtoul(buff, 10, &enable);
+ ret = kstrtoul(buff, 10, &enable);
if (ret < 0)
return ret;
@@ -184,36 +185,29 @@ static ssize_t ns2_led_sata_show(struct device *dev,
static DEVICE_ATTR(sata, 0644, ns2_led_sata_show, ns2_led_sata_store);
-static int __devinit
+static int
create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
const struct ns2_led *template)
{
int ret;
enum ns2_led_modes mode;
- ret = gpio_request(template->cmd, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->cmd,
- gpio_get_value(template->cmd));
- if (ret)
- gpio_free(template->cmd);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->cmd,
+ GPIOF_DIR_OUT | gpio_get_value(template->cmd),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
template->name);
+ return ret;
}
- ret = gpio_request(template->slow, template->name);
- if (ret == 0) {
- ret = gpio_direction_output(template->slow,
- gpio_get_value(template->slow));
- if (ret)
- gpio_free(template->slow);
- }
+ ret = devm_gpio_request_one(&pdev->dev, template->slow,
+ GPIOF_DIR_OUT | gpio_get_value(template->slow),
+ template->name);
if (ret) {
dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
template->name);
- goto err_free_cmd;
+ return ret;
}
rwlock_init(&led_dat->rw_lock);
@@ -228,7 +222,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = ns2_led_get_mode(led_dat, &mode);
if (ret < 0)
- goto err_free_slow;
+ return ret;
/* Set LED initial state. */
led_dat->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
@@ -237,7 +231,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
if (ret < 0)
- goto err_free_slow;
+ return ret;
ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
if (ret < 0)
@@ -247,11 +241,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
err_free_cdev:
led_classdev_unregister(&led_dat->cdev);
-err_free_slow:
- gpio_free(led_dat->slow);
-err_free_cmd:
- gpio_free(led_dat->cmd);
-
return ret;
}
@@ -259,22 +248,90 @@ static void delete_ns2_led(struct ns2_led_data *led_dat)
{
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
- gpio_free(led_dat->cmd);
- gpio_free(led_dat->slow);
}
-static int __devinit ns2_led_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF_GPIO
+/*
+ * Translate OpenFirmware node properties into platform_data.
+ */
+static int
+ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ struct ns2_led *leds;
+ int num_leds = 0;
+ int i = 0;
+
+ num_leds = of_get_child_count(np);
+ if (!num_leds)
+ return -ENODEV;
+
+ leds = devm_kzalloc(dev, num_leds * sizeof(struct ns2_led),
+ GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ const char *string;
+ int ret;
+
+ ret = of_get_named_gpio(child, "cmd-gpio", 0);
+ if (ret < 0)
+ return ret;
+ leds[i].cmd = ret;
+ ret = of_get_named_gpio(child, "slow-gpio", 0);
+ if (ret < 0)
+ return ret;
+ leds[i].slow = ret;
+ ret = of_property_read_string(child, "label", &string);
+ leds[i].name = (ret == 0) ? string : child->name;
+ ret = of_property_read_string(child, "linux,default-trigger",
+ &string);
+ if (ret == 0)
+ leds[i].default_trigger = string;
+
+ i++;
+ }
+
+ pdata->leds = leds;
+ pdata->num_leds = num_leds;
+
+ return 0;
+}
+
+static const struct of_device_id of_ns2_leds_match[] = {
+ { .compatible = "lacie,ns2-leds", },
+ {},
+};
+#endif /* CONFIG_OF_GPIO */
+
+static int ns2_led_probe(struct platform_device *pdev)
{
struct ns2_led_platform_data *pdata = pdev->dev.platform_data;
struct ns2_led_data *leds_data;
int i;
int ret;
+#ifdef CONFIG_OF_GPIO
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct ns2_led_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = ns2_leds_get_of_pdata(&pdev->dev, pdata);
+ if (ret)
+ return ret;
+ }
+#else
if (!pdata)
return -EINVAL;
+#endif /* CONFIG_OF_GPIO */
leds_data = devm_kzalloc(&pdev->dev, sizeof(struct ns2_led_data) *
- pdata->num_leds, GFP_KERNEL);
+ pdata->num_leds, GFP_KERNEL);
if (!leds_data)
return -ENOMEM;
@@ -292,7 +349,7 @@ static int __devinit ns2_led_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ns2_led_remove(struct platform_device *pdev)
+static int ns2_led_remove(struct platform_device *pdev)
{
int i;
struct ns2_led_platform_data *pdata = pdev->dev.platform_data;
@@ -310,10 +367,11 @@ static int __devexit ns2_led_remove(struct platform_device *pdev)
static struct platform_driver ns2_led_driver = {
.probe = ns2_led_probe,
- .remove = __devexit_p(ns2_led_remove),
+ .remove = ns2_led_remove,
.driver = {
- .name = "leds-ns2",
- .owner = THIS_MODULE,
+ .name = "leds-ns2",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_ns2_leds_match),
},
};
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index c4646825a620..ee14662ed5ce 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -115,7 +115,7 @@ static void ot200_led_brightness_set(struct led_classdev *led_cdev,
spin_unlock_irqrestore(&value_lock, flags);
}
-static int __devinit ot200_led_probe(struct platform_device *pdev)
+static int ot200_led_probe(struct platform_device *pdev)
{
int i;
int ret;
@@ -144,7 +144,7 @@ err:
return ret;
}
-static int __devexit ot200_led_remove(struct platform_device *pdev)
+static int ot200_led_remove(struct platform_device *pdev)
{
int i;
@@ -156,7 +156,7 @@ static int __devexit ot200_led_remove(struct platform_device *pdev)
static struct platform_driver ot200_led_driver = {
.probe = ot200_led_probe,
- .remove = __devexit_p(ot200_led_remove),
+ .remove = ot200_led_remove,
.driver = {
.name = "leds-ot200",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index aef3cf0432fe..edf485b773c8 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -255,7 +255,7 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
schedule_work(&pca955x->work);
}
-static int __devinit pca955x_probe(struct i2c_client *client,
+static int pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca955x *pca955x;
@@ -277,7 +277,7 @@ static int __devinit pca955x_probe(struct i2c_client *client,
return -ENODEV;
}
- printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
+ dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);
@@ -363,7 +363,7 @@ exit:
return err;
}
-static int __devexit pca955x_remove(struct i2c_client *client)
+static int pca955x_remove(struct i2c_client *client)
{
struct pca955x *pca955x = i2c_get_clientdata(client);
int i;
@@ -382,7 +382,7 @@ static struct i2c_driver pca955x_driver = {
.owner = THIS_MODULE,
},
.probe = pca955x_probe,
- .remove = __devexit_p(pca955x_remove),
+ .remove = pca955x_remove,
.id_table = pca955x_id,
};
diff --git a/drivers/leds/leds-pca9633.c b/drivers/leds/leds-pca9633.c
index 2f2f9c43535d..9aae5679ffb2 100644
--- a/drivers/leds/leds-pca9633.c
+++ b/drivers/leds/leds-pca9633.c
@@ -93,7 +93,7 @@ static void pca9633_led_set(struct led_classdev *led_cdev,
schedule_work(&pca9633->work);
}
-static int __devinit pca9633_probe(struct i2c_client *client,
+static int pca9633_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca9633_led *pca9633;
@@ -164,7 +164,7 @@ exit:
return err;
}
-static int __devexit pca9633_remove(struct i2c_client *client)
+static int pca9633_remove(struct i2c_client *client)
{
struct pca9633_led *pca9633 = i2c_get_clientdata(client);
int i;
@@ -183,7 +183,7 @@ static struct i2c_driver pca9633_driver = {
.owner = THIS_MODULE,
},
.probe = pca9633_probe,
- .remove = __devexit_p(pca9633_remove),
+ .remove = pca9633_remove,
.id_table = pca9633_id,
};
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index f2e44c719437..2157524f277c 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -26,7 +26,7 @@
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
- unsigned int active_low;
+ unsigned int active_low;
unsigned int period;
};
@@ -107,7 +107,7 @@ err:
return ret;
}
-static int __devexit led_pwm_remove(struct platform_device *pdev)
+static int led_pwm_remove(struct platform_device *pdev)
{
int i;
struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
@@ -125,7 +125,7 @@ static int __devexit led_pwm_remove(struct platform_device *pdev)
static struct platform_driver led_pwm_driver = {
.probe = led_pwm_probe,
- .remove = __devexit_p(led_pwm_remove),
+ .remove = led_pwm_remove,
.driver = {
.name = "leds_pwm",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index a7815b6cd856..2e746d257b02 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -16,7 +16,7 @@
#include <asm/mach-rc32434/rb.h>
static void rb532_led_set(struct led_classdev *cdev,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
if (brightness)
set_latch_u5(LO_ULED, 0);
@@ -37,12 +37,12 @@ static struct led_classdev rb532_uled = {
.default_trigger = "nand-disk",
};
-static int __devinit rb532_led_probe(struct platform_device *pdev)
+static int rb532_led_probe(struct platform_device *pdev)
{
return led_classdev_register(&pdev->dev, &rb532_uled);
}
-static int __devexit rb532_led_remove(struct platform_device *pdev)
+static int rb532_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&rb532_uled);
return 0;
@@ -50,7 +50,7 @@ static int __devexit rb532_led_remove(struct platform_device *pdev)
static struct platform_driver rb532_led_driver = {
.probe = rb532_led_probe,
- .remove = __devexit_p(rb532_led_remove),
+ .remove = rb532_led_remove,
.driver = {
.name = "rb532-led",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 25d382d60fa9..4253a9b03dbf 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -140,7 +140,7 @@ static void regulator_led_brightness_set(struct led_classdev *led_cdev,
schedule_work(&led->work);
}
-static int __devinit regulator_led_probe(struct platform_device *pdev)
+static int regulator_led_probe(struct platform_device *pdev)
{
struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
struct regulator_led *led;
@@ -206,7 +206,7 @@ err_vcc:
return ret;
}
-static int __devexit regulator_led_remove(struct platform_device *pdev)
+static int regulator_led_remove(struct platform_device *pdev)
{
struct regulator_led *led = platform_get_drvdata(pdev);
@@ -223,7 +223,7 @@ static struct platform_driver regulator_led_driver = {
.owner = THIS_MODULE,
},
.probe = regulator_led_probe,
- .remove = __devexit_p(regulator_led_remove),
+ .remove = regulator_led_remove,
};
module_platform_driver(regulator_led_driver);
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 771ea067e680..e0fff1ca5923 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -204,10 +204,10 @@ static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
if (p->pin_state == R_TPU_PIN_GPIO_FN)
gpio_free(cfg->pin_gpio_fn);
- if (new_state == R_TPU_PIN_GPIO) {
- gpio_request(cfg->pin_gpio, cfg->name);
- gpio_direction_output(cfg->pin_gpio, !!brightness);
- }
+ if (new_state == R_TPU_PIN_GPIO)
+ gpio_request_one(cfg->pin_gpio, GPIOF_DIR_OUT | !!brightness,
+ cfg->name);
+
if (new_state == R_TPU_PIN_GPIO_FN)
gpio_request(cfg->pin_gpio_fn, cfg->name);
@@ -238,7 +238,7 @@ static void r_tpu_set_brightness(struct led_classdev *ldev,
schedule_work(&p->work);
}
-static int __devinit r_tpu_probe(struct platform_device *pdev)
+static int r_tpu_probe(struct platform_device *pdev)
{
struct led_renesas_tpu_config *cfg = pdev->dev.platform_data;
struct r_tpu_priv *p;
@@ -263,18 +263,18 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
}
/* map memory, let mapbase point to our channel */
- p->mapbase = ioremap_nocache(res->start, resource_size(res));
+ p->mapbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
if (p->mapbase == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
return -ENXIO;
}
/* get hold of clock */
- p->clk = clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err0;
+ return PTR_ERR(p->clk);
}
p->pdev = pdev;
@@ -293,7 +293,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
p->ldev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &p->ldev);
if (ret < 0)
- goto err1;
+ goto err0;
/* max_brightness may be updated by the LED core code */
p->min_rate = p->ldev.max_brightness * p->refresh_rate;
@@ -301,15 +301,12 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
- err1:
- r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
- clk_put(p->clk);
err0:
- iounmap(p->mapbase);
+ r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
return ret;
}
-static int __devexit r_tpu_remove(struct platform_device *pdev)
+static int r_tpu_remove(struct platform_device *pdev)
{
struct r_tpu_priv *p = platform_get_drvdata(pdev);
@@ -320,15 +317,13 @@ static int __devexit r_tpu_remove(struct platform_device *pdev)
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
pm_runtime_disable(&pdev->dev);
- clk_put(p->clk);
- iounmap(p->mapbase);
return 0;
}
static struct platform_driver r_tpu_device_driver = {
.probe = r_tpu_probe,
- .remove = __devexit_p(r_tpu_remove),
+ .remove = r_tpu_remove,
.driver = {
.name = "leds-renesas-tpu",
}
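The leds-renesas-tpu conversion above also shows the devm_* pattern that lets the error paths and remove() drop their manual iounmap()/clk_put() calls. A minimal sketch of a probe using the same managed helpers (hypothetical "example" names, not taken from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;
		struct clk *clk;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		/* Managed mapping: released automatically when the device goes away. */
		base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENXIO;

		/* Managed clock: no clk_put() needed in error paths or remove(). */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* ... register the LED class device here ... */
		return 0;
	}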
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 57371e1485ab..ec9b287ecfbf 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -263,7 +263,7 @@ static int nasgpio_led_set_blink(struct led_classdev *led_cdev,
* already taken care of this, but we will do so in a non-destructive manner
* so that we have what we need whether the BIOS did it or not.
*/
-static int __devinit ich7_gpio_init(struct device *dev)
+static int ich7_gpio_init(struct device *dev)
{
int i;
u32 config_data = 0;
@@ -342,7 +342,7 @@ static void ich7_lpc_cleanup(struct device *dev)
* so we can retrieve the required operational information and prepare the GPIO.
*/
static struct pci_dev *nas_gpio_pci_dev;
-static int __devinit ich7_lpc_probe(struct pci_dev *dev,
+static int ich7_lpc_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int status;
@@ -459,7 +459,7 @@ static ssize_t nas_led_blink_store(struct device *dev,
struct led_classdev *led = dev_get_drvdata(dev);
unsigned long blink_state;
- ret = strict_strtoul(buf, 10, &blink_state);
+ ret = kstrtoul(buf, 10, &blink_state);
if (ret)
return ret;
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 134d9a4b34f1..07ff5a3a6cee 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -123,7 +123,7 @@ struct sunfire_drvdata {
struct sunfire_led leds[NUM_LEDS_PER_BOARD];
};
-static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
+static int sunfire_led_generic_probe(struct platform_device *pdev,
struct led_type *types)
{
struct sunfire_drvdata *p;
@@ -165,7 +165,7 @@ static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
return 0;
}
-static int __devexit sunfire_led_generic_remove(struct platform_device *pdev)
+static int sunfire_led_generic_remove(struct platform_device *pdev)
{
struct sunfire_drvdata *p = dev_get_drvdata(&pdev->dev);
int i;
@@ -192,7 +192,7 @@ static struct led_type clockboard_led_types[NUM_LEDS_PER_BOARD] = {
},
};
-static int __devinit sunfire_clockboard_led_probe(struct platform_device *pdev)
+static int sunfire_clockboard_led_probe(struct platform_device *pdev)
{
return sunfire_led_generic_probe(pdev, clockboard_led_types);
}
@@ -213,7 +213,7 @@ static struct led_type fhc_led_types[NUM_LEDS_PER_BOARD] = {
},
};
-static int __devinit sunfire_fhc_led_probe(struct platform_device *pdev)
+static int sunfire_fhc_led_probe(struct platform_device *pdev)
{
return sunfire_led_generic_probe(pdev, fhc_led_types);
}
@@ -223,7 +223,7 @@ MODULE_ALIAS("platform:sunfire-fhc-leds");
static struct platform_driver sunfire_clockboard_led_driver = {
.probe = sunfire_clockboard_led_probe,
- .remove = __devexit_p(sunfire_led_generic_remove),
+ .remove = sunfire_led_generic_remove,
.driver = {
.name = "sunfire-clockboard-leds",
.owner = THIS_MODULE,
@@ -232,7 +232,7 @@ static struct platform_driver sunfire_clockboard_led_driver = {
static struct platform_driver sunfire_fhc_led_driver = {
.probe = sunfire_fhc_led_probe,
- .remove = __devexit_p(sunfire_led_generic_remove),
+ .remove = sunfire_led_generic_remove,
.driver = {
.name = "sunfire-fhc-leds",
.owner = THIS_MODULE,
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index dabcf7ae8d0f..b26a63bae16b 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -667,7 +667,7 @@ static void tca6507_remove_gpio(struct tca6507_chip *tca)
}
#endif /* CONFIG_GPIOLIB */
-static int __devinit tca6507_probe(struct i2c_client *client,
+static int tca6507_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tca6507_chip *tca;
@@ -730,7 +730,7 @@ exit:
return err;
}
-static int __devexit tca6507_remove(struct i2c_client *client)
+static int tca6507_remove(struct i2c_client *client)
{
int i;
struct tca6507_chip *tca = i2c_get_clientdata(client);
@@ -752,7 +752,7 @@ static struct i2c_driver tca6507_driver = {
.owner = THIS_MODULE,
},
.probe = tca6507_probe,
- .remove = __devexit_p(tca6507_remove),
+ .remove = tca6507_remove,
.id_table = tca6507_id,
};
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 88f23f845595..ed15157c8f6c 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -216,13 +216,13 @@ static int wm8350_led_probe(struct platform_device *pdev)
isink = devm_regulator_get(&pdev->dev, "led_isink");
if (IS_ERR(isink)) {
- printk(KERN_ERR "%s: can't get ISINK\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get ISINK\n", __func__);
return PTR_ERR(isink);
}
dcdc = devm_regulator_get(&pdev->dev, "led_vcc");
if (IS_ERR(dcdc)) {
- printk(KERN_ERR "%s: can't get DCDC\n", __func__);
+ dev_err(&pdev->dev, "%s: can't get DCDC\n", __func__);
return PTR_ERR(dcdc);
}
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 6e21e654bb02..b358cc05eff5 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
#include <linux/module.h>
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index b941685f2227..027a2b15d7d8 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -40,7 +40,7 @@ static int fb_notifier_callback(struct notifier_block *p,
int new_status = *blank ? BLANK : UNBLANK;
switch (event) {
- case FB_EVENT_BLANK :
+ case FB_EVENT_BLANK:
if (new_status == n->old_status)
break;
@@ -76,7 +76,7 @@ static ssize_t bl_trig_invert_store(struct device *dev,
unsigned long invert;
int ret;
- ret = strict_strtoul(buf, 10, &invert);
+ ret = kstrtoul(buf, 10, &invert);
if (ret < 0)
return ret;
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ba215dc42f98..72e3ebfc281f 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -110,7 +110,7 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
unsigned long inverted;
int ret;
- ret = strict_strtoul(buf, 10, &inverted);
+ ret = kstrtoul(buf, 10, &inverted);
if (ret < 0)
return ret;
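Several hunks in this series swap the deprecated strict_strtoul() for kstrtoul(), which has the same calling convention: it parses into the result pointer and returns 0 or a negative errno. A minimal sysfs store handler showing the idiom (hypothetical names, illustration only):

	#include <linux/device.h>
	#include <linux/kernel.h>

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
	{
		unsigned long val;
		int ret;

		/* Base-10 parse; kstrtoul() rejects trailing garbage. */
		ret = kstrtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		/* ... apply val to the device ... */
		return size;
	}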
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index b5fdcb78a75b..a5ebc0083d87 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
* eventfd (ie. the appropriate virtqueue thread)?
*/
if (!send_notify_to_eventfd(cpu)) {
- /* OK, we tell the main Laucher. */
+ /* OK, we tell the main Launcher. */
if (put_user(cpu->pending_notify, user))
return -EFAULT;
return sizeof(cpu->pending_notify);
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index ef87310b7662..ac5c87939860 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -679,7 +679,7 @@ void macio_release_resources(struct macio_dev *dev)
#ifdef CONFIG_PCI
-static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device_node* np;
struct macio_chip* chip;
@@ -739,7 +739,7 @@ static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_devi
return 0;
}
-static void __devexit macio_pci_remove(struct pci_dev* pdev)
+static void macio_pci_remove(struct pci_dev* pdev)
{
panic("removing of macio-asic not supported !\n");
}
@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
* MacIO is matched against any Apple ID, its probe() function
* will then decide whether it applies or not
*/
-static const struct pci_device_id __devinitconst pci_ids[] = { {
+static const struct pci_device_id pci_ids[] = { {
.vendor = PCI_VENDOR_ID_APPLE,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 3f8d032f180f..d98e566a8f5e 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -556,7 +556,8 @@ static int media_bay_task(void *x)
return 0;
}
-static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_device_id *match)
+static int media_bay_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
{
struct media_bay_info* bay;
u32 __iomem *regbase;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 6dc26b61219b..cad0e19b47a2 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -253,7 +253,7 @@ static void rackmeter_do_timer(struct work_struct *work)
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
-static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
+static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
{
unsigned int cpu;
@@ -287,7 +287,7 @@ static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}
-static int __devinit rackmeter_setup(struct rackmeter *rm)
+static int rackmeter_setup(struct rackmeter *rm)
{
pr_debug("rackmeter: setting up i2s..\n");
rackmeter_setup_i2s(rm);
@@ -362,8 +362,8 @@ static irqreturn_t rackmeter_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int __devinit rackmeter_probe(struct macio_dev* mdev,
- const struct of_device_id *match)
+static int rackmeter_probe(struct macio_dev* mdev,
+ const struct of_device_id *match)
{
struct device_node *i2s = NULL, *np = NULL;
struct rackmeter *rm = NULL;
@@ -521,7 +521,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
return rc;
}
-static int __devexit rackmeter_remove(struct macio_dev* mdev)
+static int rackmeter_remove(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
@@ -588,7 +588,7 @@ static struct macio_driver rackmeter_driver = {
.of_match_table = rackmeter_match,
},
.probe = rackmeter_probe,
- .remove = __devexit_p(rackmeter_remove),
+ .remove = rackmeter_remove,
.shutdown = rackmeter_shutdown,
};
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 7d5a6b40b31c..9c6b96414862 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -565,7 +565,7 @@ fail_msg_node:
fail_db_node:
of_node_put(smu->db_node);
fail_bootmem:
- free_bootmem((unsigned long)smu, sizeof(struct smu_device));
+ free_bootmem(__pa(smu), sizeof(struct smu_device));
smu = NULL;
fail_np:
of_node_put(np);
@@ -997,7 +997,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
"%02x !\n", id, hdr->id);
goto failure;
}
- if (prom_add_property(smu->of_node, prop)) {
+ if (of_add_property(smu->of_node, prop)) {
printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
"property !\n", id);
goto failure;
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index ac3f243b9c5a..7c28b71246c9 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -177,9 +177,9 @@ static const struct wf_sensor_ops wf_ad7417_adc_ops = {
.owner = THIS_MODULE,
};
-static void __devinit wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
- int index, const char *name,
- const struct wf_sensor_ops *ops)
+static void wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
+ int index, const char *name,
+ const struct wf_sensor_ops *ops)
{
pv->sensors[index].name = kasprintf(GFP_KERNEL, "%s-%d", name, pv->cpu);
pv->sensors[index].priv = pv;
@@ -188,7 +188,7 @@ static void __devinit wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
kref_get(&pv->ref);
}
-static void __devinit wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
+static void wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
{
int rc;
u8 buf[2];
@@ -230,8 +230,8 @@ static void __devinit wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
pv->config = config;
}
-static int __devinit wf_ad7417_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int wf_ad7417_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_ad7417_priv *pv;
const struct mpu_data *mpu;
@@ -290,7 +290,7 @@ static int __devinit wf_ad7417_probe(struct i2c_client *client,
return 0;
}
-static int __devexit wf_ad7417_remove(struct i2c_client *client)
+static int wf_ad7417_remove(struct i2c_client *client)
{
struct wf_ad7417_priv *pv = dev_get_drvdata(&client->dev);
int i;
@@ -322,7 +322,7 @@ static struct i2c_driver wf_ad7417_driver = {
.id_table = wf_ad7417_id,
};
-static int __devinit wf_ad7417_init(void)
+static int wf_ad7417_init(void)
{
/* This is only supported on these machines */
if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -333,7 +333,7 @@ static int __devinit wf_ad7417_init(void)
return i2c_add_driver(&wf_ad7417_driver);
}
-static void __devexit wf_ad7417_exit(void)
+static void wf_ad7417_exit(void)
{
i2c_del_driver(&wf_ad7417_driver);
}
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index b3411edb324b..0226b796a21c 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -282,7 +282,7 @@ static const struct wf_control_ops wf_fcu_fan_pwm_ops = {
.owner = THIS_MODULE,
};
-static void __devinit wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
+static void wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
{
const struct mpu_data *mpu = wf_get_mpu(0);
u16 pump_min = 0, pump_max = 0xffff;
@@ -317,7 +317,7 @@ static void __devinit wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
fan->ctrl.name, pump_min, pump_max);
}
-static void __devinit wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
+static void wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
{
struct wf_fcu_priv *pv = fan->fcu_priv;
const struct mpu_data *mpu0 = wf_get_mpu(0);
@@ -359,9 +359,8 @@ static void __devinit wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
fan->ctrl.name, fan->min, fan->max);
}
-static void __devinit wf_fcu_add_fan(struct wf_fcu_priv *pv,
- const char *name,
- int type, int id)
+static void wf_fcu_add_fan(struct wf_fcu_priv *pv, const char *name,
+ int type, int id)
{
struct wf_fcu_fan *fan;
@@ -399,7 +398,7 @@ static void __devinit wf_fcu_add_fan(struct wf_fcu_priv *pv,
kref_get(&pv->ref);
}
-static void __devinit wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
+static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
{
/* Translation of device-tree location properties to
* windfarm fan names
@@ -481,7 +480,7 @@ static void __devinit wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
}
}
-static void __devinit wf_fcu_default_fans(struct wf_fcu_priv *pv)
+static void wf_fcu_default_fans(struct wf_fcu_priv *pv)
{
/* We only support the default fans for PowerMac7,2 */
if (!of_machine_is_compatible("PowerMac7,2"))
@@ -496,7 +495,7 @@ static void __devinit wf_fcu_default_fans(struct wf_fcu_priv *pv)
wf_fcu_add_fan(pv, "cpu-rear-fan-1", FCU_FAN_RPM, 6);
}
-static int __devinit wf_fcu_init_chip(struct wf_fcu_priv *pv)
+static int wf_fcu_init_chip(struct wf_fcu_priv *pv)
{
unsigned char buf = 0xff;
int rc;
@@ -518,8 +517,8 @@ static int __devinit wf_fcu_init_chip(struct wf_fcu_priv *pv)
return 0;
}
-static int __devinit wf_fcu_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int wf_fcu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct wf_fcu_priv *pv;
@@ -564,7 +563,7 @@ static int __devinit wf_fcu_probe(struct i2c_client *client,
return 0;
}
-static int __devexit wf_fcu_remove(struct i2c_client *client)
+static int wf_fcu_remove(struct i2c_client *client)
{
struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev);
struct wf_fcu_fan *fan;
@@ -593,19 +592,7 @@ static struct i2c_driver wf_fcu_driver = {
.id_table = wf_fcu_id,
};
-static int __init wf_fcu_init(void)
-{
- return i2c_add_driver(&wf_fcu_driver);
-}
-
-static void __exit wf_fcu_exit(void)
-{
- i2c_del_driver(&wf_fcu_driver);
-}
-
-
-module_init(wf_fcu_init);
-module_exit(wf_fcu_exit);
+module_i2c_driver(wf_fcu_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("FCU control objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index b0c2d3695b34..9ef32b3df91f 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -174,19 +174,7 @@ static struct i2c_driver wf_lm75_driver = {
.id_table = wf_lm75_id,
};
-static int __init wf_lm75_sensor_init(void)
-{
- return i2c_add_driver(&wf_lm75_driver);
-}
-
-static void __exit wf_lm75_sensor_exit(void)
-{
- i2c_del_driver(&wf_lm75_driver);
-}
-
-
-module_init(wf_lm75_sensor_init);
-module_exit(wf_lm75_sensor_exit);
+module_i2c_driver(wf_lm75_driver);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("LM75 sensor objects for PowerMacs thermal control");
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 371b058d2f7d..945a25b2f31e 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -130,18 +130,7 @@ static struct i2c_driver wf_max6690_driver = {
.id_table = wf_max6690_id,
};
-static int __init wf_max6690_sensor_init(void)
-{
- return i2c_add_driver(&wf_max6690_driver);
-}
-
-static void __exit wf_max6690_sensor_exit(void)
-{
- i2c_del_driver(&wf_max6690_driver);
-}
-
-module_init(wf_max6690_sensor_init);
-module_exit(wf_max6690_sensor_exit);
+module_i2c_driver(wf_max6690_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("MAX6690 sensor objects for PowerMac thermal control");
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index e0ee80700cde..35ef6e2582b8 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -656,7 +656,7 @@ static int wf_pm112_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_pm112_remove(struct platform_device *dev)
+static int wf_pm112_remove(struct platform_device *dev)
{
wf_unregister_client(&pm112_events);
/* should release all sensors and controls */
@@ -665,7 +665,7 @@ static int __devexit wf_pm112_remove(struct platform_device *dev)
static struct platform_driver wf_pm112_driver = {
.probe = wf_pm112_probe,
- .remove = __devexit_p(wf_pm112_remove),
+ .remove = wf_pm112_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index 04067e073aa9..af605e915d41 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -987,7 +987,7 @@ static int pm121_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit pm121_remove(struct platform_device *ddev)
+static int pm121_remove(struct platform_device *ddev)
{
wf_unregister_client(&pm121_events);
return 0;
@@ -995,7 +995,7 @@ static int __devexit pm121_remove(struct platform_device *ddev)
static struct platform_driver pm121_driver = {
.probe = pm121_probe,
- .remove = __devexit_p(pm121_remove),
+ .remove = pm121_remove,
.driver = {
.name = "windfarm",
.bus = &platform_bus_type,
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 84ac913d7e3a..6e5585357cd3 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -776,7 +776,7 @@ static int wf_pm72_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_pm72_remove(struct platform_device *dev)
+static int wf_pm72_remove(struct platform_device *dev)
{
wf_unregister_client(&pm72_events);
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 990c87606be9..f84933ff3298 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -720,7 +720,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit wf_smu_remove(struct platform_device *ddev)
+static int wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -763,7 +763,7 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
static struct platform_driver wf_smu_driver = {
.probe = wf_smu_probe,
- .remove = __devexit_p(wf_smu_remove),
+ .remove = wf_smu_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 7653603cb00e..2eb484f213c8 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -642,7 +642,7 @@ static int wf_smu_probe(struct platform_device *ddev)
return 0;
}
-static int __devexit wf_smu_remove(struct platform_device *ddev)
+static int wf_smu_remove(struct platform_device *ddev)
{
wf_unregister_client(&wf_smu_events);
@@ -692,7 +692,7 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
static struct platform_driver wf_smu_driver = {
.probe = wf_smu_probe,
- .remove = __devexit_p(wf_smu_remove),
+ .remove = wf_smu_remove,
.driver = {
.name = "windfarm",
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 3eca6d4b52fc..844003fb4ef0 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -669,7 +669,7 @@ static int wf_rm31_probe(struct platform_device *dev)
return 0;
}
-static int __devexit wf_rm31_remove(struct platform_device *dev)
+static int wf_rm31_remove(struct platform_device *dev)
{
wf_unregister_client(&rm31_events);
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 426e810233d7..d87f5ee04ca9 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -364,18 +364,7 @@ static struct i2c_driver wf_sat_driver = {
.id_table = wf_sat_id,
};
-static int __init sat_sensors_init(void)
-{
- return i2c_add_driver(&wf_sat_driver);
-}
-
-static void __exit sat_sensors_exit(void)
-{
- i2c_del_driver(&wf_sat_driver);
-}
-
-module_init(sat_sensors_init);
-module_exit(sat_sensors_exit);
+module_i2c_driver(wf_sat_driver);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("SMU satellite sensors for PowerMac thermal control");
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index e4e841567459..aefb78e3cbf9 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -208,31 +208,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
EXPORT_SYMBOL_GPL(dm_cell_release);
/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again. In these situations we know that no other
- * bio may be in the cell. This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- BUG_ON(cell->holder != bio);
- BUG_ON(!bio_list_empty(&cell->bios));
-
- __cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
- unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
-
- spin_lock_irqsave(&prison->lock, flags);
- __cell_release_singleton(cell, bio);
- spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
-/*
* Sometimes we don't want the holder, just the additional bios.
*/
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 4e0ac376700a..53d1a7a84e2f 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
struct bio *inmate, struct dm_bio_prison_cell **ref);
void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison_cell *cell);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bbf459bca61d..f7369f9d8595 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1689,8 +1689,7 @@ bad:
return ret;
}
-static int crypt_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int crypt_map(struct dm_target *ti, struct bio *bio)
{
struct dm_crypt_io *io;
struct crypt_config *cc = ti->private;
@@ -1846,7 +1845,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f53846f9ab50..cc1bd048acb2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -274,8 +274,7 @@ static void delay_resume(struct dm_target *ti)
atomic_set(&dc->may_delay, 1);
}
-static int delay_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int delay_map(struct dm_target *ti, struct bio *bio)
{
struct delay_c *dc = ti->private;
@@ -338,7 +337,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cc15543a6ad7..9721f2ffb1a2 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -39,6 +39,10 @@ enum feature_flag_bits {
DROP_WRITES
};
+struct per_bio_data {
+ bool bio_submitted;
+};
+
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
@@ -214,6 +218,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -265,11 +270,12 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
}
}
-static int flakey_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ pb->bio_submitted = false;
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
@@ -277,7 +283,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/*
* Flag this bio as submitted while down.
*/
- map_context->ll = 1;
+ pb->bio_submitted = true;
/*
* Map reads as normal.
@@ -314,17 +320,16 @@ map_bio:
return DM_MAPIO_REMAPPED;
}
-static int flakey_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct flakey_c *fc = ti->private;
- unsigned bio_submitted_while_down = map_context->ll;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
/*
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
@@ -406,7 +411,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
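dm-flakey above moves its per-request flag from union map_info to the per-bio data area that the DM core now preallocates for bio-based targets: the target declares the size in its constructor and retrieves the buffer with dm_per_bio_data() in map() and end_io(). A minimal sketch with a hypothetical target (illustration only):

	#include <linux/device-mapper.h>

	struct example_per_bio_data {
		bool submitted_while_down;
	};

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* Ask the DM core to reserve this much space with every bio. */
		ti->per_bio_data_size = sizeof(struct example_per_bio_data);
		return 0;
	}

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_per_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

		pb->submitted_while_down = false;
		return DM_MAPIO_REMAPPED;
	}

	static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
	{
		struct example_per_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

		/* State recorded in map() is still available here, no mempool needed. */
		if (pb->submitted_while_down && !error)
			pr_debug("bio was submitted while the device was down\n");

		return error;
	}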
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 1c46f97d6664..ea49834377c8 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -287,7 +287,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
- sector_t discard_sectors;
+ unsigned short logical_block_size = queue_logical_block_size(q);
+ sector_t num_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -297,7 +298,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Allocate a suitably sized-bio.
*/
- if (rw & REQ_DISCARD)
+ if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
num_bvecs = 1;
else
num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
@@ -310,9 +311,21 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
- discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
- bio->bi_size = discard_sectors << SECTOR_SHIFT;
- remaining -= discard_sectors;
+ num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+ remaining -= num_sectors;
+ } else if (rw & REQ_WRITE_SAME) {
+ /*
+ * WRITE SAME only uses a single page.
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ bio_add_page(bio, page, logical_block_size, offset);
+ num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+ bio->bi_size = num_sectors << SECTOR_SHIFT;
+
+ offset = 0;
+ remaining -= num_sectors;
+ dp->next_page(dp);
} else while (remaining) {
/*
* Try and add as many pages as possible.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afd95986d099..0666b5d14b88 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1543,7 +1543,21 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
+
+static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
+{
+ if (param_flags & DM_WIPE_BUFFER)
+ memset(param, 0, param_size);
+
+ if (param_flags & DM_PARAMS_VMALLOC)
+ vfree(param);
+ else
+ kfree(param);
+}
+
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl tmp, *dmi;
int secure_data;
@@ -1556,7 +1570,21 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
- dmi = vmalloc(tmp.data_size);
+ *param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+
+ /*
+ * Try to avoid low memory issues when a device is suspended.
+ * Use kmalloc() rather than vmalloc() when we can.
+ */
+ dmi = NULL;
+ if (tmp.data_size <= KMALLOC_MAX_SIZE)
+ dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (!dmi) {
+ dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ *param_flags |= DM_PARAMS_VMALLOC;
+ }
+
if (!dmi) {
if (secure_data && clear_user(user, tmp.data_size))
return -EFAULT;
@@ -1566,6 +1594,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
if (copy_from_user(dmi, user, tmp.data_size))
goto bad;
+ /*
+ * Abort if something changed the ioctl data while it was being copied.
+ */
+ if (dmi->data_size != tmp.data_size) {
+ DMERR("rejecting ioctl: data size modified while processing parameters");
+ goto bad;
+ }
+
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, tmp.data_size))
goto bad;
@@ -1574,9 +1610,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
return 0;
bad:
- if (secure_data)
- memset(dmi, 0, tmp.data_size);
- vfree(dmi);
+ free_params(dmi, tmp.data_size, *param_flags);
+
return -EFAULT;
}
@@ -1613,7 +1648,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
- int wipe_buffer;
+ int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
@@ -1649,24 +1684,14 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
}
/*
- * Trying to avoid low memory issues when a device is
- * suspended.
- */
- current->flags |= PF_MEMALLOC;
-
- /*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param);
-
- current->flags &= ~PF_MEMALLOC;
+ r = copy_params(user, &param, &param_flags);
if (r)
return r;
input_param_size = param->data_size;
- wipe_buffer = param->flags & DM_SECURE_DATA_FLAG;
-
r = validate_params(cmd, param);
if (r)
goto out;
@@ -1681,10 +1706,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
r = -EFAULT;
out:
- if (wipe_buffer)
- memset(param, 0, input_param_size);
-
- vfree(param);
+ free_params(param, input_param_size, param_flags);
return r;
}
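The copy_params() rework above replaces the PF_MEMALLOC dance with an allocation that prefers a small kmalloc() and only falls back to __vmalloc(), then frees with whichever allocator was used. The same pairing in a generic form (hypothetical helpers, sketched assuming the 3.x three-argument __vmalloc() signature):

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *example_alloc_params(size_t size)
	{
		void *p = NULL;

		/* Small buffers: try kmalloc(), but give up quickly under pressure. */
		if (size <= KMALLOC_MAX_SIZE)
			p = kmalloc(size, GFP_NOIO | __GFP_NORETRY |
					  __GFP_NOMEMALLOC | __GFP_NOWARN);

		/* Fall back to vmalloc space for large or failed allocations. */
		if (!p)
			p = __vmalloc(size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH,
				      PAGE_KERNEL);
		return p;
	}

	static void example_free_params(void *p)
	{
		/* Free with the allocator that actually provided the memory. */
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}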
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index bed444c93d8d..68c02673263b 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -349,7 +349,7 @@ static void complete_io(unsigned long error, void *context)
struct dm_kcopyd_client *kc = job->kc;
if (error) {
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err |= error;
else
job->read_err = 1;
@@ -361,7 +361,7 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
push(&kc->complete_jobs, job);
else {
@@ -432,7 +432,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (job->rw == WRITE)
+ if (job->rw & WRITE)
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -585,6 +585,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
struct kcopyd_job *job;
+ int i;
/*
* Allocate an array of jobs consisting of one master job
@@ -611,7 +612,16 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
memset(&job->source, 0, sizeof job->source);
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
- job->rw = WRITE;
+
+ /*
+ * Use WRITE SAME to optimize zeroing if all dests support it.
+ */
+ job->rw = WRITE | REQ_WRITE_SAME;
+ for (i = 0; i < job->num_dests; i++)
+ if (!bdev_write_same(job->dests[i].bdev)) {
+ job->rw = WRITE;
+ break;
+ }
}
job->fn = fn;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 1bf19a93eef0..328cad5617ab 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -55,6 +55,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->num_write_same_requests = 1;
ti->private = lc;
return 0;
@@ -87,8 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
-static int linear_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int linear_map(struct dm_target *ti, struct bio *bio)
{
linear_map_bio(ti, bio);
@@ -155,7 +155,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 45d94a7e7f6d..9e58dbd8d8cb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -295,9 +295,11 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
* Choose a reasonable default. All figures in sectors.
*/
if (min_region_size > (1 << 13)) {
+ /* If not a power of 2, make it the next power of 2 */
+ if (min_region_size & (min_region_size - 1))
+ region_size = 1 << fls(region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
- region_size = min_region_size;
} else {
DMINFO("Choosing default region size of 4MiB");
region_size = 1 << 13; /* sectors */
@@ -338,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
}
/*
- * validate_rebuild_devices
+ * validate_raid_redundancy
* @rs
*
- * Determine if the devices specified for rebuild can result in a valid
- * usable array that is capable of rebuilding the given devices.
+ * Determine if there are enough devices in the array that haven't
+ * failed (or are being rebuilt) to form a usable array.
*
* Returns: 0 on success, -EINVAL on failure.
*/
-static int validate_rebuild_devices(struct raid_set *rs)
+static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
- if (!(rs->print_flags & DMPF_REBUILD))
- return 0;
-
for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+ !rs->dev[i].rdev.sb_page)
rebuild_cnt++;
switch (rs->raid_type->level) {
@@ -391,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
* A A B B C
* C D D E E
*/
- rebuilds_per_group = 0;
for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
d = i % rs->md.raid_disks;
- if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+ if ((!rs->dev[d].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
- if (!((i + 1) % copies))
- rebuilds_per_group = 0;
}
break;
default:
- DMERR("The rebuild parameter is not supported for %s",
- rs->raid_type->name);
- rs->ti->error = "Rebuild not supported for this RAID type";
- return -EINVAL;
+ if (rebuild_cnt)
+ return -EINVAL;
}
return 0;
too_many:
- rs->ti->error = "Too many rebuild devices specified";
return -EINVAL;
}
@@ -662,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.dev_sectors = sectors_per_dev;
- if (validate_rebuild_devices(rs))
- return -EINVAL;
-
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
@@ -993,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
int ret;
- unsigned redundancy = 0;
struct raid_dev *dev;
struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
- switch (rs->raid_type->level) {
- case 1:
- redundancy = rs->md.raid_disks - 1;
- break;
- case 4:
- case 5:
- case 6:
- redundancy = rs->raid_type->parity_devs;
- break;
- case 10:
- redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
- break;
- default:
- ti->error = "Unknown RAID type";
- return -EINVAL;
- }
-
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev) {
/*
@@ -1043,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
break;
default:
dev = container_of(rdev, struct raid_dev, rdev);
- if (redundancy--) {
- if (dev->meta_dev)
- dm_put_device(ti, dev->meta_dev);
-
- dev->meta_dev = NULL;
- rdev->meta_bdev = NULL;
+ if (dev->meta_dev)
+ dm_put_device(ti, dev->meta_dev);
- if (rdev->sb_page)
- put_page(rdev->sb_page);
+ dev->meta_dev = NULL;
+ rdev->meta_bdev = NULL;
- rdev->sb_page = NULL;
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
- rdev->sb_loaded = 0;
+ rdev->sb_page = NULL;
- /*
- * We might be able to salvage the data device
- * even though the meta device has failed. For
- * now, we behave as though '- -' had been
- * set for this device in the table.
- */
- if (dev->data_dev)
- dm_put_device(ti, dev->data_dev);
+ rdev->sb_loaded = 0;
- dev->data_dev = NULL;
- rdev->bdev = NULL;
+ /*
+ * We might be able to salvage the data device
+ * even though the meta device has failed. For
+ * now, we behave as though '- -' had been
+ * set for this device in the table.
+ */
+ if (dev->data_dev)
+ dm_put_device(ti, dev->data_dev);
- list_del(&rdev->same_set);
+ dev->data_dev = NULL;
+ rdev->bdev = NULL;
- continue;
- }
- ti->error = "Failed to load superblock";
- return ret;
+ list_del(&rdev->same_set);
}
}
if (!freshest)
return 0;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -1216,7 +1191,7 @@ static void raid_dtr(struct dm_target *ti)
context_free(rs);
}
-static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+static int raid_map(struct dm_target *ti, struct bio *bio)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
@@ -1430,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 3, 1},
+ .version = {1, 4, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
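
The dm-raid hunks above fold the old rebuild checks into a single validate_raid_redundancy() pass run after the superblocks have been analysed, so devices whose superblock never loaded (sb_page == NULL) now count against the limit as well. A minimal user-space sketch of the per-level limits that the removed switch statement encoded (illustrative model, not the kernel code):

#include <stdio.h>

/* Model of how many failed or rebuilding devices each RAID level can
 * tolerate, per the switch removed from analyse_superblocks(): raid1
 * survives all but one disk, raid4/5/6 survive as many failures as
 * there are parity devices, raid10 survives copies - 1 per group. */
static unsigned max_degraded(int level, unsigned raid_disks,
			     unsigned parity_devs, unsigned copies)
{
	switch (level) {
	case 1:
		return raid_disks - 1;
	case 4:
	case 5:
	case 6:
		return parity_devs;
	case 10:
		return copies - 1;
	default:
		return 0;	/* unknown type: assume no redundancy */
	}
}

int main(void)
{
	printf("raid6, 6 disks: tolerates %u\n", max_degraded(6, 6, 2, 0));
	printf("raid10, 2 copies: tolerates %u\n", max_degraded(10, 4, 0, 2));
	return 0;
}
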
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fd61f98ee1f6..fa519185ebba 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -61,7 +61,6 @@ struct mirror_set {
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
- mempool_t *read_record_pool;
/* recovery */
region_t nr_regions;
@@ -139,14 +138,13 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
queue_bio(ms, bio, WRITE);
}
-#define MIN_READ_RECORDS 20
-struct dm_raid1_read_record {
+struct dm_raid1_bio_record {
struct mirror *m;
+ /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
+ region_t write_region;
};
-static struct kmem_cache *_dm_raid1_read_record_cache;
-
/*
* Every mirror should look like this one.
*/
@@ -876,19 +874,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
- ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
- _dm_raid1_read_record_cache);
-
- if (!ms->read_record_pool) {
- ti->error = "Error creating mirror read_record_pool";
- kfree(ms);
- return NULL;
- }
-
ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -900,7 +888,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
@@ -916,7 +903,6 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
dm_io_client_destroy(ms->io_client);
dm_region_hash_destroy(ms->rh);
- mempool_destroy(ms->read_record_pool);
kfree(ms);
}
@@ -1088,6 +1074,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
@@ -1155,18 +1142,20 @@ static void mirror_dtr(struct dm_target *ti)
/*
* Mirror mapping function
*/
-static int mirror_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int mirror_map(struct dm_target *ti, struct bio *bio)
{
int r, rw = bio_rw(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
- struct dm_raid1_read_record *read_record = NULL;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
+
+ bio_record->details.bi_bdev = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
- map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
+ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
@@ -1194,33 +1183,29 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
if (unlikely(!m))
return -EIO;
- read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
- if (likely(read_record)) {
- dm_bio_record(&read_record->details, bio);
- map_context->ptr = read_record;
- read_record->m = m;
- }
+ dm_bio_record(&bio_record->details, bio);
+ bio_record->m = m;
map_bio(m, bio);
return DM_MAPIO_REMAPPED;
}
-static int mirror_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
int rw = bio_rw(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
- struct dm_raid1_read_record *read_record = map_context->ptr;
+ struct dm_raid1_bio_record *bio_record =
+ dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
/*
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
- dm_rh_dec(ms->rh, map_context->ll);
+ dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
@@ -1231,7 +1216,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(error)) {
- if (!read_record) {
+ if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1241,7 +1226,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
return -EIO;
}
- m = read_record->m;
+ m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
m->dev->name);
@@ -1253,22 +1238,18 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* mirror.
*/
if (default_ok(m) || mirror_available(ms, bio)) {
- bd = &read_record->details;
+ bd = &bio_record->details;
dm_bio_restore(bd, bio);
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
+ bio_record->details.bi_bdev = NULL;
queue_bio(ms, bio, rw);
- return 1;
+ return DM_ENDIO_INCOMPLETE;
}
DMERR("All replicated volumes dead, failing I/O");
}
out:
- if (read_record) {
- mempool_free(read_record, ms->read_record_pool);
- map_context->ptr = NULL;
- }
+ bio_record->details.bi_bdev = NULL;
return error;
}
@@ -1422,7 +1403,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 12, 1},
+ .version = {1, 13, 1},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1439,13 +1420,6 @@ static int __init dm_mirror_init(void)
{
int r;
- _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
- if (!_dm_raid1_read_record_cache) {
- DMERR("Can't allocate dm_raid1_read_record cache");
- r = -ENOMEM;
- goto bad_cache;
- }
-
r = dm_register_target(&mirror_target);
if (r < 0) {
DMERR("Failed to register mirror target");
@@ -1455,15 +1429,12 @@ static int __init dm_mirror_init(void)
return 0;
bad_target:
- kmem_cache_destroy(_dm_raid1_read_record_cache);
-bad_cache:
return r;
}
static void __exit dm_mirror_exit(void)
{
dm_unregister_target(&mirror_target);
- kmem_cache_destroy(_dm_raid1_read_record_cache);
}
/* Module hooks */
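
The mirror target now gets its read record from space the dm core reserves in every bio instead of a private mempool: the constructor sets ti->per_bio_data_size, map/end_io fetch the record with dm_per_bio_data(), and a NULL details.bi_bdev stands in for "nothing was recorded". A condensed kernel-style sketch of that pattern (not a standalone module; dm_per_bio_data(), dm_bio_record(), dm_bio_restore() and the return codes are taken from the patch above, everything else is hypothetical):

struct my_bio_record {
	struct dm_bio_details details;	/* details.bi_bdev == NULL => unused */
};

static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->per_bio_data_size = sizeof(struct my_bio_record);
	return 0;
}

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_bio_record *rec = dm_per_bio_data(bio, sizeof(*rec));

	rec->details.bi_bdev = NULL;		/* mark "not recorded" */
	if (bio_rw(bio) == READ)
		dm_bio_record(&rec->details, bio); /* saved for a possible retry */
	/* a real target would also pick a device and set bio->bi_bdev here */
	return DM_MAPIO_REMAPPED;
}

static int my_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct my_bio_record *rec = dm_per_bio_data(bio, sizeof(*rec));

	if (error && rec->details.bi_bdev) {
		dm_bio_restore(&rec->details, bio);	/* rewind the bio */
		rec->details.bi_bdev = NULL;
		/* real code queues the bio for retry before returning this */
		return DM_ENDIO_INCOMPLETE;
	}
	return error;
}
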
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a143921feaf6..59fc18ae52c2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -79,7 +79,6 @@ struct dm_snapshot {
/* Chunks with outstanding reads */
spinlock_t tracked_chunk_lock;
- mempool_t *tracked_chunk_pool;
struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
/* The on disk metadata handler */
@@ -191,35 +190,38 @@ struct dm_snap_tracked_chunk {
chunk_t chunk;
};
-static struct kmem_cache *tracked_chunk_cache;
+static void init_tracked_chunk(struct bio *bio)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ INIT_HLIST_NODE(&c->node);
+}
-static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
- chunk_t chunk)
+static bool is_bio_tracked(struct bio *bio)
{
- struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
- GFP_NOIO);
- unsigned long flags;
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+ return !hlist_unhashed(&c->node);
+}
+
+static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
c->chunk = chunk;
- spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ spin_lock_irq(&s->tracked_chunk_lock);
hlist_add_head(&c->node,
&s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
- spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- return c;
+ spin_unlock_irq(&s->tracked_chunk_lock);
}
-static void stop_tracking_chunk(struct dm_snapshot *s,
- struct dm_snap_tracked_chunk *c)
+static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
+ struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
unsigned long flags;
spin_lock_irqsave(&s->tracked_chunk_lock, flags);
hlist_del(&c->node);
spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
-
- mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
@@ -1120,14 +1122,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_pending_pool;
}
- s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
- tracked_chunk_cache);
- if (!s->tracked_chunk_pool) {
- ti->error = "Could not allocate tracked_chunk mempool for "
- "tracking reads";
- goto bad_tracked_chunk_pool;
- }
-
for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
@@ -1135,6 +1129,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->num_flush_requests = num_flush_requests;
+ ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -1183,9 +1178,6 @@ bad_read_metadata:
unregister_snapshot(s);
bad_load_and_register:
- mempool_destroy(s->tracked_chunk_pool);
-
-bad_tracked_chunk_pool:
mempool_destroy(s->pending_pool);
bad_pending_pool:
@@ -1290,8 +1282,6 @@ static void snapshot_dtr(struct dm_target *ti)
BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif
- mempool_destroy(s->tracked_chunk_pool);
-
__free_exceptions(s);
mempool_destroy(s->pending_pool);
@@ -1577,8 +1567,7 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
s->store->chunk_mask);
}
-static int snapshot_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
@@ -1586,6 +1575,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
@@ -1670,7 +1661,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
} else {
bio->bi_bdev = s->origin->bdev;
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
}
out_unlock:
@@ -1691,20 +1682,20 @@ out:
* If merging is currently taking place on the chunk in question, the
* I/O is deferred by adding it to s->bios_queued_during_merge.
*/
-static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
+ init_tracked_chunk(bio);
+
if (bio->bi_rw & REQ_FLUSH) {
- if (!map_context->target_request_nr)
+ if (!dm_bio_get_target_request_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
- map_context->ptr = NULL;
return DM_MAPIO_REMAPPED;
}
@@ -1733,7 +1724,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
remap_exception(s, e, bio, chunk);
if (bio_rw(bio) == WRITE)
- map_context->ptr = track_chunk(s, chunk);
+ track_chunk(s, bio, chunk);
goto out_unlock;
}
@@ -1751,14 +1742,12 @@ out_unlock:
return r;
}
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct dm_snapshot *s = ti->private;
- struct dm_snap_tracked_chunk *c = map_context->ptr;
- if (c)
- stop_tracking_chunk(s, c);
+ if (is_bio_tracked(bio))
+ stop_tracking_chunk(s, bio);
return 0;
}
@@ -2127,8 +2116,7 @@ static void origin_dtr(struct dm_target *ti)
dm_put_device(ti, dev);
}
-static int origin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
@@ -2193,7 +2181,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 7, 1},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2206,7 +2194,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2220,7 +2208,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2281,17 +2269,8 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
- tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
- if (!tracked_chunk_cache) {
- DMERR("Couldn't create cache to track chunks in use.");
- r = -ENOMEM;
- goto bad_tracked_chunk_cache;
- }
-
return 0;
-bad_tracked_chunk_cache:
- kmem_cache_destroy(pending_cache);
bad_pending_cache:
kmem_cache_destroy(exception_cache);
bad_exception_cache:
@@ -2317,7 +2296,6 @@ static void __exit dm_snapshot_exit(void)
exit_origin_hash();
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
- kmem_cache_destroy(tracked_chunk_cache);
dm_exception_store_exit();
}
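
The snapshot target uses the same per-bio space for its tracked-chunk bookkeeping, and "is this bio tracked?" becomes a property of the embedded hlist node: every map call initialises it unhashed, and only track_chunk() hashes it. This works because INIT_HLIST_NODE() clears the node's pprev back-pointer and hlist_unhashed() tests exactly that pointer. A small self-contained model of the two helpers (user-space re-implementation for illustration, not the kernel's list.h):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for an hlist node: pprev points back at whatever
 * pointer currently links to this node; NULL means "not on any list". */
struct hnode {
	struct hnode *next;
	struct hnode **pprev;
};

static void init_node(struct hnode *n)      { n->pprev = NULL; }
static bool unhashed(const struct hnode *n) { return !n->pprev; }

static void add_head(struct hnode *n, struct hnode **head)
{
	n->next = *head;
	if (*head)
		(*head)->pprev = &n->next;
	*head = n;
	n->pprev = head;			/* now hashed */
}

int main(void)
{
	struct hnode *head = NULL, chunk;

	init_node(&chunk);			/* like init_tracked_chunk() */
	printf("tracked? %d\n", !unhashed(&chunk));
	add_head(&chunk, &head);		/* like track_chunk() */
	printf("tracked? %d\n", !unhashed(&chunk));
	return 0;
}
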
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e2f876539743..c89cde86d400 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -162,6 +162,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_requests = stripes;
ti->num_discard_requests = stripes;
+ ti->num_write_same_requests = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -251,8 +252,8 @@ static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
*result += sc->chunk_size; /* next chunk */
}
-static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
- uint32_t target_stripe)
+static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
+ uint32_t target_stripe)
{
sector_t begin, end;
@@ -271,23 +272,23 @@ static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
}
}
-static int stripe_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
unsigned target_request_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = map_context->target_request_nr;
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- target_request_nr = map_context->target_request_nr;
+ if (unlikely(bio->bi_rw & REQ_DISCARD) ||
+ unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ target_request_nr = dm_bio_get_target_request_nr(bio);
BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_discard(sc, bio, target_request_nr);
+ return stripe_map_range(sc, bio, target_request_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -342,8 +343,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
return 0;
}
-static int stripe_end_io(struct dm_target *ti, struct bio *bio,
- int error, union map_info *map_context)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
{
unsigned i;
char major_minor[16];
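
Setting num_write_same_requests alongside the flush and discard counts makes the dm core clone a WRITE SAME bio once per stripe, and stripe_map() routes each clone by its target request number just as it already did for flushes. A toy fan-out model (user-space; the device names are made up):

#include <stdio.h>

#define STRIPES 3

/* Models __issue_target_requests(): one clone per stripe, each carrying
 * the request number the target uses to pick the backing device, which
 * is what the BUG_ON(target_request_nr >= sc->stripes) above protects. */
static void issue_to_all_stripes(const char *devs[STRIPES])
{
	for (unsigned nr = 0; nr < STRIPES; nr++)
		printf("clone %u -> %s\n", nr, devs[nr]);
}

int main(void)
{
	const char *devs[STRIPES] = { "/dev/sda", "/dev/sdb", "/dev/sdc" };

	issue_to_all_stripes(devs);
	return 0;
}
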
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 100368eb7991..daf25d0890b3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t)
int dm_table_alloc_md_mempools(struct dm_table *t)
{
unsigned type = dm_table_get_type(t);
+ unsigned per_bio_data_size = 0;
+ struct dm_target *tgt;
+ unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
DMWARN("no table type is set, can't allocate mempools");
return -EINVAL;
}
- t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
+ if (type == DM_TYPE_BIO_BASED)
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+ }
+
+ t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
if (!t->mempools)
return -ENOMEM;
@@ -1414,6 +1423,33 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return 1;
}
+static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !q->limits.max_write_same_sectors;
+}
+
+static bool dm_table_supports_write_same(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_write_same_requests)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+ return false;
+ }
+
+ return true;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1445,6 +1481,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+ if (!dm_table_supports_write_same(t))
+ q->limits.max_write_same_sectors = 0;
+
dm_table_set_integrity(t);
/*
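
dm_table_supports_write_same() follows the usual dm-table idiom of asking the per-device callback for counter-examples: a table supports WRITE SAME only if every target opted in and iterate_devices() finds no device reporting "not capable". A user-space model of that double negation (illustrative; the names are not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool write_same_capable; };

struct tgt {
	bool write_same_enabled;	/* models ti->num_write_same_requests != 0 */
	const struct dev *devs;
	unsigned ndevs;
};

/* Models device_not_write_same_capable(): true when a device CANNOT do it. */
static bool dev_not_capable(const struct dev *d)
{
	return !d->write_same_capable;
}

/* Models dm_table_supports_write_same(): every target must opt in and the
 * "not capable" scan over its devices must come back empty. */
static bool table_supports_write_same(const struct tgt *tgts, unsigned n)
{
	for (unsigned i = 0; i < n; i++) {
		if (!tgts[i].write_same_enabled)
			return false;
		for (unsigned j = 0; j < tgts[i].ndevs; j++)
			if (dev_not_capable(&tgts[i].devs[j]))
				return false;
	}
	return true;
}

int main(void)
{
	struct dev d[2] = { { true }, { false } };
	struct tgt t[1] = { { true, d, 2 } };

	printf("write_same supported: %d\n", table_supports_write_same(t, 1));
	return 0;
}
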
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 8da366cf381c..617d21a77256 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -126,15 +126,14 @@ static void io_err_dtr(struct dm_target *tt)
/* empty */
}
-static int io_err_map(struct dm_target *tt, struct bio *bio,
- union map_info *map_context)
+static int io_err_map(struct dm_target *tt, struct bio *bio)
{
return -EIO;
}
static struct target_type error_target = {
.name = "error",
- .version = {1, 0, 1},
+ .version = {1, 1, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 693e149e9727..4d6e85367b84 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -408,7 +408,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->tl_info.tm = pmd->tm;
pmd->tl_info.levels = 1;
- pmd->tl_info.value_type.context = &pmd->info;
+ pmd->tl_info.value_type.context = &pmd->bl_info;
pmd->tl_info.value_type.size = sizeof(__le64);
pmd->tl_info.value_type.inc = subtree_inc;
pmd->tl_info.value_type.dec = subtree_dec;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 058acf3a5ba7..5409607d4875 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -186,7 +186,6 @@ struct pool {
struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
- mempool_t *endio_hook_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
@@ -304,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -368,6 +367,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
dm_thin_changed_this_transaction(tc->td);
}
+static void inc_all_io_entry(struct pool *pool, struct bio *bio)
+{
+ struct dm_thin_endio_hook *h;
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
+ h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+ h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
+}
+
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
@@ -474,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct dm_thin_new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
@@ -499,8 +509,7 @@ static void overwrite_endio(struct bio *bio, int err)
/*
* This sends the bios in the cell back to the deferred_bios list.
*/
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
- dm_block_t data_block)
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
struct pool *pool = tc->pool;
unsigned long flags;
@@ -513,17 +522,13 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
}
/*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
*/
-static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
- struct bio_list bios;
struct pool *pool = tc->pool;
unsigned long flags;
- bio_list_init(&bios);
-
spin_lock_irqsave(&pool->lock, flags);
dm_cell_release_no_holder(cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -561,7 +566,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR("dm_thin_insert_block() failed");
+ DMERR_LIMIT("dm_thin_insert_block() failed");
dm_cell_error(m->cell);
goto out;
}
@@ -573,10 +578,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
* the bios in the cell.
*/
if (bio) {
- cell_defer_except(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell);
bio_endio(bio, 0);
} else
- cell_defer(tc, m->cell, m->data_block);
+ cell_defer(tc, m->cell);
out:
list_del(&m->list);
@@ -588,8 +593,8 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
struct thin_c *tc = m->tc;
bio_io_error(m->bio);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -597,13 +602,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ inc_all_io_entry(tc->pool, m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ cell_defer_no_holder(tc, m->cell2);
+
if (m->pass_discard)
remap_and_issue(tc, m->bio, m->data_block);
else
bio_endio(m->bio, 0);
- cell_defer_except(tc, m->cell);
- cell_defer_except(tc, m->cell2);
mempool_free(m, tc->pool->mapping_pool);
}
@@ -614,7 +621,7 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
r = dm_thin_remove_block(tc->td, m->virt_block);
if (r)
- DMERR("dm_thin_remove_block() failed");
+ DMERR_LIMIT("dm_thin_remove_block() failed");
process_prepared_discard_passdown(m);
}
@@ -706,11 +713,12 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_dest);
} else {
struct dm_io_region from, to;
@@ -727,7 +735,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_copy() failed");
+ DMERR_LIMIT("dm_kcopyd_copy() failed");
dm_cell_error(cell);
}
}
@@ -775,11 +783,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+ inc_all_io_entry(pool, bio);
remap_and_issue(tc, bio, data_block);
} else {
int r;
@@ -792,7 +801,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
if (r < 0) {
mempool_free(m, pool->mapping_pool);
- DMERR("dm_kcopyd_zero() failed");
+ DMERR_LIMIT("dm_kcopyd_zero() failed");
dm_cell_error(cell);
}
}
@@ -804,7 +813,7 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR("commit failed, error = %d", r);
+ DMERR_LIMIT("commit failed: error = %d", r);
return r;
}
@@ -889,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -936,7 +945,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
build_data_key(tc->td, lookup_result.block, &key2);
if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
break;
}
@@ -962,13 +971,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
} else {
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell2);
+
/*
* The DM core makes sure that the discard doesn't span
* a block boundary. So we submit the discard of a
* partial block appropriately.
*/
- dm_cell_release_singleton(cell, bio);
- dm_cell_release_singleton(cell2, bio);
if ((!lookup_result.shared) && pool->pf.discard_passdown)
remap_and_issue(tc, bio, lookup_result.block);
else
@@ -980,13 +991,14 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
/*
* It isn't provisioned, just forget it.
*/
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
break;
default:
- DMERR("discard: find block unexpectedly returned %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1012,7 +1024,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
dm_cell_error(cell);
break;
}
@@ -1037,11 +1050,12 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE && bio->bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
+ inc_all_io_entry(pool, bio);
+ cell_defer_no_holder(tc, cell);
- dm_cell_release_singleton(cell, bio);
remap_and_issue(tc, bio, lookup_result->block);
}
}
@@ -1056,7 +1070,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, 0);
return;
}
@@ -1066,7 +1082,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
*/
if (bio_data_dir(bio) == READ) {
zero_fill_bio(bio);
- dm_cell_release_singleton(cell, bio);
+ cell_defer_no_holder(tc, cell);
bio_endio(bio, 0);
return;
}
@@ -1085,7 +1101,8 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
default:
- DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+ __func__, r);
set_pool_mode(tc->pool, PM_READ_ONLY);
dm_cell_error(cell);
break;
@@ -1111,34 +1128,31 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
switch (r) {
case 0:
- /*
- * We can release this cell now. This thread is the only
- * one that puts bios into a cell, and we know there were
- * no preceding bios.
- */
- /*
- * TODO: this will probably have to change when discard goes
- * back in.
- */
- dm_cell_release_singleton(cell, bio);
-
- if (lookup_result.shared)
+ if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- else
+ cell_defer_no_holder(tc, cell);
+ } else {
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- dm_cell_release_singleton(cell, bio);
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell);
+
remap_to_origin_and_issue(tc, bio);
} else
provision_block(tc, bio, block, cell);
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
- dm_cell_release_singleton(cell, bio);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
+ cell_defer_no_holder(tc, cell);
bio_io_error(bio);
break;
}
@@ -1156,8 +1170,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
bio_io_error(bio);
- else
+ else {
+ inc_all_io_entry(tc->pool, bio);
remap_and_issue(tc, bio, lookup_result.block);
+ }
break;
case -ENODATA:
@@ -1167,6 +1183,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
}
if (tc->origin_dev) {
+ inc_all_io_entry(tc->pool, bio);
remap_to_origin_and_issue(tc, bio);
break;
}
@@ -1176,7 +1193,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
break;
default:
- DMERR("dm_thin_find_block() failed, error = %d", r);
+ DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+ __func__, r);
bio_io_error(bio);
break;
}
@@ -1207,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
/*
@@ -1340,32 +1358,30 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
- struct pool *pool = tc->pool;
- struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->tc = tc;
h->shared_read_entry = NULL;
- h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
+ h->all_io_entry = NULL;
h->overwrite_mapping = NULL;
-
- return h;
}
/*
* Non-blocking function called from the thin target's map function.
*/
-static int thin_bio_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct thin_c *tc = ti->private;
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
+ struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_cell_key key;
- map_context->ptr = thin_hook_bio(tc, bio);
+ thin_hook_bio(tc, bio);
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
@@ -1400,12 +1416,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* shared flag will be set in their case.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- } else {
- remap(tc, bio, result.block);
- r = DM_MAPIO_REMAPPED;
+ return DM_MAPIO_SUBMITTED;
}
- break;
+
+ build_virtual_key(tc->td, block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ return DM_MAPIO_SUBMITTED;
+
+ build_data_key(tc->td, result.block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+ cell_defer_no_holder(tc, cell1);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ inc_all_io_entry(tc->pool, bio);
+ cell_defer_no_holder(tc, cell2);
+ cell_defer_no_holder(tc, cell1);
+
+ remap(tc, bio, result.block);
+ return DM_MAPIO_REMAPPED;
case -ENODATA:
if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
@@ -1414,8 +1443,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* of doing so. Just error it.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
/* fall through */
@@ -1425,8 +1453,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* provide the hint to load the metadata into cache.
*/
thin_defer_bio(tc, bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
default:
/*
@@ -1435,11 +1462,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
* pool is switched to fail-io mode.
*/
bio_io_error(bio);
- r = DM_MAPIO_SUBMITTED;
- break;
+ return DM_MAPIO_SUBMITTED;
}
-
- return r;
}
static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -1566,14 +1590,12 @@ static void __pool_destroy(struct pool *pool)
if (pool->next_mapping)
mempool_free(pool->next_mapping, pool->mapping_pool);
mempool_destroy(pool->mapping_pool);
- mempool_destroy(pool->endio_hook_pool);
dm_deferred_set_destroy(pool->shared_read_ds);
dm_deferred_set_destroy(pool->all_io_ds);
kfree(pool);
}
static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
@@ -1667,13 +1689,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_mapping_pool;
}
- pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
- _endio_hook_cache);
- if (!pool->endio_hook_pool) {
- *error = "Error creating pool's endio_hook mempool";
- err_p = ERR_PTR(-ENOMEM);
- goto bad_endio_hook_pool;
- }
pool->ref_count = 1;
pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
@@ -1682,8 +1697,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return pool;
-bad_endio_hook_pool:
- mempool_destroy(pool->mapping_pool);
bad_mapping_pool:
dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
@@ -1966,8 +1979,7 @@ out_unlock:
return r;
}
-static int pool_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int pool_map(struct dm_target *ti, struct bio *bio)
{
int r;
struct pool_c *pt = ti->private;
@@ -2358,7 +2370,9 @@ static int pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+ if (!pool->pf.discard_enabled)
+ DMEMIT("ignore_discard");
+ else if (pool->pf.discard_passdown)
DMEMIT("discard_passdown");
else
DMEMIT("no_discard_passdown");
@@ -2454,7 +2468,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2576,6 +2590,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_requests = 1;
ti->flush_supported = true;
+ ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
@@ -2609,20 +2624,17 @@ out_unlock:
return r;
}
-static int thin_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int thin_map(struct dm_target *ti, struct bio *bio)
{
bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
- return thin_bio_map(ti, bio, map_context);
+ return thin_bio_map(ti, bio);
}
-static int thin_endio(struct dm_target *ti,
- struct bio *bio, int err,
- union map_info *map_context)
+static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
unsigned long flags;
- struct dm_thin_endio_hook *h = map_context->ptr;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct list_head work;
struct dm_thin_new_mapping *m, *tmp;
struct pool *pool = h->tc->pool;
@@ -2643,14 +2655,15 @@ static int thin_endio(struct dm_target *ti,
if (h->all_io_entry) {
INIT_LIST_HEAD(&work);
dm_deferred_entry_dec(h->all_io_entry, &work);
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry_safe(m, tmp, &work, list)
- list_add(&m->list, &pool->prepared_discards);
- spin_unlock_irqrestore(&pool->lock, flags);
+ if (!list_empty(&work)) {
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry_safe(m, tmp, &work, list)
+ list_add(&m->list, &pool->prepared_discards);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ wake_worker(pool);
+ }
}
- mempool_free(h, pool->endio_hook_pool);
-
return 0;
}
@@ -2733,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
return 0;
}
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
- struct thin_c *tc = ti->private;
-
- *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 5, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -2754,7 +2757,6 @@ static struct target_type thin_target = {
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
- .io_hints = thin_io_hints,
};
/*----------------------------------------------------------------*/
@@ -2779,14 +2781,8 @@ static int __init dm_thin_init(void)
if (!_new_mapping_cache)
goto bad_new_mapping_cache;
- _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
- if (!_endio_hook_cache)
- goto bad_endio_hook_cache;
-
return 0;
-bad_endio_hook_cache:
- kmem_cache_destroy(_new_mapping_cache);
bad_new_mapping_cache:
dm_unregister_target(&pool_target);
bad_pool_target:
@@ -2801,7 +2797,6 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
- kmem_cache_destroy(_endio_hook_cache);
}
module_init(dm_thin_init);
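
Beyond the cell_defer_no_holder()/inc_all_io_entry() rework visible above, pool_status() now reports all three discard configurations instead of collapsing "discards disabled" into "no_discard_passdown". The reporting reduces to a three-way decision (user-space model):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the corrected pool_status() output. */
static const char *discard_mode(bool discard_enabled, bool discard_passdown)
{
	if (!discard_enabled)
		return "ignore_discard";
	return discard_passdown ? "discard_passdown" : "no_discard_passdown";
}

int main(void)
{
	printf("%s\n", discard_mode(false, true));	/* ignore_discard */
	printf("%s\n", discard_mode(true, false));	/* no_discard_passdown */
	return 0;
}
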
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 9e7328bb4030..52cde982164a 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -55,7 +55,6 @@ struct dm_verity {
unsigned shash_descsize;/* the size of temporary space for crypto */
int hash_failed; /* set to 1 if hash of any block failed */
- mempool_t *io_mempool; /* mempool of struct dm_verity_io */
mempool_t *vec_mempool; /* mempool of bio vector */
struct workqueue_struct *verify_wq;
@@ -66,7 +65,6 @@ struct dm_verity {
struct dm_verity_io {
struct dm_verity *v;
- struct bio *bio;
/* original values of bio->bi_end_io and bio->bi_private */
bio_end_io_t *orig_bi_end_io;
@@ -389,8 +387,8 @@ test_block_hash:
*/
static void verity_finish_io(struct dm_verity_io *io, int error)
{
- struct bio *bio = io->bio;
struct dm_verity *v = io->v;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_private = io->orig_bi_private;
@@ -398,8 +396,6 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
if (io->io_vec != io->io_vec_inline)
mempool_free(io->io_vec, v->vec_mempool);
- mempool_free(io, v->io_mempool);
-
bio_endio(bio, error);
}
@@ -462,8 +458,7 @@ no_prefetch_cluster:
* Bio map function. It allocates dm_verity_io structure and bio vector and
* fills them. Then it issues prefetches and the I/O.
*/
-static int verity_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int verity_map(struct dm_target *ti, struct bio *bio)
{
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
@@ -486,9 +481,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
return -EIO;
- io = mempool_alloc(v->io_mempool, GFP_NOIO);
+ io = dm_per_bio_data(bio, ti->per_bio_data_size);
io->v = v;
- io->bio = bio;
io->orig_bi_end_io = bio->bi_end_io;
io->orig_bi_private = bio->bi_private;
io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -610,9 +604,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->vec_mempool)
mempool_destroy(v->vec_mempool);
- if (v->io_mempool)
- mempool_destroy(v->io_mempool);
-
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -841,13 +832,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
- sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
- if (!v->io_mempool) {
- ti->error = "Cannot allocate io mempool";
- r = -ENOMEM;
- goto bad;
- }
+ ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
BIO_MAX_PAGES * sizeof(struct bio_vec));
@@ -875,7 +860,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
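
verity's per-bio structure carries a variable-length tail (the crypto shash descriptor plus two digests), so the constructor rounds the total up to the structure's alignment before storing it in ti->per_bio_data_size. A self-contained sketch of that size computation (the stand-in struct and sizes are hypothetical; the real layout is in dm-verity.c):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct verity_io_stub {			/* stand-in for struct dm_verity_io */
	void *v;
	unsigned long long block;
	unsigned n_blocks;
	/* shash descriptor and two digests follow this header in memory */
};

static size_t roundup_to(size_t n, size_t a)
{
	return (n + a - 1) / a * a;
}

int main(void)
{
	size_t shash_descsize = 104;	/* depends on the chosen hash algorithm */
	size_t digest_size = 32;	/* e.g. sha256 */
	size_t per_bio = roundup_to(sizeof(struct verity_io_stub) +
				    shash_descsize + 2 * digest_size,
				    alignof(struct verity_io_stub));

	printf("per_bio_data_size = %zu\n", per_bio);
	return 0;
}
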
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index cc2b3cb81946..69a5c3b3b340 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -33,8 +33,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Return zeros only on reads
*/
-static int zero_map(struct dm_target *ti, struct bio *bio,
- union map_info *map_context)
+static int zero_map(struct dm_target *ti, struct bio *bio)
{
switch(bio_rw(bio)) {
case READ:
@@ -56,7 +55,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
static struct target_type zero_target = {
.name = "zero",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = zero_ctr,
.map = zero_map,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 77e6eff41cae..314a0e2faf79 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,18 +63,6 @@ struct dm_io {
};
/*
- * For bio-based dm.
- * One of these is allocated per target within a bio. Hopefully
- * this will be simplified out one day.
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- union map_info info;
- struct bio clone;
-};
-
-/*
* For request-based dm.
* One of these is allocated per request.
*/
@@ -657,7 +645,7 @@ static void clone_endio(struct bio *bio, int error)
error = -EIO;
if (endio) {
- r = endio(tio->ti, bio, error, &tio->info);
+ r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
/*
* error and requeue request are handled
@@ -1016,7 +1004,7 @@ static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
- r = ti->type->map(ti, clone, &tio->info);
+ r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
@@ -1111,6 +1099,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
+ tio->target_request_nr = 0;
return tio;
}
@@ -1121,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
struct bio *clone = &tio->clone;
- tio->info.target_request_nr = request_nr;
+ tio->target_request_nr = request_nr;
/*
* Discard requests require the bio's inline iovecs be initialized.
@@ -1174,10 +1163,32 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
ci->sector_count = 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+
+static unsigned get_num_discard_requests(struct dm_target *ti)
+{
+ return ti->num_discard_requests;
+}
+
+static unsigned get_num_write_same_requests(struct dm_target *ti)
+{
+ return ti->num_write_same_requests;
+}
+
+typedef bool (*is_split_required_fn)(struct dm_target *ti);
+
+static bool is_split_required_for_discard(struct dm_target *ti)
+{
+ return ti->split_discard_requests;
+}
+
+static int __clone_and_map_changing_extent_only(struct clone_info *ci,
+ get_num_requests_fn get_num_requests,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
+ unsigned num_requests;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1185,20 +1196,21 @@ static int __clone_and_map_discard(struct clone_info *ci)
return -EIO;
/*
- * Even though the device advertised discard support,
- * that does not mean every target supports it, and
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
* reconfiguration might also have changed that since the
* check was performed.
*/
- if (!ti->num_discard_requests)
+ num_requests = get_num_requests ? get_num_requests(ti) : 0;
+ if (!num_requests)
return -EOPNOTSUPP;
- if (!ti->split_discard_requests)
+ if (is_split_required && !is_split_required(ti))
len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+ __issue_target_requests(ci, ti, num_requests, len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1206,6 +1218,17 @@ static int __clone_and_map_discard(struct clone_info *ci)
return 0;
}
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
+ is_split_required_for_discard);
+}
+
+static int __clone_and_map_write_same(struct clone_info *ci)
+{
+ return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *bio = ci->bio;
@@ -1215,6 +1238,8 @@ static int __clone_and_map(struct clone_info *ci)
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __clone_and_map_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __clone_and_map_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
@@ -1946,13 +1971,20 @@ static void free_dev(struct mapped_device *md)
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
- struct dm_md_mempools *p;
+ struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
- /* the md already has necessary mempools */
+ if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
+ /*
+ * The md already has necessary mempools. Reload just the
+ * bioset because front_pad may have changed because
+ * a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
goto out;
+ }
- p = dm_table_get_md_mempools(t);
BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
md->io_pool = p->io_pool;
@@ -2711,7 +2743,7 @@ int dm_noflush_suspending(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2719,6 +2751,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
if (!pools)
return NULL;
+ per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+
pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
mempool_create_slab_pool(MIN_IOS, _io_cache) :
mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2734,7 +2768,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
pools->bs = (type == DM_TYPE_BIO_BASED) ?
bioset_create(pool_size,
- offsetof(struct dm_target_io, clone)) :
+ per_bio_data_size + offsetof(struct dm_target_io, clone)) :
bioset_create(pool_size,
offsetof(struct dm_rq_clone_bio_info, clone));
if (!pools->bs)
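
In the core, the bioset front_pad must now cover both the largest per-bio data request and the dm_target_io header that ends in the clone bio, with the per-bio size rounded up to dm_target_io's alignment so the header stays aligned. A user-space model of that layout arithmetic (stand-in types; the field offsets are illustrative, not the kernel's):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct bio_stub { char opaque[128]; };

struct target_io_stub {			/* stand-in for struct dm_target_io */
	void *io;
	void *ti;
	unsigned target_request_nr;
	struct bio_stub clone;		/* the clone bio is the last member */
};

int main(void)
{
	size_t align = alignof(struct target_io_stub);
	size_t per_bio_data_size = 24;	/* hypothetical target request */

	/* dm_alloc_md_mempools() rounds the target's request up first... */
	per_bio_data_size = (per_bio_data_size + align - 1) & ~(align - 1);

	/* ...then reserves it in front of the header, up to the clone bio:
	 *   [ per-bio data | dm_target_io fields | clone bio ]           */
	size_t front_pad = per_bio_data_size +
			   offsetof(struct target_io_stub, clone);

	printf("front_pad = %zu bytes before each clone bio\n", front_pad);
	return 0;
}
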
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6a99fefaa743..45b97da1bd06 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -159,7 +159,7 @@ void dm_kcopyd_exit(void);
/*
* Mempool operations
*/
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 61200717687b..3db3d1b271f7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
- mddev->write_lock, /*nothing*/);
+ mddev->write_lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
@@ -1414,12 +1414,11 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
- int i;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
- for (i=0; size>=4; size -= 4 )
+ for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
@@ -4124,7 +4123,7 @@ static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
-/* Metdata version.
+/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
@@ -4753,6 +4752,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
+ if (entry->store == new_dev_store)
+ flush_workqueue(md_misc_wq);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->store(mddev, page, length);
@@ -6346,24 +6347,23 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* Commands dealing with the RAID driver but not any
* particular array:
*/
- switch (cmd)
- {
- case RAID_VERSION:
- err = get_version(argp);
- goto done;
+ switch (cmd) {
+ case RAID_VERSION:
+ err = get_version(argp);
+ goto done;
- case PRINT_RAID_DEBUG:
- err = 0;
- md_print_devices();
- goto done;
+ case PRINT_RAID_DEBUG:
+ err = 0;
+ md_print_devices();
+ goto done;
#ifndef MODULE
- case RAID_AUTORUN:
- err = 0;
- autostart_arrays(arg);
- goto done;
+ case RAID_AUTORUN:
+ err = 0;
+ autostart_arrays(arg);
+ goto done;
#endif
- default:;
+ default:;
}
/*
@@ -6398,6 +6398,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
+ if (cmd == ADD_NEW_DISK)
+ /* need to ensure md_delayed_delete() has completed */
+ flush_workqueue(md_misc_wq);
+
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6406,50 +6410,44 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto abort;
}
- switch (cmd)
- {
- case SET_ARRAY_INFO:
- {
- mdu_array_info_t info;
- if (!arg)
- memset(&info, 0, sizeof(info));
- else if (copy_from_user(&info, argp, sizeof(info))) {
- err = -EFAULT;
- goto abort_unlock;
- }
- if (mddev->pers) {
- err = update_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't update"
- " array info. %d\n", err);
- goto abort_unlock;
- }
- goto done_unlock;
- }
- if (!list_empty(&mddev->disks)) {
- printk(KERN_WARNING
- "md: array %s already has disks!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- if (mddev->raid_disks) {
- printk(KERN_WARNING
- "md: array %s already initialised!\n",
- mdname(mddev));
- err = -EBUSY;
- goto abort_unlock;
- }
- err = set_array_info(mddev, &info);
- if (err) {
- printk(KERN_WARNING "md: couldn't set"
- " array info. %d\n", err);
- goto abort_unlock;
- }
+ if (cmd == SET_ARRAY_INFO) {
+ mdu_array_info_t info;
+ if (!arg)
+ memset(&info, 0, sizeof(info));
+ else if (copy_from_user(&info, argp, sizeof(info))) {
+ err = -EFAULT;
+ goto abort_unlock;
+ }
+ if (mddev->pers) {
+ err = update_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't update"
+ " array info. %d\n", err);
+ goto abort_unlock;
}
goto done_unlock;
-
- default:;
+ }
+ if (!list_empty(&mddev->disks)) {
+ printk(KERN_WARNING
+ "md: array %s already has disks!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ if (mddev->raid_disks) {
+ printk(KERN_WARNING
+ "md: array %s already initialised!\n",
+ mdname(mddev));
+ err = -EBUSY;
+ goto abort_unlock;
+ }
+ err = set_array_info(mddev, &info);
+ if (err) {
+ printk(KERN_WARNING "md: couldn't set"
+ " array info. %d\n", err);
+ goto abort_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6468,52 +6466,51 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
/*
* Commands even a read-only array can execute:
*/
- switch (cmd)
- {
- case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto done_unlock;
+ switch (cmd) {
+ case GET_BITMAP_FILE:
+ err = get_bitmap_file(mddev, argp);
+ goto done_unlock;
- case RESTART_ARRAY_RW:
- err = restart_array(mddev);
- goto done_unlock;
+ case RESTART_ARRAY_RW:
+ err = restart_array(mddev);
+ goto done_unlock;
- case STOP_ARRAY:
- err = do_md_stop(mddev, 0, bdev);
- goto done_unlock;
+ case STOP_ARRAY:
+ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
- case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, bdev);
- goto done_unlock;
+ case STOP_ARRAY_RO:
+ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
- case BLKROSET:
- if (get_user(ro, (int __user *)(arg))) {
- err = -EFAULT;
- goto done_unlock;
- }
- err = -EINVAL;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
- /* if the bdev is going readonly the value of mddev->ro
- * does not matter, no writes are coming
- */
- if (ro)
- goto done_unlock;
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
- /* are we are already prepared for writes? */
- if (mddev->ro != 1)
- goto done_unlock;
+ /* are we are already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
- /* transitioning to readauto need only happen for
- * arrays that call md_write_start
- */
- if (mddev->pers) {
- err = restart_array(mddev);
- if (err == 0) {
- mddev->ro = 2;
- set_disk_ro(mddev->gendisk, 0);
- }
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
}
- goto done_unlock;
+ }
+ goto done_unlock;
}
/*
@@ -6535,37 +6532,36 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
}
- switch (cmd)
+ switch (cmd) {
+ case ADD_NEW_DISK:
{
- case ADD_NEW_DISK:
- {
- mdu_disk_info_t info;
- if (copy_from_user(&info, argp, sizeof(info)))
- err = -EFAULT;
- else
- err = add_new_disk(mddev, &info);
- goto done_unlock;
- }
+ mdu_disk_info_t info;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ err = -EFAULT;
+ else
+ err = add_new_disk(mddev, &info);
+ goto done_unlock;
+ }
- case HOT_REMOVE_DISK:
- err = hot_remove_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_REMOVE_DISK:
+ err = hot_remove_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case HOT_ADD_DISK:
- err = hot_add_disk(mddev, new_decode_dev(arg));
- goto done_unlock;
+ case HOT_ADD_DISK:
+ err = hot_add_disk(mddev, new_decode_dev(arg));
+ goto done_unlock;
- case RUN_ARRAY:
- err = do_md_run(mddev);
- goto done_unlock;
+ case RUN_ARRAY:
+ err = do_md_run(mddev);
+ goto done_unlock;
- case SET_BITMAP_FILE:
- err = set_bitmap_file(mddev, (int)arg);
- goto done_unlock;
+ case SET_BITMAP_FILE:
+ err = set_bitmap_file(mddev, (int)arg);
+ goto done_unlock;
- default:
- err = -EINVAL;
- goto abort_unlock;
+ default:
+ err = -EINVAL;
+ goto abort_unlock;
}
done_unlock:
@@ -7184,6 +7180,7 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
// stop recovery, signal do_sync ....
}
@@ -7281,6 +7278,7 @@ EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
+#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
@@ -7289,6 +7287,7 @@ void md_do_sync(struct md_thread *thread)
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
+ unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
@@ -7448,6 +7447,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
+ update_time = jiffies;
blk_start_plug(&plug);
while (j < max_sectors) {
@@ -7459,6 +7459,7 @@ void md_do_sync(struct md_thread *thread)
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
+ time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
@@ -7466,6 +7467,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ j > mddev->recovery_cp)
+ mddev->recovery_cp = j;
+ update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7570,8 +7575,13 @@ void md_do_sync(struct md_thread *thread)
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
- mddev->recovery_cp =
- mddev->curr_resync_completed;
+ if (test_bit(MD_RECOVERY_ERROR,
+ &mddev->recovery))
+ mddev->recovery_cp =
+ mddev->curr_resync_completed;
+ else
+ mddev->recovery_cp =
+ mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
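
Taken together, the md_do_sync hunks above make the resync checkpoint advance on a timer as well as on progress: recovery_cp is refreshed at least every UPDATE_FREQUENCY (five minutes), and on exit a clean interruption checkpoints at curr_resync while one flagged MD_RECOVERY_ERROR falls back to the last completed position. A minimal sketch of the periodic-checkpoint part (the helper name is made up; the real logic lives inline in md_do_sync):

#define UPDATE_FREQUENCY (5*60*HZ)

/* Hypothetical helper mirroring the inline logic above: publish resync
 * progress once enough time has passed since the last checkpoint. */
static void md_maybe_checkpoint(struct mddev *mddev, sector_t j,
				unsigned long *update_time)
{
	if (!time_after_eq(jiffies, *update_time + UPDATE_FREQUENCY))
		return;
	/* let in-flight resync IO drain before publishing progress */
	wait_event(mddev->recovery_wait,
		   atomic_read(&mddev->recovery_active) == 0);
	mddev->curr_resync_completed = j;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
	    j > mddev->recovery_cp)
		mddev->recovery_cp = j;		/* resync may restart here */
	*update_time = jiffies;
	set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
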
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af443ab868db..eca59c3074ef 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -307,6 +307,7 @@ struct mddev {
* REQUEST: user-space has requested a sync (used with SYNC)
* CHECK: user-space request for check-only, no repair
* RESHAPE: A reshape is happening
+ * ERROR: sync-action interrupted because io-error
*
* If neither SYNC or RESHAPE are set, then it is a recovery.
*/
@@ -320,6 +321,7 @@ struct mddev {
#define MD_RECOVERY_CHECK 7
#define MD_RECOVERY_RESHAPE 8
#define MD_RECOVERY_FROZEN 9
+#define MD_RECOVERY_ERROR 10
unsigned long recovery;
/* If a RAID personality determines that recovery (of a particular
@@ -551,32 +553,6 @@ struct md_thread {
#define THREAD_WAKEUP 0
-#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, cmd); \
-} while (0)
-
static inline void safe_put_page(struct page *p)
{
if (p) put_page(p);
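
The md-private __wait_event_lock_irq/wait_event_lock_irq macros are removed here; the raid1/raid10/raid5 hunks that follow switch to what appears to be a generic wait_event_lock_irq() plus a wait_event_lock_irq_cmd() variant, where the named spinlock is dropped around schedule() and the _cmd form runs an extra statement before each sleep. A sketch of the two call forms as they appear below (hypothetical wrapper, kernel context assumed):

/* Hypothetical wrapper showing the call forms used by the RAID code below. */
static void quiesce_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* sleep until the condition holds; resync_lock is released while
	 * sleeping and re-taken before the condition is re-checked */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	/* _cmd variant: additionally run a statement (here, flushing queued
	 * writes) each time before scheduling */
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued + 1,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}
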
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 5ba277768d99..28c3ed072a79 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -25,7 +25,7 @@
* may be held at once. This is just an implementation detail.
*
* ii) Recursive locking attempts are detected and return EINVAL. A stack
- * trace is also emitted for the previous lock aquisition.
+ * trace is also emitted for the previous lock acquisition.
*
* iii) Priority is given to write locks.
*/
@@ -109,7 +109,7 @@ static int __check_holder(struct block_lock *lock)
DMERR("previously held here:");
print_stack_trace(lock->traces + i, 4);
- DMERR("subsequent aquisition attempted here:");
+ DMERR("subsequent acquisition attempted here:");
t.nr_entries = 0;
t.max_entries = MAX_STACK;
t.entries = entries;
@@ -428,15 +428,17 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
- if (unlikely(r))
+ if (unlikely(r)) {
+ DMERR_LIMIT("%s validator check failed for block %llu", v->name,
+ (unsigned long long) dm_bufio_get_block_number(buf));
return r;
+ }
aux->validator = v;
} else {
if (unlikely(aux->validator != v)) {
- DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
- aux->validator->name, v ? v->name : "NULL",
- (unsigned long long)
- dm_bufio_get_block_number(buf));
+ DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
+ aux->validator->name, v ? v->name : "NULL",
+ (unsigned long long) dm_bufio_get_block_number(buf));
return -EINVAL;
}
}
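
DMERR() becomes DMERR_LIMIT() in the validator paths above; since a bad block can be re-read repeatedly, the rate-limited variant keeps a corrupt device from flooding the log. The call form is unchanged, for example:

	DMERR_LIMIT("%s validator check failed for block %llu", v->name,
		    (unsigned long long) dm_bufio_get_block_number(buf));
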
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 5709bfeab1e8..accbb05f17b6 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -36,13 +36,13 @@ struct node_header {
__le32 padding;
} __packed;
-struct node {
+struct btree_node {
struct node_header header;
__le64 keys[0];
} __packed;
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
int new_block(struct dm_btree_info *info, struct dm_block **result);
@@ -64,7 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
-struct node *ro_node(struct ro_spine *s);
+struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
struct dm_btree_info *info;
@@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
/*
* Some inlines.
*/
-static inline __le64 *key_ptr(struct node *n, uint32_t index)
+static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
{
return n->keys + index;
}
-static inline void *value_base(struct node *n)
+static inline void *value_base(struct btree_node *n)
{
return &n->keys[le32_to_cpu(n->header.max_entries)];
}
-static inline void *value_ptr(struct node *n, uint32_t index)
+static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
return value_base(n) + (value_size * index);
@@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
/*
* Assumes the values are suitably-aligned and converts to core format.
*/
-static inline uint64_t value64(struct node *n, uint32_t index)
+static inline uint64_t value64(struct btree_node *n, uint32_t index)
{
__le64 *values_le = value_base(n);
@@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
/*
* Searching for a key within a single node.
*/
-int lower_bound(struct node *n, uint64_t key);
+int lower_bound(struct btree_node *n, uint64_t key);
extern struct dm_block_validator btree_node_validator;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index aa71e2359a07..c4f28133ef82 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -53,7 +53,7 @@
/*
* Some little utilities for moving node data around.
*/
-static void node_shift(struct node *n, int shift)
+static void node_shift(struct btree_node *n, int shift)
{
uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
uint32_t value_size = le32_to_cpu(n->header.value_size);
@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
}
}
-static void node_copy(struct node *left, struct node *right, int shift)
+static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
/*
* Delete a specific entry from a leaf node.
*/
-static void delete_at(struct node *n, unsigned index)
+static void delete_at(struct btree_node *n, unsigned index)
{
unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
unsigned nr_to_copy = nr_entries - (index + 1);
@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned merge_threshold(struct node *n)
+static unsigned merge_threshold(struct btree_node *n)
{
return le32_to_cpu(n->header.max_entries) / 3;
}
@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
struct child {
unsigned index;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
};
static struct dm_btree_value_type le64_type = {
@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
.equal = NULL
};
-static int init_child(struct dm_btree_info *info, struct node *parent,
+static int init_child(struct dm_btree_info *info, struct btree_node *parent,
unsigned index, struct child *result)
{
int r, inc;
@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
return dm_tm_unlock(info->tm, c->block);
}
-static void shift(struct node *left, struct node *right, int count)
+static void shift(struct btree_node *left, struct btree_node *right, int count)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
right->header.nr_entries = cpu_to_le32(nr_right + count);
}
-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *r)
{
- struct node *left = l->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
unsigned threshold = 2 * merge_threshold(left) + 1;
@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent;
+ struct btree_node *parent;
struct child left, right;
parent = dm_block_data(shadow_current(s));
@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
* in right, then rebalance2. This wastes some cpu, but I want something
* simple atm.
*/
-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
/*
* Redistributes entries among 3 sibling nodes.
*/
-static void redistribute3(struct dm_btree_info *info, struct node *parent,
+static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r,
- struct node *left, struct node *center, struct node *right,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
int s;
@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
*key_ptr(parent, r->index) = right->keys[0];
}
-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
struct child *l, struct child *c, struct child *r)
{
- struct node *left = l->n;
- struct node *center = c->n;
- struct node *right = r->n;
+ struct btree_node *left = l->n;
+ struct btree_node *center = c->n;
+ struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
{
int r;
- struct node *parent = dm_block_data(shadow_current(s));
+ struct btree_node *parent = dm_block_data(shadow_current(s));
struct child left, center, right;
/*
@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
{
int r;
struct dm_block *block;
- struct node *n;
+ struct btree_node *n;
r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
if (r)
@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
{
int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;
- struct node *n;
+ struct btree_node *n;
n = dm_block_data(shadow_current(s));
@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
return r;
}
-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
int i = lower_bound(n, key);
@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
uint64_t key, unsigned *index)
{
int i = *index, r;
- struct node *n;
+ struct btree_node *n;
for (;;) {
r = shadow_step(s, root, vt);
@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
unsigned level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index d9a7912ee8ee..f199a0c4ed04 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
h->blocknr = cpu_to_le64(dm_block_location(b));
@@ -38,15 +38,15 @@ static int node_check(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
{
- struct node *n = dm_block_data(b);
+ struct btree_node *n = dm_block_data(b);
struct node_header *h = &n->header;
size_t value_size;
__le32 csum_disk;
uint32_t flags;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
- DMERR("node_check failed blocknr %llu wanted %llu",
- le64_to_cpu(h->blocknr), dm_block_location(b));
+ DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(h->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -54,8 +54,8 @@ static int node_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
if (csum_disk != h->csum) {
- DMERR("node_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
+ DMERR_LIMIT("node_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
return -EILSEQ;
}
@@ -63,12 +63,12 @@ static int node_check(struct dm_block_validator *v,
if (sizeof(struct node_header) +
(sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
- DMERR("node_check failed: max_entries too large");
+ DMERR_LIMIT("node_check failed: max_entries too large");
return -EILSEQ;
}
if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
- DMERR("node_check failed, too many entries");
+ DMERR_LIMIT("node_check failed: too many entries");
return -EILSEQ;
}
@@ -77,7 +77,7 @@ static int node_check(struct dm_block_validator *v,
*/
flags = le32_to_cpu(h->flags);
if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
- DMERR("node_check failed, node is neither INTERNAL or LEAF");
+ DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
return -EILSEQ;
}
@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
-struct node *ro_node(struct ro_spine *s)
+struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index d12b2cc51f1a..4caf66918cdb 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
/*----------------------------------------------------------------*/
/* makes the assumption that no two keys are the same. */
-static int bsearch(struct node *n, uint64_t key, int want_hi)
+static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
return want_hi ? hi : lo;
}
-int lower_bound(struct node *n, uint64_t key)
+int lower_bound(struct btree_node *n, uint64_t key)
{
return bsearch(n, key, 0);
}
-void inc_children(struct dm_transaction_manager *tm, struct node *n,
+void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt)
{
unsigned i;
@@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
vt->inc(vt->context, value_ptr(n, i));
}
-static int insert_at(size_t value_size, struct node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
int r;
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
size_t block_size;
uint32_t max_entries;
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
#define MAX_SPINE_DEPTH 64
struct frame {
struct dm_block *b;
- struct node *n;
+ struct btree_node *n;
unsigned level;
unsigned nr_children;
unsigned current_child;
@@ -230,6 +230,11 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -241,7 +246,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s->tm = info->tm;
s->top = -1;
- r = push_frame(s, root, 1);
+ r = push_frame(s, root, 0);
if (r)
goto out;
@@ -267,7 +272,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
if (r)
goto out;
- } else if (f->level != (info->levels - 1)) {
+ } else if (is_internal_level(info, f)) {
b = value64(f->n, f->current_child);
f->current_child++;
r = push_frame(s, b, f->level + 1);
@@ -295,7 +300,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
/*----------------------------------------------------------------*/
static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
- int (*search_fn)(struct node *, uint64_t),
+ int (*search_fn)(struct btree_node *, uint64_t),
uint64_t *result_key, void *v, size_t value_size)
{
int i, r;
@@ -406,7 +411,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *parent;
- struct node *ln, *rn, *pn;
+ struct btree_node *ln, *rn, *pn;
__le64 location;
left = shadow_current(s);
@@ -491,7 +496,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
size_t size;
unsigned nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
- struct node *pn, *ln, *rn;
+ struct btree_node *pn, *ln, *rn;
__le64 val;
new_parent = shadow_current(s);
@@ -576,7 +581,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
uint64_t key, unsigned *index)
{
int r, i = *index, top = 1;
- struct node *node;
+ struct btree_node *node;
for (;;) {
r = shadow_step(s, root, vt);
@@ -643,7 +648,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
unsigned level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
- struct node *n;
+ struct btree_node *n;
struct dm_btree_value_type le64_type;
le64_type.context = NULL;
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index ae02c84410ff..a2cd50441ca1 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -35,7 +35,7 @@ struct dm_transaction_manager;
*/
/*
- * Infomation about the values stored within the btree.
+ * Information about the values stored within the btree.
*/
struct dm_btree_value_type {
void *context;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index f3a9af8cdec3..3e7a88d99eb0 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -39,8 +39,8 @@ static int index_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
- DMERR("index_check failed blocknr %llu wanted %llu",
- le64_to_cpu(mi_le->blocknr), dm_block_location(b));
+ DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(mi_le->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -48,8 +48,8 @@ static int index_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
INDEX_CSUM_XOR));
if (csum_disk != mi_le->csum) {
- DMERR("index_check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
+ DMERR_LIMIT("index_check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
return -EILSEQ;
}
@@ -89,8 +89,8 @@ static int bitmap_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
- DMERR("bitmap check failed blocknr %llu wanted %llu",
- le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -98,8 +98,8 @@ static int bitmap_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BITMAP_CSUM_XOR));
if (csum_disk != disk_header->csum) {
- DMERR("bitmap check failed csum %u wanted %u",
- le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
return -EILSEQ;
}
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index e89ae5e7a519..906cf3df71af 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -337,7 +337,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
int r = sm_metadata_new_block_(sm, b);
if (r)
- DMERR("out of metadata space");
+ DMERR("unable to allocate new metadata block");
return r;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a0f73092176e..d5bddfc4010e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9acbd717131..64d48249c03b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock, );
+ conf->resync_lock);
/* block any new IO from starting */
conf->barrier++;
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, );
+ conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
}
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
(conf->nr_pending &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
- conf->resync_lock,
- );
+ conf->resync_lock);
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
spin_lock_irq(&conf->resync_lock);
conf->barrier++;
conf->nr_waiting++;
- wait_event_lock_irq(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
- conf->resync_lock,
- flush_pending_writes(conf));
+ wait_event_lock_irq_cmd(conf->wait_barrier,
+ conf->nr_pending == conf->nr_queued+1,
+ conf->resync_lock,
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a4502686e7a8..19d77a026639 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,8 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <trace/events/block.h>
+
#include "md.h"
#include "raid5.h"
#include "raid0.h"
@@ -182,6 +184,8 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -466,7 +470,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
@@ -480,8 +484,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
- conf->device_lock,
- );
+ conf->device_lock);
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -671,6 +674,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
+ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ bi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(bi);
}
if (rrdev) {
@@ -698,6 +704,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
+ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ rbi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
@@ -1576,7 +1585,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* This happens in stages:
* 1/ create a new kmem_cache and allocate the required number of
* stripe_heads.
- * 2/ gather all the old stripe_heads and tranfer the pages across
+ * 2/ gather all the old stripe_heads and transfer the pages across
* to the new stripe_heads. This will have the side effect of
* freezing the array as once all stripe_heads have been collected,
* no IO will be possible. Old stripe heads are freed once their
@@ -1646,8 +1655,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
- conf->device_lock,
- );
+ conf->device_lock);
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -2855,8 +2863,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0)
+ if (rmw < rcw && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
+ (unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2867,7 +2877,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
pr_debug("Read_old block "
- "%d for r-m-w\n", i);
+ "%d for r-m-w\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
@@ -2877,8 +2887,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
+ }
if (rcw <= rmw && rcw > 0) {
/* want reconstruct write, but need to get some data */
+ int qread =0;
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2897,12 +2909,17 @@ static void handle_stripe_dirtying(struct r5conf *conf,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
+ qread++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
+ if (rcw)
+ blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
+ (unsigned long long)sh->sector,
+ rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
/* now if nothing is locked, and if we have enough data,
* we can start a write request
@@ -3224,10 +3241,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
/* done submitting copies, wait for them to complete */
- if (tx) {
- async_tx_ack(tx);
- dma_wait_for_async_tx(tx);
- }
+ async_tx_quiesce(&tx);
}
/*
@@ -3903,6 +3917,8 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
+ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+ raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4003,10 +4019,13 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);
+ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ align_bi, disk_devt(mddev->gendisk),
+ raid_bio->bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4081,6 +4100,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct stripe_head *sh;
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
+ int cnt = 0;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4095,9 +4115,11 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
__release_stripe(conf, sh);
+ cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
}
@@ -4355,6 +4377,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
}
}
@@ -4731,8 +4755,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0)
+ if (remaining == 0) {
+ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+ raid_bio, 0);
bio_endio(raid_bio, 0);
+ }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
@@ -6095,7 +6122,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
wait_event_lock_irq(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock, /* nothing */);
+ conf->device_lock);
conf->quiesce = 1;
spin_unlock_irq(&conf->device_lock);
/* allow reshape to continue */
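
The raid5 hunks above wire MD into blktrace: remaps are reported when a stripe bio is submitted to a member disk, completions are reported when the original bio finishes, and blk_add_trace_msg() annotates rmw/rcw decisions. The submission side boils down to a pattern like this (hypothetical helper name):

#include <trace/events/block.h>

/* Hypothetical helper: report a bio remap to blktrace before handing it to
 * the member device, as the ops_run_io() hunks above do. */
static void submit_traced(struct r5conf *conf, struct bio *bi, sector_t sector)
{
	trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), bi,
			      disk_devt(conf->mddev->gendisk), sector);
	generic_make_request(bi);
}
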
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 121b0110af3c..d2a436ce77f8 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -1,3 +1,10 @@
+# Used by common drivers, when they need to ask questions
+config MEDIA_COMMON_OPTIONS
+ bool
+
+comment "common driver options"
+ depends on MEDIA_COMMON_OPTIONS
+
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
diff --git a/drivers/media/common/b2c2/Kconfig b/drivers/media/common/b2c2/Kconfig
index 1df9e578daa5..a8c6cdfaa2f5 100644
--- a/drivers/media/common/b2c2/Kconfig
+++ b/drivers/media/common/b2c2/Kconfig
@@ -17,11 +17,6 @@ config DVB_B2C2_FLEXCOP
select DVB_CX24123 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_CX24113 if MEDIA_SUBDRV_AUTOSELECT
- help
- Support for the digital TV receiver chip made by B2C2 Inc. included in
- Technisats PCI cards and USB boxes.
-
- Say Y if you own such a device and want to use it.
# Selected via the PCI or USB flexcop drivers
config DVB_B2C2_FLEXCOP_DEBUG
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
index 425aeadfb49d..68f0f604678e 100644
--- a/drivers/media/common/siano/Kconfig
+++ b/drivers/media/common/siano/Kconfig
@@ -4,14 +4,16 @@
config SMS_SIANO_MDTV
tristate
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ depends on !RC_CORE || RC_CORE
depends on SMS_USB_DRV || SMS_SDIO_DRV
default y
- ---help---
- Choose Y or M here if you have MDTV receiver with a Siano chipset.
-
- To compile this driver as a module, choose M here
- (The module will be called smsmdtv).
- Further documentation on this driver can be found on the WWW
- at http://www.siano-ms.com/
+config SMS_SIANO_RC
+ bool "Enable Remote Controller support for Siano devices"
+ depends on SMS_SIANO_MDTV && RC_CORE
+ depends on SMS_USB_DRV || SMS_SDIO_DRV
+ depends on MEDIA_COMMON_OPTIONS
+ default y
+ ---help---
+ Choose Y to select Remote Controller support for Siano driver.
diff --git a/drivers/media/common/siano/Makefile b/drivers/media/common/siano/Makefile
index 2a09279e0648..81b1e985bea5 100644
--- a/drivers/media/common/siano/Makefile
+++ b/drivers/media/common/siano/Makefile
@@ -1,7 +1,11 @@
-smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
+smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o
obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
+ifeq ($(CONFIG_SMS_SIANO_RC),y)
+ smsmdtv-objs += smsir.o
+endif
+
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 9cc55546cc30..1842e64e6338 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1092,7 +1092,7 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
* @return pointer to descriptor on success, NULL on error.
*/
-struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
+static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
{
struct smscore_buffer_t *cb = NULL;
unsigned long flags;
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 37bc5c4b8ad8..b8c5cad78537 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -88,7 +88,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
dev->priv = coredev;
dev->driver_type = RC_DRIVER_IR_RAW;
- dev->allowed_protos = RC_TYPE_ALL;
+ dev->allowed_protos = RC_BIT_ALL;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index ae92b3a8587e..69b59b9eee28 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -46,10 +46,19 @@ struct ir_t {
u32 controller;
};
+#ifdef CONFIG_SMS_SIANO_RC
int sms_ir_init(struct smscore_device_t *coredev);
void sms_ir_exit(struct smscore_device_t *coredev);
void sms_ir_event(struct smscore_device_t *coredev,
const char *buf, int len);
+#else
+inline static int sms_ir_init(struct smscore_device_t *coredev) {
+ return 0;
+}
+inline static void sms_ir_exit(struct smscore_device_t *coredev) {};
+inline static void sms_ir_event(struct smscore_device_t *coredev,
+ const char *buf, int len) {};
+#endif
#endif /* __SMS_IR_H__ */
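
The smsir.h change above is the usual compile-out idiom: when CONFIG_SMS_SIANO_RC is off, callers link against no-op static inlines instead of sprinkling #ifdefs at every call site. In sketch form, written with the more common static inline spelling:

#ifdef CONFIG_SMS_SIANO_RC
int sms_ir_init(struct smscore_device_t *coredev);
void sms_ir_exit(struct smscore_device_t *coredev);
void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len);
#else
/* stubs: RC support compiled out, callers need no #ifdef */
static inline int sms_ir_init(struct smscore_device_t *coredev)
{
	return 0;
}
static inline void sms_ir_exit(struct smscore_device_t *coredev) { }
static inline void sms_ir_event(struct smscore_device_t *coredev,
				const char *buf, int len) { }
#endif
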
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 889c9c16c6df..d81dbb22aa81 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -877,7 +877,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
- if (params->pes_type > DMX_PES_OTHER || params->pes_type < 0)
+ if ((unsigned)params->pes_type > DMX_PES_OTHER)
return -EINVAL;
dmxdevfilter->type = DMXDEV_TYPE_PES;
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 02ebe28f830d..48c6cf92ab99 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
+#include <linux/time.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/fs.h>
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 58e0220447c0..388c2eb4d747 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -250,6 +250,7 @@
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
+#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 7e92793260f0..49d95040096a 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1029,12 +1029,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
/* Get */
_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
_DTV_CMD(DTV_API_VERSION, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_HP, 0, 0),
- _DTV_CMD(DTV_CODE_RATE_LP, 0, 0),
- _DTV_CMD(DTV_GUARD_INTERVAL, 0, 0),
- _DTV_CMD(DTV_TRANSMISSION_MODE, 0, 0),
- _DTV_CMD(DTV_HIERARCHY, 0, 0),
- _DTV_CMD(DTV_INTERLEAVING, 0, 0),
_DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
@@ -1042,13 +1036,11 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
_DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
- _DTV_CMD(DTV_ATSCMH_PARADE_ID, 0, 0),
_DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
_DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
_DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
- _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
_DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
@@ -1056,8 +1048,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
_DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
-
- _DTV_CMD(DTV_LNA, 0, 0),
};
static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index f2a90f990ce3..3d399d9a6343 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -139,7 +139,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
if (p->code_rate_HP == FEC_4_5 || p->code_rate_LP == FEC_4_5)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
@@ -152,7 +152,7 @@ static int cx22700_set_tps(struct cx22700_state *state,
p->modulation != QAM_64)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 7e28b4ee7d4f..68c88ab58e71 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -338,7 +338,7 @@ static int cx24123_set_fec(struct cx24123_state *state, fe_code_rate_t fec)
{
u8 nom_reg = cx24123_readreg(state, 0x0e) & ~0x07;
- if ((fec < FEC_NONE) || (fec > FEC_AUTO))
+ if (((int)fec < FEC_NONE) || (fec > FEC_AUTO))
fec = FEC_AUTO;
/* Set the soft decision threshold */
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index b5781a48034c..de1cc91fd833 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -97,7 +97,7 @@ static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb
return -ENODEV;
}
-int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
+static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 6d9853750d2b..e71cc60851e7 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1748,7 +1748,8 @@ static int DRX_Stop(struct drxd_state *state)
return status;
}
-int SetOperationMode(struct drxd_state *state, int oMode)
+#if 0 /* Currently unused */
+static int SetOperationMode(struct drxd_state *state, int oMode)
{
int status;
@@ -1788,6 +1789,7 @@ int SetOperationMode(struct drxd_state *state, int oMode)
state->operation_mode = oMode;
return status;
}
+#endif
static int StartDiversity(struct drxd_state *state)
{
@@ -2612,7 +2614,7 @@ static int CDRXD(struct drxd_state *state, u32 IntermediateFrequency)
return 0;
}
-int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
+static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
{
int status = 0;
u32 driverVersion;
@@ -2774,7 +2776,7 @@ int DRXD_init(struct drxd_state *state, const u8 * fw, u32 fw_size)
return status;
}
-int DRXD_status(struct drxd_state *state, u32 * pLockStatus)
+static int DRXD_status(struct drxd_state *state, u32 *pLockStatus)
{
DRX_GetLockStatus(state, pLockStatus);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 8b4c6d5f8f36..c2fc7da0d6bf 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -65,16 +65,6 @@ static bool IsQAM(struct drxk_state *state)
state->m_OperationMode == OM_QAM_ITU_C;
}
-bool IsA1WithPatchCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_PATCH_CODE;
-}
-
-bool IsA1WithRomCode(struct drxk_state *state)
-{
- return state->m_DRXK_A1_ROM_CODE;
-}
-
#define NOA1ROM 0
#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
@@ -189,7 +179,7 @@ static inline u32 MulDiv32(u32 a, u32 b, u32 c)
return (u32) tmp64;
}
-inline u32 Frac28a(u32 a, u32 c)
+static inline u32 Frac28a(u32 a, u32 c)
{
int i = 0;
u32 Q1 = 0;
@@ -587,7 +577,7 @@ static int write_block(struct drxk_state *state, u32 Address,
#define DRXK_MAX_RETRIES_POWERUP 20
#endif
-int PowerUpDevice(struct drxk_state *state)
+static int PowerUpDevice(struct drxk_state *state)
{
int status;
u8 data = 0;
@@ -720,11 +710,6 @@ static int init_state(struct drxk_state *state)
state->m_bPowerDown = (ulPowerDown != 0);
- state->m_DRXK_A1_PATCH_CODE = false;
- state->m_DRXK_A1_ROM_CODE = false;
- state->m_DRXK_A2_ROM_CODE = false;
- state->m_DRXK_A3_ROM_CODE = false;
- state->m_DRXK_A2_PATCH_CODE = false;
state->m_DRXK_A3_PATCH_CODE = false;
/* Init AGC and PGA parameters */
@@ -921,7 +906,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
if (status < 0)
goto error;
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
@@ -948,7 +933,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
state->m_oscClockFreq = 20250;
break;
default:
- printk(KERN_ERR "drxk: Clock Frequency is unkonwn\n");
+ printk(KERN_ERR "drxk: Clock Frequency is unknown\n");
return -EINVAL;
}
/*
@@ -1217,7 +1202,7 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
goto error;
/* MPEG TS pad configuration */
- status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
if (status < 0)
goto error;
@@ -5461,6 +5446,7 @@ static int QAMDemodulatorCommand(struct drxk_state *state,
} else {
printk(KERN_WARNING "drxk: Unknown QAM demodulator parameter "
"count %d\n", numberOfParameters);
+ status = -EINVAL;
}
error:
diff --git a/drivers/media/dvb-frontends/drxk_hard.h b/drivers/media/dvb-frontends/drxk_hard.h
index 6bb9fc4a7b96..d18a896a9835 100644
--- a/drivers/media/dvb-frontends/drxk_hard.h
+++ b/drivers/media/dvb-frontends/drxk_hard.h
@@ -320,11 +320,7 @@ struct drxk_state {
u8 *m_microcode;
int m_microcode_length;
- bool m_DRXK_A1_PATCH_CODE;
- bool m_DRXK_A1_ROM_CODE;
- bool m_DRXK_A2_ROM_CODE;
- bool m_DRXK_A3_ROM_CODE;
- bool m_DRXK_A2_PATCH_CODE;
+ bool m_DRXK_A3_ROM_CODE;
bool m_DRXK_A3_PATCH_CODE;
bool m_rfmirror;
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 5b639087ce45..60a529e3833f 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -30,7 +30,6 @@
#include "ds3000.h"
static int debug;
-static int force_fw_upload;
#define dprintk(args...) \
do { \
@@ -234,7 +233,6 @@ struct ds3000_state {
struct i2c_adapter *i2c;
const struct ds3000_config *config;
struct dvb_frontend frontend;
- u8 skip_fw_load;
/* previous uncorrected block counter for DVB-S2 */
u16 prevUCBS2;
};
@@ -397,9 +395,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- if (state->skip_fw_load || !force_fw_upload)
- return 0; /* Firmware already uploaded, skipping */
-
/* Load firmware */
/* request the firmware, this will block until someone uploads it */
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
@@ -413,9 +408,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
return ret;
}
- /* Make sure we don't recurse back through here during loading */
- state->skip_fw_load = 1;
-
ret = ds3000_load_firmware(fe, fw);
if (ret)
printk("%s: Writing firmware to device failed\n", __func__);
@@ -425,9 +417,6 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
dprintk("%s: Firmware upload %s\n", __func__,
ret == 0 ? "complete" : "failed");
- /* Ensure firmware is always loaded if required */
- state->skip_fw_load = 0;
-
return ret;
}
@@ -1309,10 +1298,8 @@ static struct dvb_frontend_ops ds3000_ops = {
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
-module_param(force_fw_upload, int, 0644);
-MODULE_PARM_DESC(force_fw_upload, "Force firmware upload (default:0)");
-
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
"DS3000/TS2020 hardware");
MODULE_AUTHOR("Konstantin Dimitrov");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 36fcf559e361..ddf866c46f8b 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -180,11 +180,11 @@ static int apply_frontend_param(struct dvb_frontend *fe)
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
- if (p->guard_interval < GUARD_INTERVAL_1_32 ||
+ if ((int)p->guard_interval < GUARD_INTERVAL_1_32 ||
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
- if (p->hierarchy < HIERARCHY_NONE ||
+ if ((int)p->hierarchy < HIERARCHY_NONE ||
p->hierarchy > HIERARCHY_4)
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index e20bf13aa860..ec388c1d6913 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -549,7 +549,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->frequency > fe->ops.info.frequency_max))
return -EINVAL;
- if ((p->inversion < INVERSION_OFF)
+ if (((int)p->inversion < INVERSION_OFF)
|| (p->inversion > INVERSION_ON))
return -EINVAL;
@@ -557,7 +557,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
|| (p->symbol_rate > fe->ops.info.symbol_rate_max))
return -EINVAL;
- if ((p->fec_inner < FEC_NONE)
+ if (((int)p->fec_inner < FEC_NONE)
|| (p->fec_inner > FEC_AUTO))
return -EINVAL;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index b0f6ec03d1eb..362d26d11e82 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -130,7 +130,7 @@ static int rtl2830_rd_reg(struct rtl2830_priv *priv, u16 reg, u8 *val)
}
/* write single register with mask */
-int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
+static int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -150,7 +150,7 @@ int rtl2830_wr_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
+static int rtl2830_rd_reg_mask(struct rtl2830_priv *priv, u16 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
@@ -256,7 +256,7 @@ static int rtl2830_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2830_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2830_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 500;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 80c8e5f1182f..73887690b046 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -265,7 +265,7 @@ static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
return rtl2832_rd_regs(priv, reg, page, val, 1);
}
-int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
+static int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
{
int ret;
@@ -305,7 +305,7 @@ err:
}
-int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
+static int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
{
int ret, i;
u8 len;
@@ -510,7 +510,7 @@ static int rtl2832_sleep(struct dvb_frontend *fe)
return 0;
}
-int rtl2832_get_tune_settings(struct dvb_frontend *fe,
+static int rtl2832_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
struct rtl2832_priv *priv = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 79e29de87fb7..cc278b3d6d5a 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1260,7 +1260,7 @@ static inline void CONVERT32(u32 x, char *str)
*str = '\0';
}
-int stb0899_get_dev_id(struct stb0899_state *state)
+static int stb0899_get_dev_id(struct stb0899_state *state)
{
u8 chip_id, release;
u16 id;
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 2a8aaeb1112d..0c8e45949b11 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -879,7 +879,8 @@ static u8 stv0367_readbits(struct stv0367_state *state, u32 label)
return val;
}
-u8 stv0367_getbits(u8 reg, u32 label)
+#if 0 /* Currently, unused */
+static u8 stv0367_getbits(u8 reg, u32 label)
{
u8 mask, pos;
@@ -887,7 +888,7 @@ u8 stv0367_getbits(u8 reg, u32 label)
return (reg & mask) >> pos;
}
-
+#endif
static int stv0367ter_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
@@ -1263,8 +1264,8 @@ stv0367_ter_signal_type stv0367ter_check_cpamp(struct stv0367_state *state,
return CPAMPStatus;
}
-enum
-stv0367_ter_signal_type stv0367ter_lock_algo(struct stv0367_state *state)
+static enum stv0367_ter_signal_type
+stv0367ter_lock_algo(struct stv0367_state *state)
{
enum stv0367_ter_signal_type ret_flag;
short int wd, tempo;
@@ -1528,7 +1529,7 @@ static int stv0367ter_sleep(struct dvb_frontend *fe)
return stv0367ter_standby(fe, 1);
}
-int stv0367ter_init(struct dvb_frontend *fe)
+static int stv0367ter_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
@@ -2378,9 +2379,9 @@ static u32 stv0367cab_get_adc_freq(struct dvb_frontend *fe, u32 ExtClk_Hz)
return ADCClk_Hz;
}
-enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
- u32 SymbolRate,
- enum stv0367cab_mod QAMSize)
+static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
+ u32 SymbolRate,
+ enum stv0367cab_mod QAMSize)
{
/* Set QAM size */
stv0367_writebits(state, F367CAB_QAM_MODE, QAMSize);
@@ -2762,7 +2763,7 @@ static int stv0367cab_sleep(struct dvb_frontend *fe)
return stv0367cab_standby(fe, 1);
}
-int stv0367cab_init(struct dvb_frontend *fe)
+static int stv0367cab_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index a83bf6802345..16a4bc54dbe7 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -96,7 +96,8 @@ static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
}
/* write single register with mask */
-int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
+static int tda10071_wr_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
@@ -116,7 +117,8 @@ int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
}
/* read single register with mask */
-int tda10071_rd_reg_mask(struct tda10071_priv *priv, u8 reg, u8 *val, u8 mask)
+static int tda10071_rd_reg_mask(struct tda10071_priv *priv,
+ u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index ad7c72e8f517..d281f77d5c28 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -32,6 +32,7 @@
#include <asm/div64.h>
#include "dvb_frontend.h"
+#include "tda18271c2dd.h"
struct SStandardParam {
s32 m_IFFrequency;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 4fdcd8cb7530..c2ba085e0d20 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -13,6 +13,7 @@
#ifndef _FIREDTV_H
#define _FIREDTV_H
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/list.h>
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 18a38b38fcb8..df163800c8e1 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -3,10 +3,10 @@
*
* Copyright (C) 2008--2011 Nokia Corporation
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Contributors:
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
* Tuukka Toivonen <tuukkat76@gmail.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 45ecf8db1eae..64d71fb87a96 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -540,8 +540,8 @@ static int init_device(struct i2c_client *client, struct adv7180_state *state)
return 0;
}
-static __devinit int adv7180_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adv7180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct adv7180_state *state;
struct v4l2_subdev *sd;
@@ -587,7 +587,7 @@ err:
return ret;
}
-static __devexit int adv7180_remove(struct i2c_client *client)
+static int adv7180_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
@@ -652,7 +652,7 @@ static struct i2c_driver adv7180_driver = {
.name = KBUILD_MODNAME,
},
.probe = adv7180_probe,
- .remove = __devexit_p(adv7180_remove),
+ .remove = adv7180_remove,
#ifdef CONFIG_PM
.suspend = adv7180_suspend,
.resume = adv7180_resume,
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index e1d4c89d7140..6fed5b74e743 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -677,22 +677,11 @@ static struct i2c_driver adv7183_driver = {
.name = "adv7183",
},
.probe = adv7183_probe,
- .remove = __devexit_p(adv7183_remove),
+ .remove = adv7183_remove,
.id_table = adv7183_id,
};
-static __init int adv7183_init(void)
-{
- return i2c_add_driver(&adv7183_driver);
-}
-
-static __exit void adv7183_exit(void)
-{
- i2c_del_driver(&adv7183_driver);
-}
-
-module_init(adv7183_init);
-module_exit(adv7183_exit);
+module_i2c_driver(adv7183_driver);
MODULE_DESCRIPTION("Analog Devices ADV7183 video decoder driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 05f8950f6f91..f47555b1000a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -486,9 +486,19 @@ static inline int edid_read_block(struct v4l2_subdev *sd, unsigned len, u8 *val)
struct i2c_client *client = state->i2c_edid;
u8 msgbuf0[1] = { 0 };
u8 msgbuf1[256];
- struct i2c_msg msg[2] = { { client->addr, 0, 1, msgbuf0 },
- { client->addr, 0 | I2C_M_RD, len, msgbuf1 }
- };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = msgbuf0
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = msgbuf1
+ },
+ };
if (i2c_transfer(client->adapter, msg, 2) < 0)
return -EIO;
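
The adv7604 hunk above rewrites a positional struct i2c_msg initializer as designated initializers: the write message's flags field is simply left out and defaults to zero, and each value is tied to a named member instead of a position. A small standalone sketch of the idiom, using a simplified stand-in structure rather than the kernel's struct i2c_msg:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct i2c_msg (illustration only). */
struct msg {
	uint16_t addr;
	uint16_t flags;   /* 0 = write, MSG_RD = read */
	uint16_t len;
	uint8_t *buf;
};

#define MSG_RD 0x0001

int main(void)
{
	uint8_t cmd[1] = { 0x00 };
	uint8_t resp[4];

	/*
	 * Designated initializers: members that are left out (here the
	 * .flags of the write message) default to zero, and each value is
	 * bound to a named member rather than to a position in the struct.
	 */
	struct msg xfer[2] = {
		{ .addr = 0x50, .len = sizeof(cmd), .buf = cmd },
		{ .addr = 0x50, .flags = MSG_RD, .len = sizeof(resp), .buf = resp },
	};

	printf("msg0 flags=%u, msg1 flags=%u\n",
	       (unsigned int)xfer[0].flags, (unsigned int)xfer[1].flags);
	return 0;
}
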
diff --git a/drivers/media/i2c/as3645a.c b/drivers/media/i2c/as3645a.c
index 3bfdbf9d9bf1..58d523f2648f 100644
--- a/drivers/media/i2c/as3645a.c
+++ b/drivers/media/i2c/as3645a.c
@@ -713,7 +713,7 @@ static int as3645a_resume(struct device *dev)
* The number of LEDs reported in platform data is used to compute default
* limits. Parameters passed through platform data can override those limits.
*/
-static int __devinit as3645a_init_controls(struct as3645a *flash)
+static int as3645a_init_controls(struct as3645a *flash)
{
const struct as3645a_platform_data *pdata = flash->pdata;
struct v4l2_ctrl *ctrl;
@@ -804,8 +804,8 @@ static int __devinit as3645a_init_controls(struct as3645a *flash)
return flash->ctrls.error;
}
-static int __devinit as3645a_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int as3645a_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
{
struct as3645a *flash;
int ret;
@@ -846,7 +846,7 @@ done:
return ret;
}
-static int __devexit as3645a_remove(struct i2c_client *client)
+static int as3645a_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
@@ -877,7 +877,7 @@ static struct i2c_driver as3645a_i2c_driver = {
.pm = &as3645a_pm_ops,
},
.probe = as3645a_probe,
- .remove = __devexit_p(as3645a_remove),
+ .remove = as3645a_remove,
.id_table = as3645a_id_table,
};
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 2cee69e34184..f4149eb4d7b4 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -2065,7 +2065,7 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
#define DIF_BPF_COEFF3435 (0x38c)
#define DIF_BPF_COEFF36 (0x390)
-void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
+static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
u64 pll_freq;
u32 pll_freq_word;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 04f192a0398a..08ae067b2b6f 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -284,7 +284,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
char *ir_codes = NULL;
const char *name = NULL;
- u64 rc_type = RC_TYPE_UNKNOWN;
+ u64 rc_type = RC_BIT_UNKNOWN;
struct IR_i2c *ir;
struct rc_dev *rc = NULL;
struct i2c_adapter *adap = client->adapter;
@@ -303,7 +303,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x64:
name = "Pixelview";
ir->get_key = get_key_pixelview;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x18:
@@ -311,31 +311,31 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x1a:
name = "Hauppauge";
ir->get_key = get_key_haup;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
- rc_type = RC_TYPE_OTHER;
+ rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
- rc_type = RC_TYPE_RC5;
+ rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
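
The ir-kbd-i2c changes above follow rc-core's move from scalar RC_TYPE_* protocol identifiers to RC_BIT_* bitmask values, so the u64 rc_type now describes a set of supported protocols rather than a single one. A schematic C sketch of the idea, with made-up flag names rather than the kernel's RC_BIT_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative protocol bits; not the kernel's RC_BIT_* values. */
#define PROTO_OTHER  (1ULL << 0)
#define PROTO_RC5    (1ULL << 1)
#define PROTO_NEC    (1ULL << 2)

int main(void)
{
	/* A receiver can advertise several protocols at once. */
	uint64_t protocols = PROTO_RC5 | PROTO_NEC;

	if (protocols & PROTO_RC5)
		printf("RC-5 decoding supported\n");
	if (!(protocols & PROTO_OTHER))
		printf("'other' protocols not supported\n");
	return 0;
}
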
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 8131d651de9e..d4e7567b367c 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
mutex_lock(&info->lock);
format = __find_format(info, fh, fmt->which, info->res_type);
- if (!format)
+ if (format)
fmt->format = *format;
else
ret = -EINVAL;
@@ -926,8 +926,8 @@ static irqreturn_t m5mols_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit m5mols_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int m5mols_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
const struct m5mols_platform_data *pdata = client->dev.platform_data;
struct m5mols_info *info;
@@ -1018,7 +1018,7 @@ out_free:
return ret;
}
-static int __devexit m5mols_remove(struct i2c_client *client)
+static int m5mols_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct m5mols_info *info = to_m5mols(sd);
@@ -1045,7 +1045,7 @@ static struct i2c_driver m5mols_i2c_driver = {
.name = MODULE_NAME,
},
.probe = m5mols_probe,
- .remove = __devexit_p(m5mols_remove),
+ .remove = m5mols_remove,
.id_table = m5mols_id,
};
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 49c1b3abb425..2750de634270 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -343,7 +343,7 @@ static int s5k4ecgx_load_firmware(struct v4l2_subdev *sd)
}
regs_num = le32_to_cpu(get_unaligned_le32(fw->data));
- v4l2_dbg(3, debug, sd, "FW: %s size %d register sets %d\n",
+ v4l2_dbg(3, debug, sd, "FW: %s size %zu register sets %d\n",
S5K4ECGX_FIRMWARE, fw->size, regs_num);
regs_num++; /* Add header */
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index a577614bd84f..d8d5da7c52db 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -58,7 +58,7 @@ static int bounds_check(struct device *dev, uint32_t val,
if (val >= min && val <= max)
return 0;
- dev_warn(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
+ dev_dbg(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
return -EINVAL;
}
@@ -87,14 +87,14 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
}
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
- struct smiapp_pll *pll)
+static int __smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll, uint32_t mul,
+ uint32_t div, uint32_t lane_op_clock_ratio)
{
uint32_t sys_div;
uint32_t best_pix_div = INT_MAX >> 1;
uint32_t vt_op_binning_div;
- uint32_t lane_op_clock_ratio;
- uint32_t mul, div;
uint32_t more_mul_min, more_mul_max;
uint32_t more_mul_factor;
uint32_t min_vt_div, max_vt_div, vt_div;
@@ -102,54 +102,6 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
unsigned int i;
int rval;
- if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
- lane_op_clock_ratio = pll->lanes;
- else
- lane_op_clock_ratio = 1;
- dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
-
- dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
- pll->binning_vertical);
-
- /* CSI transfers 2 bits per clock per lane; thus times 2 */
- pll->pll_op_clk_freq_hz = pll->link_freq * 2
- * (pll->lanes / lane_op_clock_ratio);
-
- /* Figure out limits for pre-pll divider based on extclk */
- dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
- limits->max_pre_pll_clk_div =
- min_t(uint16_t, limits->max_pre_pll_clk_div,
- clk_div_even(pll->ext_clk_freq_hz /
- limits->min_pll_ip_freq_hz));
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(pll->ext_clk_freq_hz,
- limits->max_pll_ip_freq_hz)));
- dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
- mul = div_u64(pll->pll_op_clk_freq_hz, i);
- div = pll->ext_clk_freq_hz / i;
- dev_dbg(dev, "mul %d / div %d\n", mul, div);
-
- limits->min_pre_pll_clk_div =
- max_t(uint16_t, limits->min_pre_pll_clk_div,
- clk_div_even_up(
- DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
- limits->max_pll_op_freq_hz)));
- dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
- limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
-
- if (limits->min_pre_pll_clk_div > limits->max_pre_pll_clk_div) {
- dev_err(dev, "unable to compute pre_pll divisor\n");
- return -EINVAL;
- }
-
- pll->pre_pll_clk_div = limits->min_pre_pll_clk_div;
-
/*
* Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
* too high.
@@ -162,7 +114,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above max pll op frequency. */
more_mul_max =
- min_t(int,
+ min_t(uint32_t,
more_mul_max,
limits->max_pll_op_freq_hz
/ (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
@@ -170,7 +122,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_max);
/* Don't go above the division capability of op sys clock divider. */
more_mul_max = min(more_mul_max,
- limits->max_op_sys_clk_div * pll->pre_pll_clk_div
+ limits->op.max_sys_clk_div * pll->pre_pll_clk_div
/ div);
dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
more_mul_max);
@@ -193,14 +145,14 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
more_mul_min);
if (more_mul_min > more_mul_max) {
- dev_warn(dev,
- "unable to compute more_mul_min and more_mul_max");
+ dev_dbg(dev,
+ "unable to compute more_mul_min and more_mul_max\n");
return -EINVAL;
}
more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
- more_mul_factor = lcm(more_mul_factor, limits->min_op_sys_clk_div);
+ more_mul_factor = lcm(more_mul_factor, limits->op.min_sys_clk_div);
dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
more_mul_factor);
i = roundup(more_mul_min, more_mul_factor);
@@ -209,7 +161,7 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "final more_mul: %d\n", i);
if (i > more_mul_max) {
- dev_warn(dev, "final more_mul is bad, max %d", more_mul_max);
+ dev_dbg(dev, "final more_mul is bad, max %d\n", more_mul_max);
return -EINVAL;
}
@@ -268,19 +220,19 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
min_vt_div = max(min_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz));
+ limits->vt.max_pix_clk_freq_hz));
dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
min_vt_div);
min_vt_div = max_t(uint32_t, min_vt_div,
- limits->min_vt_pix_clk_div
- * limits->min_vt_sys_clk_div);
+ limits->vt.min_pix_clk_div
+ * limits->vt.min_sys_clk_div);
dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
- max_vt_div = limits->max_vt_sys_clk_div * limits->max_vt_pix_clk_div;
+ max_vt_div = limits->vt.max_sys_clk_div * limits->vt.max_pix_clk_div;
dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
max_vt_div = min(max_vt_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
max_vt_div);
@@ -288,28 +240,28 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
* Find limitsits for sys_clk_div. Not all values are possible
* with all values of pix_clk_div.
*/
- min_sys_div = limits->min_vt_sys_clk_div;
+ min_sys_div = limits->vt.min_sys_clk_div;
dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
DIV_ROUND_UP(min_vt_div,
- limits->max_vt_pix_clk_div));
+ limits->vt.max_pix_clk_div));
dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
min_sys_div = max(min_sys_div,
pll->pll_op_clk_freq_hz
- / limits->max_vt_sys_clk_freq_hz);
+ / limits->vt.max_sys_clk_freq_hz);
dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
min_sys_div = clk_div_even_up(min_sys_div);
dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
- max_sys_div = limits->max_vt_sys_clk_div;
+ max_sys_div = limits->vt.max_sys_clk_div;
dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(max_vt_div,
- limits->min_vt_pix_clk_div));
+ limits->vt.min_pix_clk_div));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
max_sys_div = min(max_sys_div,
DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz));
+ limits->vt.min_pix_clk_freq_hz));
dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
/*
@@ -322,15 +274,15 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
for (sys_div = min_sys_div;
sys_div <= max_sys_div;
sys_div += 2 - (sys_div & 1)) {
- int pix_div = DIV_ROUND_UP(vt_div, sys_div);
+ uint16_t pix_div = DIV_ROUND_UP(vt_div, sys_div);
- if (pix_div < limits->min_vt_pix_clk_div
- || pix_div > limits->max_vt_pix_clk_div) {
+ if (pix_div < limits->vt.min_pix_clk_div
+ || pix_div > limits->vt.max_pix_clk_div) {
dev_dbg(dev,
"pix_div %d too small or too big (%d--%d)\n",
pix_div,
- limits->min_vt_pix_clk_div,
- limits->max_vt_pix_clk_div);
+ limits->vt.min_pix_clk_div,
+ limits->vt.max_pix_clk_div);
continue;
}
@@ -354,16 +306,10 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
pll->pixel_rate_csi =
pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
- print_pll(dev, pll);
-
- rval = bounds_check(dev, pll->pre_pll_clk_div,
- limits->min_pre_pll_clk_div,
- limits->max_pre_pll_clk_div, "pre_pll_clk_div");
- if (!rval)
- rval = bounds_check(
- dev, pll->pll_ip_clk_freq_hz,
- limits->min_pll_ip_freq_hz, limits->max_pll_ip_freq_hz,
- "pll_ip_clk_freq_hz");
+ rval = bounds_check(dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz,
+ limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->pll_multiplier,
@@ -377,42 +323,121 @@ int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_div,
- limits->min_op_sys_clk_div, limits->max_op_sys_clk_div,
+ limits->op.min_sys_clk_div, limits->op.max_sys_clk_div,
"op_sys_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_div,
- limits->min_op_pix_clk_div, limits->max_op_pix_clk_div,
+ limits->op.min_pix_clk_div, limits->op.max_pix_clk_div,
"op_pix_clk_div");
if (!rval)
rval = bounds_check(
dev, pll->op_sys_clk_freq_hz,
- limits->min_op_sys_clk_freq_hz,
- limits->max_op_sys_clk_freq_hz,
+ limits->op.min_sys_clk_freq_hz,
+ limits->op.max_sys_clk_freq_hz,
"op_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->op_pix_clk_freq_hz,
- limits->min_op_pix_clk_freq_hz,
- limits->max_op_pix_clk_freq_hz,
+ limits->op.min_pix_clk_freq_hz,
+ limits->op.max_pix_clk_freq_hz,
"op_pix_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_sys_clk_freq_hz,
- limits->min_vt_sys_clk_freq_hz,
- limits->max_vt_sys_clk_freq_hz,
+ limits->vt.min_sys_clk_freq_hz,
+ limits->vt.max_sys_clk_freq_hz,
"vt_sys_clk_freq_hz");
if (!rval)
rval = bounds_check(
dev, pll->vt_pix_clk_freq_hz,
- limits->min_vt_pix_clk_freq_hz,
- limits->max_vt_pix_clk_freq_hz,
+ limits->vt.min_pix_clk_freq_hz,
+ limits->vt.max_pix_clk_freq_hz,
"vt_pix_clk_freq_hz");
return rval;
}
+
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll)
+{
+ uint16_t min_pre_pll_clk_div;
+ uint16_t max_pre_pll_clk_div;
+ uint32_t lane_op_clock_ratio;
+ uint32_t mul, div;
+ unsigned int i;
+ int rval = -EINVAL;
+
+ if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
+ lane_op_clock_ratio = pll->csi2.lanes;
+ else
+ lane_op_clock_ratio = 1;
+ dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+
+ dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ pll->binning_vertical);
+
+ switch (pll->bus_type) {
+ case SMIAPP_PLL_BUS_TYPE_CSI2:
+ /* CSI transfers 2 bits per clock per lane; thus times 2 */
+ pll->pll_op_clk_freq_hz = pll->link_freq * 2
+ * (pll->csi2.lanes / lane_op_clock_ratio);
+ break;
+ case SMIAPP_PLL_BUS_TYPE_PARALLEL:
+ pll->pll_op_clk_freq_hz = pll->link_freq * pll->bits_per_pixel
+ / DIV_ROUND_UP(pll->bits_per_pixel,
+ pll->parallel.bus_width);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Figure out limits for pre-pll divider based on extclk */
+ dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+ max_pre_pll_clk_div =
+ min_t(uint16_t, limits->max_pre_pll_clk_div,
+ clk_div_even(pll->ext_clk_freq_hz /
+ limits->min_pll_ip_freq_hz));
+ min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ limits->max_pll_ip_freq_hz)));
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
+ mul = div_u64(pll->pll_op_clk_freq_hz, i);
+ div = pll->ext_clk_freq_hz / i;
+ dev_dbg(dev, "mul %d / div %d\n", mul, div);
+
+ min_pre_pll_clk_div =
+ max_t(uint16_t, min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
+ limits->max_pll_op_freq_hz)));
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ min_pre_pll_clk_div, max_pre_pll_clk_div);
+
+ for (pll->pre_pll_clk_div = min_pre_pll_clk_div;
+ pll->pre_pll_clk_div <= max_pre_pll_clk_div;
+ pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1)) {
+ rval = __smiapp_pll_calculate(dev, limits, pll, mul, div,
+ lane_op_clock_ratio);
+ if (rval)
+ continue;
+
+ print_pll(dev, pll);
+ return 0;
+ }
+
+ dev_info(dev, "unable to compute pre_pll divisor\n");
+ return rval;
+}
EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ PLL calculator");
MODULE_LICENSE("GPL");
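
The reworked smiapp_pll_calculate() above now tries every allowed pre-PLL divider instead of committing to the smallest one: the loop steps with pll->pre_pll_clk_div += 2 - (pll->pre_pll_clk_div & 1), so divider 1 is followed by 2 and then only even values, and the first candidate accepted by __smiapp_pll_calculate() wins. A standalone sketch of that iteration pattern, with a hypothetical try_divisor() standing in for the real per-divider calculation:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical acceptance test standing in for __smiapp_pll_calculate(). */
static int try_divisor(uint16_t div)
{
	return div >= 4;   /* pretend only dividers >= 4 satisfy the limits */
}

int main(void)
{
	uint16_t div;

	/*
	 * Pre-PLL dividers must be 1 or even, so the step expression
	 * 2 - (div & 1) advances 1 -> 2 and then by two: 1, 2, 4, 6, ...
	 * The first divider the helper accepts is used.
	 */
	for (div = 1; div <= 8; div += 2 - (div & 1)) {
		printf("trying pre_pll_clk_div %u\n", (unsigned int)div);
		if (try_divisor(div)) {
			printf("accepted %u\n", (unsigned int)div);
			return 0;
		}
	}
	return 1;
}
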
diff --git a/drivers/media/i2c/smiapp-pll.h b/drivers/media/i2c/smiapp-pll.h
index cb2d2db5d02d..a4a649834a18 100644
--- a/drivers/media/i2c/smiapp-pll.h
+++ b/drivers/media/i2c/smiapp-pll.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -27,16 +27,34 @@
#include <linux/device.h>
+/* CSI-2 or CCP-2 */
+#define SMIAPP_PLL_BUS_TYPE_CSI2 0x00
+#define SMIAPP_PLL_BUS_TYPE_PARALLEL 0x01
+
+/* op pix clock is for all lanes in total normally */
+#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+
struct smiapp_pll {
- uint8_t lanes;
+ /* input values */
+ uint8_t bus_type;
+ union {
+ struct {
+ uint8_t lanes;
+ } csi2;
+ struct {
+ uint8_t bus_width;
+ } parallel;
+ };
+ uint8_t flags;
uint8_t binning_horizontal;
uint8_t binning_vertical;
uint8_t scale_m;
uint8_t scale_n;
uint8_t bits_per_pixel;
- uint16_t flags;
uint32_t link_freq;
+ /* output values */
uint16_t pre_pll_clk_div;
uint16_t pll_multiplier;
uint16_t op_sys_clk_div;
@@ -55,6 +73,17 @@ struct smiapp_pll {
uint32_t pixel_rate_csi;
};
+struct smiapp_pll_branch_limits {
+ uint16_t min_sys_clk_div;
+ uint16_t max_sys_clk_div;
+ uint32_t min_sys_clk_freq_hz;
+ uint32_t max_sys_clk_freq_hz;
+ uint16_t min_pix_clk_div;
+ uint16_t max_pix_clk_div;
+ uint32_t min_pix_clk_freq_hz;
+ uint32_t max_pix_clk_freq_hz;
+};
+
struct smiapp_pll_limits {
/* Strict PLL limits */
uint32_t min_ext_clk_freq_hz;
@@ -68,36 +97,18 @@ struct smiapp_pll_limits {
uint32_t min_pll_op_freq_hz;
uint32_t max_pll_op_freq_hz;
- uint16_t min_vt_sys_clk_div;
- uint16_t max_vt_sys_clk_div;
- uint32_t min_vt_sys_clk_freq_hz;
- uint32_t max_vt_sys_clk_freq_hz;
- uint16_t min_vt_pix_clk_div;
- uint16_t max_vt_pix_clk_div;
- uint32_t min_vt_pix_clk_freq_hz;
- uint32_t max_vt_pix_clk_freq_hz;
-
- uint16_t min_op_sys_clk_div;
- uint16_t max_op_sys_clk_div;
- uint32_t min_op_sys_clk_freq_hz;
- uint32_t max_op_sys_clk_freq_hz;
- uint16_t min_op_pix_clk_div;
- uint16_t max_op_pix_clk_div;
- uint32_t min_op_pix_clk_freq_hz;
- uint32_t max_op_pix_clk_freq_hz;
+ struct smiapp_pll_branch_limits vt;
+ struct smiapp_pll_branch_limits op;
/* Other relevant limits */
uint32_t min_line_length_pck_bin;
uint32_t min_line_length_pck;
};
-/* op pix clock is for all lanes in total normally */
-#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
-#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
-
struct device;
-int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+int smiapp_pll_calculate(struct device *dev,
+ const struct smiapp_pll_limits *limits,
struct smiapp_pll *pll);
#endif /* SMIAPP_PLL_H */
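
Grouping the per-branch limits into struct smiapp_pll_branch_limits gives the video-timing (vt) and output (op) branches the same shape, which is what lets the smiapp-core.c hunk further below collapse eight field copies into the single assignment lim.op = lim.vt for profile-0 sensors. A reduced standalone sketch of that idea, with abbreviated fields used for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for struct smiapp_pll_branch_limits (illustration). */
struct branch_limits {
	uint16_t min_sys_clk_div, max_sys_clk_div;
	uint16_t min_pix_clk_div, max_pix_clk_div;
};

struct pll_limits {
	struct branch_limits vt;   /* video timing branch */
	struct branch_limits op;   /* output branch */
};

int main(void)
{
	struct pll_limits lim = {
		.vt = { .min_sys_clk_div = 1, .max_sys_clk_div = 16,
			.min_pix_clk_div = 4, .max_pix_clk_div = 16 },
	};

	/* No separate OP clock branch: reuse the VT limits wholesale. */
	lim.op = lim.vt;

	printf("op.max_sys_clk_div = %u\n", (unsigned int)lim.op.max_sys_clk_div);
	return 0;
}
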
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e08e588ad24b..83c7ed7ffcc2 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* Based on smiapp driver by Vimarsh Zutshi
* Based on jt8ev1.c by Vimarsh Zutshi
@@ -252,23 +252,23 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
.min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
.max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
- .min_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
- .max_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
- .min_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
- .max_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
- .min_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
- .max_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
- .min_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
- .max_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
-
- .min_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
- .max_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
- .min_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
- .max_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
- .min_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
- .max_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
- .min_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
- .max_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
+ .op.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
+ .op.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
+ .op.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
+ .op.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
+ .op.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
+ .op.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
+ .op.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
+ .op.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
+
+ .vt.min_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
+ .vt.max_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
+ .vt.min_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
+ .vt.max_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
+ .vt.min_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
+ .vt.max_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
+ .vt.min_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
+ .vt.max_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
.min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
.min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
@@ -276,11 +276,6 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
struct smiapp_pll *pll = &sensor->pll;
int rval;
- memset(&sensor->pll, 0, sizeof(sensor->pll));
-
- pll->lanes = sensor->platform_data->lanes;
- pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
-
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
/*
* Fill in operational clock divisors limits from the
@@ -288,28 +283,14 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
* requirements regarding them are essentially the
* same as on VT ones.
*/
- lim.min_op_sys_clk_div = lim.min_vt_sys_clk_div;
- lim.max_op_sys_clk_div = lim.max_vt_sys_clk_div;
- lim.min_op_pix_clk_div = lim.min_vt_pix_clk_div;
- lim.max_op_pix_clk_div = lim.max_vt_pix_clk_div;
- lim.min_op_sys_clk_freq_hz = lim.min_vt_sys_clk_freq_hz;
- lim.max_op_sys_clk_freq_hz = lim.max_vt_sys_clk_freq_hz;
- lim.min_op_pix_clk_freq_hz = lim.min_vt_pix_clk_freq_hz;
- lim.max_op_pix_clk_freq_hz = lim.max_vt_pix_clk_freq_hz;
- /* Profile 0 sensors have no separate OP clock branch. */
- pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ lim.op = lim.vt;
}
- if (smiapp_needs_quirk(sensor,
- SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
- pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
-
pll->binning_horizontal = sensor->binning_horizontal;
pll->binning_vertical = sensor->binning_vertical;
pll->link_freq =
sensor->link_freq->qmenu_int[sensor->link_freq->val];
pll->scale_m = sensor->scale_m;
- pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
pll->bits_per_pixel = sensor->csi_format->compressed;
rval = smiapp_pll_calculate(&client->dev, &lim, pll);
@@ -1010,7 +991,7 @@ static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
* do not change, or if you do at least know what you're
* doing. :-)
*
- * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> 2010-10-25
+ * Sakari Ailus <sakari.ailus@iki.fi> 2010-10-25
*
* flash_strobe_length [us] / 10^6 = (tFlash_strobe_width_ctrl
* / EXTCLK freq [Hz]) * flash_strobe_adjustment
@@ -2369,6 +2350,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_pll *pll = &sensor->pll;
struct smiapp_subdev *last = NULL;
u32 tmp;
unsigned int i;
@@ -2635,6 +2617,18 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
if (rval < 0)
goto out_nvm_release;
+ /* prepare PLL configuration input values */
+ pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+ pll->csi2.lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+ /* Profile 0 sensors have no separate OP clock branch. */
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ if (smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
rval = smiapp_update_mode(sensor);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -2893,6 +2887,6 @@ static struct i2c_driver smiapp_i2c_driver = {
module_i2c_driver(smiapp_i2c_driver);
-MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.c b/drivers/media/i2c/smiapp/smiapp-limits.c
index fb2f81ad8c3b..847cb235e198 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.c
+++ b/drivers/media/i2c/smiapp/smiapp-limits.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-limits.h b/drivers/media/i2c/smiapp/smiapp-limits.h
index 9ae765e23ea5..343e9c3827fc 100644
--- a/drivers/media/i2c/smiapp/smiapp-limits.h
+++ b/drivers/media/i2c/smiapp/smiapp-limits.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index 725cf62836c6..bb8c506e0e3d 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.h b/drivers/media/i2c/smiapp/smiapp-quirk.h
index 86fd3e8bfb0f..504a6d80ced5 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.h
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg-defs.h b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
index defa7c5adebf..3aa0ca948d87 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg-defs.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg-defs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 54568ca2fe6d..b0dcbb8fa5e2 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 70e0d8db0130..4fac32cfcb3f 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.h b/drivers/media/i2c/smiapp/smiapp-regs.h
index 7f9013b47971..eefc6c84d5fe 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.h
+++ b/drivers/media/i2c/smiapp/smiapp-regs.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 4182a695ab53..7cc5aae662fd 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -4,7 +4,7 @@
* Generic driver for SMIA/SMIA++ compliant camera modules
*
* Copyright (C) 2010--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 333ef178d6fb..d40a8858be01 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -15,6 +15,7 @@
#include <linux/log2.h>
#include <linux/module.h>
+#include <media/mt9v022.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
@@ -50,6 +51,7 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_PIXEL_OPERATION_MODE 0x0f
#define MT9V022_LED_OUT_CONTROL 0x1b
#define MT9V022_ADC_MODE_CONTROL 0x1c
+#define MT9V022_REG32 0x20
#define MT9V022_ANALOG_GAIN 0x35
#define MT9V022_BLACK_LEVEL_CALIB_CTRL 0x47
#define MT9V022_PIXCLK_FV_LV 0x74
@@ -71,7 +73,15 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_COLUMN_SKIP 1
#define MT9V022_ROW_SKIP 4
-#define is_mt9v024(id) (id == 0x1324)
+#define MT9V022_HORIZONTAL_BLANKING_MIN 43
+#define MT9V022_HORIZONTAL_BLANKING_MAX 1023
+#define MT9V022_HORIZONTAL_BLANKING_DEF 94
+#define MT9V022_VERTICAL_BLANKING_MIN 2
+#define MT9V022_VERTICAL_BLANKING_MAX 3000
+#define MT9V022_VERTICAL_BLANKING_DEF 45
+
+#define is_mt9v022_rev3(id) (id == 0x1313)
+#define is_mt9v024(id) (id == 0x1324)
/* MT9V022 has only one fixed colorspace per pixelcode */
struct mt9v022_datafmt {
@@ -136,6 +146,8 @@ struct mt9v022 {
struct v4l2_ctrl *autogain;
struct v4l2_ctrl *gain;
};
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
struct v4l2_rect rect; /* Sensor window */
const struct mt9v022_datafmt *fmt;
const struct mt9v022_datafmt *fmts;
@@ -143,6 +155,7 @@ struct mt9v022 {
int num_fmts;
int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
u16 chip_control;
+ u16 chip_version;
unsigned short y_skip_top; /* Lines to skip at the top */
};
@@ -225,12 +238,32 @@ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- if (enable)
+ if (enable) {
/* Switch to master "normal" mode */
mt9v022->chip_control &= ~0x10;
- else
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Unset snapshot mode specific settings: clear bit 9
+ * and bit 2 in reg. 0x20 when in normal mode.
+ */
+ if (reg_clear(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ } else {
/* Switch to snapshot mode */
mt9v022->chip_control |= 0x10;
+ if (is_mt9v022_rev3(mt9v022->chip_version) ||
+ is_mt9v024(mt9v022->chip_version)) {
+ /*
+ * Required settings for snapshot mode: set bit 9
+ * (RST enable) and bit 2 (CR enable) in reg. 0x20
+ * See TechNote TN0960 or TN-09-225.
+ */
+ if (reg_set(client, MT9V022_REG32, 0x204))
+ return -EIO;
+ }
+ }
if (reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control) < 0)
return -EIO;
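
For reference, the 0x204 value set or cleared in MT9V022_REG32 in the hunk above is simply bit 9 (RST enable) combined with bit 2 (CR enable): (1 << 9) | (1 << 2) = 0x200 | 0x004 = 0x204. A trivial standalone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* bit 9 (RST enable) | bit 2 (CR enable) */
	unsigned int snapshot_bits = (1u << 9) | (1u << 2);

	assert(snapshot_bits == 0x204);
	printf("0x%03x\n", snapshot_bits);   /* prints 0x204 */
	return 0;
}
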
@@ -282,11 +315,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
* Default 94, Phytec driver says:
* "width + horizontal blank >= 660"
*/
- ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
- rect.width > 660 - 43 ? 43 :
- 660 - rect.width);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->hblank,
+ rect.width > 660 - 43 ? 43 : 660 - rect.width);
if (!ret)
- ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45);
+ ret = v4l2_ctrl_s_ctrl(mt9v022->vblank, 45);
if (!ret)
ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width);
if (!ret)
@@ -509,6 +541,18 @@ static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
range = exp->maximum - exp->minimum;
exp->val = ((data - 1) * range + 239) / 479 + exp->minimum;
return 0;
+ case V4L2_CID_HBLANK:
+ data = reg_read(client, MT9V022_HORIZONTAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
+ case V4L2_CID_VBLANK:
+ data = reg_read(client, MT9V022_VERTICAL_BLANKING);
+ if (data < 0)
+ return -EIO;
+ ctrl->val = data;
+ return 0;
}
return -EINVAL;
}
@@ -590,6 +634,16 @@ static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl)
return -EIO;
}
return 0;
+ case V4L2_CID_HBLANK:
+ if (reg_write(client, MT9V022_HORIZONTAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
+ case V4L2_CID_VBLANK:
+ if (reg_write(client, MT9V022_VERTICAL_BLANKING,
+ ctrl->val) < 0)
+ return -EIO;
+ return 0;
}
return -EINVAL;
}
@@ -621,6 +675,8 @@ static int mt9v022_video_probe(struct i2c_client *client)
goto ei2c;
}
+ mt9v022->chip_version = data;
+
mt9v022->reg = is_mt9v024(data) ? &mt9v024_register :
&mt9v022_register;
@@ -819,6 +875,7 @@ static int mt9v022_probe(struct i2c_client *client,
struct mt9v022 *mt9v022;
struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct mt9v022_platform_data *pdata = icl->priv;
int ret;
if (!icl) {
@@ -857,10 +914,21 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
V4L2_CID_EXPOSURE, 1, 255, 1, 255);
+ mt9v022->hblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_HBLANK, MT9V022_HORIZONTAL_BLANKING_MIN,
+ MT9V022_HORIZONTAL_BLANKING_MAX, 1,
+ MT9V022_HORIZONTAL_BLANKING_DEF);
+
+ mt9v022->vblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops,
+ V4L2_CID_VBLANK, MT9V022_VERTICAL_BLANKING_MIN,
+ MT9V022_VERTICAL_BLANKING_MAX, 1,
+ MT9V022_VERTICAL_BLANKING_DEF);
+
mt9v022->subdev.ctrl_handler = &mt9v022->hdl;
if (mt9v022->hdl.error) {
int err = mt9v022->hdl.error;
+ dev_err(&client->dev, "control initialisation err %d\n", err);
kfree(mt9v022);
return err;
}
@@ -871,10 +939,10 @@ static int mt9v022_probe(struct i2c_client *client,
mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT;
/*
- * MT9V022 _really_ corrupts the first read out line.
- * TODO: verify on i.MX31
+ * On some platforms the first read out line is corrupted.
+ * Workaround it by skipping if indicated by platform data.
*/
- mt9v022->y_skip_top = 1;
+ mt9v022->y_skip_top = pdata ? pdata->y_skip_top : 0;
mt9v022->rect.left = MT9V022_COLUMN_SKIP;
mt9v022->rect.top = MT9V022_ROW_SKIP;
mt9v022->rect.width = MT9V022_MAX_WIDTH;
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index d2d298b6354e..66698a83bda2 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -586,9 +586,20 @@ static const struct regval_list ov2640_format_change_preamble_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_yuv422_regs[] = {
+static const struct regval_list ov2640_yuyv_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_YUV422 },
+ { 0xd7, 0x03 },
+ { 0x33, 0xa0 },
+ { 0xe5, 0x1f },
+ { 0xe1, 0x67 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_uyvy_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_YUV422 },
- { 0xD7, 0x01 },
+ { 0xd7, 0x01 },
{ 0x33, 0xa0 },
{ 0xe1, 0x67 },
{ RESET, 0x00 },
@@ -596,7 +607,15 @@ static const struct regval_list ov2640_yuv422_regs[] = {
ENDMARKER,
};
-static const struct regval_list ov2640_rgb565_regs[] = {
+static const struct regval_list ov2640_rgb565_be_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_RGB565 },
+ { 0xd7, 0x03 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_rgb565_le_regs[] = {
{ IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_RGB565 },
{ 0xd7, 0x03 },
{ RESET, 0x00 },
@@ -605,7 +624,9 @@ static const struct regval_list ov2640_rgb565_regs[] = {
};
static enum v4l2_mbus_pixelcode ov2640_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_RGB565_2X8_BE,
V4L2_MBUS_FMT_RGB565_2X8_LE,
};
@@ -787,14 +808,22 @@ static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
/* select format */
priv->cfmt_code = 0;
switch (code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_be_regs;
+ break;
case V4L2_MBUS_FMT_RGB565_2X8_LE:
- dev_dbg(&client->dev, "%s: Selected cfmt RGB565", __func__);
- selected_cfmt_regs = ov2640_rgb565_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_le_regs;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
+ selected_cfmt_regs = ov2640_yuyv_regs;
break;
default:
case V4L2_MBUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "%s: Selected cfmt YUV422", __func__);
- selected_cfmt_regs = ov2640_yuv422_regs;
+ dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
+ selected_cfmt_regs = ov2640_uyvy_regs;
}
/* reset hardware */
@@ -859,10 +888,12 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
mf->code = priv->cfmt_code;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -879,11 +910,13 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
@@ -896,21 +929,21 @@ static int ov2640_s_fmt(struct v4l2_subdev *sd,
static int ov2640_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- const struct ov2640_win_size *win;
-
/*
- * select suitable win
+ * select suitable win, but don't store it
*/
- win = ov2640_select_win(&mf->width, &mf->height);
+ ov2640_select_win(&mf->width, &mf->height);
mf->field = V4L2_FIELD_NONE;
switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
case V4L2_MBUS_FMT_RGB565_2X8_LE:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
default:
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
case V4L2_MBUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
}
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 42ae9dc9c574..9ac1b8c3a837 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -788,7 +788,7 @@ static const struct v4l2_subdev_ops vs6624_ops = {
.video = &vs6624_video_ops,
};
-static int __devinit vs6624_probe(struct i2c_client *client,
+static int vs6624_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct vs6624 *sensor;
@@ -881,7 +881,7 @@ static int __devinit vs6624_probe(struct i2c_client *client,
return ret;
}
-static int __devexit vs6624_remove(struct i2c_client *client)
+static int vs6624_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct vs6624 *sensor = to_vs6624(sd);
@@ -906,22 +906,11 @@ static struct i2c_driver vs6624_driver = {
.name = "vs6624",
},
.probe = vs6624_probe,
- .remove = __devexit_p(vs6624_remove),
+ .remove = vs6624_remove,
.id_table = vs6624_id,
};
-static __init int vs6624_init(void)
-{
- return i2c_add_driver(&vs6624_driver);
-}
-
-static __exit void vs6624_exit(void)
-{
- i2c_del_driver(&vs6624_driver);
-}
-
-module_init(vs6624_init);
-module_exit(vs6624_exit);
+module_i2c_driver(vs6624_driver);
MODULE_DESCRIPTION("VS6624 sensor driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/mmc/siano/Kconfig b/drivers/media/mmc/siano/Kconfig
index fa62475be3bf..aa05ad3c1ccb 100644
--- a/drivers/media/mmc/siano/Kconfig
+++ b/drivers/media/mmc/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_SDIO_DRV
tristate "Siano SMS1xxx based MDTV via SDIO interface"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
depends on MMC
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for SDIO interface
diff --git a/drivers/media/mmc/siano/smssdio.c b/drivers/media/mmc/siano/smssdio.c
index d6f3f100699a..15d34935e00b 100644
--- a/drivers/media/mmc/siano/smssdio.c
+++ b/drivers/media/mmc/siano/smssdio.c
@@ -50,7 +50,7 @@
#define SMSSDIO_INT 0x04
#define SMSSDIO_BLOCK_SIZE 128
-static const struct sdio_device_id smssdio_ids[] __devinitconst = {
+static const struct sdio_device_id smssdio_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
.driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
@@ -224,7 +224,7 @@ static void smssdio_interrupt(struct sdio_func *func)
smscore_onresponse(smsdev->coredev, cb);
}
-static int __devinit smssdio_probe(struct sdio_func *func,
+static int smssdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index b34fa95185e4..66eb0baab0e9 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL(bt878_device_control);
.driver_data = (unsigned long) name \
}
-static struct pci_device_id bt878_pci_tbl[] __devinitdata = {
+static struct pci_device_id bt878_pci_tbl[] = {
BROOKTREE_878_DEVICE(0x0071, 0x0101, "Nebula Electronics DigiTV"),
BROOKTREE_878_DEVICE(0x1461, 0x0761, "AverMedia AverTV DVB-T 761"),
BROOKTREE_878_DEVICE(0x11bd, 0x001c, "Pinnacle PCTV Sat"),
@@ -410,7 +410,7 @@ static struct pci_device_id bt878_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, bt878_pci_tbl);
-static const char * __devinit card_name(const struct pci_device_id *id)
+static const char * card_name(const struct pci_device_id *id)
{
return id->driver_data ? (const char *)id->driver_data : "Unknown";
}
@@ -419,8 +419,7 @@ static const char * __devinit card_name(const struct pci_device_id *id)
/* PCI device handling */
/***********************/
-static int __devinit bt878_probe(struct pci_dev *dev,
- const struct pci_device_id *pci_id)
+static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
{
int result = 0;
unsigned char lat;
@@ -529,7 +528,7 @@ static int __devinit bt878_probe(struct pci_dev *dev,
return result;
}
-static void __devexit bt878_remove(struct pci_dev *pci_dev)
+static void bt878_remove(struct pci_dev *pci_dev)
{
u8 command;
struct bt878 *bt = pci_get_drvdata(pci_dev);
@@ -573,7 +572,7 @@ static struct pci_driver bt878_pci_driver = {
.name = "bt878",
.id_table = bt878_pci_tbl,
.probe = bt878_probe,
- .remove = __devexit_p(bt878_remove),
+ .remove = bt878_remove,
};
/*******************************/
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 38952faaffda..c4c59175e52c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -87,7 +87,7 @@ static int tea5757_read(struct bttv *btv);
static int tea5757_write(struct bttv *btv, int value);
static void identify_by_eeprom(struct bttv *btv,
unsigned char eeprom_data[256]);
-static int __devinit pvr_boot(struct bttv *btv);
+static int pvr_boot(struct bttv *btv);
/* config variables */
static unsigned int triton1;
@@ -151,7 +151,7 @@ static struct CARD {
unsigned id;
int cardnr;
char *name;
-} cards[] __devinitdata = {
+} cards[] = {
{ 0x13eb0070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV" },
{ 0x39000070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV-D" },
{ 0x45000070, BTTV_BOARD_HAUPPAUGEPVR, "Hauppauge WinTV/PVR" },
@@ -2837,7 +2837,7 @@ static unsigned char eeprom_data[256];
/*
* identify card
*/
-void __devinit bttv_idcard(struct bttv *btv)
+void bttv_idcard(struct bttv *btv)
{
unsigned int gpiobits;
int i,type;
@@ -3235,7 +3235,7 @@ static void bttv_reset_audio(struct bttv *btv)
}
/* initialization part one -- before registering i2c bus */
-void __devinit bttv_init_card1(struct bttv *btv)
+void bttv_init_card1(struct bttv *btv)
{
switch (btv->c.type) {
case BTTV_BOARD_HAUPPAUGE:
@@ -3267,7 +3267,7 @@ void __devinit bttv_init_card1(struct bttv *btv)
}
/* initialization part two -- after registering i2c bus */
-void __devinit bttv_init_card2(struct bttv *btv)
+void bttv_init_card2(struct bttv *btv)
{
btv->tuner_type = UNSET;
@@ -3571,7 +3571,7 @@ no_audio:
/* initialize the tuner */
-void __devinit bttv_init_tuner(struct bttv *btv)
+void bttv_init_tuner(struct bttv *btv)
{
int addr = ADDR_UNSET;
@@ -3635,7 +3635,7 @@ static void modtec_eeprom(struct bttv *btv)
}
}
-static void __devinit hauppauge_eeprom(struct bttv *btv)
+static void hauppauge_eeprom(struct bttv *btv)
{
struct tveeprom tv;
@@ -3709,8 +3709,7 @@ static int terratec_active_radio_upgrade(struct bttv *btv)
#define BTTV_ALT_DCLK 0x100000
#define BTTV_ALT_NCONFIG 0x800000
-static int __devinit pvr_altera_load(struct bttv *btv, const u8 *micro,
- u32 microlen)
+static int pvr_altera_load(struct bttv *btv, const u8 *micro, u32 microlen)
{
u32 n;
u8 bits;
@@ -3747,7 +3746,7 @@ static int __devinit pvr_altera_load(struct bttv *btv, const u8 *micro,
return 0;
}
-static int __devinit pvr_boot(struct bttv *btv)
+static int pvr_boot(struct bttv *btv)
{
const struct firmware *fw_entry;
int rc;
@@ -3767,7 +3766,7 @@ static int __devinit pvr_boot(struct bttv *btv)
/* ----------------------------------------------------------------------- */
/* some osprey specific stuff */
-static void __devinit osprey_eeprom(struct bttv *btv, const u8 ee[256])
+static void osprey_eeprom(struct bttv *btv, const u8 ee[256])
{
int i;
u32 serial = 0;
@@ -3898,7 +3897,7 @@ static int tuner_1_table[] = {
TUNER_TEMIC_4012FY5, TUNER_TEMIC_4012FY5, /* TUNER_TEMIC_SECAM */
TUNER_TEMIC_4012FY5, TUNER_TEMIC_PAL};
-static void __devinit avermedia_eeprom(struct bttv *btv)
+static void avermedia_eeprom(struct bttv *btv)
{
int tuner_make, tuner_tv_fm, tuner_format, tuner_type = 0;
@@ -3960,7 +3959,7 @@ u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits)
* Hauppauge: pin 5
* Voodoo: pin 20
*/
-static void __devinit boot_msp34xx(struct bttv *btv, int pin)
+static void boot_msp34xx(struct bttv *btv, int pin)
{
int mask = (1 << pin);
@@ -3983,11 +3982,10 @@ static void __devinit boot_msp34xx(struct bttv *btv, int pin)
* used by Alessandro Rubini in his pxc200
* driver, but using BTTV functions */
-static void __devinit init_PXC200(struct bttv *btv)
+static void init_PXC200(struct bttv *btv)
{
- static int vals[] __devinitdata = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
- 0x00 };
+ static int vals[] = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d, 0x01, 0x02,
+ 0x03, 0x04, 0x05, 0x06, 0x00 };
unsigned int i;
int tmp;
u32 val;
@@ -4851,7 +4849,7 @@ void __init bttv_check_chipset(void)
}
}
-int __devinit bttv_handle_chipset(struct bttv *btv)
+int bttv_handle_chipset(struct bttv *btv)
{
unsigned char command;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 56c6c77793d7..45e5d0661b60 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -200,7 +200,7 @@ static void flush_request_modules(struct bttv *dev)
}
#else
#define request_modules(dev)
-#define flush_request_modules(dev)
+#define flush_request_modules(dev) do {} while(0)
#endif /* CONFIG_MODULES */
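
The empty flush_request_modules() stub gains a do {} while (0) body so that the macro expands to a proper statement at its call sites instead of leaving a bare semicolon behind, which is the usual reason for this idiom: it keeps empty-body warnings quiet while still requiring the trailing semicolon. A small standalone illustration with a hypothetical macro name:

#include <stdio.h>

/*
 * An empty function-like macro expanding to nothing leaves a bare ';'
 * behind its call:
 *
 *     #define cleanup_noop()
 *     if (failed)
 *             cleanup_noop();   expands to: if (failed) ;
 *
 * which trips -Wempty-body style warnings. Wrapping the stub in
 * do {} while (0) keeps it a single well-formed statement.
 */
#define cleanup_noop()  do {} while (0)

int main(void)
{
	int failed = 0;

	if (failed)
		cleanup_noop();
	else
		printf("nothing to clean up\n");
	return 0;
}
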
@@ -301,11 +301,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* totalwidth */ 1135,
/* sqwidth */ 944,
/* vdelay */ 0x20,
- /* sheight */ 576,
- /* videostart0 */ 23)
/* bt878 (and bt848?) can capture another
line below active video. */
- .cropcap.bounds.height = (576 + 2) + 0x20 - 2,
+ /* sheight */ (576 + 2) + 0x20 - 2,
+ /* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
.name = "NTSC",
@@ -4200,7 +4199,7 @@ static void bttv_unregister_video(struct bttv *btv)
}
/* register video4linux devices */
-static int __devinit bttv_register_video(struct bttv *btv)
+static int bttv_register_video(struct bttv *btv)
{
if (no_overlay > 0)
pr_notice("Overlay support disabled\n");
@@ -4266,8 +4265,7 @@ static void pci_set_command(struct pci_dev *dev)
#endif
}
-static int __devinit bttv_probe(struct pci_dev *dev,
- const struct pci_device_id *pci_id)
+static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
{
int result;
unsigned char lat;
@@ -4455,7 +4453,7 @@ fail0:
return result;
}
-static void __devexit bttv_remove(struct pci_dev *pci_dev)
+static void bttv_remove(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct bttv *btv = to_bttv(v4l2_dev);
@@ -4599,7 +4597,7 @@ static struct pci_driver bttv_pci_driver = {
.name = "bttv",
.id_table = bttv_pci_tbl,
.probe = bttv_probe,
- .remove = __devexit_p(bttv_remove),
+ .remove = bttv_remove,
#ifdef CONFIG_PM
.suspend = bttv_suspend,
.resume = bttv_resume,
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index 580c8e682392..5039b8826e0a 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -99,7 +99,7 @@ static int bttv_bit_getsda(void *data)
return state;
}
-static struct i2c_algo_bit_data __devinitdata bttv_i2c_algo_bit_template = {
+static struct i2c_algo_bit_data bttv_i2c_algo_bit_template = {
.setsda = bttv_bit_setsda,
.setscl = bttv_bit_setscl,
.getsda = bttv_bit_getsda,
@@ -312,7 +312,7 @@ int bttv_I2CWrite(struct bttv *btv, unsigned char addr, unsigned char b1,
}
/* read EEPROM content */
-void __devinit bttv_readee(struct bttv *btv, unsigned char *eedata, int addr)
+void bttv_readee(struct bttv *btv, unsigned char *eedata, int addr)
{
memset(eedata, 0, 256);
if (0 != btv->i2c_rc)
@@ -347,7 +347,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
}
/* init + register i2c adapter */
-int __devinit init_bttv_i2c(struct bttv *btv)
+int init_bttv_i2c(struct bttv *btv)
{
strlcpy(btv->i2c_client.name, "bttv internal", I2C_NAME_SIZE);
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index ef4c7cd41982..04207a799055 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -368,7 +368,7 @@ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
}
/* Instantiate the I2C IR receiver device, if present */
-void __devinit init_bttv_i2c_ir(struct bttv *btv)
+void init_bttv_i2c_ir(struct bttv *btv)
{
const unsigned short addr_list[] = {
0x1a, 0x18, 0x64, 0x30, 0x71,
@@ -411,7 +411,7 @@ void __devinit init_bttv_i2c_ir(struct bttv *btv)
return;
}
-int __devexit fini_bttv_i2c(struct bttv *btv)
+int fini_bttv_i2c(struct bttv *btv)
{
if (0 != btv->i2c_rc)
return 0;
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 81fab9adc1ca..d407244fd1bc 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -118,7 +118,8 @@ static int is_pci_slot_eq(struct pci_dev* adev, struct pci_dev* bdev)
return 0;
}
-static struct bt878 __devinit *dvb_bt8xx_878_match(unsigned int bttv_nr, struct pci_dev* bttv_pci_dev)
+static struct bt878 *dvb_bt8xx_878_match(unsigned int bttv_nr,
+ struct pci_dev* bttv_pci_dev)
{
unsigned int card_nr;
@@ -720,7 +721,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
}
}
-static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
+static int dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
{
int result;
@@ -811,7 +812,7 @@ err_unregister_adaptor:
return result;
}
-static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
+static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
{
struct dvb_bt8xx_card *card;
struct pci_dev* bttv_pci_dev;
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 6d2a98246b6d..8e971ff60588 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -197,7 +197,7 @@ err_exit:
return ret;
}
-int cx18_alsa_load(struct cx18 *cx)
+static int __init cx18_alsa_load(struct cx18 *cx)
{
struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
struct cx18_stream *s;
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 7a5b84a86bb3..180077c49123 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "cx18-streams.h"
#include "cx18-fileops.h"
#include "cx18-alsa.h"
+#include "cx18-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
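Several source files in this patch start including their own interface header (cx18-alsa-pcm.h here). That lets the compiler compare each definition against its published prototype and quiets sparse and -Wmissing-prototypes noise. A hypothetical example of the pattern (foo.h/foo.c are not real files):

	/* foo.h */
	int foo_start(int id);

	/* foo.c */
	#include "foo.h"	/* the definition below is now checked against
				 * the prototype above */
	int foo_start(int id)
	{
		return id ? 0 : -1;
	}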
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 039133d692e3..613e5ae7d5ca 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -53,7 +53,7 @@ int (*cx18_ext_init)(struct cx18 *);
EXPORT_SYMBOL(cx18_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
+static struct pci_device_id cx18_pci_tbl[] = {
{PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
@@ -691,7 +691,7 @@ done:
cx->card_i2c = cx->card->i2c;
}
-static int __devinit cx18_create_in_workq(struct cx18 *cx)
+static int cx18_create_in_workq(struct cx18 *cx)
{
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
@@ -703,7 +703,7 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
return 0;
}
-static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
+static void cx18_init_in_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
@@ -718,7 +718,7 @@ static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
No assumptions on the card type may be made here (see cx18_init_struct2
for that).
*/
-static int __devinit cx18_init_struct1(struct cx18 *cx)
+static int cx18_init_struct1(struct cx18 *cx)
{
int ret;
@@ -775,7 +775,7 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
/* Second initialization part. Here the card type has been
autodetected. */
-static void __devinit cx18_init_struct2(struct cx18 *cx)
+static void cx18_init_struct2(struct cx18 *cx)
{
int i;
@@ -892,8 +892,8 @@ static void cx18_init_subdevs(struct cx18 *cx)
cx->sd_extmux = cx18_find_hw(cx, cx->card->hw_muxer);
}
-static int __devinit cx18_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx18_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
int retval = 0;
int i;
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 51609d5c88ce..4908eb7bcf6c 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -98,7 +98,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
case CX18_HW_Z8F0811_IR_RX_HAUP:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = cx->card_name;
info.platform_data = init_data;
break;
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 72af9b5c2d7d..843c62b2f482 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -97,7 +97,7 @@ static struct {
};
-void cx18_dma_free(struct videobuf_queue *q,
+static void cx18_dma_free(struct videobuf_queue *q,
struct cx18_stream *s, struct cx18_videobuf_buffer *buf)
{
videobuf_waiton(q, &buf->vb, 0, 0);
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 495781ee4711..2926f7fadccd 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -263,7 +263,7 @@ static int netup_fpga_op_rw(struct fpga_internal *inter, int addr,
}
/* flag - mem/io, read - read/write */
-int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 val)
{
@@ -298,31 +298,32 @@ int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
return mem;
}
-int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr)
+static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr)
{
return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
- int slot, int addr, u8 data)
+static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL,
NETUP_CI_FLG_RD, addr, 0);
}
-int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
- u8 addr, u8 data)
+static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr, u8 data)
{
return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data);
}
-int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -365,13 +366,13 @@ int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
-int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
+static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct altera_ci_state *state = en50221->data;
struct fpga_internal *inter = state->internal;
@@ -448,8 +449,8 @@ int altera_ci_irq(void *dev)
}
EXPORT_SYMBOL(altera_ci_irq);
-int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
- int open)
+static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct altera_ci_state *state = en50221->data;
@@ -459,7 +460,7 @@ int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
return state->status;
}
-void altera_hw_filt_release(void *main_dev, int filt_nr)
+static void altera_hw_filt_release(void *main_dev, int filt_nr)
{
struct fpga_inode *temp_int = find_inode(main_dev);
struct netup_hw_pid_filter *pid_filt = NULL;
@@ -581,7 +582,7 @@ static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt,
mutex_unlock(&inter->fpga_mutex);
}
-int altera_pid_feed_control(void *demux_dev, int filt_nr,
+static int altera_pid_feed_control(void *demux_dev, int filt_nr,
struct dvb_demux_feed *feed, int onoff)
{
struct fpga_inode *temp_int = find_dinode(demux_dev);
@@ -603,41 +604,41 @@ int altera_pid_feed_control(void *demux_dev, int filt_nr,
}
EXPORT_SYMBOL(altera_pid_feed_control);
-int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 1);
return 0;
}
-int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
+static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
{
altera_pid_feed_control(feed->demux, num, feed, 0);
return 0;
}
-int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 1);
}
-int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 1);
}
-int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_start_feed(feed, 2);
}
-int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
+static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
{
return altera_ci_stop_feed(feed, 2);
}
-int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
+static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
{
struct netup_hw_pid_filter *pid_filt = NULL;
struct fpga_inode *temp_int = find_inode(config->dev);
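The altera-ci.c hunk shows the other recurring cleanup here: helpers that are only ever reached through ops tables in the same file become static, so they stop triggering missing-prototype warnings and no longer pollute the global symbol namespace. A small sketch of the shape:

	/* Sketch: a file-local hook wired into an ops structure. */
	struct demo_ops {
		int (*start)(int slot);
	};

	static int demo_start(int slot)		/* visible only in this file */
	{
		return slot >= 0 ? 0 : -1;
	}

	static const struct demo_ops demo = {
		.start = demo_start,		/* taking its address still works */
	};

	int demo_run(void)
	{
		return demo.start(0);
	}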
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 6617774a326a..7344849183a7 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "cimax2.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
@@ -87,7 +88,7 @@ struct netup_ci_state {
};
-int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -120,7 +121,7 @@ int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
+static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
@@ -147,7 +148,7 @@ int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
return 0;
}
-int netup_ci_get_mem(struct cx23885_dev *dev)
+static int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
@@ -166,7 +167,7 @@ int netup_ci_get_mem(struct cx23885_dev *dev)
return mem & 0xff;
}
-int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
+static int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
@@ -248,7 +249,8 @@ int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
-int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
+ u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
@@ -295,7 +297,7 @@ int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
-int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
+static int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
@@ -399,7 +401,8 @@ int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
return 1;
}
-int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
+int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
{
struct netup_ci_state *state = en50221->data;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 795169237e70..c6c9bd58f8be 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -45,8 +45,10 @@
#define AUDIO_SRAM_CHANNEL SRAM_CH07
-#define dprintk(level, fmt, arg...) if (audio_debug >= level) \
- printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (audio_debug + 1 > level) \
+ printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \
+} while(0)
#define dprintk_core(level, fmt, arg...) if (audio_debug >= level) \
printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
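The dprintk() rework does two things: it wraps the conditional in do { } while (0) so the macro behaves as one statement inside if/else, and it rewrites the level test as audio_debug + 1 > level, which is arithmetically the same as audio_debug >= level (presumably phrased that way to sidestep always-true comparison warnings; that reading is an assumption). A user-space sketch of the if/else trap the do-while form prevents:

	#include <stdio.h>

	static int debug;

	#define dbg_bad(fmt, ...)	if (debug) printf(fmt, ##__VA_ARGS__)
	#define dbg_good(fmt, ...)	do { \
						if (debug) \
							printf(fmt, ##__VA_ARGS__); \
					} while (0)

	static void demo(int cond)
	{
		if (cond)
			dbg_bad("hit\n");
		else			/* binds to the macro's hidden "if (debug)",
					 * so this branch runs when cond && !debug */
			printf("miss\n");

		if (cond)
			dbg_good("hit\n");	/* expands to one statement */
		else
			printf("miss\n");
	}

	int main(void)
	{
		demo(0);
		demo(1);
		return 0;
	}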
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index 134ebddd860f..e958a01fd554 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -22,6 +22,7 @@
*/
#include "cx23885.h"
+#include "cx23885-av.h"
void cx23885_av_work_handler(struct work_struct *work)
{
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 5acdf954ff6b..6277e145f0b8 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1427,7 +1427,7 @@ void cx23885_ir_fini(struct cx23885_dev *dev)
}
}
-int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
+static int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
{
int data;
int tdo = 0;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 697728f09430..f0416a668b4c 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -303,7 +303,7 @@ static struct sram_channel cx23887_sram_channels[] = {
},
};
-void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
+static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
@@ -1516,8 +1516,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
buf = list_entry(q->queued.next, struct cx23885_buffer,
vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx23885_start_dma(port, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -1528,8 +1527,7 @@ int cx23885_restart_queue(struct cx23885_tsport *port,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
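list_move_tail() is a drop-in replacement for the list_del()/list_add_tail() pair deleted above: it detaches the entry from whatever list currently holds it and appends it to the given head in one call. Roughly, simplified from include/linux/list.h:

	static inline void list_move_tail(struct list_head *entry,
					  struct list_head *head)
	{
		__list_del_entry(entry);	/* unlink from its current list */
		list_add_tail(entry, head);	/* append at the new tail */
	}

The same substitution appears again in cx88-mpeg.c further down.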
@@ -2088,8 +2086,8 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
/* TODO: 23-19 */
}
-static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx23885_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx23885_dev *dev;
int err;
@@ -2169,7 +2167,7 @@ fail_free:
return err;
}
-static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
+static void cx23885_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx23885_dev *dev = to_cx23885(v4l2_dev);
@@ -2212,7 +2210,7 @@ static struct pci_driver cx23885_pci_driver = {
.name = "cx23885",
.id_table = cx23885_pci_tbl,
.probe = cx23885_initdev,
- .remove = __devexit_p(cx23885_finidev),
+ .remove = cx23885_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 4379d8a6dad5..2f5b902e63ae 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -659,7 +659,7 @@ static struct mt2063_config terratec_mt2063_config[] = {
},
};
-int netup_altera_fpga_rw(void *device, int flag, int data, int read)
+static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index 93998f220986..5444cc526008 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -29,6 +29,7 @@
*/
#include "cx23885.h"
+#include "cx23885-f300.h"
#define F300_DATA GPIO_0
#define F300_RESET GPIO_1
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 2c925f77cf2a..4f1055a194b5 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -40,6 +40,7 @@
#include <media/v4l2-subdev.h>
#include "cx23885.h"
+#include "cx23885-input.h"
#define MODULE_NAME "cx23885"
@@ -270,21 +271,21 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_NEC;
+ allowed_protos = RC_BIT_NEC;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_TYPE_ALL;
+ allowed_protos = RC_BIT_ALL;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
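The RC_TYPE_* to RC_BIT_* substitutions track an rc-core change from the same cycle: the protocol field is now a 64-bit mask of RC_BIT_* flags rather than a single enumerated value, so a driver can advertise several protocols at once. A sketch of how such a mask is used (variable names are illustrative):

	u64 allowed = RC_BIT_RC5 | RC_BIT_NEC;	/* more than one protocol */

	if (allowed & RC_BIT_NEC)
		pr_info("NEC decoding enabled\n");

The same rename appears in the cx18 hunk above and in the cx88 and ivtv IR code below.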
diff --git a/drivers/media/pci/cx23885/cx23885-input.h b/drivers/media/pci/cx23885/cx23885-input.h
index 75ef15d3f523..87dc44e69977 100644
--- a/drivers/media/pci/cx23885/cx23885-input.h
+++ b/drivers/media/pci/cx23885/cx23885-input.h
@@ -23,7 +23,7 @@
#ifndef _CX23885_INPUT_H_
#define _CX23885_INPUT_H_
-int cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
+void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events);
int cx23885_input_init(struct cx23885_dev *dev);
void cx23885_input_fini(struct cx23885_dev *dev);
diff --git a/drivers/media/pci/cx23885/cx23885-ioctl.c b/drivers/media/pci/cx23885/cx23885-ioctl.c
index 44812ca78899..ea9a614f3bbf 100644
--- a/drivers/media/pci/cx23885/cx23885-ioctl.c
+++ b/drivers/media/pci/cx23885/cx23885-ioctl.c
@@ -22,6 +22,8 @@
*/
#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
#include <media/v4l2-chip-ident.h>
int cx23885_g_chip_ident(struct file *file, void *fh,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 7125247dd255..bfef19359291 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -24,6 +24,7 @@
#include <media/v4l2-device.h>
#include "cx23885.h"
+#include "cx23885-ir.h"
#include "cx23885-input.h"
#define CX23885_IR_RX_FIFO_SERVICE_REQ 0
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c2bc39c58f82..c4bd1e95d33f 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -29,6 +29,7 @@
#include <media/rc-core.h>
#include "cx23885.h"
+#include "cx23888-ir.h"
static unsigned int ir_888_debug;
module_param(ir_888_debug, int, 0644);
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index f4893e69cd89..0044fef7ca24 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -24,6 +24,7 @@
*/
#include "cx23885.h"
+#include "netup-init.h"
static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
{
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 8b2a99975c23..87491ca05ee5 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -44,7 +44,7 @@ MODULE_LICENSE("GPL");
static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF |
FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR;
-int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
@@ -133,7 +133,7 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
+static int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
struct pci_dev *pci,
unsigned int bpl, unsigned int lines)
{
@@ -197,7 +197,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
return 0;
}
-void cx25821_free_memory_audio(struct cx25821_dev *dev)
+static void cx25821_free_memory_audio(struct cx25821_dev *dev)
{
if (dev->_risc_virt_addr) {
pci_free_consistent(dev->pci, dev->_audiorisc_size,
@@ -256,7 +256,7 @@ void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
cx25821_free_memory_audio(dev);
}
-int cx25821_get_audio_data(struct cx25821_dev *dev,
+static int cx25821_get_audio_data(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -351,7 +351,7 @@ static void cx25821_audioups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile_audio(struct cx25821_dev *dev,
+static int cx25821_openfile_audio(struct cx25821_dev *dev,
struct sram_channel *sram_ch)
{
struct file *myfile;
@@ -490,7 +490,7 @@ error:
return ret;
}
-int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
+static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
u32 status)
{
int i = 0;
@@ -634,8 +634,8 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
}
-int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -700,9 +700,7 @@ fail_irq:
int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
{
struct sram_channel *sram_ch;
- int retval = 0;
int err = 0;
- int str_length = 0;
if (dev->_audio_is_running) {
pr_warn("Audio Channel is still running so return!\n");
@@ -731,27 +729,29 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
_line_size = AUDIO_LINE_SIZE;
if (dev->input_audiofilename) {
- str_length = strlen(dev->input_audiofilename);
- dev->_audiofilename = kmemdup(dev->input_audiofilename,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(dev->input_audiofilename,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
/* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0)
dev->_audiofilename = "/root/audioGOOD.wav";
} else {
- str_length = strlen(_defaultAudioName);
- dev->_audiofilename = kmemdup(_defaultAudioName,
- str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kstrdup(_defaultAudioName,
+ GFP_KERNEL);
- if (!dev->_audiofilename)
+ if (!dev->_audiofilename) {
+ err = -ENOMEM;
goto error;
+ }
}
- retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
- _line_size, 0);
+ cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
+ _line_size, 0);
dev->audio_upstream_riscbuf_size =
AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
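kstrdup(s, gfp) is the idiomatic form of the strlen() + kmemdup(s, len + 1, gfp) pair it replaces in this hunk: it allocates strlen(s) + 1 bytes, copies the string including its NUL terminator, and returns NULL on allocation failure. The two spellings below produce the same heap copy (a sketch, not driver code):

	char *a = kmemdup(name, strlen(name) + 1, GFP_KERNEL);	/* old pattern */
	char *b = kstrdup(name, GFP_KERNEL);			/* new pattern */

	if (!b)
		return -ENOMEM;		/* both return NULL on failure */

Dropping the helper also lets the now-unused str_length local go away.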
@@ -759,9 +759,9 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
+ err = cx25821_audio_upstream_buffer_prepare(dev, sram_ch,
_line_size);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Audio upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-biffuncs.h b/drivers/media/pci/cx25821/cx25821-biffuncs.h
index 9326a7c729ec..937f5a70fb7a 100644
--- a/drivers/media/pci/cx25821/cx25821-biffuncs.h
+++ b/drivers/media/pci/cx25821/cx25821-biffuncs.h
@@ -25,17 +25,17 @@
#define SetBit(Bit) (1 << Bit)
-inline u8 getBit(u32 sample, u8 index)
+static inline u8 getBit(u32 sample, u8 index)
{
return (u8) ((sample >> index) & 1);
}
-inline u32 clearBitAtPos(u32 value, u8 bit)
+static inline u32 clearBitAtPos(u32 value, u8 bit)
{
return value & ~(1 << bit);
}
-inline u32 setBitAtPos(u32 sample, u8 bit)
+static inline u32 setBitAtPos(u32 sample, u8 bit)
{
sample |= (1 << bit);
return sample;
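Marking these header helpers static inline instead of bare inline matters because they live in a header that may be included from more than one translation unit: under C linkage rules a plain inline function can end up either multiply defined or never emitted, while static inline gives every includer its own private copy. The resulting shape, for a hypothetical header:

	/* bits.h -- hypothetical header included by several .c files;
	 * u32/u8 come from <linux/types.h> */
	static inline u32 set_bit_at(u32 v, u8 bit)
	{
		return v | (1u << bit);		/* no duplicate symbol at link time */
	}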
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index f11f6f07e915..1884e2cc35e9 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1361,8 +1361,8 @@ struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
}
EXPORT_SYMBOL(cx25821_dev_get);
-static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx25821_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx25821_dev *dev;
int err = 0;
@@ -1433,7 +1433,7 @@ fail_free:
return err;
}
-static void __devexit cx25821_finidev(struct pci_dev *pci_dev)
+static void cx25821_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
@@ -1478,7 +1478,7 @@ static struct pci_driver cx25821_pci_driver = {
.name = "cx25821",
.id_table = cx25821_pci_tbl,
.probe = cx25821_initdev,
- .remove = __devexit_p(cx25821_finidev),
+ .remove = cx25821_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 9844549764c9..a8dc945bbe17 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -329,7 +329,8 @@ int cx25821_i2c_unregister(struct cx25821_i2c *bus)
return 0;
}
-void cx25821_av_clk(struct cx25821_dev *dev, int enable)
+#if 0 /* Currently unused */
+static void cx25821_av_clk(struct cx25821_dev *dev, int enable)
{
/* write 0 to bus 2 addr 0x144 via i2x_xfer() */
char buffer[3];
@@ -351,6 +352,7 @@ void cx25821_av_clk(struct cx25821_dev *dev, int enable)
i2c_xfer(&dev->i2c_bus[0].i2c_adap, &msg, 1);
}
+#endif
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
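cx25821_av_clk() apparently has no remaining callers, so rather than deleting it the patch fences it with #if 0 ... #endif: the preprocessor drops the body, which avoids an unused-function warning for a newly static function while keeping the code in the tree for reference. In miniature:

	#if 0	/* currently unused -- compiled out but kept for reference */
	static void helper_unused(void)
	{
	}
	#endif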
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
index d33fc1a23030..cf2723c7197f 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream-ch2.c
@@ -123,10 +123,11 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
return rp;
}
-int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset, unsigned int bpl,
- unsigned int lines)
+static int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl,
+ unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -255,7 +256,8 @@ void cx25821_free_mem_upstream_ch2(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index_ch2;
@@ -360,7 +362,8 @@ static void cx25821_vidups_handler_ch2(struct work_struct *work)
_channel2_upstream_select].sram_channels);
}
-int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -507,8 +510,9 @@ error:
return ret;
}
-int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev,
+ int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -647,8 +651,8 @@ static void cx25821_set_pixelengine_ch2(struct cx25821_dev *dev,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -704,11 +708,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
- int str_length = 0;
if (dev->_is_running_ch2) {
pr_info("Video Channel is still running so return!\n");
@@ -744,20 +746,16 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
risc_buffer_size = dev->_isNTSC_ch2 ?
NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
- if (dev->input_filename_ch2) {
- str_length = strlen(dev->input_filename_ch2);
- dev->_filename_ch2 = kmemdup(dev->input_filename_ch2,
- str_length + 1, GFP_KERNEL);
-
- if (!dev->_filename_ch2)
- goto error;
- } else {
- str_length = strlen(dev->_defaultname_ch2);
- dev->_filename_ch2 = kmemdup(dev->_defaultname_ch2,
- str_length + 1, GFP_KERNEL);
+ if (dev->input_filename_ch2)
+ dev->_filename_ch2 = kstrdup(dev->input_filename_ch2,
+ GFP_KERNEL);
+ else
+ dev->_filename_ch2 = kstrdup(dev->_defaultname_ch2,
+ GFP_KERNEL);
- if (!dev->_filename_ch2)
- goto error;
+ if (!dev->_filename_ch2) {
+ err = -ENOENT;
+ goto error;
}
/* Default if filename is empty string */
@@ -773,7 +771,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
}
}
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size_ch2, 0);
/* setup fifo + format */
@@ -783,9 +781,9 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size_ch2 = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
+ err = cx25821_upstream_buffer_prepare_ch2(dev, sram_ch,
dev->_line_size_ch2);
- if (retval < 0) {
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 6759fff8eb64..7fc97110d973 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -173,10 +173,10 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
return rp;
}
-int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
- struct pci_dev *pci,
- unsigned int top_offset,
- unsigned int bpl, unsigned int lines)
+static int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
+ struct pci_dev *pci,
+ unsigned int top_offset,
+ unsigned int bpl, unsigned int lines)
{
__le32 *rp;
int fifo_enable = 0;
@@ -300,7 +300,8 @@ void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev)
}
}
-int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_get_frame(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int frame_index_temp = dev->_frame_index;
@@ -405,7 +406,8 @@ static void cx25821_vidups_handler(struct work_struct *work)
sram_channels);
}
-int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
+static int cx25821_openfile(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
@@ -486,8 +488,9 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
return 0;
}
-int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
- struct sram_channel *sram_ch, int bpl)
+static int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch,
+ int bpl)
{
int ret = 0;
dma_addr_t dma_addr;
@@ -548,8 +551,8 @@ error:
return ret;
}
-int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
- u32 status)
+static int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
+ u32 status)
{
u32 int_msk_tmp;
struct sram_channel *channel = dev->channels[chan_num].sram_channels;
@@ -664,8 +667,9 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
- int pix_format)
+static void cx25821_set_pixelengine(struct cx25821_dev *dev,
+ struct sram_channel *ch,
+ int pix_format)
{
int width = WIDTH_D1;
int height = dev->_lines_count;
@@ -696,8 +700,8 @@ void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
}
-int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
- struct sram_channel *sram_ch)
+static int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
+ struct sram_channel *sram_ch)
{
u32 tmp = 0;
int err = 0;
@@ -753,7 +757,6 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
{
struct sram_channel *sram_ch;
u32 tmp;
- int retval = 0;
int err = 0;
int data_frame_size = 0;
int risc_buffer_size = 0;
@@ -796,15 +799,19 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_filename = kmemdup(dev->input_filename, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
} else {
str_length = strlen(dev->_defaultname);
dev->_filename = kmemdup(dev->_defaultname, str_length + 1,
GFP_KERNEL);
- if (!dev->_filename)
+ if (!dev->_filename) {
+ err = -ENOENT;
goto error;
+ }
}
/* Default if filename is empty string */
@@ -828,7 +835,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ?
(WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ err = cx25821_sram_channel_setup_upstream(dev, sram_ch,
dev->_line_size, 0);
/* setup fifo + format */
@@ -838,8 +845,8 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->upstream_databuf_size = data_frame_size * 2;
/* Allocating buffers and prepare RISC program */
- retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
- if (retval < 0) {
+ err = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
+ if (err < 0) {
pr_err("%s: Failed to set up Video upstream buffers!\n",
dev->name);
goto error;
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 0a80245165d0..53b16dd70320 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -291,9 +291,9 @@ int cx25821_start_video_dma(struct cx25821_dev *dev,
return 0;
}
-int cx25821_restart_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q,
- struct sram_channel *channel)
+static int cx25821_restart_video_queue(struct cx25821_dev *dev,
+ struct cx25821_dmaqueue *q,
+ struct sram_channel *channel)
{
struct cx25821_buffer *buf, *prev;
struct list_head *item;
@@ -342,7 +342,7 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
}
}
-void cx25821_vid_timeout(unsigned long data)
+static void cx25821_vid_timeout(unsigned long data)
{
struct cx25821_data *timeout_data = (struct cx25821_data *)data;
struct cx25821_dev *dev = timeout_data->dev;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 3aa6856ead3b..27d62623274b 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -45,11 +45,15 @@
#include "cx88.h"
#include "cx88-reg.h"
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
-#define dprintk_core(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg)
+#define dprintk_core(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
+} while(0)
/****************************************************************************
Data type declarations - Can be moded to a header file later
@@ -536,7 +540,7 @@ static struct snd_pcm_ops snd_cx88_pcm_ops = {
/*
* create a PCM device
*/
-static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
+static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
{
int err;
struct snd_pcm *pcm;
@@ -749,7 +753,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
+static const struct pci_device_id cx88_audio_pci_tbl[] = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
@@ -788,10 +792,9 @@ static void snd_cx88_dev_free(struct snd_card * card)
*/
static int devno;
-static int __devinit snd_cx88_create(struct snd_card *card,
- struct pci_dev *pci,
- snd_cx88_card_t **rchip,
- struct cx88_core **core_ptr)
+static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
+ snd_cx88_card_t **rchip,
+ struct cx88_core **core_ptr)
{
snd_cx88_card_t *chip;
struct cx88_core *core;
@@ -858,8 +861,8 @@ static int __devinit snd_cx88_create(struct snd_card *card,
return 0;
}
-static int __devinit cx88_audio_initdev(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int cx88_audio_initdev(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
snd_cx88_card_t *chip;
@@ -927,7 +930,7 @@ error:
/*
* ALSA destructor
*/
-static void __devexit cx88_audio_finidev(struct pci_dev *pci)
+static void cx88_audio_finidev(struct pci_dev *pci)
{
struct cx88_audio_dev *card = pci_get_drvdata(pci);
@@ -946,7 +949,7 @@ static struct pci_driver cx88_audio_pci_driver = {
.name = "cx88_audio",
.id_table = cx88_audio_pci_tbl,
.probe = cx88_audio_initdev,
- .remove = __devexit_p(cx88_audio_finidev),
+ .remove = cx88_audio_finidev,
};
/****************************************************************************
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 62184eb919e5..a6ff8a6f4fc0 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -53,9 +53,10 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg)
-
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
+} while(0)
/* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index c97b174be3ab..19a58754c6e1 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -646,22 +646,22 @@ int cx88_reset(struct cx88_core *core)
/* ------------------------------------------------------------------ */
-static unsigned int inline norm_swidth(v4l2_std_id norm)
+static inline unsigned int norm_swidth(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
}
-static unsigned int inline norm_hdelay(v4l2_std_id norm)
+static inline unsigned int norm_hdelay(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 135 : 186;
}
-static unsigned int inline norm_vdelay(v4l2_std_id norm)
+static inline unsigned int norm_vdelay(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 0x24 : 0x18;
}
-static unsigned int inline norm_fsc8(v4l2_std_id norm)
+static inline unsigned int norm_fsc8(v4l2_std_id norm)
{
if (norm & V4L2_STD_PAL_M)
return 28604892; // 3.575611 MHz
@@ -681,7 +681,7 @@ static unsigned int inline norm_fsc8(v4l2_std_id norm)
return 35468950; // 4.43361875 MHz +/- 5 Hz
}
-static unsigned int inline norm_htotal(v4l2_std_id norm)
+static inline unsigned int norm_htotal(v4l2_std_id norm)
{
unsigned int fsc4=norm_fsc8(norm)/2;
@@ -692,7 +692,7 @@ static unsigned int inline norm_htotal(v4l2_std_id norm)
((fsc4+262)/525*1001+15000)/30000;
}
-static unsigned int inline norm_vbipack(v4l2_std_id norm)
+static inline unsigned int norm_vbipack(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 511 : 400;
}
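Moving inline ahead of the return type is purely a declaration-style fix: static unsigned int inline f(void) is legal C, but placing a function specifier after the type is an obsolescent form that gcc flags (via -Wold-style-declaration, typically enabled with -Wextra), so the canonical order is storage class, then inline, then the type. Side by side:

	static unsigned int inline old_style(void) { return 0; }	/* warns */
	static inline unsigned int new_style(void) { return 0; }	/* preferred */

The cx88.h hunk further down makes the same adjustment to norm_maxw()/norm_maxh().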
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index ebf448c48ca3..f29e18c72f44 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -248,7 +248,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
struct cx88_IR *ir;
struct rc_dev *dev;
char *ir_codes = NULL;
- u64 rc_type = RC_TYPE_OTHER;
+ u64 rc_type = RC_BIT_OTHER;
int err = -ENOMEM;
u32 hardware_mask = 0; /* For devices with a hardware mask, when
* used with a full-code IR table
@@ -416,7 +416,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
ir_codes = RC_MAP_TWINHAN_VP1027_DVBS;
- rc_type = RC_TYPE_NEC;
+ rc_type = RC_BIT_NEC;
ir->sampling = 0xff00; /* address */
break;
}
@@ -592,7 +592,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
case CX88_BOARD_LEADTEK_PVR2000:
addr_list = pvr2000_addr_list;
core->init_data.name = "cx88 Leadtek PVR 2000 remote";
- core->init_data.type = RC_TYPE_UNKNOWN;
+ core->init_data.type = RC_BIT_UNKNOWN;
core->init_data.get_key = get_key_pvr2000;
core->init_data.ir_codes = RC_MAP_EMPTY;
break;
@@ -613,7 +613,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
/* Hauppauge XVR */
core->init_data.name = "cx88 Hauppauge XVR remote";
core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- core->init_data.type = RC_TYPE_RC5;
+ core->init_data.type = RC_BIT_RC5;
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index d154bc197356..c9d3182f79d5 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -45,11 +45,15 @@ static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
-#define dprintk(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg)
+#define dprintk(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
+} while(0)
-#define mpeg_dbg(level,fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg)
+#define mpeg_dbg(level, fmt, arg...) do { \
+ if (debug + 1 > level) \
+ printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
+} while(0)
#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
@@ -217,8 +221,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
return 0;
buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
if (NULL == prev) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
cx8802_start_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
@@ -229,8 +232,7 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue,&q->active);
+ list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
@@ -789,8 +791,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
}
/* ----------------------------------------------------------- */
-static int __devinit cx8802_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx8802_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx8802_dev *dev;
struct cx88_core *core;
@@ -838,7 +840,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
return err;
}
-static void __devexit cx8802_remove(struct pci_dev *pci_dev)
+static void cx8802_remove(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev;
@@ -896,7 +898,7 @@ static struct pci_driver cx8802_pci_driver = {
.name = "cx88-mpeg driver manager",
.id_table = cx8802_pci_tbl,
.probe = cx8802_probe,
- .remove = __devexit_p(cx8802_remove),
+ .remove = cx8802_remove,
};
static int __init cx8802_init(void)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 05171457bf28..bc78354262ac 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1696,8 +1696,8 @@ static void cx8800_unregister_video(struct cx8800_dev *dev)
}
}
-static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int cx8800_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct cx8800_dev *dev;
struct cx88_core *core;
@@ -1923,7 +1923,7 @@ fail_free:
return err;
}
-static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
+static void cx8800_finidev(struct pci_dev *pci_dev)
{
struct cx8800_dev *dev = pci_get_drvdata(pci_dev);
struct cx88_core *core = dev->core;
@@ -2052,7 +2052,7 @@ static struct pci_driver cx8800_pci_driver = {
.name = "cx8800",
.id_table = cx8800_pci_tbl,
.probe = cx8800_initdev,
- .remove = __devexit_p(cx8800_finidev),
+ .remove = cx8800_finidev,
#ifdef CONFIG_PM
.suspend = cx8800_suspend,
.resume = cx8800_resume,
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 44ffc8b3d45f..ba0dba4a4d22 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -94,13 +94,13 @@ enum cx8802_board_access {
/* ----------------------------------------------------------- */
/* tv norms */
-static unsigned int inline norm_maxw(v4l2_std_id norm)
+static inline unsigned int norm_maxw(v4l2_std_id norm)
{
return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768;
}
-static unsigned int inline norm_maxh(v4l2_std_id norm)
+static inline unsigned int norm_maxh(v4l2_std_id norm)
{
return (norm & V4L2_STD_625_50) ? 576 : 480;
}
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index feff57ee5a08..36e34522b9a8 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1542,7 +1542,7 @@ static void ddb_unmap(struct ddb *dev)
}
-static void __devexit ddb_remove(struct pci_dev *pdev)
+static void ddb_remove(struct pci_dev *pdev)
{
struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
@@ -1565,8 +1565,7 @@ static void __devexit ddb_remove(struct pci_dev *pdev)
}
-static int __devinit ddb_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ddb *dev;
int stat = 0;
@@ -1679,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
.subvendor = _subvend, .subdevice = _subdev, \
.driver_data = (unsigned long)&_driverdata }
-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
+static const struct pci_device_id ddb_id_tbl[] = {
DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
@@ -1696,7 +1695,7 @@ static struct pci_driver ddb_pci_driver = {
.name = "DDBridge",
.id_table = ddb_id_tbl,
.probe = ddb_probe,
- .remove = __devexit_p(ddb_remove),
+ .remove = ddb_remove,
};
static __init int module_init_ddbridge(void)
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a609b3a9b146..904c3ea350f5 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -616,7 +616,7 @@ static void dm1105_set_dma_addr(struct dm1105_dev *dev)
dm_writel(DM1105_STADR, cpu_to_le32(dev->dma_addr));
}
-static int __devinit dm1105_dma_map(struct dm1105_dev *dev)
+static int dm1105_dma_map(struct dm1105_dev *dev)
{
dev->ts_buf = pci_alloc_consistent(dev->pdev,
6 * DM1105_DMA_BYTES,
@@ -736,7 +736,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
+static int dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct rc_dev *dev;
int err = -ENOMEM;
@@ -776,12 +776,12 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
return 0;
}
-void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
+static void dm1105_ir_exit(struct dm1105_dev *dm1105)
{
rc_unregister_device(dm1105->ir.dev);
}
-static int __devinit dm1105_hw_init(struct dm1105_dev *dev)
+static int dm1105_hw_init(struct dm1105_dev *dev)
{
dm1105_disable_irqs(dev);
@@ -849,7 +849,7 @@ static struct ds3000_config dvbworld_ds3000_config = {
.demod_address = 0x68,
};
-static int __devinit frontend_init(struct dm1105_dev *dev)
+static int frontend_init(struct dm1105_dev *dev)
{
int ret;
@@ -949,7 +949,7 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
return 0;
}
-static void __devinit dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
+static void dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
{
static u8 command[1] = { 0x28 };
@@ -971,7 +971,7 @@ static void __devinit dm1105_read_mac(struct dm1105_dev *dev, u8 *mac)
dev_info(&dev->pdev->dev, "MAC %pM\n", mac);
}
-static int __devinit dm1105_probe(struct pci_dev *pdev,
+static int dm1105_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dm1105_dev *dev;
@@ -1128,8 +1128,10 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
INIT_WORK(&dev->work, dm1105_dmx_buffer);
sprintf(dev->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num);
dev->wq = create_singlethread_workqueue(dev->wqn);
- if (!dev->wq)
+ if (!dev->wq) {
+ ret = -ENOMEM;
goto err_dvb_net;
+ }
ret = request_irq(pdev->irq, dm1105_irq, IRQF_SHARED,
DRIVER_NAME, dev);
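This hunk also fixes a quiet error path: create_singlethread_workqueue() returns NULL on failure rather than an error pointer, and before the change the goto reused whatever ret already held (presumably 0 from the previous successful step), so probe could unwind yet still report success. The general shape of the fix:

	/* Sketch: NULL-returning allocations must set the error code by hand. */
	dev->wq = create_singlethread_workqueue("demo");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err_free;		/* unwind with a real error code */
	}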
@@ -1172,7 +1174,7 @@ err_kfree:
return ret;
}
-static void __devexit dm1105_remove(struct pci_dev *pdev)
+static void dm1105_remove(struct pci_dev *pdev)
{
struct dm1105_dev *dev = pci_get_drvdata(pdev);
struct dvb_adapter *dvb_adapter = &dev->dvb_adapter;
@@ -1205,7 +1207,7 @@ static void __devexit dm1105_remove(struct pci_dev *pdev)
kfree(dev);
}
-static struct pci_device_id dm1105_id_table[] __devinitdata = {
+static struct pci_device_id dm1105_id_table[] = {
{
.vendor = PCI_VENDOR_ID_TRIGEM,
.device = PCI_DEVICE_ID_DM1105,
@@ -1227,7 +1229,7 @@ static struct pci_driver dm1105_driver = {
.name = DRIVER_NAME,
.id_table = dm1105_id_table,
.probe = dm1105_probe,
- .remove = __devexit_p(dm1105_remove),
+ .remove = dm1105_remove,
};
static int __init dm1105_init(void)
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8deab1629b3b..4a221c693995 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -205,7 +205,7 @@ err_exit:
return ret;
}
-int ivtv_alsa_load(struct ivtv *itv)
+static int __init ivtv_alsa_load(struct ivtv *itv)
{
struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
struct ivtv_stream *s;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index f7022bd58ffd..e1863dbf4edc 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -37,6 +37,7 @@
#include "ivtv-streams.h"
#include "ivtv-fileops.h"
#include "ivtv-alsa.h"
+#include "ivtv-alsa-pcm.h"
static unsigned int pcm_debug;
module_param(pcm_debug, int, 0644);
@@ -69,8 +70,9 @@ static struct snd_pcm_hardware snd_ivtv_hw_capture = {
.periods_max = 98, /* 12544, */
};
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc, u8 *pcm_data,
- size_t num_bytes)
+static void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc,
+ u8 *pcm_data,
+ size_t num_bytes)
{
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
index 5ab18319ea4d..23dfe0d12400 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
@@ -21,7 +21,3 @@
*/
int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
-
-/* Used by ivtv driver to announce the PCM data to the module */
-void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *card, u8 *pcm_data,
- size_t num_bytes);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 74e9a5032364..df88dc4ab555 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -73,7 +73,7 @@ int (*ivtv_ext_init)(struct ivtv *);
EXPORT_SYMBOL(ivtv_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
+static struct pci_device_id ivtv_pci_tbl[] = {
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16,
@@ -736,7 +736,7 @@ done:
No assumptions on the card type may be made here (see ivtv_init_struct2
for that).
*/
-static int __devinit ivtv_init_struct1(struct ivtv *itv)
+static int ivtv_init_struct1(struct ivtv *itv)
{
struct sched_param param = { .sched_priority = 99 };
@@ -802,7 +802,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
/* Second initialization part. Here the card type has been
autodetected. */
-static void __devinit ivtv_init_struct2(struct ivtv *itv)
+static void ivtv_init_struct2(struct ivtv *itv)
{
int i;
@@ -1001,8 +1001,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
}
}
-static int __devinit ivtv_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
int retval = 0;
int vbi_buf_size;
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 6ec7705af555..68387d4369d6 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -276,7 +276,7 @@ void ivtv_init_mpeg_decoder(struct ivtv *itv)
}
/* Try to restart the card & restore previous settings */
-int ivtv_firmware_restart(struct ivtv *itv)
+static int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index d47f41a0ef66..46e262becb67 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -200,21 +200,21 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
- init_data->type = RC_TYPE_OTHER;
+ init_data->type = RC_BIT_OTHER;
init_data->name = "AVerMedia AVerTV card";
break;
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -222,7 +222,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->name = itv->card_name;
/* FIXME: The protocol and RC_MAP needs to be corrected */
init_data->ir_codes = RC_MAP_EMPTY;
- init_data->type = RC_TYPE_UNKNOWN;
+ init_data->type = RC_BIT_UNKNOWN;
break;
}
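The RC_TYPE_* to RC_BIT_* renames track the rc-core change that turned the IR protocol field into a bitmask, so a receiver can advertise more than one protocol at once. A hedged sketch of filling in struct IR_i2c_init_data under that convention (the field names are the ones used in the hunk above; the particular combination and the device name are only illustrative):

#include <media/ir-kbd-i2c.h>
#include <media/rc-map.h>

static void example_fill_ir_init_data(struct IR_i2c_init_data *init_data)
{
	/* type is now a protocol bitmask; several RC_BIT_* values may be OR'ed */
	init_data->ir_codes = RC_MAP_HAUPPAUGE;
	init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
	init_data->type = RC_BIT_RC5;		/* was RC_TYPE_RC5 before the rename */
	init_data->name = "example IR receiver";	/* hypothetical name */
}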
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 949ae230e119..7a8b0d0b6127 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -993,7 +993,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
v4l2_std_id std;
int i;
- if (inp < 0 || inp >= itv->nof_inputs)
+ if (inp >= itv->nof_inputs)
return -EINVAL;
if (inp == itv->active_input) {
@@ -1168,7 +1168,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
}
}
-int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
+static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = fh2id(fh)->itv;
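Dropping the "inp < 0" half of the range check is safe because inp is an unsigned int, so that comparison can never be true (and some compilers warn about it); the gbufsize and rp checks later in this series are simplified for the same reason, and the saa7134 card[] check instead casts a signed value to unsigned so a single compare covers both bounds. A small standalone sketch of the idiom, with a made-up limit:

#include <stdio.h>

#define NOF_INPUTS 4U	/* hypothetical number of valid inputs */

static int validate_input(unsigned int inp)
{
	/* "inp < 0" is always false for an unsigned type,
	 * so the upper-bound test alone is sufficient */
	if (inp >= NOF_INPUTS)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", validate_input(2), validate_input(10));	/* 0 -1 */
	return 0;
}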
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index cc0251e01077..6fe9fe5293dc 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -151,7 +151,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int hopper_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
@@ -230,7 +231,7 @@ fail0:
return err;
}
-static void __devexit hopper_pci_remove(struct pci_dev *pdev)
+static void hopper_pci_remove(struct pci_dev *pdev)
{
struct mantis_pci *mantis = pci_get_drvdata(pdev);
@@ -259,12 +260,12 @@ static struct pci_driver hopper_pci_driver = {
.remove = hopper_pci_remove,
};
-static int __devinit hopper_init(void)
+static int hopper_init(void)
{
return pci_register_driver(&hopper_pci_driver);
}
-static void __devexit hopper_exit(void)
+static void hopper_exit(void)
{
return pci_unregister_driver(&hopper_pci_driver);
}
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index 0207d1f064e0..932a0d73a7f8 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -159,7 +159,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit mantis_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int mantis_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
@@ -249,7 +250,7 @@ fail0:
return err;
}
-static void __devexit mantis_pci_remove(struct pci_dev *pdev)
+static void mantis_pci_remove(struct pci_dev *pdev)
{
struct mantis_pci *mantis = pci_get_drvdata(pdev);
@@ -289,12 +290,12 @@ static struct pci_driver mantis_pci_driver = {
.remove = mantis_pci_remove,
};
-static int __devinit mantis_init(void)
+static int mantis_init(void)
{
return pci_register_driver(&mantis_pci_driver);
}
-static void __devexit mantis_exit(void)
+static void mantis_exit(void)
{
return pci_unregister_driver(&mantis_pci_driver);
}
diff --git a/drivers/media/pci/mantis/mantis_dvb.c b/drivers/media/pci/mantis/mantis_dvb.c
index 5d15c6b74d9b..5a71e1791cf5 100644
--- a/drivers/media/pci/mantis/mantis_dvb.c
+++ b/drivers/media/pci/mantis/mantis_dvb.c
@@ -144,7 +144,7 @@ static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return 0;
}
-int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+int mantis_dvb_init(struct mantis_pci *mantis)
{
struct mantis_hwconfig *config = mantis->hwconfig;
int result = -1;
@@ -271,7 +271,7 @@ err0:
}
EXPORT_SYMBOL_GPL(mantis_dvb_init);
-int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
+int mantis_dvb_exit(struct mantis_pci *mantis)
{
int err;
diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c
index e7794517fe26..937fb9d50213 100644
--- a/drivers/media/pci/mantis/mantis_i2c.c
+++ b/drivers/media/pci/mantis/mantis_i2c.c
@@ -217,7 +217,7 @@ static struct i2c_algorithm mantis_algo = {
.functionality = mantis_i2c_func,
};
-int __devinit mantis_i2c_init(struct mantis_pci *mantis)
+int mantis_i2c_init(struct mantis_pci *mantis)
{
u32 intstat, intmask;
struct i2c_adapter *i2c_adapter = &mantis->adapter;
diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c
index db6d54d3fec0..0e5252e5c0ef 100644
--- a/drivers/media/pci/mantis/mantis_input.c
+++ b/drivers/media/pci/mantis/mantis_input.c
@@ -18,6 +18,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#if 0 /* Currently unused */
+
#include <media/rc-core.h>
#include <linux/pci.h>
@@ -150,10 +152,11 @@ out:
return err;
}
-int mantis_exit(struct mantis_pci *mantis)
+int mantis_init_exit(struct mantis_pci *mantis)
{
rc_unregister_device(mantis->rc);
rc_map_unregister(&ir_mantis_map);
return 0;
}
+#endif
diff --git a/drivers/media/pci/mantis/mantis_pci.c b/drivers/media/pci/mantis/mantis_pci.c
index 371558af2d96..a846036ea022 100644
--- a/drivers/media/pci/mantis/mantis_pci.c
+++ b/drivers/media/pci/mantis/mantis_pci.c
@@ -46,7 +46,7 @@
#define DRIVER_NAME "Mantis Core"
-int __devinit mantis_pci_init(struct mantis_pci *mantis)
+int mantis_pci_init(struct mantis_pci *mantis)
{
u8 latency;
struct mantis_hwconfig *config = mantis->hwconfig;
diff --git a/drivers/media/pci/mantis/mantis_uart.c b/drivers/media/pci/mantis/mantis_uart.c
index 85e977861b4a..a70719218631 100644
--- a/drivers/media/pci/mantis/mantis_uart.c
+++ b/drivers/media/pci/mantis/mantis_uart.c
@@ -61,7 +61,7 @@ static struct {
#define UART_MAX_BUF 16
-int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
+static int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
{
struct mantis_hwconfig *config = mantis->hwconfig;
u32 stat = 0, i;
diff --git a/drivers/media/pci/mantis/mantis_vp1033.c b/drivers/media/pci/mantis/mantis_vp1033.c
index ad013e93ed11..115003e8d19d 100644
--- a/drivers/media/pci/mantis/mantis_vp1033.c
+++ b/drivers/media/pci/mantis/mantis_vp1033.c
@@ -83,7 +83,7 @@ u8 lgtdqcs001f_inittab[] = {
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
-int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
+static int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
@@ -115,8 +115,8 @@ int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
return 0;
}
-int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
- u32 srate, u32 ratio)
+static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index e5a76da86081..049e18667cd0 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1728,8 +1728,7 @@ static int meye_resume(struct pci_dev *pdev)
}
#endif
-static int __devinit meye_probe(struct pci_dev *pcidev,
- const struct pci_device_id *ent)
+static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
@@ -1889,7 +1888,7 @@ outnotdev:
return ret;
}
-static void __devexit meye_remove(struct pci_dev *pcidev)
+static void meye_remove(struct pci_dev *pcidev)
{
video_unregister_device(meye.vdev);
@@ -1935,7 +1934,7 @@ static struct pci_driver meye_driver = {
.name = "meye",
.id_table = meye_pci_tbl,
.probe = meye_probe,
- .remove = __devexit_p(meye_remove),
+ .remove = meye_remove,
#ifdef CONFIG_PM
.suspend = meye_suspend,
.resume = meye_resume,
@@ -1945,7 +1944,7 @@ static struct pci_driver meye_driver = {
static int __init meye_init(void)
{
gbuffers = max(2, min((int)gbuffers, MEYE_MAX_BUFNBRS));
- if (gbufsize < 0 || gbufsize > MEYE_MAX_BUFSIZE)
+ if (gbufsize > MEYE_MAX_BUFSIZE)
gbufsize = MEYE_MAX_BUFSIZE;
gbufsize = PAGE_ALIGN(gbufsize);
printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 96a13ed197d0..fad214113669 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -425,8 +425,10 @@ static int ReadEEProm(struct i2c_adapter *adapter,
status = i2c_read_eeprom(adapter, 0x50, Addr, data, Length);
if (!status) {
*pLength = EETag[2];
+#if 0
if (Length < EETag[2])
- ; /*status=STATUS_BUFFER_OVERFLOW; */
+ status = STATUS_BUFFER_OVERFLOW;
+#endif
}
}
return status;
@@ -741,7 +743,7 @@ static struct ngene_info ngene_info_terratec = {
/****************************************************************************/
-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
+static const struct pci_device_id ngene_id_tbl[] = {
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
@@ -798,7 +800,7 @@ static struct pci_driver ngene_pci_driver = {
.name = "ngene",
.id_table = ngene_id_tbl,
.probe = ngene_probe,
- .remove = __devexit_p(ngene_remove),
+ .remove = ngene_remove,
.err_handler = &ngene_errors,
.shutdown = ngene_shutdown,
};
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index c8e0d5b99d4c..37ebc42392ad 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -752,8 +752,8 @@ void set_transfer(struct ngene_channel *chan, int state)
if (chan->mode & NGENE_IO_TSIN)
chan->pBufferExchange = tsin_exchange;
spin_unlock_irq(&chan->state_lock);
- } else
- ;/* printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
+ }
+ /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
ngreadl(0x9310)); */
ret = ngene_command_stream_control(dev, chan->number,
@@ -1636,7 +1636,7 @@ void ngene_shutdown(struct pci_dev *pdev)
/* device probe/remove calls ************************************************/
/****************************************************************************/
-void __devexit ngene_remove(struct pci_dev *pdev)
+void ngene_remove(struct pci_dev *pdev)
{
struct ngene *dev = pci_get_drvdata(pdev);
int i;
@@ -1652,8 +1652,7 @@ void __devexit ngene_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
struct ngene *dev;
int stat = 0;
@@ -1691,7 +1690,8 @@ int __devinit ngene_probe(struct pci_dev *pci_dev,
dev->i2c_current_bus = -1;
/* Register DVB adapters and devices for both channels */
- if (init_channels(dev) < 0)
+ stat = init_channels(dev);
+ if (stat < 0)
goto fail2;
return 0;
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 5443dc0caea5..22c39ff6bfa0 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -887,9 +887,8 @@ struct ngene_buffer {
/* Provided by ngene-core.c */
-int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id);
-void __devexit ngene_remove(struct pci_dev *pdev);
+int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id);
+void ngene_remove(struct pci_dev *pdev);
void ngene_shutdown(struct pci_dev *pdev);
int ngene_command(struct ngene *dev, struct ngene_command *com);
int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level);
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index f148b19a206a..2290faee5852 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -240,7 +240,7 @@ static void pluto_set_dma_addr(struct pluto *pluto)
pluto_writereg(pluto, REG_PCAR, pluto->dma_addr);
}
-static int __devinit pluto_dma_map(struct pluto *pluto)
+static int pluto_dma_map(struct pluto *pluto)
{
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
@@ -368,7 +368,7 @@ static irqreturn_t pluto_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __devinit pluto_enable_irqs(struct pluto *pluto)
+static void pluto_enable_irqs(struct pluto *pluto)
{
u32 val = pluto_readreg(pluto, REG_TSCR);
@@ -394,7 +394,7 @@ static void pluto_disable_irqs(struct pluto *pluto)
pluto_write_tscr(pluto, val);
}
-static int __devinit pluto_hw_init(struct pluto *pluto)
+static int pluto_hw_init(struct pluto *pluto)
{
pluto_reset_frontend(pluto, 1);
@@ -505,7 +505,7 @@ static int pluto2_request_firmware(struct dvb_frontend *fe,
return request_firmware(fw, name, &pluto->pdev->dev);
}
-static struct tda1004x_config pluto2_fe_config __devinitdata = {
+static struct tda1004x_config pluto2_fe_config = {
.demod_address = I2C_ADDR_TDA10046 >> 1,
.invert = 1,
.invert_oclk = 0,
@@ -515,7 +515,7 @@ static struct tda1004x_config pluto2_fe_config __devinitdata = {
.request_firmware = pluto2_request_firmware,
};
-static int __devinit frontend_init(struct pluto *pluto)
+static int frontend_init(struct pluto *pluto)
{
int ret;
@@ -536,14 +536,14 @@ static int __devinit frontend_init(struct pluto *pluto)
return 0;
}
-static void __devinit pluto_read_rev(struct pluto *pluto)
+static void pluto_read_rev(struct pluto *pluto)
{
u32 val = pluto_readreg(pluto, REG_MISC) & MISC_DVR;
dev_info(&pluto->pdev->dev, "board revision %d.%d\n",
(val >> 12) & 0x0f, (val >> 4) & 0xff);
}
-static void __devinit pluto_read_mac(struct pluto *pluto, u8 *mac)
+static void pluto_read_mac(struct pluto *pluto, u8 *mac)
{
u32 val = pluto_readreg(pluto, REG_MMAC);
mac[0] = (val >> 8) & 0xff;
@@ -560,7 +560,7 @@ static void __devinit pluto_read_mac(struct pluto *pluto, u8 *mac)
dev_info(&pluto->pdev->dev, "MAC %pM\n", mac);
}
-static int __devinit pluto_read_serial(struct pluto *pluto)
+static int pluto_read_serial(struct pluto *pluto)
{
struct pci_dev *pdev = pluto->pdev;
unsigned int i, j;
@@ -588,8 +588,7 @@ out:
return 0;
}
-static int __devinit pluto2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pluto *pluto;
struct dvb_adapter *dvb_adapter;
@@ -742,7 +741,7 @@ err_kfree:
goto out;
}
-static void __devexit pluto2_remove(struct pci_dev *pdev)
+static void pluto2_remove(struct pci_dev *pdev)
{
struct pluto *pluto = pci_get_drvdata(pdev);
struct dvb_adapter *dvb_adapter = &pluto->dvb_adapter;
@@ -777,7 +776,7 @@ static void __devexit pluto2_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_PLUTO2 0x0001
#endif
-static struct pci_device_id pluto2_id_table[] __devinitdata = {
+static struct pci_device_id pluto2_id_table[] = {
{
.vendor = PCI_VENDOR_ID_SCM,
.device = PCI_DEVICE_ID_PLUTO2,
@@ -794,7 +793,7 @@ static struct pci_driver pluto2_driver = {
.name = DRIVER_NAME,
.id_table = pluto2_id_table,
.probe = pluto2_probe,
- .remove = __devexit_p(pluto2_remove),
+ .remove = pluto2_remove,
};
static int __init pluto2_init(void)
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 15b35c4725f1..e9211086df49 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1058,7 +1058,7 @@ static void pt1_i2c_init(struct pt1 *pt1)
pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}
-static void __devexit pt1_remove(struct pci_dev *pdev)
+static void pt1_remove(struct pci_dev *pdev)
{
struct pt1 *pt1;
void __iomem *regs;
@@ -1083,8 +1083,7 @@ static void __devexit pt1_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __devinit
-pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
void __iomem *regs;
@@ -1222,7 +1221,7 @@ MODULE_DEVICE_TABLE(pci, pt1_id_table);
static struct pci_driver pt1_driver = {
.name = DRIVER_NAME,
.probe = pt1_probe,
- .remove = __devexit_p(pt1_remove),
+ .remove = pt1_remove,
.id_table = pt1_id_table,
};
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index f2b37e05b964..e359d200d698 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -754,7 +754,7 @@ static int saa7134_hwfini(struct saa7134_dev *dev)
return 0;
}
-static void __devinit must_configure_manually(int has_eeprom)
+static void must_configure_manually(int has_eeprom)
{
unsigned int i,p;
@@ -860,8 +860,8 @@ static void mpeg_ops_detach(struct saa7134_mpeg_ops *ops,
dev->mops = NULL;
}
-static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int saa7134_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct saa7134_dev *dev;
struct saa7134_mpeg_ops *mops;
@@ -944,8 +944,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
/* board config */
dev->board = pci_id->driver_data;
- if (card[dev->nr] >= 0 &&
- card[dev->nr] < saa7134_bcount)
+ if ((unsigned)card[dev->nr] < saa7134_bcount)
dev->board = card[dev->nr];
if (SAA7134_BOARD_UNKNOWN == dev->board)
must_configure_manually(0);
@@ -1103,7 +1102,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
return err;
}
-static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
+static void saa7134_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
@@ -1323,7 +1322,7 @@ static struct pci_driver saa7134_pci_driver = {
.name = "saa7134",
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
- .remove = __devexit_p(saa7134_finidev),
+ .remove = saa7134_finidev,
#ifdef CONFIG_PM
.suspend = saa7134_suspend,
.resume = saa7134_resume
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 0f78f5e537e2..e761262f7475 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -990,7 +990,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
dev->init_data.ir_codes = RC_MAP_BEHOLD;
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 4a77124ee70e..3abf52711e13 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2511,7 +2511,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
/* sanitycheck insmod options */
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
- if (gbufsize < 0 || gbufsize > gbufsize_max)
+ if (gbufsize > gbufsize_max)
gbufsize = gbufsize_max;
gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index c24b6512bd8f..075908fae4d9 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -739,7 +739,7 @@ extern int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
extern struct saa7134_board saa7134_boards[];
extern const unsigned int saa7134_bcount;
-extern struct pci_device_id __devinitdata saa7134_pci_tbl[];
+extern struct pci_device_id saa7134_pci_tbl[];
extern int saa7134_board_init1(struct saa7134_dev *dev);
extern int saa7134_board_init2(struct saa7134_dev *dev);
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index eff7135cf0e8..e042963d377d 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -165,7 +165,7 @@ int saa7164_api_set_vbi_format(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_gop_size(struct saa7164_port *port)
+static int saa7164_api_set_gop_size(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct tmComResEncVideoGopStructure gs;
@@ -619,7 +619,7 @@ int saa7164_api_get_videomux(struct saa7164_port *port)
return ret;
}
-int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
+static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
{
struct saa7164_dev *dev = port->dev;
@@ -822,8 +822,8 @@ int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
&reg[0], 128, buf);
}
-int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
- struct saa7164_port *port)
+static int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
+ struct saa7164_port *port)
{
struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;
@@ -858,9 +858,10 @@ int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResTSFormatDescrHeader *tsfmt)
+static int
+saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResTSFormatDescrHeader *tsfmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
@@ -892,9 +893,10 @@ int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
- struct saa7164_port *port,
- struct tmComResPSFormatDescrHeader *fmt)
+static int
+saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
+ struct saa7164_port *port,
+ struct tmComResPSFormatDescrHeader *fmt)
{
dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
@@ -925,7 +927,7 @@ int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
return 0;
}
-int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
+static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
struct saa7164_port *tsport = NULL;
struct saa7164_port *encport = NULL;
@@ -1486,7 +1488,7 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
return ret == SAA_OK ? 0 : -EIO;
}
-int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
+static int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
u8 pin, u8 state)
{
int ret;
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a7f58a998752..5f6f3094c44e 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -81,7 +81,7 @@ void saa7164_bus_dump(struct saa7164_dev *dev)
}
/* Intensionally throw a BUG() if the state of the message bus looks corrupt */
-void saa7164_bus_verify(struct saa7164_dev *dev)
+static void saa7164_bus_verify(struct saa7164_dev *dev)
{
struct tmComResBusInfo *b = &dev->bus;
int bug = 0;
@@ -106,8 +106,8 @@ void saa7164_bus_verify(struct saa7164_dev *dev)
}
}
-void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo* m,
- void *buf)
+static void saa7164_bus_dumpmsg(struct saa7164_dev *dev, struct tmComResInfo *m,
+ void *buf)
{
dprintk(DBGLVL_BUS, "Dumping msg structure:\n");
dprintk(DBGLVL_BUS, " .id = %d\n", m->id);
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 62fac7f9d04e..cfabcbacc33d 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -23,7 +23,7 @@
#include "saa7164.h"
-int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
+static int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
{
int i, ret = -1;
@@ -42,7 +42,7 @@ int saa7164_cmd_alloc_seqno(struct saa7164_dev *dev)
return ret;
}
-void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -54,7 +54,7 @@ void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
+static void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
{
mutex_lock(&dev->lock);
if ((dev->cmds[seqno].inuse == 1) &&
@@ -64,7 +64,7 @@ void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)
mutex_unlock(&dev->lock);
}
-u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
+static u32 saa7164_cmd_timeout_get(struct saa7164_dev *dev, u8 seqno)
{
int ret = 0;
@@ -132,7 +132,7 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
/* Commands to the f/w get marshelled to/from this code then onto the PCI
* -bus/c running buffer. */
-int saa7164_cmd_dequeue(struct saa7164_dev *dev)
+static int saa7164_cmd_dequeue(struct saa7164_dev *dev)
{
int loop = 1;
int ret;
@@ -186,8 +186,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
return SAA_OK;
}
-int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
- void *buf)
+static int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
+ void *buf)
{
struct tmComResBusInfo *bus = &dev->bus;
u8 cmd_sent;
@@ -259,7 +259,7 @@ out:
/* Wait for a signal event, without holding a mutex. Either return TIMEOUT if
* the event never occurred, or SAA_OK if it was signaled during the wait.
*/
-int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
+static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
{
wait_queue_head_t *q = NULL;
int ret = SAA_BUS_TIMEOUT;
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 2c9ad878bef3..63502e7a2a76 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -410,7 +410,7 @@ static void saa7164_work_enchandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
@@ -486,7 +486,7 @@ static void saa7164_work_vbihandler(struct work_struct *w)
} else
rp = (port->last_svc_rp + 1) % 8;
- if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) {
+ if (rp > (port->hwcfg.buffercount - 1)) {
printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp);
break;
}
@@ -1185,8 +1185,8 @@ static int saa7164_thread_function(void *data)
return 0;
}
-static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int saa7164_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct saa7164_dev *dev;
int err, i;
@@ -1376,7 +1376,7 @@ static void saa7164_shutdown(struct saa7164_dev *dev)
dprintk(1, "%s()\n", __func__);
}
-static void __devexit saa7164_finidev(struct pci_dev *pci_dev)
+static void saa7164_finidev(struct pci_dev *pci_dev)
{
struct saa7164_dev *dev = pci_get_drvdata(pci_dev);
@@ -1459,7 +1459,7 @@ static struct pci_driver saa7164_pci_driver = {
.name = "saa7164",
.id_table = saa7164_pci_tbl,
.probe = saa7164_initdev,
- .remove = __devexit_p(saa7164_finidev),
+ .remove = saa7164_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index a9ed686ad08a..994018e2d0d6 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1101,7 +1101,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -1287,8 +1288,8 @@ static const struct v4l2_file_operations mpeg_fops = {
.unlocked_ioctl = video_ioctl2,
};
-int saa7164_g_chip_ident(struct file *file, void *fh,
- struct v4l2_dbg_chip_ident *chip)
+static int saa7164_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *chip)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1297,8 +1298,8 @@ int saa7164_g_chip_ident(struct file *file, void *fh,
return 0;
}
-int saa7164_g_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
@@ -1310,8 +1311,8 @@ int saa7164_g_register(struct file *file, void *fh,
return 0;
}
-int saa7164_s_register(struct file *file, void *fh,
- struct v4l2_dbg_register *reg)
+static int saa7164_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
{
struct saa7164_port *port = ((struct saa7164_encoder_fh *)fh)->port;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index a266bf0169e6..86763203d61d 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -37,7 +37,7 @@ struct fw_header {
u32 version;
};
-int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while ((saa7164_readl(reg) & 0x01) == 0) {
@@ -53,7 +53,7 @@ int saa7164_dl_wait_ack(struct saa7164_dev *dev, u32 reg)
return 0;
}
-int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
+static int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
{
u32 timeout = SAA_DEVICE_TIMEOUT;
while (saa7164_readl(reg) & 0x01) {
@@ -71,8 +71,8 @@ int saa7164_dl_wait_clr(struct saa7164_dev *dev, u32 reg)
/* TODO: move dlflags into dev-> and change to write/readl/b */
/* TODO: Excessive levels of debug */
-int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
- u32 dlflags, u8 *dst, u32 dstsize)
+static int saa7164_downloadimage(struct saa7164_dev *dev, u8 *src, u32 srcsize,
+ u32 dlflags, u8 *dst, u32 dstsize)
{
u32 reg, timeout, offset;
u8 *srcbuf = NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index d8e6c8f14079..b4532299c0ed 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -984,7 +984,8 @@ out:
return ret;
}
-int saa7164_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f)
+static int saa7164_vbi_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
/* ntsc */
f->fmt.vbi.samples_per_line = 1600;
@@ -1047,7 +1048,8 @@ static int fops_release(struct file *file)
return 0;
}
-struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
+static struct
+saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
{
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 4c10205264d4..27ae48842656 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1205,8 +1205,8 @@ static void vip_gpio_release(struct device *dev, int pin, const char *name)
*
* -ENODEV, device could not be detected or registered
*/
-static int __devinit sta2x11_vip_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sta2x11_vip_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
struct sta2x11_vip *vip;
@@ -1376,7 +1376,7 @@ disable:
* free memory
* free GPIO pins
*/
-static void __devexit sta2x11_vip_remove_one(struct pci_dev *pdev)
+static void sta2x11_vip_remove_one(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
struct sta2x11_vip *vip =
@@ -1517,7 +1517,7 @@ static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
static struct pci_driver sta2x11_vip_driver = {
.name = DRV_NAME,
.probe = sta2x11_vip_init_one,
- .remove = __devexit_p(sta2x11_vip_remove_one),
+ .remove = sta2x11_vip_remove_one,
.id_table = sta2x11_vip_pci_tbl,
#ifdef CONFIG_PM
.suspend = sta2x11_vip_suspend,
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 4bd8bd56befc..4656d4a10af0 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -2367,8 +2367,8 @@ static int frontend_init(struct av7110 *av7110)
* The same behaviour of missing VSYNC can be duplicated on budget
* cards, by seting DD1_INIT trigger mode 7 in 3rd nibble.
*/
-static int __devinit av7110_attach(struct saa7146_dev* dev,
- struct saa7146_pci_extension_data *pci_ext)
+static int av7110_attach(struct saa7146_dev* dev,
+ struct saa7146_pci_extension_data *pci_ext)
{
const int length = TS_WIDTH * TS_HEIGHT;
struct pci_dev *pdev = dev->pci;
@@ -2761,7 +2761,7 @@ err_kfree_0:
goto out;
}
-static int __devexit av7110_detach(struct saa7146_dev* saa)
+static int av7110_detach(struct saa7146_dev* saa)
{
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
@@ -2910,7 +2910,7 @@ static struct saa7146_extension av7110_extension_driver = {
.module = THIS_MODULE,
.pci_tbl = &pci_tbl[0],
.attach = av7110_attach,
- .detach = __devexit_p(av7110_detach),
+ .detach = av7110_detach,
.irq_mask = MASK_19 | MASK_03 | MASK_10,
.irq_func = av7110_irq,
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 88b3b2d6cc0e..a378662b1dcf 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/time.h>
#include <linux/dvb/video.h>
#include <linux/dvb/audio.h>
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index 908f272fe26c..eb822862a646 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -324,7 +324,7 @@ static void ir_handler(struct av7110 *av7110, u32 ircom)
}
-int __devinit av7110_ir_init(struct av7110 *av7110)
+int av7110_ir_init(struct av7110 *av7110)
{
struct input_dev *input_dev;
static struct proc_dir_entry *e;
@@ -385,7 +385,7 @@ int __devinit av7110_ir_init(struct av7110 *av7110)
}
-void __devexit av7110_ir_exit(struct av7110 *av7110)
+void av7110_ir_exit(struct av7110 *av7110)
{
int i;
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 12ddb53c58dc..1f8b1bb0bf9f 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1477,8 +1477,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
if (saa7113_init(budget_av) == 0) {
budget_av->has_saa7113 = 1;
-
- if (0 != saa7146_vv_init(dev, &vv_data)) {
+ err = saa7146_vv_init(dev, &vv_data);
+ if (err != 0) {
/* fixme: proper cleanup here */
ERR("cannot init vv subsystem\n");
return err;
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index fffc54b452c8..a90a3b9b09bf 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -369,7 +369,7 @@ static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END };
static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END };
static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END };
-static struct card_info zoran_cards[NUM_CARDS] __devinitdata = {
+static struct card_info zoran_cards[NUM_CARDS] = {
{
.type = DC10_old,
.name = "DC10(old)",
@@ -948,8 +948,7 @@ zoran_open_init_params (struct zoran *zr)
zr->testing = 0;
}
-static void __devinit
-test_interrupts (struct zoran *zr)
+static void test_interrupts (struct zoran *zr)
{
DEFINE_WAIT(wait);
int timeout, icr;
@@ -974,8 +973,7 @@ test_interrupts (struct zoran *zr)
btwrite(icr, ZR36057_ICR);
}
-static int __devinit
-zr36057_init (struct zoran *zr)
+static int zr36057_init (struct zoran *zr)
{
int j, err;
@@ -1083,7 +1081,7 @@ exit_free:
return err;
}
-static void __devexit zoran_remove(struct pci_dev *pdev)
+static void zoran_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct zoran *zr = to_zoran(v4l2_dev);
@@ -1129,9 +1127,8 @@ zoran_vdev_release (struct video_device *vdev)
kfree(vdev);
}
-static struct videocodec_master * __devinit
-zoran_setup_videocodec (struct zoran *zr,
- int type)
+static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr,
+ int type)
{
struct videocodec_master *m = NULL;
@@ -1192,8 +1189,7 @@ static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *
* Scan for a Buz card (actually for the PCI controller ZR36057),
* request the irq and map the io memory
*/
-static int __devinit zoran_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned char latency, need_latency;
struct zoran *zr;
@@ -1459,7 +1455,7 @@ static struct pci_driver zoran_driver = {
.name = "zr36067",
.id_table = zr36067_pci_tbl,
.probe = zoran_probe,
- .remove = __devexit_p(zoran_remove),
+ .remove = zoran_remove,
};
static int __init zoran_init(void)
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 53f12c7466b0..e60ae41e2319 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -3080,7 +3080,7 @@ static const struct v4l2_file_operations zoran_fops = {
.poll = zoran_poll,
};
-struct video_device zoran_template __devinitdata = {
+struct video_device zoran_template = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 181c7686e412..3dcfea612c42 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -109,6 +109,18 @@ config VIDEO_OMAP3_DEBUG
---help---
Enable debug messages on OMAP 3 camera controller driver.
+config VIDEO_S3C_CAMIF
+ tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (PLAT_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
+ host interface (CAMIF).
+
+ To compile this driver as a module, choose M here: the module
+ will be called s3c-camif.
+
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/s5p-fimc/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index baaa55026c8e..4817d2802171 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VIDEO_CODA) += coda.o
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index cb2eb26850b1..1aad2a65d2f3 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -862,7 +862,7 @@ static struct v4l2_file_operations bcap_fops = {
.poll = bcap_poll
};
-static int __devinit bcap_probe(struct platform_device *pdev)
+static int bcap_probe(struct platform_device *pdev)
{
struct bcap_device *bcap_dev;
struct video_device *vfd;
@@ -1026,7 +1026,7 @@ err_free_dev:
return ret;
}
-static int __devexit bcap_remove(struct platform_device *pdev)
+static int bcap_remove(struct platform_device *pdev)
{
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct bcap_device *bcap_dev = container_of(v4l2_dev,
@@ -1048,21 +1048,9 @@ static struct platform_driver bcap_driver = {
.owner = THIS_MODULE,
},
.probe = bcap_probe,
- .remove = __devexit_p(bcap_remove),
+ .remove = bcap_remove,
};
-
-static __init int bcap_init(void)
-{
- return platform_driver_register(&bcap_driver);
-}
-
-static __exit void bcap_exit(void)
-{
- platform_driver_unregister(&bcap_driver);
-}
-
-module_init(bcap_init);
-module_exit(bcap_exit);
+module_platform_driver(bcap_driver);
MODULE_DESCRIPTION("Analog Devices blackfin video capture driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index cd04ae252c30..4a980e029ca7 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -23,8 +23,8 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/of.h>
+#include <linux/platform_data/imx-iram.h>
-#include <mach/iram.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -1540,7 +1540,7 @@ static irqreturn_t coda_irq_handler(int irq, void *data)
u32 wr_ptr, start_ptr;
struct coda_ctx *ctx;
- __cancel_delayed_work(&dev->timeout);
+ cancel_delayed_work(&dev->timeout);
/* read status register to attend the IRQ */
coda_read(dev, CODA_REG_BIT_INT_STATUS);
@@ -1877,7 +1877,7 @@ static const struct coda_devtype coda_devdata[] = {
static struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_7541 },
+ { .name = "coda-imx53", .driver_data = CODA_IMX53 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, coda_platform_ids);
@@ -1891,7 +1891,7 @@ static const struct of_device_id coda_dt_ids[] = {
MODULE_DEVICE_TABLE(of, coda_dt_ids);
#endif
-static int __devinit coda_probe(struct platform_device *pdev)
+static int coda_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
@@ -2033,7 +2033,7 @@ static int coda_remove(struct platform_device *pdev)
static struct platform_driver coda_driver = {
.probe = coda_probe,
- .remove = __devexit_p(coda_remove),
+ .remove = coda_remove,
.driver = {
.name = CODA_NAME,
.owner = THIS_MODULE,
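The switch from __cancel_delayed_work() to cancel_delayed_work() in the coda IRQ handler above follows the workqueue rework that removed the double-underscore variant; cancel_delayed_work() no longer sleeps and is usable from atomic context, returning true if the pending work was cancelled before it ran. A small illustrative sketch with a hypothetical timeout handler:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/module.h>

static void example_timeout_fn(struct work_struct *work)
{
	pr_info("example: operation timed out\n");	/* hypothetical handling */
}

static DECLARE_DELAYED_WORK(example_timeout, example_timeout_fn);

static void example_start_op(void)
{
	/* arm a 2 second watchdog for the operation */
	schedule_delayed_work(&example_timeout, 2 * HZ);
}

static void example_op_completed_in_irq(void)
{
	/* does not sleep, so it may be called from the interrupt handler;
	 * the old __cancel_delayed_work() variant no longer exists */
	cancel_delayed_work(&example_timeout);
}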
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 78e26d24f637..3c56037c82fc 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -101,7 +101,7 @@ config VIDEO_DM644X_VPBE
tristate "DM644X VPBE HW module"
depends on ARCH_DAVINCI_DM644x
select VIDEO_VPSS_SYSTEM
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
help
Enables VPBE modules used for display on a DM644x
SoC.
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index ce0e4131c067..f263cabade7a 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -965,7 +965,7 @@ static struct ccdc_hw_device ccdc_hw_dev = {
},
};
-static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
+static int dm355_ccdc_probe(struct platform_device *pdev)
{
void (*setup_pinmux)(void);
struct resource *res;
@@ -1003,7 +1003,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1014,7 +1014,7 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1034,8 +1034,10 @@ static int __devinit dm355_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1050,6 +1052,8 @@ static int dm355_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
@@ -1065,7 +1069,7 @@ static struct platform_driver dm355_ccdc_driver = {
.name = "dm355_ccdc",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(dm355_ccdc_remove),
+ .remove = dm355_ccdc_remove,
.probe = dm355_ccdc_probe,
};
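The clk_enable() to clk_prepare_enable() conversions here, together with the clk_disable_unprepare() calls added on the error and remove paths, follow the common clock framework rule that a clock must be prepared (which may sleep) before it can be enabled, and unprepared after it is disabled. A hedged sketch of the usual probe-time pattern, with a hypothetical clock connection id:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *example_mclk;

static int example_clk_setup(struct platform_device *pdev)
{
	int ret;

	example_mclk = clk_get(&pdev->dev, "master");	/* hypothetical con_id */
	if (IS_ERR(example_mclk))
		return PTR_ERR(example_mclk);

	/* prepare (may sleep) and enable in one combined call */
	ret = clk_prepare_enable(example_mclk);
	if (ret) {
		clk_put(example_mclk);
		return ret;
	}
	return 0;
}

static void example_clk_teardown(void)
{
	/* reverse of the combined call above */
	clk_disable_unprepare(example_mclk);
	clk_put(example_mclk);
}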
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index ee7942b1996e..318e80512998 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -957,7 +957,7 @@ static struct ccdc_hw_device ccdc_hw_dev = {
},
};
-static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
+static int dm644x_ccdc_probe(struct platform_device *pdev)
{
struct resource *res;
int status = 0;
@@ -994,7 +994,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.mclk);
goto fail_nomap;
}
- if (clk_enable(ccdc_cfg.mclk)) {
+ if (clk_prepare_enable(ccdc_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1005,7 +1005,7 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
status = PTR_ERR(ccdc_cfg.sclk);
goto fail_mclk;
}
- if (clk_enable(ccdc_cfg.sclk)) {
+ if (clk_prepare_enable(ccdc_cfg.sclk)) {
status = -ENODEV;
goto fail_sclk;
}
@@ -1013,8 +1013,10 @@ static int __devinit dm644x_ccdc_probe(struct platform_device *pdev)
printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
return 0;
fail_sclk:
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.sclk);
fail_mclk:
+ clk_disable_unprepare(ccdc_cfg.mclk);
clk_put(ccdc_cfg.mclk);
fail_nomap:
iounmap(ccdc_cfg.base_addr);
@@ -1029,6 +1031,8 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
{
struct resource *res;
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
clk_put(ccdc_cfg.mclk);
clk_put(ccdc_cfg.sclk);
iounmap(ccdc_cfg.base_addr);
@@ -1046,8 +1050,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
/* Disable CCDC */
ccdc_enable(0);
/* Disable both master and slave clock */
- clk_disable(ccdc_cfg.mclk);
- clk_disable(ccdc_cfg.sclk);
+ clk_disable_unprepare(ccdc_cfg.mclk);
+ clk_disable_unprepare(ccdc_cfg.sclk);
return 0;
}
@@ -1055,8 +1059,8 @@ static int dm644x_ccdc_suspend(struct device *dev)
static int dm644x_ccdc_resume(struct device *dev)
{
/* Enable both master and slave clock */
- clk_enable(ccdc_cfg.mclk);
- clk_enable(ccdc_cfg.sclk);
+ clk_prepare_enable(ccdc_cfg.mclk);
+ clk_prepare_enable(ccdc_cfg.sclk);
/* Restore CCDC context */
ccdc_restore_context();
@@ -1074,7 +1078,7 @@ static struct platform_driver dm644x_ccdc_driver = {
.owner = THIS_MODULE,
.pm = &dm644x_ccdc_pm_ops,
},
- .remove = __devexit_p(dm644x_ccdc_remove),
+ .remove = dm644x_ccdc_remove,
.probe = dm644x_ccdc_probe,
};
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index b99d5423e3a8..5050f9265f48 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1032,7 +1032,7 @@ static struct ccdc_hw_device isif_hw_dev = {
},
};
-static int __devinit isif_probe(struct platform_device *pdev)
+static int isif_probe(struct platform_device *pdev)
{
void (*setup_pinmux)(void);
struct resource *res;
@@ -1053,7 +1053,7 @@ static int __devinit isif_probe(struct platform_device *pdev)
status = PTR_ERR(isif_cfg.mclk);
goto fail_mclk;
}
- if (clk_enable(isif_cfg.mclk)) {
+ if (clk_prepare_enable(isif_cfg.mclk)) {
status = -ENODEV;
goto fail_mclk;
}
@@ -1125,6 +1125,7 @@ fail_nobase_res:
i--;
}
fail_mclk:
+ clk_disable_unprepare(isif_cfg.mclk);
clk_put(isif_cfg.mclk);
vpfe_unregister_ccdc_device(&isif_hw_dev);
return status;
@@ -1145,6 +1146,8 @@ static int isif_remove(struct platform_device *pdev)
i++;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
+ clk_disable_unprepare(isif_cfg.mclk);
+ clk_put(isif_cfg.mclk);
return 0;
}
@@ -1153,7 +1156,7 @@ static struct platform_driver isif_driver = {
.name = "isif",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(isif_remove),
+ .remove = isif_remove,
.probe = isif_probe,
};
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 69d7a58c92c3..841b91a3d255 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -612,7 +612,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
ret = PTR_ERR(vpbe_dev->dac_clk);
goto fail_mutex_unlock;
}
- if (clk_enable(vpbe_dev->dac_clk)) {
+ if (clk_prepare_enable(vpbe_dev->dac_clk)) {
ret = -ENODEV;
goto fail_mutex_unlock;
}
@@ -759,8 +759,10 @@ fail_kfree_encoders:
fail_dev_unregister:
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
fail_clk_put:
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
fail_mutex_unlock:
mutex_unlock(&vpbe_dev->lock);
return ret;
@@ -777,8 +779,10 @@ fail_mutex_unlock:
static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
{
v4l2_device_unregister(&vpbe_dev->v4l2_dev);
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ clk_disable_unprepare(vpbe_dev->dac_clk);
clk_put(vpbe_dev->dac_clk);
+ }
kfree(vpbe_dev->amp);
kfree(vpbe_dev->encoders);
@@ -803,7 +807,7 @@ static struct vpbe_device_ops vpbe_dev_ops = {
.set_mode = vpbe_set_mode,
};
-static __devinit int vpbe_probe(struct platform_device *pdev)
+static int vpbe_probe(struct platform_device *pdev)
{
struct vpbe_device *vpbe_dev;
struct vpbe_config *cfg;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 161c77650e2f..e707a6f2325b 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -47,6 +47,9 @@ static int debug;
module_param(debug, int, 0644);
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer);
+
static int venc_is_second_field(struct vpbe_display *disp_dev)
{
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
@@ -73,10 +76,11 @@ static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
if (layer->cur_frm == layer->next_frm)
return;
ktime_get_ts(&timevalue);
- layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
- layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
- layer->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&layer->cur_frm->done);
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_sec =
+ timevalue.tv_sec;
+ layer->cur_frm->vb.v4l2_buf.timestamp.tv_usec =
+ timevalue.tv_nsec / NSEC_PER_USEC;
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
layer->cur_frm = layer->next_frm;
}
@@ -99,16 +103,14 @@ static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
* otherwise hold on current frame
* Get next from the buffer queue
*/
- layer->next_frm = list_entry(
- layer->dma_queue.next,
- struct videobuf_buffer,
- queue);
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
/* Remove that from the buffer queue */
- list_del(&layer->next_frm->queue);
+ list_del(&layer->next_frm->list);
spin_unlock(&disp_obj->dma_queue_lock);
/* Mark state of the frame to active */
- layer->next_frm->state = VIDEOBUF_ACTIVE;
- addr = videobuf_to_dma_contig(layer->next_frm);
+ layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0);
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
addr,
@@ -199,39 +201,29 @@ static irqreturn_t venc_isr(int irq, void *arg)
/*
* vpbe_buffer_prepare()
- * This is the callback function called from videobuf_qbuf() function
+ * This is the callback function called from vb2_qbuf() function
* the buffer is prepared and user space virtual address is converted into
* physical address
*/
-static int vpbe_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vpbe_buffer_prepare(struct vb2_buffer *vb)
{
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
unsigned long addr;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"vpbe_buffer_prepare\n");
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = layer->pix_fmt.width;
- vb->height = layer->pix_fmt.height;
- vb->size = layer->pix_fmt.sizeimage;
- vb->field = field;
-
- ret = videobuf_iolock(q, vb, NULL);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
- user address\n");
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
return -EINVAL;
- }
-
- addr = videobuf_to_dma_contig(vb);
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (q->streaming) {
if (!IS_ALIGNED(addr, 8)) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -240,7 +232,6 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
return -EINVAL;
}
}
- vb->state = VIDEOBUF_PREPARED;
}
return 0;
}
@@ -249,22 +240,26 @@ static int vpbe_buffer_prepare(struct videobuf_queue *q,
* vpbe_buffer_setup()
* This function allocates memory for the buffers
*/
-static int vpbe_buffer_setup(struct videobuf_queue *q,
- unsigned int *count,
- unsigned int *size)
+static int
+vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
- *size = layer->pix_fmt.sizeimage;
-
/* Store number of buffers allocated in numbuffer member */
- if (*count < VPBE_DEFAULT_NUM_BUFS)
- *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+ if (*nbuffers < VPBE_DEFAULT_NUM_BUFS)
+ *nbuffers = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ *nplanes = 1;
+ sizes[0] = layer->pix_fmt.sizeimage;
+ alloc_ctxs[0] = layer->alloc_ctx;
return 0;
}
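The videobuf to videobuf2 conversion in this file swaps videobuf_queue_ops for vb2_ops: queue_setup() reports how many buffers and planes the queue needs and their sizes, buf_prepare()/buf_queue() validate and hand over individual vb2_buffer objects, and start_streaming()/stop_streaming() bracket DMA. A hedged sketch of the queue_setup shape used above, with the driver state reduced to a hypothetical example_ctx carrying a fixed image size:

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct example_ctx {
	unsigned int sizeimage;	/* hypothetical per-format image size */
	void *alloc_ctx;	/* obtained from vb2_dma_contig_init_ctx() */
};

static int example_queue_setup(struct vb2_queue *vq,
			       const struct v4l2_format *fmt,
			       unsigned int *nbuffers, unsigned int *nplanes,
			       unsigned int sizes[], void *alloc_ctxs[])
{
	struct example_ctx *ctx = vb2_get_drv_priv(vq);

	if (*nbuffers < 3)	/* arbitrary illustrative minimum */
		*nbuffers = 3;
	*nplanes = 1;		/* single-planar format */
	sizes[0] = ctx->sizeimage;
	alloc_ctxs[0] = ctx->alloc_ctx;
	return 0;
}

static void example_buf_queue(struct vb2_buffer *vb)
{
	/* add the buffer to the driver's DMA list; details omitted */
}

static const struct vb2_ops example_qops = {
	.queue_setup	= example_queue_setup,
	.buf_queue	= example_buf_queue,
};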
@@ -273,11 +268,12 @@ static int vpbe_buffer_setup(struct videobuf_queue *q,
* vpbe_buffer_queue()
* This function adds the buffer to DMA queue
*/
-static void vpbe_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buffer_queue(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
struct vpbe_layer *layer = fh->layer;
struct vpbe_display *disp = fh->disp_dev;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
@@ -288,39 +284,125 @@ static void vpbe_buffer_queue(struct videobuf_queue *q,
/* add the buffer to the DMA queue */
spin_lock_irqsave(&disp->dma_queue_lock, flags);
- list_add_tail(&vb->queue, &layer->dma_queue);
+ list_add_tail(&buf->list, &layer->dma_queue);
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
}
/*
- * vpbe_buffer_release()
- * This function is called from the videobuf layer to free memory allocated to
+ * vpbe_buf_cleanup()
+ * This function is called from the vb2 layer to free memory allocated to
* the buffers
*/
-static void vpbe_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpbe_buf_cleanup(struct vb2_buffer *vb)
{
/* Get the file handle object and layer object */
- struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+ unsigned long flags;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buffer_release\n");
+ "vpbe_buf_cleanup\n");
- if (V4L2_MEMORY_USERPTR != layer->memory)
- videobuf_dma_contig_free(q, vb);
+ spin_lock_irqsave(&layer->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&layer->irqlock, flags);
+}
- vb->state = VIDEOBUF_NEEDS_INIT;
+static void vpbe_wait_prepare(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_unlock(&layer->opslock);
}
-static struct videobuf_queue_ops video_qops = {
- .buf_setup = vpbe_buffer_setup,
+static void vpbe_wait_finish(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ mutex_lock(&layer->opslock);
+}
+
+static int vpbe_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpbe_disp_buffer *buf = container_of(vb,
+ struct vpbe_disp_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+ return 0;
+}
+
+static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&layer->dma_queue)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+ return -EINVAL;
+ }
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->list);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(fh->disp_dev, layer);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * if the requested format is yuv420 semiplanar, we need to
+	 * enable both video windows
+ */
+ layer->started = 1;
+ layer->layer_first_int = 1;
+
+ return ret;
+}
+
+static int vpbe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpbe_fh *fh = vb2_get_drv_priv(vq);
+ struct vpbe_layer *layer = fh->layer;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+
+ /* release all active buffers */
+ while (!list_empty(&layer->dma_queue)) {
+ layer->next_frm = list_entry(layer->dma_queue.next,
+ struct vpbe_disp_buffer, list);
+ list_del(&layer->next_frm->list);
+ vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ return 0;
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpbe_buffer_queue_setup,
+ .wait_prepare = vpbe_wait_prepare,
+ .wait_finish = vpbe_wait_finish,
+ .buf_init = vpbe_buffer_init,
.buf_prepare = vpbe_buffer_prepare,
+ .start_streaming = vpbe_start_streaming,
+ .stop_streaming = vpbe_stop_streaming,
+ .buf_cleanup = vpbe_buf_cleanup,
.buf_queue = vpbe_buffer_queue,
- .buf_release = vpbe_buffer_release,
};
static
@@ -345,7 +427,7 @@ static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
unsigned long addr;
int ret;
- addr = videobuf_to_dma_contig(layer->cur_frm);
+ addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0);
/* Set address in the display registers */
osd_device->ops.start_layer(osd_device,
layer->layer_info.id,
@@ -620,9 +702,12 @@ static int vpbe_display_querycap(struct file *file, void *priv,
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
cap->version = VPBE_DISPLAY_VERSION_CODE;
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
- strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ snprintf(cap->driver, sizeof(cap->driver), "%s",
+ dev_name(vpbe_dev->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vpbe_dev->pdev));
strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
return 0;
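/*
 * Illustrative sketch, not part of this patch: how a driver fills struct
 * v4l2_capability once it reports per-node device_caps, as in the hunk
 * above.  capabilities must then carry the union of the device's caps plus
 * V4L2_CAP_DEVICE_CAPS.  The function and the "example" strings are
 * placeholders.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/videodev2.h>

static int example_fill_querycap(struct v4l2_capability *cap,
				 const char *drv_name)
{
	cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	snprintf(cap->driver, sizeof(cap->driver), "%s", drv_name);
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", drv_name);
	strlcpy(cap->card, "example card", sizeof(cap->card));
	return 0;
}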
@@ -1161,7 +1246,7 @@ static int vpbe_display_streamoff(struct file *file, void *priv,
osd_device->ops.disable_layer(osd_device,
layer->layer_info.id);
layer->started = 0;
- ret = videobuf_streamoff(&layer->buffer_queue);
+ ret = vb2_streamoff(&layer->buffer_queue, buf_type);
return ret;
}
@@ -1199,46 +1284,15 @@ static int vpbe_display_streamon(struct file *file, void *priv,
}
/*
- * Call videobuf_streamon to start streaming
+ * Call vb2_streamon to start streaming
	 * in videobuf2
*/
- ret = videobuf_streamon(&layer->buffer_queue);
+ ret = vb2_streamon(&layer->buffer_queue, buf_type);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
- "error in videobuf_streamon\n");
+ "error in vb2_streamon\n");
return ret;
}
- /* If buffer queue is empty, return error */
- if (list_empty(&layer->dma_queue)) {
- v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
- goto streamoff;
- }
- /* Get the next frame from the buffer queue */
- layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
- struct videobuf_buffer, queue);
- /* Remove buffer from the buffer queue */
- list_del(&layer->cur_frm->queue);
- /* Mark state of the current frame to active */
- layer->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- layer->field_id = 0;
-
- /* Set parameters in OSD and VENC */
- ret = vpbe_set_osd_display_params(disp_dev, layer);
- if (ret < 0)
- goto streamoff;
-
- /*
- * if request format is yuv420 semiplanar, need to
- * enable both video windows
- */
- layer->started = 1;
-
- layer->layer_first_int = 1;
-
- return ret;
-streamoff:
- ret = videobuf_streamoff(&layer->buffer_queue);
return ret;
}
@@ -1265,10 +1319,10 @@ static int vpbe_display_dqbuf(struct file *file, void *priv,
}
if (file->f_flags & O_NONBLOCK)
		/* Call vb2_dqbuf for non-blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 1);
else
		/* Call vb2_dqbuf for blocking mode */
- ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+ ret = vb2_dqbuf(&layer->buffer_queue, buf, 0);
return ret;
}
@@ -1295,7 +1349,7 @@ static int vpbe_display_qbuf(struct file *file, void *priv,
return -EACCES;
}
- return videobuf_qbuf(&layer->buffer_queue, p);
+ return vb2_qbuf(&layer->buffer_queue, p);
}
static int vpbe_display_querybuf(struct file *file, void *priv,
@@ -1304,7 +1358,6 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
- int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
"VIDIOC_QUERYBUF, layer id = %d\n",
@@ -1314,11 +1367,8 @@ static int vpbe_display_querybuf(struct file *file, void *priv,
v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
return -EINVAL;
}
-
- /* Call videobuf_querybuf to get information */
- ret = videobuf_querybuf(&layer->buffer_queue, buf);
-
- return ret;
+ /* Call vb2_querybuf to get information */
+ return vb2_querybuf(&layer->buffer_queue, buf);
}
static int vpbe_display_reqbufs(struct file *file, void *priv,
@@ -1327,8 +1377,8 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
struct vpbe_fh *fh = file->private_data;
struct vpbe_layer *layer = fh->layer;
struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct vb2_queue *q;
int ret;
-
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
@@ -1342,15 +1392,26 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
return -EBUSY;
}
	/* Initialize videobuf2 queue as per the buffer type */
- videobuf_queue_dma_contig_init(&layer->buffer_queue,
- &video_qops,
- vpbe_dev->pdev,
- &layer->irqlock,
- V4L2_BUF_TYPE_VIDEO_OUTPUT,
- layer->pix_fmt.field,
- sizeof(struct videobuf_buffer),
- fh, NULL);
+ layer->alloc_ctx = vb2_dma_contig_init_ctx(vpbe_dev->pdev);
+	layer->alloc_ctx = IS_ERR(layer->alloc_ctx) ? layer->alloc_ctx : layer->alloc_ctx;
+	if (IS_ERR(layer->alloc_ctx)) {
+		v4l2_err(&vpbe_dev->v4l2_dev, "Failed to get the context\n");
+		return PTR_ERR(layer->alloc_ctx);
+ }
+ q = &layer->buffer_queue;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
+ ret = vb2_queue_init(q);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
+ return ret;
+ }
/* Set io allowed member of file handle to TRUE */
fh->io_allowed = 1;
/* Increment io usrs member of layer object to 1 */
@@ -1360,9 +1421,7 @@ static int vpbe_display_reqbufs(struct file *file, void *priv,
/* Initialize buffer queue */
INIT_LIST_HEAD(&layer->dma_queue);
/* Allocate buffers */
- ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
-
- return ret;
+ return vb2_reqbufs(q, req_buf);
}
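/*
 * Illustrative sketch, not part of this patch: the minimal vb2 queue setup
 * used by these conversions, with the IS_ERR()/PTR_ERR() check on
 * vb2_dma_contig_init_ctx() that the vpif hunks below also adopt.  The
 * function name, parameters and out-parameter are placeholders, not driver
 * symbols.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

static int example_init_vb2_queue(struct vb2_queue *q, struct device *dev,
				  const struct vb2_ops *ops, void *priv,
				  void **alloc_ctx_out)
{
	void *alloc_ctx;
	int ret;

	alloc_ctx = vb2_dma_contig_init_ctx(dev);
	if (IS_ERR(alloc_ctx))
		return PTR_ERR(alloc_ctx);

	memset(q, 0, sizeof(*q));
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = priv;
	q->ops = ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct vb2_buffer);

	ret = vb2_queue_init(q);
	if (ret) {
		vb2_dma_contig_cleanup_ctx(alloc_ctx);
		return ret;
	}

	*alloc_ctx_out = alloc_ctx;
	return 0;
}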
/*
@@ -1381,7 +1440,7 @@ static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&layer->opslock))
return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&layer->buffer_queue, vma);
+ ret = vb2_mmap(&layer->buffer_queue, vma);
mutex_unlock(&layer->opslock);
return ret;
}
@@ -1398,7 +1457,7 @@ static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
if (layer->started) {
mutex_lock(&layer->opslock);
- err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+ err = vb2_poll(&layer->buffer_queue, filep, wait);
mutex_unlock(&layer->opslock);
}
return err;
@@ -1488,8 +1547,8 @@ static int vpbe_display_release(struct file *file)
layer->layer_info.id);
layer->started = 0;
/* Free buffers allocated */
- videobuf_queue_cancel(&layer->buffer_queue);
- videobuf_mmap_free(&layer->buffer_queue);
+ vb2_queue_release(&layer->buffer_queue);
+		vb2_dma_contig_cleanup_ctx(layer->alloc_ctx);
}
/* Decrement layer usrs counter */
@@ -1603,8 +1662,8 @@ static int vpbe_device_get(struct device *dev, void *data)
return 0;
}
-static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
- struct platform_device *pdev)
+static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
{
struct vpbe_layer *vpbe_display_layer = NULL;
struct video_device *vbd = NULL;
@@ -1659,9 +1718,10 @@ static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
return 0;
}
-static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
- struct vpbe_display *disp_dev,
- struct platform_device *pdev) {
+static int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
int err;
v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
@@ -1693,7 +1753,7 @@ static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
* This function creates device entries by register itself to the V4L2 driver
* and initializes fields of each layer objects
*/
-static __devinit int vpbe_display_probe(struct platform_device *pdev)
+static int vpbe_display_probe(struct platform_device *pdev)
{
struct vpbe_layer *vpbe_display_layer;
struct vpbe_display *disp_dev;
@@ -1827,7 +1887,7 @@ static struct platform_driver vpbe_display_driver = {
.bus = &platform_bus_type,
},
.probe = vpbe_display_probe,
- .remove = __devexit_p(vpbe_display_remove),
+ .remove = vpbe_display_remove,
};
module_platform_driver(vpbe_display_driver);
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index bba299dbf396..707f243f810d 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -62,7 +62,7 @@ static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) | mask;
writel(val, addr);
@@ -74,7 +74,7 @@ static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 val = readl(addr) & ~mask;
writel(val, addr);
@@ -87,7 +87,7 @@ static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
{
struct osd_state *osd = sd;
- u32 addr = osd->osd_base + offset;
+ void __iomem *addr = osd->osd_base + offset;
u32 new_val = (readl(addr) & ~mask) | (val & mask);
writel(new_val, addr);
@@ -1559,8 +1559,7 @@ static int osd_probe(struct platform_device *pdev)
ret = -ENODEV;
goto free_mem;
}
- osd->osd_base = (unsigned long)ioremap_nocache(res->start,
- osd->osd_size);
+ osd->osd_base = ioremap_nocache(res->start, osd->osd_size);
if (!osd->osd_base) {
dev_err(osd->dev, "Unable to map the OSD region\n");
ret = -ENODEV;
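/*
 * Illustrative sketch, not part of this patch: keeping MMIO cookies typed as
 * void __iomem * (as the vpbe_osd hunks above now do) lets sparse check
 * readl()/writel() usage without integer casts.  The names, offsets and
 * masks here are made up for illustration.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *example_base;

static int example_map(resource_size_t phys, unsigned long size)
{
	example_base = ioremap_nocache(phys, size);
	return example_base ? 0 : -ENOMEM;
}

static void example_modify(u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = example_base + offset;
	u32 new_val = (readl(addr) & ~mask) | (val & mask);

	writel(new_val, addr);
}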
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 8be492cd8ed4..be9d3e1b4868 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1831,7 +1831,7 @@ static struct vpfe_device *vpfe_initialize(void)
* itself to the V4L2 driver and initializes fields of each
* device objects
*/
-static __devinit int vpfe_probe(struct platform_device *pdev)
+static int vpfe_probe(struct platform_device *pdev)
{
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *vpfe_cfg;
@@ -2038,7 +2038,7 @@ probe_free_dev_mem:
/*
* vpfe_remove : It un-register device from V4L2 driver
*/
-static int __devexit vpfe_remove(struct platform_device *pdev)
+static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
@@ -2075,7 +2075,7 @@ static struct platform_driver vpfe_driver = {
.pm = &vpfe_dev_pm_ops,
},
.probe = vpfe_probe,
- .remove = __devexit_p(vpfe_remove),
+ .remove = vpfe_remove,
};
module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index cff3c0ab501f..28638a86f129 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -419,7 +419,7 @@ int vpif_channel_getfid(u8 channel_id)
}
EXPORT_SYMBOL(vpif_channel_getfid);
-static int __devinit vpif_probe(struct platform_device *pdev)
+static int vpif_probe(struct platform_device *pdev)
{
int status = 0;
@@ -444,7 +444,7 @@ static int __devinit vpif_probe(struct platform_device *pdev)
status = PTR_ERR(vpif_clk);
goto clk_fail;
}
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
@@ -457,10 +457,10 @@ fail:
return status;
}
-static int __devexit vpif_remove(struct platform_device *pdev)
+static int vpif_remove(struct platform_device *pdev)
{
if (vpif_clk) {
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
clk_put(vpif_clk);
}
@@ -472,13 +472,13 @@ static int __devexit vpif_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int vpif_suspend(struct device *dev)
{
- clk_disable(vpif_clk);
+ clk_disable_unprepare(vpif_clk);
return 0;
}
static int vpif_resume(struct device *dev)
{
- clk_enable(vpif_clk);
+ clk_prepare_enable(vpif_clk);
return 0;
}
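/*
 * Illustrative sketch, not part of this patch: with the common clock
 * framework a clock must be prepared before it can be enabled, so the
 * clk_enable()/clk_disable() pairs above become clk_prepare_enable()/
 * clk_disable_unprepare().  The "vpif" clock name and function names are
 * placeholders.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clock_on(struct device *dev, struct clk **clkp)
{
	struct clk *clk = clk_get(dev, "vpif");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*clkp = clk;
	return 0;
}

static void example_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* disable + unprepare */
	clk_put(clk);
}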
@@ -498,7 +498,7 @@ static struct platform_driver vpif_driver = {
.owner = THIS_MODULE,
.pm = vpif_pm_ops,
},
- .remove = __devexit_p(vpif_remove),
+ .remove = vpif_remove,
.probe = vpif_probe,
};
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index fcabc023885d..a409ccefb380 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -201,13 +201,16 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_cap_buffer *buf = container_of(vb,
struct vpif_cap_buffer, vb);
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
vpif_dbg(2, debug, "vpif_buffer_queue\n");
+ spin_lock_irqsave(&common->irqlock, flags);
/* add the buffer to the DMA queue */
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/**
@@ -278,10 +281,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
- /* If buffer queue is empty, return error */
+ /* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_dbg(1, debug, "buffer queue is empty\n");
return -EIO;
}
@@ -291,6 +297,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_cap_buffer, list);
/* Remove buffer from the buffer queue */
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
/* Initialize field_id and started member */
@@ -362,6 +369,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -369,12 +377,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
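/*
 * Illustrative sketch, not part of this patch: the DMA queue is now touched
 * both from ioctl paths and from the channel ISR, so process context takes
 * the lock with spin_lock_irqsave() while the interrupt handler, already
 * running with interrupts off on that CPU, uses plain spin_lock() as in the
 * hunks below.  All names here are placeholders.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_dma_queue);
static DEFINE_SPINLOCK(example_irqlock);

/* process context: qbuf/start_streaming paths */
static void example_queue_buffer(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&example_irqlock, flags);
	list_add_tail(node, &example_dma_queue);
	spin_unlock_irqrestore(&example_irqlock, flags);
}

/* interrupt context: frame-done handler */
static struct list_head *example_take_next_buffer(void)
{
	struct list_head *node = NULL;

	spin_lock(&example_irqlock);
	if (!list_empty(&example_dma_queue)) {
		node = example_dma_queue.next;
		list_del_init(node);
	}
	spin_unlock(&example_irqlock);

	return node;
}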
@@ -420,10 +430,12 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
@@ -468,8 +480,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
/* Check the field format */
if (1 == ch->vpifparams.std_info.frm_fmt) {
/* Progressive mode */
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
if (!channel_first_int[i][channel_id])
vpif_process_buffer_complete(common);
@@ -513,9 +529,13 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
vpif_process_buffer_complete(common);
} else if (1 == fid) {
/* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue) ||
- (common->cur_frm != common->next_frm))
+ (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
vpif_schedule_next_buffer(common);
}
@@ -1004,9 +1024,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1715,7 +1735,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
@@ -1735,7 +1755,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -ENODATA;
return ret;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b716fbd4241f..9f2b603be9c9 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -177,11 +177,14 @@ static void vpif_buffer_queue(struct vb2_buffer *vb)
struct vpif_disp_buffer, vb);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
/* add the buffer to the DMA queue */
+ spin_lock_irqsave(&common->irqlock, flags);
list_add_tail(&buf->list, &common->dma_queue);
+ spin_unlock_irqrestore(&common->irqlock, flags);
}
/*
@@ -246,10 +249,13 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct vpif_params *vpif = &ch->vpifparams;
unsigned long addr = 0;
+ unsigned long flags;
int ret;
/* If buffer queue is empty, return error */
+ spin_lock_irqsave(&common->irqlock, flags);
if (list_empty(&common->dma_queue)) {
+ spin_unlock_irqrestore(&common->irqlock, flags);
vpif_err("buffer queue is empty\n");
return -EIO;
}
@@ -260,6 +266,7 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vpif_disp_buffer, list);
list_del(&common->cur_frm->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
/* Mark state of the current frame to active */
common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -330,6 +337,7 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
@@ -337,12 +345,14 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
/* release all active buffers */
+ spin_lock_irqsave(&common->irqlock, flags);
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
list_del(&common->next_frm->list);
vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return 0;
}
@@ -363,11 +373,13 @@ static void process_progressive_mode(struct common_obj *common)
{
unsigned long addr = 0;
+ spin_lock(&common->irqlock);
/* Get the next buffer from buffer queue */
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
+ spin_unlock(&common->irqlock);
/* Mark status of the buffer as active */
common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
@@ -398,16 +410,18 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
common->cur_frm = common->next_frm;
} else if (1 == fid) { /* odd field */
+ spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue)
|| (common->cur_frm != common->next_frm)) {
+ spin_unlock(&common->irqlock);
return;
}
+ spin_unlock(&common->irqlock);
/* one field is displayed configure the next
* frame if it is available else hold on current
* frame */
/* Get next from the buffer queue */
process_progressive_mode(common);
-
}
}
@@ -437,8 +451,12 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
continue;
if (1 == ch->vpifparams.std_info.frm_fmt) {
- if (list_empty(&common->dma_queue))
+ spin_lock(&common->irqlock);
+ if (list_empty(&common->dma_queue)) {
+ spin_unlock(&common->irqlock);
continue;
+ }
+ spin_unlock(&common->irqlock);
/* Progressive mode */
if (!channel_first_int[i][channel_id]) {
@@ -972,9 +990,9 @@ static int vpif_reqbufs(struct file *file, void *priv,
}
/* Initialize videobuf2 queue as per the buffer type */
common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (!common->alloc_ctx) {
+ if (IS_ERR(common->alloc_ctx)) {
vpif_err("Failed to get the context\n");
- return -EINVAL;
+ return PTR_ERR(common->alloc_ctx);
}
q = &common->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -1380,7 +1398,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
int ret;
ret = v4l2_subdev_call(ch->sd, video, enum_dv_timings, timings);
- if (ret == -ENOIOCTLCMD && ret == -ENODEV)
+ if (ret == -ENOIOCTLCMD || ret == -ENODEV)
return -EINVAL;
return ret;
}
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 146e4b01ac17..cdbff88e0f1e 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -357,7 +357,7 @@ void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
}
EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
-static int __devinit vpss_probe(struct platform_device *pdev)
+static int vpss_probe(struct platform_device *pdev)
{
struct resource *r1, *r2;
char *platform_name;
@@ -445,7 +445,7 @@ fail1:
return status;
}
-static int __devexit vpss_remove(struct platform_device *pdev)
+static int vpss_remove(struct platform_device *pdev)
{
struct resource *res;
@@ -465,7 +465,7 @@ static struct platform_driver vpss_driver = {
.name = "vpss",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(vpss_remove),
+ .remove = vpss_remove,
.probe = vpss_probe,
};
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 19cbb12a12a2..2b1b9f30e1f9 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -982,7 +982,7 @@ static void *gsc_get_drv_data(struct platform_device *pdev)
match = of_match_node(of_match_ptr(exynos_gsc_match),
pdev->dev.of_node);
if (match)
- driver_data = match->data;
+ driver_data = (struct gsc_driverdata *)match->data;
} else {
driver_data = (struct gsc_driverdata *)
platform_get_device_id(pdev)->driver_data;
@@ -1151,7 +1151,7 @@ err_clk:
return ret;
}
-static int __devexit gsc_remove(struct platform_device *pdev)
+static int gsc_remove(struct platform_device *pdev)
{
struct gsc_dev *gsc = platform_get_drvdata(pdev);
@@ -1237,7 +1237,7 @@ static const struct dev_pm_ops gsc_pm_ops = {
static struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = __devexit_p(gsc_remove),
+ .remove = gsc_remove,
.id_table = gsc_driver_ids,
.driver = {
.name = GSC_MODULE_NAME,
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index c065d040ed94..c267c57c76fd 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -122,7 +122,7 @@ static void gsc_m2m_device_run(void *priv)
struct gsc_ctx *ctx = priv;
struct gsc_dev *gsc;
unsigned long flags;
- u32 ret;
+ int ret;
bool is_set = false;
if (WARN(!ctx, "null hardware context\n"))
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 31ac4dc69247..9115a2c8d075 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -352,8 +352,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
return 0;
buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
if (prev == NULL) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
dprintk(1, "Restarting video dma\n");
viu_stop_dma(vidq->dev);
@@ -367,8 +366,7 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
- list_del(&buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
+ list_move_tail(&buf->vb.queue, &vidq->active);
buf->vb.state = VIDEOBUF_ACTIVE;
dprintk(2, "[%p/%d] restart_queue - move to active\n",
buf, buf->vb.i);
@@ -1480,7 +1478,7 @@ static struct video_device viu_template = {
.current_norm = V4L2_STD_NTSC_M,
};
-static int __devinit viu_of_probe(struct platform_device *op)
+static int viu_of_probe(struct platform_device *op)
{
struct viu_dev *viu_dev;
struct video_device *vdev;
@@ -1617,7 +1615,7 @@ err:
return ret;
}
-static int __devexit viu_of_remove(struct platform_device *op)
+static int viu_of_remove(struct platform_device *op)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&op->dev);
struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
@@ -1670,7 +1668,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
static struct platform_driver viu_of_platform_driver = {
.probe = viu_of_probe,
- .remove = __devexit_p(viu_of_remove),
+ .remove = viu_of_remove,
#ifdef CONFIG_PM
.suspend = viu_suspend,
.resume = viu_resume,
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 45164c4f8452..05c560f2ef06 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -218,15 +218,14 @@ static void dma_callback(void *data)
static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
int do_callback)
{
- struct deinterlace_q_data *s_q_data, *d_q_data;
+ struct deinterlace_q_data *s_q_data;
struct vb2_buffer *src_buf, *dst_buf;
struct deinterlace_dev *pcdev = ctx->dev;
struct dma_chan *chan = pcdev->dma_chan;
struct dma_device *dmadev = chan->device;
struct dma_async_tx_descriptor *tx;
unsigned int s_width, s_height;
- unsigned int d_width, d_height;
- unsigned int d_size, s_size;
+ unsigned int s_size;
dma_addr_t p_in, p_out;
enum dma_ctrl_flags flags;
@@ -238,11 +237,6 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
s_height = s_q_data->height;
s_size = s_width * s_height;
- d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
- d_width = d_q_data->width;
- d_height = d_q_data->height;
- d_size = d_width * d_height;
-
p_in = (dma_addr_t)vb2_dma_contig_plane_dma_addr(src_buf, 0);
p_out = (dma_addr_t)vb2_dma_contig_plane_dma_addr(dst_buf, 0);
if (!p_in || !p_out) {
@@ -1108,17 +1102,5 @@ static struct platform_driver deinterlace_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit deinterlace_exit(void)
-{
- platform_driver_unregister(&deinterlace_pdrv);
-}
-
-static int __init deinterlace_init(void)
-{
- return platform_driver_register(&deinterlace_pdrv);
-}
-
-module_init(deinterlace_init);
-module_exit(deinterlace_exit);
+module_platform_driver(deinterlace_pdrv);
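/*
 * Illustrative sketch, not part of this patch: module_platform_driver()
 * generates roughly the boilerplate being deleted here, assuming
 * <linux/module.h> and <linux/platform_device.h> are already included as in
 * this file:
 *
 *	static int __init deinterlace_pdrv_init(void)
 *	{
 *		return platform_driver_register(&deinterlace_pdrv);
 *	}
 *	module_init(deinterlace_pdrv_init);
 *
 *	static void __exit deinterlace_pdrv_exit(void)
 *	{
 *		platform_driver_unregister(&deinterlace_pdrv);
 *	}
 *	module_exit(deinterlace_pdrv_exit);
 */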
diff --git a/drivers/media/platform/mem2mem_testdev.c b/drivers/media/platform/mem2mem_testdev.c
index 2e2121e98133..7487d7208dea 100644
--- a/drivers/media/platform/mem2mem_testdev.c
+++ b/drivers/media/platform/mem2mem_testdev.c
@@ -839,7 +839,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &m2mtest_qops;
@@ -850,7 +850,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &m2mtest_qops;
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 8f22ce543cf7..6b155d7be8e0 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -371,7 +371,7 @@ static irqreturn_t emmaprp_irq(int irq_emma, void *data)
if (!curr_ctx->aborting) {
if ((irqst & PRP_INTR_ST_RDERR) ||
(irqst & PRP_INTR_ST_CH2WERR)) {
- pr_err("PrP bus error ocurred, this transfer is probably corrupted\n");
+ pr_err("PrP bus error occurred, this transfer is probably corrupted\n");
writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
} else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
@@ -1013,16 +1013,4 @@ static struct platform_driver emmaprp_pdrv = {
.owner = THIS_MODULE,
},
};
-
-static void __exit emmaprp_exit(void)
-{
- platform_driver_unregister(&emmaprp_pdrv);
-}
-
-static int __init emmaprp_init(void)
-{
- return platform_driver_register(&emmaprp_pdrv);
-}
-
-module_init(emmaprp_init);
-module_exit(emmaprp_exit);
+module_platform_driver(emmaprp_pdrv);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index a3b1a34c896d..35cc526e6c93 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -44,9 +44,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <plat/cpu.h>
-#include <plat/dma.h>
-#include <plat/vrfb.h>
+#include <video/omapvrfb.h>
#include <video/omapdss.h>
#include "omap_voutlib.h"
@@ -1174,13 +1172,6 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* set default crop and win */
omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
- /* Save the changes in the overlay strcuture */
- ret = omapvid_init(vout, 0);
- if (ret) {
- v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
- goto s_fmt_vid_out_exit;
- }
-
ret = 0;
s_fmt_vid_out_exit:
@@ -1684,20 +1675,6 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
omap_dispc_register_isr(omap_vout_isr, vout, mask);
- for (j = 0; j < ovid->num_overlays; j++) {
- struct omap_overlay *ovl = ovid->overlays[j];
-
- if (ovl->get_device(ovl)) {
- struct omap_overlay_info info;
- ovl->get_overlay_info(ovl, &info);
- info.paddr = addr;
- if (ovl->set_overlay_info(ovl, &info)) {
- ret = -EINVAL;
- goto streamon_err1;
- }
- }
- }
-
	/* First save the configuration in overlay structure */
ret = omapvid_init(vout, addr);
if (ret)
@@ -2064,7 +2041,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.id = k + 1;
/* Set VRFB as rotation_type for omap2 and omap3 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
vout->vid_info.rotation_type = VOUT_ROT_VRFB;
/* Setup the default configuration for the video devices
@@ -2094,11 +2071,12 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
}
video_set_drvdata(vfd, vout);
- /* Configure the overlay structure */
- ret = omapvid_init(vid_dev->vouts[k], 0);
- if (!ret)
- goto success;
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ continue;
error2:
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
omap_vout_release_vrfb(vout);
@@ -2108,12 +2086,6 @@ error1:
error:
kfree(vout);
return ret;
-
-success:
- dev_info(&pdev->dev, ": registered and initialized"
- " video device %d\n", vfd->minor);
- if (k == (pdev->num_resources - 1))
- return 0;
}
return -ENODEV;
@@ -2186,14 +2158,23 @@ static int __init omap_vout_probe(struct platform_device *pdev)
struct omap_dss_device *def_display;
struct omap2video_device *vid_dev = NULL;
+ ret = omapdss_compat_init();
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dss\n");
+ return ret;
+ }
+
if (pdev->num_resources == 0) {
dev_err(&pdev->dev, "probed for an unknown device\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_dss_init;
}
vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
- if (vid_dev == NULL)
- return -ENOMEM;
+ if (vid_dev == NULL) {
+ ret = -ENOMEM;
+ goto err_dss_init;
+ }
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
@@ -2288,6 +2269,8 @@ probe_err1:
}
probe_err0:
kfree(vid_dev);
+err_dss_init:
+ omapdss_compat_uninit();
return ret;
}
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 4be26abf6cea..cf1c437a8687 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -16,12 +16,14 @@
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
-#include <plat/dma.h>
-#include <plat/vrfb.h>
+#include <linux/omap-dma.h>
+#include <video/omapvrfb.h>
#include "omap_voutdef.h"
#include "omap_voutlib.h"
+#define OMAP_DMA_NO_DEVICE 0
+
/*
* Function for allocating video buffers
*/
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 27a95d23b913..9ccfe1f475a4 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -12,7 +12,7 @@
#define OMAP_VOUTDEF_H
#include <video/omapdss.h>
-#include <plat/vrfb.h>
+#include <video/omapvrfb.h>
#define YUYV_BPP 2
#define RGB565_BPP 2
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 115408b9274f..80b0d88f125c 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
-#include <plat/cpu.h>
+#include <video/omapdss.h>
#include "omap_voutlib.h"
@@ -124,7 +124,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
win->chromakey = new_win->chromakey;
/* Adjust the cropping window to allow for resizing limitation */
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
/* For 24xx limit is 8x to 1/2x scaling. */
if ((crop->height/win->w.height) >= 2)
crop->height = win->w.height * 2;
@@ -140,7 +140,7 @@ int omap_vout_new_window(struct v4l2_rect *crop,
if (crop->height != win->w.height)
crop->width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
/* For 34xx limit is 8x to 1/4x scaling. */
if ((crop->height/win->w.height) >= 4)
crop->height = win->w.height * 4;
@@ -196,7 +196,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width <= 0 || try_crop.height <= 0)
return -EINVAL;
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
@@ -207,9 +207,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* vertical resizing */
vresize = (1024 * try_crop.height) / win->w.height;
- if (cpu_is_omap24xx() && (vresize > 2048))
+ if (omap_vout_dss_omap24xx() && (vresize > 2048))
vresize = 2048;
- else if (cpu_is_omap34xx() && (vresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (vresize > 4096))
vresize = 4096;
win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
@@ -226,9 +226,9 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
/* horizontal resizing */
hresize = (1024 * try_crop.width) / win->w.width;
- if (cpu_is_omap24xx() && (hresize > 2048))
+ if (omap_vout_dss_omap24xx() && (hresize > 2048))
hresize = 2048;
- else if (cpu_is_omap34xx() && (hresize > 4096))
+ else if (omap_vout_dss_omap34xx() && (hresize > 4096))
hresize = 4096;
win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
@@ -243,7 +243,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.width == 0)
try_crop.width = 2;
}
- if (cpu_is_omap24xx()) {
+ if (omap_vout_dss_omap24xx()) {
if ((try_crop.height/win->w.height) >= 2)
try_crop.height = win->w.height * 2;
@@ -258,7 +258,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
if (try_crop.height != win->w.height)
try_crop.width = 768;
}
- } else if (cpu_is_omap34xx()) {
+ } else if (omap_vout_dss_omap34xx()) {
if ((try_crop.height/win->w.height) >= 4)
try_crop.height = win->w.height * 4;
@@ -337,3 +337,21 @@ void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
}
free_pages((unsigned long) virtaddr, order);
}
+
+bool omap_vout_dss_omap24xx(void)
+{
+ return omapdss_get_version() == OMAPDSS_VER_OMAP24xx;
+}
+
+bool omap_vout_dss_omap34xx(void)
+{
+ switch (omapdss_get_version()) {
+ case OMAPDSS_VER_OMAP34xx_ES1:
+ case OMAPDSS_VER_OMAP34xx_ES3:
+ case OMAPDSS_VER_OMAP3630:
+ case OMAPDSS_VER_AM35xx:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/media/platform/omap/omap_voutlib.h b/drivers/media/platform/omap/omap_voutlib.h
index e51750a597e3..f9d1c0779f33 100644
--- a/drivers/media/platform/omap/omap_voutlib.h
+++ b/drivers/media/platform/omap/omap_voutlib.h
@@ -32,5 +32,8 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_window *win);
unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
+
+bool omap_vout_dss_omap24xx(void);
+bool omap_vout_dss_omap34xx(void);
#endif /* #ifndef OMAP_VOUTLIB_H */
diff --git a/drivers/media/platform/omap24xxcam.c b/drivers/media/platform/omap24xxcam.c
index 70f45c381318..8b7ccea982e7 100644
--- a/drivers/media/platform/omap24xxcam.c
+++ b/drivers/media/platform/omap24xxcam.c
@@ -1736,7 +1736,7 @@ static struct v4l2_int_device omap24xxcam = {
*
*/
-static int __devinit omap24xxcam_probe(struct platform_device *pdev)
+static int omap24xxcam_probe(struct platform_device *pdev)
{
struct omap24xxcam_device *cam;
struct resource *mem;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 99640d8c1db0..e4aaee91201d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -61,6 +61,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/omap-iommu.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -70,8 +71,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
-#include <plat/cpu.h>
-
#include "isp.h"
#include "ispreg.h"
#include "ispccdc.h"
@@ -102,7 +101,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_RESZ |
1 << OMAP3_ISP_IOMEM_SBL |
1 << OMAP3_ISP_IOMEM_CSI2A_REGS1 |
- 1 << OMAP3_ISP_IOMEM_CSIPHY2,
+ 1 << OMAP3_ISP_IOMEM_CSIPHY2 |
+ 1 << OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
},
{
.isp_rev = ISP_REVISION_15_0,
@@ -119,7 +119,8 @@ static const struct isp_res_mapping isp_res_maps[] = {
1 << OMAP3_ISP_IOMEM_CSI2A_REGS2 |
1 << OMAP3_ISP_IOMEM_CSI2C_REGS1 |
1 << OMAP3_ISP_IOMEM_CSIPHY1 |
- 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ 1 << OMAP3_ISP_IOMEM_CSI2C_REGS2 |
+ 1 << OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
},
};
@@ -1330,7 +1331,8 @@ void omap3isp_subclk_disable(struct isp_device *isp,
* isp_enable_clocks - Enable ISP clocks
* @isp: OMAP3 ISP device
*
- * Return 0 if successful, or clk_enable return value if any of tthem fails.
+ * Return 0 if successful, or clk_prepare_enable return value if any of them
+ * fails.
*/
static int isp_enable_clocks(struct isp_device *isp)
{
@@ -1347,14 +1349,11 @@ static int isp_enable_clocks(struct isp_device *isp)
* has to be twice of what is set on OMAP3430 to get
* the required value for cam_mclk
*/
- if (cpu_is_omap3630())
- divisor = 1;
- else
- divisor = 2;
+ divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
- r = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_ick failed\n");
+ dev_err(isp->dev, "failed to enable cam_ick clock\n");
goto out_clk_enable_ick;
}
r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
@@ -1363,9 +1362,9 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
goto out_clk_enable_mclk;
}
- r = clk_enable(isp->clock[ISP_CLK_CAM_MCLK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
if (r) {
- dev_err(isp->dev, "clk_enable cam_mclk failed\n");
+ dev_err(isp->dev, "failed to enable cam_mclk clock\n");
goto out_clk_enable_mclk;
}
rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1373,17 +1372,17 @@ static int isp_enable_clocks(struct isp_device *isp)
dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
" expected : %d\n"
" actual : %ld\n", CM_CAM_MCLK_HZ, rate);
- r = clk_enable(isp->clock[ISP_CLK_CSI2_FCK]);
+ r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
if (r) {
- dev_err(isp->dev, "clk_enable csi2_fck failed\n");
+ dev_err(isp->dev, "failed to enable csi2_fck clock\n");
goto out_clk_enable_csi2_fclk;
}
return 0;
out_clk_enable_csi2_fclk:
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
out_clk_enable_mclk:
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
out_clk_enable_ick:
return r;
}
@@ -1394,9 +1393,9 @@ out_clk_enable_ick:
*/
static void isp_disable_clocks(struct isp_device *isp)
{
- clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
- clk_disable(isp->clock[ISP_CLK_CAM_MCLK]);
- clk_disable(isp->clock[ISP_CLK_CSI2_FCK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
+ clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
}
static const char *isp_clocks[] = {
@@ -1677,7 +1676,7 @@ isp_register_subdev_group(struct isp_device *isp,
adapter = i2c_get_adapter(board_info->i2c_adapter_id);
if (adapter == NULL) {
- printk(KERN_ERR "%s: Unable to get I2C adapter %d for "
+ dev_err(isp->dev, "%s: Unable to get I2C adapter %d for "
"device %s\n", __func__,
board_info->i2c_adapter_id,
board_info->board_info->type);
@@ -1687,7 +1686,7 @@ isp_register_subdev_group(struct isp_device *isp,
subdev = v4l2_i2c_new_subdev_board(&isp->v4l2_dev, adapter,
board_info->board_info, NULL);
if (subdev == NULL) {
- printk(KERN_ERR "%s: Unable to register subdev %s\n",
+ dev_err(isp->dev, "%s: Unable to register subdev %s\n",
__func__, board_info->board_info->type);
continue;
}
@@ -1712,7 +1711,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->media_dev.link_notify = isp_pipeline_link_notify;
ret = media_device_register(&isp->media_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: Media device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: Media device registration failed (%d)\n",
__func__, ret);
return ret;
}
@@ -1720,7 +1719,7 @@ static int isp_register_entities(struct isp_device *isp)
isp->v4l2_dev.mdev = &isp->media_dev;
ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
if (ret < 0) {
- printk(KERN_ERR "%s: V4L2 device registration failed (%d)\n",
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
__func__, ret);
goto done;
}
@@ -1765,6 +1764,7 @@ static int isp_register_entities(struct isp_device *isp)
struct media_entity *input;
unsigned int flags;
unsigned int pad;
+ unsigned int i;
sensor = isp_register_subdev_group(isp, subdevs->subdevs);
if (sensor == NULL)
@@ -1806,13 +1806,25 @@ static int isp_register_entities(struct isp_device *isp)
break;
default:
- printk(KERN_ERR "%s: invalid interface type %u\n",
- __func__, subdevs->interface);
+ dev_err(isp->dev, "%s: invalid interface type %u\n",
+ __func__, subdevs->interface);
ret = -EINVAL;
goto done;
}
- ret = media_entity_create_link(&sensor->entity, 0, input, pad,
+ for (i = 0; i < sensor->entity.num_pads; i++) {
+ if (sensor->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+ if (i == sensor->entity.num_pads) {
+ dev_err(isp->dev,
+ "%s: no source pad in external entity\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = media_entity_create_link(&sensor->entity, i, input, pad,
flags);
if (ret < 0)
goto done;
@@ -1978,7 +1990,7 @@ error_csiphy:
*
* Always returns 0.
*/
-static int __devexit isp_remove(struct platform_device *pdev)
+static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
int i;
@@ -2059,7 +2071,7 @@ static int isp_map_mem_resource(struct platform_device *pdev,
* -EINVAL if couldn't install ISR,
* or clk_get return error value.
*/
-static int __devinit isp_probe(struct platform_device *pdev)
+static int isp_probe(struct platform_device *pdev)
{
struct isp_platform_data *pdata = pdev->dev.platform_data;
struct isp_device *isp;
@@ -2095,7 +2107,11 @@ static int __devinit isp_probe(struct platform_device *pdev)
isp->isp_csiphy1.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY1");
isp->isp_csiphy2.vdd = regulator_get(&pdev->dev, "VDD_CSIPHY2");
- /* Clocks */
+ /* Clocks
+ *
+ * The ISP clock tree is revision-dependent. We thus need to enable ICLK
+ * manually to read the revision before calling __omap3isp_get().
+ */
ret = isp_map_mem_resource(pdev, isp, OMAP3_ISP_IOMEM_MAIN);
if (ret < 0)
goto error;
@@ -2104,6 +2120,16 @@ static int __devinit isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
+ ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
+ if (ret < 0)
+ goto error;
+
+ isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
+ dev_info(isp->dev, "Revision %d.%d found\n",
+ (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
+
+ clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
+
if (__omap3isp_get(isp, false) == NULL) {
ret = -ENODEV;
goto error;
@@ -2114,10 +2140,6 @@ static int __devinit isp_probe(struct platform_device *pdev)
goto error_isp;
/* Memory resources */
- isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
- dev_info(isp->dev, "Revision %d.%d found\n",
- (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
-
for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
if (isp->revision == isp_res_maps[m].isp_rev)
break;
@@ -2228,7 +2250,7 @@ MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
- .remove = __devexit_p(isp_remove),
+ .remove = isp_remove,
.id_table = omap3isp_id_table,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 8be7487c326f..517d348ce32b 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -31,11 +31,9 @@
#include <media/v4l2-device.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
-#include <linux/iommu.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
#include "ispstat.h"
#include "ispccdc.h"
@@ -72,6 +70,8 @@ enum isp_mem_resources {
OMAP3_ISP_IOMEM_CSI2C_REGS1,
OMAP3_ISP_IOMEM_CSIPHY1,
OMAP3_ISP_IOMEM_CSI2C_REGS2,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL,
OMAP3_ISP_IOMEM_LAST
};
@@ -127,9 +127,6 @@ struct isp_reg {
struct isp_platform_callback {
u32 (*set_xclk)(struct isp_device *isp, u32 xclk, u8 xclksel);
- int (*csiphy_config)(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes);
};
/*
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index aa9df9d71a7b..60e60aa64fb4 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -30,6 +30,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
+#include <linux/omap-iommu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 6a3ff792af7d..783f4b05b153 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -517,7 +517,7 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (soft_reset_retries < 5);
if (soft_reset_retries == 5) {
- printk(KERN_ERR "CSI2: Soft reset try count exceeded!\n");
+ dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
return -EBUSY;
}
@@ -535,8 +535,8 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
} while (--i > 0);
if (i == 0) {
- printk(KERN_ERR
- "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
+ dev_err(isp->dev,
+ "CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
return -EBUSY;
}
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 348f67ebbbc9..3d56b33f85e8 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -32,34 +32,92 @@
#include "ispreg.h"
#include "ispcsiphy.h"
-/*
- * csiphy_lanes_config - Configuration of CSIPHY lanes.
- *
- * Updates HW configuration.
- * Called with phy->mutex taken.
- */
-static void csiphy_lanes_config(struct isp_csiphy *phy)
+static void csiphy_routing_cfg_3630(struct isp_csiphy *phy, u32 iface,
+ bool ccp2_strobe)
{
- unsigned int i;
- u32 reg;
+ u32 reg = isp_reg_readl(
+ phy->isp, OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+ u32 shift, mode;
+
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2C_PHY1:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ case ISP_INTERFACE_CCP2B_PHY2:
+ reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ break;
+ case ISP_INTERFACE_CSI2A_PHY2:
+ shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
+ break;
+ }
- reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ /* Select data/clock or data/strobe mode for CCP2 */
+ switch (iface) {
+ case ISP_INTERFACE_CCP2B_PHY1:
+ case ISP_INTERFACE_CCP2B_PHY2:
+ if (ccp2_strobe)
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
+ else
+ mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
+ }
- for (i = 0; i < phy->num_data_lanes; i++) {
- reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
- ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
- reg |= (phy->lanes.data[i].pol <<
- ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
- reg |= (phy->lanes.data[i].pos <<
- ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
+ reg |= mode << shift;
+
+ isp_reg_writel(phy->isp, reg,
+ OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL, 0);
+}
+
+static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ
+ | OMAP343X_CONTROL_CSIRXFE_RESET;
+
+ /* Only the CCP2B on PHY1 is configurable. */
+ if (iface != ISP_INTERFACE_CCP2B_PHY1)
+ return;
+
+ if (!on) {
+ isp_reg_writel(phy->isp, 0,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+ return;
}
- reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
- ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
- reg |= phy->lanes.clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
- reg |= phy->lanes.clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+ if (ccp2_strobe)
+ csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;
- isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ isp_reg_writel(phy->isp, csirxfe,
+ OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE, 0);
+}
+
+/*
+ * Configure OMAP 3 CSI PHY routing.
+ * @phy: relevant phy device
+ * @iface: ISP_INTERFACE_*
+ * @on: power on or off
+ * @ccp2_strobe: false: data/clock, true: data/strobe
+ *
+ * Note that the underlying routing configuration registers are part of the
+ * control (SCM) register space and part of the CORE power domain on both 3430
+ * and 3630, so they will not hold their contents in off-mode. This isn't an
+ * issue since the MPU power domain is forced on whilst the ISP is in use.
+ */
+static void csiphy_routing_cfg(struct isp_csiphy *phy, u32 iface, bool on,
+ bool ccp2_strobe)
+{
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_3630_CONTROL_CAMERA_PHY_CTRL]
+ && on)
+ return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
+ if (phy->isp->mmio_base[OMAP3_ISP_IOMEM_343X_CONTROL_CSIRXFE])
+ return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
}
/*
@@ -99,7 +157,7 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
} while ((reg != power >> 2) && (retry_count < 100));
if (retry_count == 100) {
- printk(KERN_ERR "CSI2 CIO set power failed!\n");
+ dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
return -EBUSY;
}
@@ -107,43 +165,28 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
}
/*
- * csiphy_dphy_config - Configure CSI2 D-PHY parameters.
- *
- * Called with phy->mutex taken.
+ * TCLK values are OK at their reset values
*/
-static void csiphy_dphy_config(struct isp_csiphy *phy)
-{
- u32 reg;
-
- /* Set up ISPCSIPHY_REG0 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
-
- reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
- ISPCSIPHY_REG0_THS_SETTLE_MASK);
- reg |= phy->dphy.ths_term << ISPCSIPHY_REG0_THS_TERM_SHIFT;
- reg |= phy->dphy.ths_settle << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
-
- /* Set up ISPCSIPHY_REG1 */
- reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
-
- reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
- ISPCSIPHY_REG1_TCLK_MISS_MASK |
- ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
- reg |= phy->dphy.tclk_term << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
- reg |= phy->dphy.tclk_miss << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
- reg |= phy->dphy.tclk_settle << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
-
- isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
-}
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
-static int csiphy_config(struct isp_csiphy *phy,
- struct isp_csiphy_dphy_cfg *dphy,
- struct isp_csiphy_lanes_cfg *lanes)
+static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
+ struct isp_csiphy_lanes_cfg *lanes;
+ int csi2_ddrclk_khz;
unsigned int used_lanes = 0;
unsigned int i;
+ u32 reg;
+
+ if (subdevs->interface == ISP_INTERFACE_CCP2B_PHY1
+ || subdevs->interface == ISP_INTERFACE_CCP2B_PHY2)
+ lanes = &subdevs->bus.ccp2.lanecfg;
+ else
+ lanes = &subdevs->bus.csi2.lanecfg;
/* Clock and data lanes verification */
for (i = 0; i < phy->num_data_lanes; i++) {
@@ -162,10 +205,61 @@ static int csiphy_config(struct isp_csiphy *phy,
if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
return -EINVAL;
- mutex_lock(&phy->mutex);
- phy->dphy = *dphy;
- phy->lanes = *lanes;
- mutex_unlock(&phy->mutex);
+ /*
+ * The PHY configuration is lost in off mode, that's not an
+ * issue since the MPU power domain is forced on whilst the
+ * ISP is in use.
+ */
+ csiphy_routing_cfg(phy, subdevs->interface, true,
+ subdevs->bus.ccp2.phy_layer);
+
+ /* DPHY timing configuration */
+ /* CSI-2 is DDR and we only count used lanes. */
+ csi2_ddrclk_khz = pipe->external_rate / 1000
+ / (2 * hweight32(used_lanes)) * pipe->external_width;
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
+ ISPCSIPHY_REG0_THS_SETTLE_MASK);
+ /* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
+ reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
+ << ISPCSIPHY_REG0_THS_TERM_SHIFT;
+ /* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
+ reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
+ << ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+
+ reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1);
+
+ reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
+ ISPCSIPHY_REG1_TCLK_MISS_MASK |
+ ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
+ reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
+ reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
+ reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+
+ /* DPHY lane configuration */
+ reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+
+ for (i = 0; i < phy->num_data_lanes; i++) {
+ reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
+ ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
+ reg |= (lanes->data[i].pol <<
+ ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
+ reg |= (lanes->data[i].pos <<
+ ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
+ }
+
+ reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
+ ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
+ reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
+ reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
+
+ isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
return 0;
}
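
A worked example of the DPHY timing formulas in the hunk above, as a minimal standalone sketch. The 96 MHz pixel rate, 10-bit bus width and two-lane configuration are illustrative assumptions, not values from the patch, and used_lanes here is already a lane count (the driver derives it from a lane bitmask via hweight32()):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))  /* as in the kernel */

    int main(void)
    {
        unsigned int external_rate = 96000000;   /* pixels per second */
        unsigned int external_width = 10;        /* bits per sample   */
        unsigned int used_lanes = 2;             /* data lane count   */

        /* CSI-2 is DDR and only the used lanes count. */
        unsigned int ddrclk_khz = external_rate / 1000
                / (2 * used_lanes) * external_width;         /* 240000 */

        /* THS_TERM = ceil(12.5 ns / DDRClk period) - 1 */
        unsigned int ths_term = DIV_ROUND_UP(25 * ddrclk_khz, 2000000) - 1;
        /* THS_SETTLE = ceil(90 ns / DDRClk period) + 3 */
        unsigned int ths_settle = DIV_ROUND_UP(90 * ddrclk_khz, 1000000) + 3;

        printf("ddrclk=%u kHz ths_term=%u ths_settle=%u\n",
               ddrclk_khz, ths_term, ths_settle);  /* 240000, 2, 25 */
        return 0;
    }
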
@@ -190,8 +284,9 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
- csiphy_dphy_config(phy);
- csiphy_lanes_config(phy);
+ rval = omap3isp_csiphy_config(phy);
+ if (rval < 0)
+ goto done;
rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
if (rval) {
@@ -211,6 +306,14 @@ void omap3isp_csiphy_release(struct isp_csiphy *phy)
{
mutex_lock(&phy->mutex);
if (phy->phy_in_use) {
+ struct isp_csi2_device *csi2 = phy->csi2;
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&csi2->subdev.entity);
+ struct isp_v4l2_subdevs_group *subdevs =
+ pipe->external->host_priv;
+
+ csiphy_routing_cfg(phy, subdevs->interface, false,
+ subdevs->bus.ccp2.phy_layer);
csiphy_power_autoswitch_enable(phy, false);
csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
regulator_disable(phy->vdd);
@@ -227,8 +330,6 @@ int omap3isp_csiphy_init(struct isp_device *isp)
struct isp_csiphy *phy1 = &isp->isp_csiphy1;
struct isp_csiphy *phy2 = &isp->isp_csiphy2;
- isp->platform_cb.csiphy_config = csiphy_config;
-
phy2->isp = isp;
phy2->csi2 = &isp->isp_csi2a;
phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index e93a661e65d9..14551fd77697 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -32,14 +32,6 @@
struct isp_csi2_device;
struct regulator;
-struct isp_csiphy_dphy_cfg {
- u8 ths_term;
- u8 ths_settle;
- u8 tclk_term;
- unsigned tclk_miss:1;
- u8 tclk_settle;
-};
-
struct isp_csiphy {
struct isp_device *isp;
struct mutex mutex; /* serialize csiphy configuration */
@@ -52,8 +44,6 @@ struct isp_csiphy {
unsigned int phy_regs;
u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
- struct isp_csiphy_lanes_cfg lanes;
- struct isp_csiphy_dphy_cfg dphy;
};
int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index d1a8dee5e1ca..2d759c56f37c 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -34,6 +34,8 @@
#include "ispreg.h"
#include "isphist.h"
+#define OMAP24XX_DMA_NO_DEVICE 0
+
#define HIST_CONFIG_DMA 1
#define HIST_USING_DMA(hist) ((hist)->dma_ch >= 0)
@@ -72,11 +74,14 @@ static void hist_reset_mem(struct ispstat *hist)
static void hist_dma_config(struct ispstat *hist)
{
+ struct isp_device *isp = hist->isp;
+
hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
hist->dma_config.frame_count = 1;
hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
- hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA;
+ hist->dma_config.src_start = isp->mmio_base_phys[OMAP3_ISP_IOMEM_HIST]
+ + ISPHIST_DATA;
hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
}
@@ -477,6 +482,8 @@ int omap3isp_hist_init(struct isp_device *isp)
return -ENOMEM;
memset(hist, 0, sizeof(*hist));
+ hist->isp = isp;
+
if (HIST_CONFIG_DMA)
ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
hist_dma_cb, hist, &hist->dma_ch);
@@ -494,7 +501,6 @@ int omap3isp_hist_init(struct isp_device *isp)
hist->ops = &hist_ops;
hist->priv = hist_cfg;
hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
- hist->isp = isp;
ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
if (ret) {
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 1ae1c0909ed1..691b92a3c3e7 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -200,10 +200,10 @@ static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable)
if (enable)
isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
else
isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_WIDTH | ISPPRV_PCR_INVALAW);
+ ISPPRV_PCR_INVALAW);
}
/*
@@ -1014,7 +1014,7 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
/*
* preview_config_input_format - Configure the input format
* @prev: The preview engine
- * @format: Format on the preview engine sink pad
+ * @info: Sink pad format information
*
* Enable and configure CFA interpolation for Bayer formats and disable it for
* greyscale formats.
@@ -1025,22 +1025,29 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
* reordered to support non-GRBG Bayer patterns.
*/
static void preview_config_input_format(struct isp_prev_device *prev,
- const struct v4l2_mbus_framefmt *format)
+ const struct isp_format_info *info)
{
struct isp_device *isp = to_isp_device(prev);
struct prev_params *params;
- switch (format->code) {
- case V4L2_MBUS_FMT_SGRBG10_1X10:
+ if (info->width == 8)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_WIDTH);
+
+ switch (info->flavor) {
+ case V4L2_MBUS_FMT_SGRBG8_1X8:
prev->params.cfa_order = 0;
break;
- case V4L2_MBUS_FMT_SRGGB10_1X10:
+ case V4L2_MBUS_FMT_SRGGB8_1X8:
prev->params.cfa_order = 1;
break;
- case V4L2_MBUS_FMT_SBGGR10_1X10:
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
prev->params.cfa_order = 2;
break;
- case V4L2_MBUS_FMT_SGBRG10_1X10:
+ case V4L2_MBUS_FMT_SGBRG8_1X8:
prev->params.cfa_order = 3;
break;
default:
@@ -1081,7 +1088,8 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
+ format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
@@ -1389,6 +1397,7 @@ static unsigned int preview_max_out_width(struct isp_prev_device *prev)
static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
+ const struct isp_format_info *info;
struct v4l2_mbus_framefmt *format;
unsigned long flags;
u32 update;
@@ -1402,17 +1411,18 @@ static void preview_configure(struct isp_prev_device *prev)
/* PREV_PAD_SINK */
format = &prev->formats[PREV_PAD_SINK];
+ info = omap3isp_video_format_info(format->code);
preview_adjust_bandwidth(prev);
- preview_config_input_format(prev, format);
+ preview_config_input_format(prev, info);
preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_CCDC)
preview_config_inlineoffset(prev, 0);
else
- preview_config_inlineoffset(prev,
- ALIGN(format->width, 0x20) * 2);
+ preview_config_inlineoffset(prev, ALIGN(format->width, 0x20) *
+ info->bpp);
preview_setup_hw(prev, update, active);
@@ -1709,6 +1719,11 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
+ V4L2_MBUS_FMT_Y8_1X8,
+ V4L2_MBUS_FMT_SGRBG8_1X8,
+ V4L2_MBUS_FMT_SRGGB8_1X8,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SGBRG8_1X8,
V4L2_MBUS_FMT_Y10_1X10,
V4L2_MBUS_FMT_SGRBG10_1X10,
V4L2_MBUS_FMT_SRGGB10_1X10,
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index e2c57f334c5d..b7d90e6fb01d 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -29,83 +29,6 @@
#define CM_CAM_MCLK_HZ 172800000 /* Hz */
-/* ISP Submodules offset */
-
-#define L4_34XX_BASE 0x48000000
-#define OMAP3430_ISP_BASE (L4_34XX_BASE + 0xBC000)
-
-#define OMAP3ISP_REG_BASE OMAP3430_ISP_BASE
-#define OMAP3ISP_REG(offset) (OMAP3ISP_REG_BASE + (offset))
-
-#define OMAP3ISP_CCP2_REG_OFFSET 0x0400
-#define OMAP3ISP_CCP2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCP2_REG_OFFSET)
-#define OMAP3ISP_CCP2_REG(offset) (OMAP3ISP_CCP2_REG_BASE + (offset))
-
-#define OMAP3ISP_CCDC_REG_OFFSET 0x0600
-#define OMAP3ISP_CCDC_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CCDC_REG_OFFSET)
-#define OMAP3ISP_CCDC_REG(offset) (OMAP3ISP_CCDC_REG_BASE + (offset))
-
-#define OMAP3ISP_HIST_REG_OFFSET 0x0A00
-#define OMAP3ISP_HIST_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_HIST_REG_OFFSET)
-#define OMAP3ISP_HIST_REG(offset) (OMAP3ISP_HIST_REG_BASE + (offset))
-
-#define OMAP3ISP_H3A_REG_OFFSET 0x0C00
-#define OMAP3ISP_H3A_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_H3A_REG_OFFSET)
-#define OMAP3ISP_H3A_REG(offset) (OMAP3ISP_H3A_REG_BASE + (offset))
-
-#define OMAP3ISP_PREV_REG_OFFSET 0x0E00
-#define OMAP3ISP_PREV_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_PREV_REG_OFFSET)
-#define OMAP3ISP_PREV_REG(offset) (OMAP3ISP_PREV_REG_BASE + (offset))
-
-#define OMAP3ISP_RESZ_REG_OFFSET 0x1000
-#define OMAP3ISP_RESZ_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_RESZ_REG_OFFSET)
-#define OMAP3ISP_RESZ_REG(offset) (OMAP3ISP_RESZ_REG_BASE + (offset))
-
-#define OMAP3ISP_SBL_REG_OFFSET 0x1200
-#define OMAP3ISP_SBL_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_SBL_REG_OFFSET)
-#define OMAP3ISP_SBL_REG(offset) (OMAP3ISP_SBL_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS1_REG_OFFSET 0x1800
-#define OMAP3ISP_CSI2A_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS1_REG(offset) \
- (OMAP3ISP_CSI2A_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY2_REG_OFFSET 0x1970
-#define OMAP3ISP_CSIPHY2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY2_REG_OFFSET)
-#define OMAP3ISP_CSIPHY2_REG(offset) (OMAP3ISP_CSIPHY2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2A_REGS2_REG_OFFSET 0x19C0
-#define OMAP3ISP_CSI2A_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2A_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2A_REGS2_REG(offset) \
- (OMAP3ISP_CSI2A_REGS2_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS1_REG_OFFSET 0x1C00
-#define OMAP3ISP_CSI2C_REGS1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS1_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS1_REG(offset) \
- (OMAP3ISP_CSI2C_REGS1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSIPHY1_REG_OFFSET 0x1D70
-#define OMAP3ISP_CSIPHY1_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSIPHY1_REG_OFFSET)
-#define OMAP3ISP_CSIPHY1_REG(offset) (OMAP3ISP_CSIPHY1_REG_BASE + (offset))
-
-#define OMAP3ISP_CSI2C_REGS2_REG_OFFSET 0x1DC0
-#define OMAP3ISP_CSI2C_REGS2_REG_BASE (OMAP3ISP_REG_BASE + \
- OMAP3ISP_CSI2C_REGS2_REG_OFFSET)
-#define OMAP3ISP_CSI2C_REGS2_REG(offset) \
- (OMAP3ISP_CSI2C_REGS2_REG_BASE + (offset))
-
/* ISP module register offset */
#define ISP_REVISION (0x000)
@@ -1583,4 +1506,26 @@
#define ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_MASK \
(0x7fffff << ISPCSIPHY_REG2_CCP2_SYNC_PATTERN_SHIFT)
+/* -----------------------------------------------------------------------------
+ * CONTROL registers for CSI-2 phy routing
+ */
+
+/* OMAP343X_CONTROL_CSIRXFE */
+#define OMAP343X_CONTROL_CSIRXFE_CSIB_INV (1 << 7)
+#define OMAP343X_CONTROL_CSIRXFE_RESENABLE (1 << 8)
+#define OMAP343X_CONTROL_CSIRXFE_SELFORM (1 << 10)
+#define OMAP343X_CONTROL_CSIRXFE_PWRDNZ (1 << 12)
+#define OMAP343X_CONTROL_CSIRXFE_RESET (1 << 13)
+
+/* OMAP3630_CONTROL_CAMERA_PHY_CTRL */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT 2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT 0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY 0x0
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE 0x1
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK 0x2
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_GPI 0x3
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK 0x3
+/* CCP2B: set to receive data from PHY2 instead of PHY1 */
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2 (1 << 4)
+
#endif /* OMAP3_ISP_REG_H */
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index b8640be692f1..61e17f9bd8b9 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -256,7 +257,7 @@ static int isp_stat_buf_queue(struct ispstat *stat)
if (!stat->active_buf)
return STAT_NO_BUF;
- do_gettimeofday(&stat->active_buf->ts);
+ ktime_get_ts(&stat->active_buf->ts);
stat->active_buf->buf_size = stat->buf_size;
if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
@@ -536,7 +537,8 @@ int omap3isp_stat_request_statistics(struct ispstat *stat,
return PTR_ERR(buf);
}
- data->ts = buf->ts;
+ data->ts.tv_sec = buf->ts.tv_sec;
+ data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
data->config_counter = buf->config_counter;
data->frame_number = buf->frame_number;
data->buf_size = buf->buf_size;
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 9b7c8654dc8a..9a047c929b9f 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/omap3isp.h>
-#include <plat/dma.h>
+#include <linux/omap-dma.h>
#include <media/v4l2-event.h>
#include "isp.h"
@@ -50,7 +50,7 @@ struct ispstat_buffer {
struct iovm_struct *iovm;
void *virt_addr;
dma_addr_t dma_addr;
- struct timeval ts;
+ struct timespec ts;
u32 buf_size;
u32 frame_number;
u16 config_counter;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 75cd309035f9..8dac17511e61 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -27,6 +27,7 @@
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/omap-iommu.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -34,9 +35,6 @@
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-#include <plat/omap-pm.h>
#include "ispvideo.h"
#include "isp.h"
@@ -1391,7 +1389,8 @@ int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
if (ret < 0)
- printk(KERN_ERR "%s: could not register video device (%d)\n",
+ dev_err(video->isp->dev,
+ "%s: could not register video device (%d)\n",
__func__, ret);
return ret;
diff --git a/drivers/media/platform/s3c-camif/Makefile b/drivers/media/platform/s3c-camif/Makefile
new file mode 100644
index 000000000000..50bf8c59b99c
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/Makefile
@@ -0,0 +1,5 @@
+# Makefile for s3c244x/s3c64xx CAMIF driver
+
+s3c-camif-objs := camif-core.o camif-capture.o camif-regs.o
+
+obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif.o
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
new file mode 100644
index 000000000000..a55793c3d811
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -0,0 +1,1672 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * Based on drivers/media/platform/s5p-fimc,
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+#include "camif-regs.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+/* Locking: called with vp->camif->slock spinlock held */
+static void camif_cfg_video_path(struct camif_vp *vp)
+{
+ WARN_ON(s3c_camif_get_scaler_config(vp, &vp->scaler));
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_target_format(vp);
+ camif_hw_set_output_dma(vp);
+}
+
+static void camif_prepare_dma_offset(struct camif_vp *vp)
+{
+ struct camif_frame *f = &vp->out_frame;
+
+ f->dma_offset.initial = f->rect.top * f->f_width + f->rect.left;
+ f->dma_offset.line = f->f_width - (f->rect.left + f->rect.width);
+
+ pr_debug("dma_offset: initial: %d, line: %d\n",
+ f->dma_offset.initial, f->dma_offset.line);
+}
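
camif_prepare_dma_offset() above reduces to two small expressions; here is a worked example as a standalone sketch, with the 640-pixel buffer width and the 600x440 window at (20,20) chosen purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int f_width = 640;              /* full buffer line width */
        unsigned int left = 20, top = 20;        /* composition rectangle  */
        unsigned int width = 600;

        unsigned int initial = top * f_width + left;   /* 12820 pixels in   */
        unsigned int line = f_width - (left + width);  /* 20 pixels skipped */
                                                       /* at each line end  */
        printf("initial=%u line=%u\n", initial, line);
        return 0;
    }
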
+
+/* Locking: called with camif->slock spinlock held */
+static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+
+ if (camif->sensor.sd == NULL || vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (variant->ip_revision == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_hw_set_camera_bus(camif);
+ camif_hw_set_source_format(camif);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ camif_hw_set_input_path(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+
+ return 0;
+}
+
+/*
+ * Initialize the video path, only up from the scaler stage. The camera
+ * input interface setup is skipped. This is useful to enable one of the
+ * video paths when the other is already running.
+ * Locking: called with camif->slock spinlock held.
+ */
+static int s3c_camif_hw_vp_init(struct camif_dev *camif, struct camif_vp *vp)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ camif_prepare_dma_offset(vp);
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_hw_clear_fifo_overflow(vp);
+ camif_cfg_video_path(vp);
+ vp->state &= ~ST_VP_CONFIG;
+ return 0;
+}
+
+static int sensor_set_power(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (!on == camif->sensor.power_count)
+ err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (!err)
+ sensor->power_count += on ? 1 : -1;
+
+ pr_debug("on: %d, power_count: %d, err: %d\n",
+ on, sensor->power_count, err);
+
+ return err;
+}
+
+static int sensor_set_streaming(struct camif_dev *camif, int on)
+{
+ struct cam_sensor *sensor = &camif->sensor;
+ int err = 0;
+
+ if (!on == camif->sensor.stream_count)
+ err = v4l2_subdev_call(sensor->sd, video, s_stream, on);
+ if (!err)
+ sensor->stream_count += on ? 1 : -1;
+
+ pr_debug("on: %d, stream_count: %d, err: %d\n",
+ on, sensor->stream_count, err);
+
+ return err;
+}
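
sensor_set_power() and sensor_set_streaming() above share the same refcounting idiom: "!on == count" is true only on the 0-to-1 (first user) and 1-to-0 (last user) transitions, so the subdev call is made exactly once per edge. A minimal standalone sketch of that idiom, with the subdev call stubbed out:

    #include <stdio.h>

    static int power_count;

    /* Stand-in for the v4l2_subdev_call(); just reports the transition. */
    static int subdev_s_power(int on)
    {
        printf("subdev power %s\n", on ? "on" : "off");
        return 0;
    }

    static int set_power(int on)
    {
        int err = 0;

        /* on=1: call only when count==0; on=0: call only when count==1. */
        if (!on == power_count)
            err = subdev_s_power(on);
        if (!err)
            power_count += on ? 1 : -1;
        return err;
    }

    int main(void)
    {
        set_power(1);   /* 0 -> 1, subdev called  */
        set_power(1);   /* 1 -> 2, subdev skipped */
        set_power(0);   /* 2 -> 1, subdev skipped */
        set_power(0);   /* 1 -> 0, subdev called  */
        return 0;
    }
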
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Return any buffers to vb2, perform CAMIF software reset and
+ * turn off streaming at the data pipeline (sensor) if required.
+ */
+static int camif_reinitialize(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ streaming = vp->state & ST_VP_SENSOR_STREAMING;
+
+ vp->state &= ~(ST_VP_PENDING | ST_VP_RUNNING | ST_VP_OFF |
+ ST_VP_ABORTING | ST_VP_STREAMING |
+ ST_VP_SENSOR_STREAMING | ST_VP_LASTIRQ);
+
+ /* Release unused buffers */
+ while (!list_empty(&vp->pending_buf_q)) {
+ buf = camif_pending_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vp->active_buf_q)) {
+ buf = camif_active_queue_pop(vp);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!streaming)
+ return 0;
+
+ return sensor_set_streaming(camif, 0);
+}
+
+static bool s3c_vp_active(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ ret = (vp->state & ST_VP_RUNNING) || (vp->state & ST_VP_PENDING);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return ret;
+}
+
+static bool camif_is_streaming(struct camif_dev *camif)
+{
+ unsigned long flags;
+ bool status;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ status = camif->stream_count > 0;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return status;
+}
+
+static int camif_stop_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ if (!s3c_vp_active(vp))
+ return 0;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->state &= ~(ST_VP_OFF | ST_VP_LASTIRQ);
+ vp->state |= ST_VP_ABORTING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ ret = wait_event_timeout(vp->irq_queue,
+ !(vp->state & ST_VP_ABORTING),
+ msecs_to_jiffies(CAMIF_STOP_TIMEOUT));
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (ret == 0 && !(vp->state & ST_VP_OFF)) {
+ /* Timed out, forcibly stop capture */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return camif_reinitialize(vp);
+}
+
+static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
+ struct camif_addr *paddr)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 pix_size;
+
+ if (vb == NULL || frame == NULL)
+ return -EINVAL;
+
+ pix_size = frame->rect.width * frame->rect.height;
+
+ pr_debug("colplanes: %d, pix_size: %u\n",
+ vp->out_fmt->colplanes, pix_size);
+
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ switch (vp->out_fmt->colplanes) {
+ case 1:
+ paddr->cb = 0;
+ paddr->cr = 0;
+ break;
+ case 2:
+ /* decompose Y into Y/Cb */
+ paddr->cb = (u32)(paddr->y + pix_size);
+ paddr->cr = 0;
+ break;
+ case 3:
+ paddr->cb = (u32)(paddr->y + pix_size);
+ /* decompose Y into Y/Cb/Cr */
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
+ else /* 420 */
+ paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
+
+ if (vp->out_fmt->color == IMG_FMT_YCRCB420)
+ swap(paddr->cb, paddr->cr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("DMA address: y: %#x cb: %#x cr: %#x\n",
+ paddr->y, paddr->cb, paddr->cr);
+
+ return 0;
+}
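
A worked example of the plane decomposition in camif_prepare_addr() above, as a standalone sketch; the 640x480 frame size and the base DMA address are illustrative assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int y = 0x40000000;             /* plane 0 DMA address */
        unsigned int pix_size = 640 * 480;       /* 307200 luma pixels  */

        /* Three-plane YCbCr 4:2:0: each chroma plane is a quarter of Y
         * (a 4:2:2 planar format would use pix_size >> 1 instead). */
        unsigned int cb = y + pix_size;          /* 0x4004b000 */
        unsigned int cr = cb + (pix_size >> 2);  /* 0x4005dc00 */

        printf("y=%#x cb=%#x cr=%#x\n", y, cb, cr);
        return 0;
    }
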
+
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
+{
+ struct camif_vp *vp = priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ unsigned int status;
+
+ spin_lock(&camif->slock);
+
+ if (ip_rev == S3C6410_CAMIF_IP_REV)
+ camif_hw_clear_pending_irq(vp);
+
+ status = camif_hw_get_status(vp);
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV && (status & CISTATUS_OVF_MASK)) {
+ camif_hw_clear_fifo_overflow(vp);
+ goto unlock;
+ }
+
+ if (vp->state & ST_VP_ABORTING) {
+ if (vp->state & ST_VP_OFF) {
+ /* Last IRQ */
+ vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
+ ST_VP_LASTIRQ);
+ wake_up(&vp->irq_queue);
+ goto unlock;
+ } else if (vp->state & ST_VP_LASTIRQ) {
+ camif_hw_disable_capture(vp);
+ camif_hw_enable_scaler(vp, false);
+ camif_hw_set_lastirq(vp, false);
+ vp->state |= ST_VP_OFF;
+ } else {
+ /* Disable capture, enable last IRQ */
+ camif_hw_set_lastirq(vp, true);
+ vp->state |= ST_VP_LASTIRQ;
+ }
+ }
+
+ if (!list_empty(&vp->pending_buf_q) && (vp->state & ST_VP_RUNNING) &&
+ !list_empty(&vp->active_buf_q)) {
+ unsigned int index;
+ struct camif_buffer *vbuf;
+ struct timeval *tv;
+ struct timespec ts;
+ /*
+ * Get previous DMA write buffer index:
+ * 0 => DMA buffer 0, 2;
+ * 1 => DMA buffer 1, 3.
+ */
+ index = (CISTATUS_FRAMECNT(status) + 2) & 1;
+
+ ktime_get_ts(&ts);
+ vbuf = camif_active_queue_peek(vp, index);
+
+ if (!WARN_ON(vbuf == NULL)) {
+ /* Dequeue a filled buffer */
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = vp->frame_sequence++;
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+ /* Set up an empty buffer at the DMA engine */
+ vbuf = camif_pending_queue_pop(vp);
+ vbuf->index = index;
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index);
+ camif_hw_set_output_addr(vp, &vbuf->paddr, index + 2);
+
+ /* Scheduled in H/W, add to the queue */
+ camif_active_queue_add(vp, vbuf);
+ }
+ } else if (!(vp->state & ST_VP_ABORTING) &&
+ (vp->state & ST_VP_PENDING)) {
+ vp->state |= ST_VP_RUNNING;
+ }
+
+ if (vp->state & ST_VP_CONFIG) {
+ camif_prepare_dma_offset(vp);
+ camif_hw_set_camera_crop(camif);
+ camif_hw_set_scaler(vp);
+ camif_hw_set_flip(vp);
+ camif_hw_set_test_pattern(camif, camif->test_pattern);
+ if (camif->variant->has_img_effect)
+ camif_hw_set_effect(camif, camif->colorfx,
+ camif->colorfx_cb, camif->colorfx_cr);
+ vp->state &= ~ST_VP_CONFIG;
+ }
+unlock:
+ spin_unlock(&camif->slock);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * We assume the codec capture path is always activated
+ * first, before the preview path starts streaming.
+ * This is required to avoid internal FIFO overflow and
+ * a need for CAMIF software reset.
+ */
+ spin_lock_irqsave(&camif->slock, flags);
+
+ if (camif->stream_count == 0) {
+ camif_hw_reset(camif);
+ ret = s3c_camif_hw_init(camif, vp);
+ } else {
+ ret = s3c_camif_hw_vp_init(camif, vp);
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (ret < 0) {
+ camif_reinitialize(vp);
+ return ret;
+ }
+
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->frame_sequence = 0;
+ vp->state |= ST_VP_PENDING;
+
+ if (!list_empty(&vp->pending_buf_q) &&
+ (!(vp->state & ST_VP_STREAMING) ||
+ !(vp->state & ST_VP_SENSOR_STREAMING))) {
+
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ vp->state |= ST_VP_STREAMING;
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ ret = sensor_set_streaming(camif, 1);
+ if (ret)
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+
+ return ret;
+ }
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ return camif_stop_capture(vp);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format *pix = NULL;
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int size;
+
+ if (pfmt) {
+ pix = &pfmt->fmt.pix;
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, -1);
+ size = (pix->width * pix->height * fmt->depth) / 8;
+ } else {
+ size = (frame->f_width * frame->f_height * fmt->depth) / 8;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+ *num_planes = 1;
+
+ if (pix)
+ sizes[0] = max(size, pix->sizeimage);
+ else
+ sizes[0] = size;
+ allocators[0] = camif->alloc_ctx;
+
+ pr_debug("size: %u\n", sizes[0]);
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vp->out_fmt == NULL)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < vp->payload) {
+ v4l2_err(&vp->vdev, "buffer too small: %lu, required: %u\n",
+ vb2_plane_size(vb, 0), vp->payload);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, vp->payload);
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct camif_buffer *buf = container_of(vb, struct camif_buffer, vb);
+ struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ WARN_ON(camif_prepare_addr(vp, &buf->vb, &buf->paddr));
+
+ if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
+ /* Schedule an empty buffer in H/W */
+ buf->index = vp->buf_index;
+
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index);
+ camif_hw_set_output_addr(vp, &buf->paddr, buf->index + 2);
+
+ camif_active_queue_add(vp, buf);
+ vp->buf_index = !vp->buf_index;
+ } else {
+ camif_pending_queue_add(vp, buf);
+ }
+
+ if (vb2_is_streaming(&vp->vb_queue) && !list_empty(&vp->pending_buf_q)
+ && !(vp->state & ST_VP_STREAMING)) {
+
+ vp->state |= ST_VP_STREAMING;
+ camif_hw_enable_scaler(vp, vp->scaler.enable);
+ camif_hw_enable_capture(vp);
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
+ if (sensor_set_streaming(camif, 1) == 0)
+ vp->state |= ST_VP_SENSOR_STREAMING;
+ else
+ v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
+
+ if (debug)
+ camif_hw_dump_regs(camif, __func__);
+ }
+ return;
+ }
+ spin_unlock_irqrestore(&camif->slock, flags);
+}
+
+static void camif_lock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_lock(&vp->camif->lock);
+}
+
+static void camif_unlock(struct vb2_queue *vq)
+{
+ struct camif_vp *vp = vb2_get_drv_priv(vq);
+ mutex_unlock(&vp->camif->lock);
+}
+
+static const struct vb2_ops s3c_camif_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = camif_unlock,
+ .wait_finish = camif_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static int s3c_camif_open(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ if (mutex_lock_interruptible(&camif->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = pm_runtime_get_sync(camif->dev);
+ if (ret < 0)
+ goto err_pm;
+
+ ret = sensor_set_power(camif, 1);
+ if (!ret)
+ goto unlock;
+
+ pm_runtime_put(camif->dev);
+err_pm:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_close(struct file *file)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
+ vp->state, vp->owner, task_pid_nr(current));
+
+ mutex_lock(&camif->lock);
+
+ if (vp->owner == file->private_data) {
+ camif_stop_capture(vp);
+ vb2_queue_release(&vp->vb_queue);
+ vp->owner = NULL;
+ }
+
+ sensor_set_power(camif, 0);
+
+ pm_runtime_put(camif->dev);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static unsigned int s3c_camif_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ mutex_lock(&camif->lock);
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_poll(&vp->vb_queue, file, wait);
+
+ mutex_unlock(&camif->lock);
+ return ret;
+}
+
+static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != file->private_data)
+ ret = -EBUSY;
+ else
+ ret = vb2_mmap(&vp->vb_queue, vma);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations s3c_camif_fops = {
+ .owner = THIS_MODULE,
+ .open = s3c_camif_open,
+ .release = s3c_camif_close,
+ .poll = s3c_camif_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s3c_camif_mmap,
+};
+
+/*
+ * Video node IOCTLs
+ */
+
+static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ strlcpy(cap->driver, S3C_CAMIF_DRIVER_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(vp->camif->dev), vp->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_subdev *sensor = vp->camif->sensor.sd;
+
+ if (input->index || sensor == NULL)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(input->name, sensor->name, sizeof(input->name));
+ return 0;
+}
+
+static int s3c_camif_vidioc_s_input(struct file *file, void *priv,
+ unsigned int i)
+{
+ return i == 0 ? 0 : -EINVAL;
+}
+
+static int s3c_camif_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, NULL, f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ pr_debug("fmt(%d): %s\n", f->index, f->description);
+ return 0;
+}
+
+static int s3c_camif_vidioc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+
+ pix->bytesperline = frame->f_width * fmt->ybpp;
+ pix->sizeimage = vp->payload;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->width = frame->f_width;
+ pix->height = frame->f_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ return 0;
+}
+
+static int __camif_video_try_format(struct camif_vp *vp,
+ struct v4l2_pix_format *pix,
+ const struct camif_fmt **ffmt)
+{
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ unsigned int wmin, hmin, sc_hrmax, sc_vrmax;
+ const struct vp_pix_limits *pix_lim;
+ const struct camif_fmt *fmt;
+
+ fmt = s3c_camif_find_format(vp, &pix->pixelformat, 0);
+
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+
+ if (ffmt)
+ *ffmt = fmt;
+
+ pix_lim = &camif->variant->vp_pix_limits[vp->id];
+
+ pr_debug("fmt: %ux%u, crop: %ux%u, bytesperline: %u\n",
+ pix->width, pix->height, crop->width, crop->height,
+ pix->bytesperline);
+ /*
+ * Calculate minimum width and height according to the configured
+ * camera input interface crop rectangle and the resizer's capabilities.
+ */
+ sc_hrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->width) - 3));
+ sc_vrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->height) - 1));
+
+ wmin = max_t(u32, pix_lim->min_out_width, crop->width / sc_hrmax);
+ wmin = round_up(wmin, pix_lim->out_width_align);
+ hmin = max_t(u32, 8, crop->height / sc_vrmax);
+ hmin = round_up(hmin, 8);
+
+ v4l_bound_align_image(&pix->width, wmin, pix_lim->max_sc_out_width,
+ ffs(pix_lim->out_width_align) - 1,
+ &pix->height, hmin, pix_lim->max_height, 0, 0);
+
+ pix->bytesperline = pix->width * fmt->ybpp;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) / 8;
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->field = V4L2_FIELD_NONE;
+
+ pr_debug("%ux%u, wmin: %d, hmin: %d, sc_hrmax: %d, sc_vrmax: %d\n",
+ pix->width, pix->height, wmin, hmin, sc_hrmax, sc_vrmax);
+
+ return 0;
+}
+
+static int s3c_camif_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return __camif_video_try_format(vp, &f->fmt.pix, NULL);
+}
+
+static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_frame *out_frame = &vp->out_frame;
+ const struct camif_fmt *fmt = NULL;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vb2_is_busy(&vp->vb_queue))
+ return -EBUSY;
+
+ ret = __camif_video_try_format(vp, &f->fmt.pix, &fmt);
+ if (ret < 0)
+ return ret;
+
+ vp->out_fmt = fmt;
+ vp->payload = pix->sizeimage;
+ out_frame->f_width = pix->width;
+ out_frame->f_height = pix->height;
+
+ /* Reset composition rectangle */
+ out_frame->rect.width = pix->width;
+ out_frame->rect.height = pix->height;
+ out_frame->rect.left = 0;
+ out_frame->rect.top = 0;
+
+ if (vp->owner == NULL)
+ vp->owner = priv;
+
+ pr_debug("%ux%u. payload: %u. fmt: %s. %d %d. sizeimage: %d. bpl: %d\n",
+ out_frame->f_width, out_frame->f_height, vp->payload, fmt->name,
+ pix->width * pix->height * fmt->depth, fmt->depth,
+ pix->sizeimage, pix->bytesperline);
+
+ return 0;
+}
+
+/* Only check pixel formats at the sensor and the camif subdev pads */
+static int camif_pipeline_validate(struct camif_dev *camif)
+{
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *pad;
+ int ret;
+
+ /* Retrieve format at the sensor subdev source pad */
+ pad = media_entity_remote_source(&camif->pads[0]);
+ if (!pad || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EPIPE;
+
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(camif->sensor.sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != camif->mbus_fmt.width ||
+ src_fmt.format.height != camif->mbus_fmt.height ||
+ src_fmt.format.code != camif->mbus_fmt.code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int s3c_camif_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct media_entity *sensor = &camif->sensor.sd->entity;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (s3c_vp_active(vp))
+ return 0;
+
+ ret = media_entity_pipeline_start(sensor, camif->m_pipeline);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_pipeline_validate(camif);
+ if (ret < 0) {
+ media_entity_pipeline_stop(sensor);
+ return ret;
+ }
+
+ return vb2_streamon(&vp->vb_queue, type);
+}
+
+static int s3c_camif_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ int ret;
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ ret = vb2_streamoff(&vp->vb_queue, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&camif->sensor.sd->entity);
+ return ret;
+}
+
+static int s3c_camif_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
+ vp->id, rb->count, vp->owner, priv);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ if (rb->count)
+ rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
+ else
+ vp->owner = NULL;
+
+ ret = vb2_reqbufs(&vp->vb_queue, rb);
+ if (!ret) {
+ vp->reqbufs_count = rb->count;
+ if (vp->owner == NULL && rb->count > 0)
+ vp->owner = priv;
+ }
+
+ return ret;
+}
+
+static int s3c_camif_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_querybuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d]\n", vp->id);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_qbuf(&vp->vb_queue, buf);
+}
+
+static int s3c_camif_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int s3c_camif_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ int ret;
+
+ if (vp->owner && vp->owner != priv)
+ return -EBUSY;
+
+ create->count = max_t(u32, 1, create->count);
+ ret = vb2_create_bufs(&vp->vb_queue, create);
+
+ if (!ret && vp->owner == NULL)
+ vp->owner = priv;
+
+ return ret;
+}
+
+static int s3c_camif_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ return vb2_prepare_buf(&vp->vb_queue, b);
+}
+
+static int s3c_camif_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = vp->out_frame.f_width;
+ sel->r.height = vp->out_frame.f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vp->out_frame.rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void __camif_try_compose(struct camif_dev *camif, struct camif_vp *vp,
+ struct v4l2_rect *r)
+{
+ /* s3c244x doesn't support composition */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ *r = vp->out_frame.rect;
+ return;
+ }
+
+ /* TODO: s3c64xx */
+}
+
+static int s3c_camif_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct camif_vp *vp = video_drvdata(file);
+ struct camif_dev *camif = vp->camif;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ __camif_try_compose(camif, vp, &rect);
+
+ sel->r = rect;
+ spin_lock_irqsave(&camif->slock, flags);
+ vp->out_frame.rect = rect;
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s3c_camif_ioctl_ops = {
+ .vidioc_querycap = s3c_camif_vidioc_querycap,
+ .vidioc_enum_input = s3c_camif_vidioc_enum_input,
+ .vidioc_g_input = s3c_camif_vidioc_g_input,
+ .vidioc_s_input = s3c_camif_vidioc_s_input,
+ .vidioc_enum_fmt_vid_cap = s3c_camif_vidioc_enum_fmt,
+ .vidioc_try_fmt_vid_cap = s3c_camif_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = s3c_camif_vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = s3c_camif_vidioc_g_fmt,
+ .vidioc_g_selection = s3c_camif_g_selection,
+ .vidioc_s_selection = s3c_camif_s_selection,
+ .vidioc_reqbufs = s3c_camif_reqbufs,
+ .vidioc_querybuf = s3c_camif_querybuf,
+ .vidioc_prepare_buf = s3c_camif_prepare_buf,
+ .vidioc_create_bufs = s3c_camif_create_bufs,
+ .vidioc_qbuf = s3c_camif_qbuf,
+ .vidioc_dqbuf = s3c_camif_dqbuf,
+ .vidioc_streamon = s3c_camif_streamon,
+ .vidioc_streamoff = s3c_camif_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+/*
+ * Video node controls
+ */
+static int s3c_camif_video_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_vp *vp = ctrl->priv;
+ struct camif_dev *camif = vp->camif;
+ unsigned long flags;
+
+ pr_debug("[vp%d] ctrl: %s, value: %d\n", vp->id,
+ ctrl->name, ctrl->val);
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ vp->hflip = ctrl->val;
+ break;
+
+ case V4L2_CID_VFLIP:
+ vp->vflip = ctrl->val;
+ break;
+ }
+
+ vp->state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+ return 0;
+}
+
+/* Codec and preview video node control ops */
+static const struct v4l2_ctrl_ops s3c_camif_video_ctrl_ops = {
+ .s_ctrl = s3c_camif_video_s_ctrl,
+};
+
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
+{
+ struct camif_vp *vp = &camif->vp[idx];
+ struct vb2_queue *q = &vp->vb_queue;
+ struct video_device *vfd = &vp->vdev;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ memset(vfd, 0, sizeof(*vfd));
+ snprintf(vfd->name, sizeof(vfd->name), "camif-%s",
+ vp->id == 0 ? "codec" : "preview");
+
+ vfd->fops = &s3c_camif_fops;
+ vfd->ioctl_ops = &s3c_camif_ioctl_ops;
+ vfd->v4l2_dev = &camif->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &camif->lock;
+ vp->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&vp->pending_buf_q);
+ INIT_LIST_HEAD(&vp->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &s3c_camif_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct camif_buffer);
+ q->drv_priv = vp;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_vd_rel;
+
+ vp->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vp->pad, 0);
+ if (ret)
+ goto err_vd_rel;
+
+ video_set_drvdata(vfd, vp);
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+
+ v4l2_ctrl_handler_init(&vp->ctrl_handler, 1);
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+ ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ctrl)
+ ctrl->priv = vp;
+
+ ret = vp->ctrl_handler.error;
+ if (ret < 0)
+ goto err_me_cleanup;
+
+ vfd->ctrl_handler = &vp->ctrl_handler;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrlh_free;
+
+ v4l2_info(&camif->v4l2_dev, "registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_ctrlh_free:
+ v4l2_ctrl_handler_free(&vp->ctrl_handler);
+err_me_cleanup:
+ media_entity_cleanup(&vfd->entity);
+err_vd_rel:
+ video_device_release(vfd);
+ return ret;
+}
+
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
+{
+ struct video_device *vfd = &camif->vp[idx].vdev;
+
+ if (video_is_registered(vfd)) {
+ video_unregister_device(vfd);
+ media_entity_cleanup(&vfd->entity);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ }
+}
+
+/* Media bus pixel formats supported at the camif input */
+static const enum v4l2_mbus_pixelcode camif_mbus_formats[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8,
+ V4L2_MBUS_FMT_YVYU8_2X8,
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_VYUY8_2X8,
+};
+
+/*
+ * Camera input interface subdev operations
+ */
+
+static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(camif_mbus_formats))
+ return -EINVAL;
+
+ code->code = camif_mbus_formats[code->index];
+ return 0;
+}
+
+static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ /* full camera input pixel size */
+ *mf = camif->mbus_fmt;
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* crop rectangle at camera interface input */
+ mf->width = camif->camif_crop.width;
+ mf->height = camif->camif_crop.height;
+ mf->code = camif->mbus_fmt.code;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static void __camif_subdev_try_format(struct camif_dev *camif,
+ struct v4l2_mbus_framefmt *mf, int pad)
+{
+ const struct s3c_camif_variant *variant = camif->variant;
+ const struct vp_pix_limits *pix_lim;
+ int i = ARRAY_SIZE(camif_mbus_formats);
+
+ /* FIXME: constraints against codec or preview path ? */
+ pix_lim = &variant->vp_pix_limits[VP_CODEC];
+
+ while (i-- >= 0)
+ if (camif_mbus_formats[i] == mf->code)
+ break;
+
+ mf->code = camif_mbus_formats[i];
+
+ if (pad == CAMIF_SD_PAD_SINK) {
+ v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, CAMIF_MAX_PIX_HEIGHT, 0,
+ 0);
+ } else {
+ struct v4l2_rect *crop = &camif->camif_crop;
+ v4l_bound_align_image(&mf->width, 8, crop->width,
+ ffs(pix_lim->out_width_align) - 1,
+ &mf->height, 8, crop->height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &camif->subdev, "%ux%u\n", mf->width, mf->height);
+}
+
+static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %ux%u\n",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&camif->lock);
+
+ /*
+ * No pixel format change at the camera input is allowed
+ * while streaming.
+ */
+ if (vb2_is_busy(&camif->vp[VP_CODEC].vb_queue) ||
+ vb2_is_busy(&camif->vp[VP_PREVIEW].vb_queue)) {
+ mutex_unlock(&camif->lock);
+ return -EBUSY;
+ }
+
+ __camif_subdev_try_format(camif, mf, fmt->pad);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&camif->lock);
+ return 0;
+ }
+
+ switch (fmt->pad) {
+ case CAMIF_SD_PAD_SINK:
+ camif->mbus_fmt = *mf;
+ /* Reset sink crop rectangle. */
+ crop->width = mf->width;
+ crop->height = mf->height;
+ crop->left = 0;
+ crop->top = 0;
+ /*
+ * Reset source format (the camif's crop rectangle)
+ * and the video output resolution.
+ */
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_frame *frame = &camif->vp[i].out_frame;
+ frame->rect = *crop;
+ frame->f_width = mf->width;
+ frame->f_height = mf->height;
+ }
+ break;
+
+ case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
+ /* Pixel format can be only changed on the sink pad. */
+ mf->code = camif->mbus_fmt.code;
+ mf->width = crop->width;
+ mf->height = crop->height;
+ break;
+ }
+
+ mutex_unlock(&camif->lock);
+ return 0;
+}
+
+static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&camif->lock);
+
+ if (sel->target == V4L2_SEL_TGT_CROP) {
+ sel->r = *crop;
+ } else { /* crop bounds */
+ sel->r.width = mf->width;
+ sel->r.height = mf->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ __func__, crop->left, crop->top, crop->width,
+ crop->height, mf->width, mf->height);
+
+ return 0;
+}
+
+static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ const struct camif_pix_limits *pix_lim = &camif->variant->pix_limits;
+ unsigned int left = 2 * r->left;
+ unsigned int top = 2 * r->top;
+
+ /*
+ * Following constraints must be met:
+ * - r->width + 2 * r->left = mf->width;
+ * - r->height + 2 * r->top = mf->height;
+ * - crop rectangle size and position must be aligned
+ * to 8 or 2 pixels, depending on SoC version.
+ */
+ v4l_bound_align_image(&r->width, 0, mf->width,
+ ffs(pix_lim->win_hor_offset_align) - 1,
+ &r->height, 0, mf->height, 1, 0);
+
+ v4l_bound_align_image(&left, 0, mf->width - r->width,
+ ffs(pix_lim->win_hor_offset_align),
+ &top, 0, mf->height - r->height, 2, 0);
+
+ r->left = left / 2;
+ r->top = top / 2;
+ r->width = mf->width - left;
+ r->height = mf->height - top;
+ /*
+ * Make sure we either downscale or upscale both the pixel
+ * width and height. Just return current crop rectangle if
+ * this scaler constraint is not met.
+ */
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV &&
+ camif_is_streaming(camif)) {
+ unsigned int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct v4l2_rect *or = &camif->vp[i].out_frame.rect;
+ if ((or->width > r->width) == (or->height > r->height))
+ continue;
+ *r = camif->camif_crop;
+ pr_debug("Width/height scaling direction limitation\n");
+ break;
+ }
+ }
+
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ r->left, r->top, r->width, r->height, mf->width, mf->height);
+}
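
A worked example of the centring constraint enforced by __camif_try_crop() above (r->width + 2 * r->left == mf->width, and likewise vertically), as a standalone sketch; the 640x480 sink format and the requested (20,20) offset are illustrative, and the v4l_bound_align_image() alignment steps are assumed to already be satisfied:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mf_w = 640, mf_h = 480;       /* sink (camera) format */
        unsigned int left = 2 * 20, top = 2 * 20;  /* requested (20,20)    */

        unsigned int r_left = left / 2;            /* 20  */
        unsigned int r_top = top / 2;              /* 20  */
        unsigned int r_width = mf_w - left;        /* 600 */
        unsigned int r_height = mf_h - top;        /* 440 */

        printf("%ux%u at (%u,%u), centred: %s\n",
               r_width, r_height, r_left, r_top,
               (r_width + 2 * r_left == mf_w &&
                r_height + 2 * r_top == mf_h) ? "yes" : "no");
        return 0;
    }
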
+
+static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct camif_dev *camif = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop = &camif->camif_crop;
+ struct camif_scaler scaler;
+
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != CAMIF_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&camif->lock);
+ __camif_try_crop(camif, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&camif->slock, flags);
+ *crop = sel->r;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ scaler = vp->scaler;
+ if (s3c_camif_get_scaler_config(vp, &scaler))
+ continue;
+ vp->scaler = scaler;
+ vp->state |= ST_VP_CONFIG;
+ }
+
+ spin_unlock_irqrestore(&camif->slock, flags);
+ }
+ mutex_unlock(&camif->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ __func__, crop->left, crop->top, crop->width, crop->height,
+ camif->mbus_fmt.width, camif->mbus_fmt.height);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
+ .enum_mbus_code = s3c_camif_subdev_enum_mbus_code,
+ .get_selection = s3c_camif_subdev_get_selection,
+ .set_selection = s3c_camif_subdev_set_selection,
+ .get_fmt = s3c_camif_subdev_get_fmt,
+ .set_fmt = s3c_camif_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops s3c_camif_subdev_ops = {
+ .pad = &s3c_camif_subdev_pad_ops,
+};
+
+static int s3c_camif_subdev_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct camif_dev *camif = container_of(ctrl->handler, struct camif_dev,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&camif->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ camif->colorfx = camif->ctrl_colorfx->val;
+ /* Set Cb, Cr */
+ switch (ctrl->val) {
+ case V4L2_COLORFX_SEPIA:
+ camif->colorfx_cb = 115;
+ camif->colorfx_cr = 145;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
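+			/* Control value packs Cb in bits 15:8, Cr in bits 7:0 */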
+ camif->colorfx_cb = camif->ctrl_colorfx_cbcr->val >> 8;
+ camif->colorfx_cr = camif->ctrl_colorfx_cbcr->val & 0xff;
+ break;
+ default:
+ /* for V4L2_COLORFX_BW and others */
+ camif->colorfx_cb = 128;
+ camif->colorfx_cr = 128;
+ }
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ camif->test_pattern = camif->ctrl_test_pattern->val;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ camif->vp[VP_CODEC].state |= ST_VP_CONFIG;
+ camif->vp[VP_PREVIEW].state |= ST_VP_CONFIG;
+ spin_unlock_irqrestore(&camif->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s3c_camif_subdev_ctrl_ops = {
+ .s_ctrl = s3c_camif_subdev_s_ctrl,
+};
+
+static const char * const s3c_camif_test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+ "Horizontal increment",
+ "Vertical increment",
+};
+
+int s3c_camif_create_subdev(struct camif_dev *camif)
+{
+ struct v4l2_ctrl_handler *handler = &camif->ctrl_handler;
+ struct v4l2_subdev *sd = &camif->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &s3c_camif_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strlcpy(sd->name, "S3C-CAMIF", sizeof(sd->name));
+
+ camif->pads[CAMIF_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ camif->pads[CAMIF_SD_PAD_SOURCE_C].flags = MEDIA_PAD_FL_SOURCE;
+ camif->pads[CAMIF_SD_PAD_SOURCE_P].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_init(&sd->entity, CAMIF_SD_PADS_NUM,
+ camif->pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 3);
+ camif->ctrl_test_pattern = v4l2_ctrl_new_std_menu_items(handler,
+ &s3c_camif_subdev_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
+ s3c_camif_test_pattern_menu);
+
+ camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x981f, V4L2_COLORFX_NONE);
+
+ camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
+ &s3c_camif_subdev_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+ if (handler->error) {
+ v4l2_ctrl_handler_free(handler);
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
+ V4L2_COLORFX_SET_CBCR, false);
+ if (!camif->variant->has_img_effect) {
+ camif->ctrl_colorfx->flags |= V4L2_CTRL_FLAG_DISABLED;
+ camif->ctrl_colorfx_cbcr->flags |= V4L2_CTRL_FLAG_DISABLED;
+ }
+ sd->ctrl_handler = handler;
+ v4l2_set_subdevdata(sd, camif);
+
+ return 0;
+}
+
+void s3c_camif_unregister_subdev(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = &camif->subdev;
+
+ /* Return if not registered */
+ if (v4l2_get_subdevdata(sd) == NULL)
+ return;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&camif->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+int s3c_camif_set_defaults(struct camif_dev *camif)
+{
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+ struct camif_frame *f = &vp->out_frame;
+
+ vp->camif = camif;
+ vp->id = i;
+ vp->offset = camif->variant->vp_offset;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ vp->fmt_flags = i ? FMT_FL_S3C24XX_PREVIEW :
+ FMT_FL_S3C24XX_CODEC;
+ else
+ vp->fmt_flags = FMT_FL_S3C64XX;
+
+ vp->out_fmt = s3c_camif_find_format(vp, NULL, 0);
+ BUG_ON(vp->out_fmt == NULL);
+
+ memset(f, 0, sizeof(*f));
+ f->f_width = CAMIF_DEF_WIDTH;
+ f->f_height = CAMIF_DEF_HEIGHT;
+ f->rect.width = CAMIF_DEF_WIDTH;
+ f->rect.height = CAMIF_DEF_HEIGHT;
+
+ /* Scaler is always enabled */
+ vp->scaler.enable = 1;
+
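+		/* Default payload: full frame size in bytes */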
+ vp->payload = (f->f_width * f->f_height *
+ vp->out_fmt->depth) / 8;
+ }
+
+ memset(&camif->mbus_fmt, 0, sizeof(camif->mbus_fmt));
+ camif->mbus_fmt.width = CAMIF_DEF_WIDTH;
+ camif->mbus_fmt.height = CAMIF_DEF_HEIGHT;
+ camif->mbus_fmt.code = camif_mbus_formats[0];
+
+ memset(&camif->camif_crop, 0, sizeof(camif->camif_crop));
+ camif->camif_crop.width = CAMIF_DEF_WIDTH;
+ camif->camif_crop.height = CAMIF_DEF_HEIGHT;
+
+ return 0;
+}
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
new file mode 100644
index 000000000000..e2716c35f8f1
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -0,0 +1,662 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "camif-core.h"
+
+static char *camif_clocks[CLK_MAX_NUM] = {
+ /* HCLK CAMIF clock */
+ [CLK_GATE] = "camif",
+ /* CAMIF / external camera sensor master clock */
+ [CLK_CAM] = "camera",
+};
+
+static const struct camif_fmt camif_formats[] = {
+ {
+ .name = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR422P,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCBCR420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "YVU 4:2:0 planar, Y/Cr/Cb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .ybpp = 1,
+ .color = IMG_FMT_YCRCB420,
+ .colplanes = 3,
+ .flags = FMT_FL_S3C24XX_CODEC |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "RGB565, 16 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .ybpp = 2,
+ .color = IMG_FMT_RGB565,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "XRGB8888, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_XRGB8888,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C24XX_PREVIEW |
+ FMT_FL_S3C64XX,
+ }, {
+ .name = "BGR666",
+ .fourcc = V4L2_PIX_FMT_BGR666,
+ .depth = 32,
+ .ybpp = 4,
+ .color = IMG_FMT_RGB666,
+ .colplanes = 1,
+ .flags = FMT_FL_S3C64XX,
+ }
+};
+
+/**
+ * s3c_camif_find_format() - lookup camif color format by fourcc or an index
+ * @vp: video path (DMA) description, used to filter formats; may be NULL
+ * @pixelformat: fourcc to match, ignored if null
+ * @index: index to the camif_formats array, ignored if negative
+ */
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat,
+ int index)
+{
+ const struct camif_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(camif_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
+ fmt = &camif_formats[i];
+ if (vp && !(vp->fmt_flags & fmt->flags))
+ continue;
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
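+/*
+ * Compute the pre-scaler ratio and shift for one axis: the largest
+ * power-of-two ratio (up to 32) with src >= tar * ratio is selected and
+ * the remaining scaling is left to the main scaler. For example
+ * src = 1280, tar = 240 gives ratio = 4, shift = 2. An overall
+ * downscaling ratio of 64 or more is rejected.
+ */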
+static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+ unsigned int sh = 6;
+
+ if (src >= 64 * tar)
+ return -EINVAL;
+
+ while (sh--) {
+ unsigned int tmp = 1 << sh;
+ if (src >= tar * tmp) {
+ *shift = sh, *ratio = tmp;
+ return 0;
+ }
+ }
+ *shift = 0, *ratio = 1;
+ return 0;
+}
+
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler)
+{
+ struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
+ int source_x = camif_crop->width;
+ int source_y = camif_crop->height;
+ int target_x = vp->out_frame.rect.width;
+ int target_y = vp->out_frame.rect.height;
+ int ret;
+
+ if (vp->rotation == 90 || vp->rotation == 270)
+ swap(target_x, target_y);
+
+ ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
+ &scaler->h_shift);
+ if (ret < 0)
+ return ret;
+
+ ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
+ &scaler->v_shift);
+ if (ret < 0)
+ return ret;
+
+ scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
+ scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
+
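+	/*
+	 * The main scaler ratios are 8-bit fixed-point factors relative
+	 * to the pre-scaler output: (source / 2^shift) * 256 / target.
+	 */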
+ scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
+ scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
+
+ scaler->scaleup_h = (target_x >= source_x);
+ scaler->scaleup_v = (target_y >= source_y);
+
+ scaler->copy = 0;
+
+ pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
+ scaler->pre_h_ratio, scaler->h_shift,
+ scaler->pre_v_ratio, scaler->v_shift);
+
+ pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
+ source_x, source_y, target_x, target_y,
+ scaler->scaleup_h, scaler->scaleup_v);
+
+ return 0;
+}
+
+static int camif_register_sensor(struct camif_dev *camif)
+{
+ struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ struct i2c_adapter *adapter;
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ camif->sensor.sd = NULL;
+
+ if (sensor->i2c_board_info.addr == 0)
+ return -EINVAL;
+
+ adapter = i2c_get_adapter(sensor->i2c_bus_num);
+ if (adapter == NULL) {
+ v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
+ sensor->i2c_bus_num);
+ return -EPROBE_DEFER;
+ }
+
+ sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
+ &sensor->i2c_board_info, NULL);
+ if (sd == NULL) {
+ i2c_put_adapter(adapter);
+ v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
+ sensor->i2c_board_info.type);
+ return -EPROBE_DEFER;
+ }
+ camif->sensor.sd = sd;
+
+ v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
+
+ /* Get initial pixel format and set it at the camif sink pad */
+ format.pad = 0;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+
+ if (ret < 0)
+ return 0;
+
+ format.pad = CAMIF_SD_PAD_SINK;
+ v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
+
+ v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
+ format.format.width, format.format.height,
+ format.format.code);
+ return 0;
+}
+
+static void camif_unregister_sensor(struct camif_dev *camif)
+{
+ struct v4l2_subdev *sd = camif->sensor.sd;
+ struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
+ struct i2c_adapter *adapter;
+
+ if (client == NULL)
+ return;
+
+ adapter = client->adapter;
+ v4l2_device_unregister_subdev(sd);
+ camif->sensor.sd = NULL;
+ i2c_unregister_device(client);
+ if (adapter)
+ i2c_put_adapter(adapter);
+}
+
+static int camif_create_media_links(struct camif_dev *camif)
+{
+ int i, ret;
+
+ ret = media_entity_create_link(&camif->sensor.sd->entity, 0,
+ &camif->subdev.entity, CAMIF_SD_PAD_SINK,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
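+	/* Link source pads 1 and 2 to the codec and preview video nodes */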
+ for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
+ ret = media_entity_create_link(&camif->subdev.entity, i,
+ &camif->vp[i - 1].vdev.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ }
+
+ return ret;
+}
+
+static int camif_register_video_nodes(struct camif_dev *camif)
+{
+ int ret = s3c_camif_register_video_node(camif, VP_CODEC);
+ if (ret < 0)
+ return ret;
+
+ return s3c_camif_register_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_video_nodes(struct camif_dev *camif)
+{
+ s3c_camif_unregister_video_node(camif, VP_CODEC);
+ s3c_camif_unregister_video_node(camif, VP_PREVIEW);
+}
+
+static void camif_unregister_media_entities(struct camif_dev *camif)
+{
+ camif_unregister_video_nodes(camif);
+ camif_unregister_sensor(camif);
+ s3c_camif_unregister_subdev(camif);
+}
+
+/*
+ * Media device
+ */
+static int camif_media_dev_register(struct camif_dev *camif)
+{
+ struct media_device *md = &camif->media_dev;
+ struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
+ unsigned int ip_rev = camif->variant->ip_revision;
+ int ret;
+
+ memset(md, 0, sizeof(*md));
+ snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF",
+ ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
+ strlcpy(md->bus_info, "platform", sizeof(md->bus_info));
+ md->hw_revision = ip_rev;
+ md->driver_version = KERNEL_VERSION(1, 0, 0);
+
+ md->dev = camif->dev;
+
+ strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
+ v4l2_dev->mdev = md;
+
+ ret = v4l2_device_register(camif->dev, v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = media_device_register(md);
+ if (ret < 0)
+ v4l2_device_unregister(v4l2_dev);
+
+ return ret;
+}
+
+static void camif_clk_put(struct camif_dev *camif)
+{
+ int i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ if (IS_ERR_OR_NULL(camif->clock[i]))
+ continue;
+ clk_unprepare(camif->clock[i]);
+ clk_put(camif->clock[i]);
+ }
+}
+
+static int camif_clk_get(struct camif_dev *camif)
+{
+ int ret, i;
+
+ for (i = 0; i < CLK_MAX_NUM; i++) {
+ camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
+ if (IS_ERR(camif->clock[i])) {
+ ret = PTR_ERR(camif->clock[i]);
+ goto err;
+ }
+ ret = clk_prepare(camif->clock[i]);
+ if (ret < 0) {
+ clk_put(camif->clock[i]);
+ camif->clock[i] = NULL;
+ goto err;
+ }
+ }
+ return 0;
+err:
+ camif_clk_put(camif);
+ dev_err(camif->dev, "failed to get clock: %s\n",
+ camif_clocks[i]);
+ return ret;
+}
+
+/*
+ * The CAMIF device has two relatively independent data processing paths
+ * that can source data from memory or the common camera input frontend.
+ * Register interrupts for each data processing path (camif_vp).
+ */
+static int camif_request_irqs(struct platform_device *pdev,
+ struct camif_dev *camif)
+{
+ int irq, ret, i;
+
+ for (i = 0; i < CAMIF_VP_NUM; i++) {
+ struct camif_vp *vp = &camif->vp[i];
+
+ init_waitqueue_head(&vp->irq_queue);
+
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get IRQ %d\n", i);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
+ 0, dev_name(&pdev->dev), vp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int s3c_camif_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s3c_camif_plat_data *pdata = dev->platform_data;
+ struct s3c_camif_drvdata *drvdata;
+ struct camif_dev *camif;
+ struct resource *mres;
+ int ret = 0;
+
+ camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
+ if (!camif)
+ return -ENOMEM;
+
+ spin_lock_init(&camif->slock);
+ mutex_init(&camif->lock);
+
+ camif->dev = dev;
+
+ if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
+ dev_err(dev, "wrong platform data\n");
+ return -EINVAL;
+ }
+
+ camif->pdata = *pdata;
+ drvdata = (void *)platform_get_device_id(pdev)->driver_data;
+ camif->variant = drvdata->variant;
+
+ mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ camif->io_base = devm_request_and_ioremap(dev, mres);
+ if (!camif->io_base) {
+ dev_err(dev, "failed to obtain I/O memory\n");
+ return -ENOENT;
+ }
+
+ ret = camif_request_irqs(pdev, camif);
+ if (ret < 0)
+ return ret;
+
+ ret = pdata->gpio_get();
+ if (ret < 0)
+ return ret;
+
+ ret = s3c_camif_create_subdev(camif);
+ if (ret < 0)
+ goto err_sd;
+
+ ret = camif_clk_get(camif);
+ if (ret < 0)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, camif);
+ clk_set_rate(camif->clock[CLK_CAM],
+ camif->pdata.sensor.clock_frequency);
+
+ dev_info(dev, "sensor clock frequency: %lu\n",
+ clk_get_rate(camif->clock[CLK_CAM]));
+ /*
+ * Set initial pixel format, resolution and crop rectangle.
+ * Must be done before a sensor subdev is registered as some
+	 * settings are overridden with values from the sensor subdev.
+ */
+ s3c_camif_set_defaults(camif);
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm;
+
+ /* Initialize contiguous memory allocator */
+ camif->alloc_ctx = vb2_dma_contig_init_ctx(dev);
+ if (IS_ERR(camif->alloc_ctx)) {
+ ret = PTR_ERR(camif->alloc_ctx);
+ goto err_alloc;
+ }
+
+ ret = camif_media_dev_register(camif);
+ if (ret < 0)
+ goto err_mdev;
+
+ ret = camif_register_sensor(camif);
+ if (ret < 0)
+ goto err_sens;
+
+ ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
+ if (ret < 0)
+ goto err_sens;
+
+ mutex_lock(&camif->media_dev.graph_mutex);
+
+ ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_register_video_nodes(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = camif_create_media_links(camif);
+ if (ret < 0)
+ goto err_unlock;
+
+ mutex_unlock(&camif->media_dev.graph_mutex);
+ pm_runtime_put(dev);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&camif->media_dev.graph_mutex);
+err_sens:
+ v4l2_device_unregister(&camif->v4l2_dev);
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+err_mdev:
+ vb2_dma_contig_cleanup_ctx(camif->alloc_ctx);
+err_alloc:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+err_pm:
+ camif_clk_put(camif);
+err_clk:
+ s3c_camif_unregister_subdev(camif);
+err_sd:
+ pdata->gpio_put();
+ return ret;
+}
+
+static int s3c_camif_remove(struct platform_device *pdev)
+{
+ struct camif_dev *camif = platform_get_drvdata(pdev);
+ struct s3c_camif_plat_data *pdata = &camif->pdata;
+
+ media_device_unregister(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+ v4l2_device_unregister(&camif->v4l2_dev);
+
+ pm_runtime_disable(&pdev->dev);
+ camif_clk_put(camif);
+ pdata->gpio_put();
+
+ return 0;
+}
+
+static int s3c_camif_runtime_resume(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ clk_enable(camif->clock[CLK_GATE]);
+ /* null op on s3c244x */
+ clk_enable(camif->clock[CLK_CAM]);
+ return 0;
+}
+
+static int s3c_camif_runtime_suspend(struct device *dev)
+{
+ struct camif_dev *camif = dev_get_drvdata(dev);
+
+ /* null op on s3c244x */
+ clk_disable(camif->clock[CLK_CAM]);
+
+ clk_disable(camif->clock[CLK_GATE]);
+ return 0;
+}
+
+static const struct s3c_camif_variant s3c244x_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 640,
+ .max_sc_out_width = 640,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 480,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C244X_CAMIF_IP_REV,
+};
+
+static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
+ .variant = &s3c244x_camif_variant,
+ .bus_clk_freq = 24000000UL,
+};
+
+static const struct s3c_camif_variant s3c6410_camif_variant = {
+ .vp_pix_limits = {
+ [VP_CODEC] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 2048,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ },
+ [VP_PREVIEW] = {
+ .max_out_width = 4096,
+ .max_sc_out_width = 720,
+ .out_width_align = 16,
+ .min_out_width = 16,
+ .max_height = 4096,
+ }
+ },
+ .pix_limits = {
+ .win_hor_offset_align = 8,
+ },
+ .ip_revision = S3C6410_CAMIF_IP_REV,
+ .has_img_effect = 1,
+ .vp_offset = 0x20,
+};
+
+static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
+ .variant = &s3c6410_camif_variant,
+ .bus_clk_freq = 133000000UL,
+};
+
+static struct platform_device_id s3c_camif_driver_ids[] = {
+ {
+ .name = "s3c2440-camif",
+ .driver_data = (unsigned long)&s3c244x_camif_drvdata,
+ }, {
+ .name = "s3c6410-camif",
+ .driver_data = (unsigned long)&s3c6410_camif_drvdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
+
+static const struct dev_pm_ops s3c_camif_pm_ops = {
+ .runtime_suspend = s3c_camif_runtime_suspend,
+ .runtime_resume = s3c_camif_runtime_resume,
+};
+
+static struct platform_driver s3c_camif_driver = {
+ .probe = s3c_camif_probe,
+ .remove = s3c_camif_remove,
+ .id_table = s3c_camif_driver_ids,
+ .driver = {
+ .name = S3C_CAMIF_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s3c_camif_pm_ops,
+ }
+};
+
+module_platform_driver(s3c_camif_driver);
+
+MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
new file mode 100644
index 000000000000..261134baa655
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -0,0 +1,393 @@
+/*
+ * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_CORE_H_
+#define CAMIF_CORE_H_
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-core.h>
+#include <media/s3c_camif.h>
+
+#define S3C_CAMIF_DRIVER_NAME "s3c-camif"
+#define CAMIF_REQ_BUFS_MIN 3
+#define CAMIF_MAX_OUT_BUFS 4
+#define CAMIF_MAX_PIX_WIDTH 4096
+#define CAMIF_MAX_PIX_HEIGHT 4096
+#define SCALER_MAX_RATIO 64
+#define CAMIF_DEF_WIDTH 640
+#define CAMIF_DEF_HEIGHT 480
+#define CAMIF_STOP_TIMEOUT 1500 /* ms */
+
+#define S3C244X_CAMIF_IP_REV 0x20 /* 2.0 */
+#define S3C2450_CAMIF_IP_REV 0x30 /* 3.0 - not implemented, not tested */
+#define S3C6400_CAMIF_IP_REV 0x31 /* 3.1 - not implemented, not tested */
+#define S3C6410_CAMIF_IP_REV 0x32 /* 3.2 */
+
+/* struct camif_vp::state */
+
+#define ST_VP_PENDING (1 << 0)
+#define ST_VP_RUNNING (1 << 1)
+#define ST_VP_STREAMING (1 << 2)
+#define ST_VP_SENSOR_STREAMING (1 << 3)
+
+#define ST_VP_ABORTING (1 << 4)
+#define ST_VP_OFF (1 << 5)
+#define ST_VP_LASTIRQ (1 << 6)
+
+#define ST_VP_CONFIG (1 << 8)
+
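+/*
+ * The CAMIF subdev has one sink pad fed by the sensor and two source
+ * pads, one per DMA output path (codec and preview video nodes).
+ */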
+#define CAMIF_SD_PAD_SINK 0
+#define CAMIF_SD_PAD_SOURCE_C 1
+#define CAMIF_SD_PAD_SOURCE_P 2
+#define CAMIF_SD_PADS_NUM 3
+
+enum img_fmt {
+ IMG_FMT_RGB565 = 0x0010,
+ IMG_FMT_RGB666,
+ IMG_FMT_XRGB8888,
+ IMG_FMT_YCBCR420 = 0x0020,
+ IMG_FMT_YCRCB420,
+ IMG_FMT_YCBCR422P,
+ IMG_FMT_YCBYCR422 = 0x0040,
+ IMG_FMT_YCRYCB422,
+ IMG_FMT_CBYCRY422,
+ IMG_FMT_CRYCBY422,
+};
+
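+/*
+ * The pixel format class is encoded in bits 4..6 of enum img_fmt:
+ * 0x10 - RGB, 0x20 - YCbCr planar, 0x40 - YCbCr interleaved.
+ */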
+#define img_fmt_is_rgb(x) ((x) & 0x10)
+#define img_fmt_is_ycbcr(x) ((x) & 0x60)
+
+/* Possible values for struct camif_fmt::flags */
+#define FMT_FL_S3C24XX_CODEC (1 << 0)
+#define FMT_FL_S3C24XX_PREVIEW (1 << 1)
+#define FMT_FL_S3C64XX (1 << 2)
+
+/**
+ * struct camif_fmt - pixel format description
+ * @name: format description string
+ * @fourcc: fourcc code for this format, 0 if not applicable
+ * @color: a corresponding enum img_fmt
+ * @colplanes: number of physically contiguous data planes
+ * @flags: indicate for which SoCs revisions this format is valid
+ * @depth: bits per pixel (total)
+ * @ybpp: number of luminance bytes per pixel
+ */
+struct camif_fmt {
+ char *name;
+ u32 fourcc;
+ u32 color;
+ u16 colplanes;
+ u16 flags;
+ u8 depth;
+ u8 ybpp;
+};
+
+/**
+ * struct camif_dma_offset - pixel offset information for DMA
+ * @initial: offset (in pixels) to first pixel
+ * @line: offset (in pixels) from end of line to start of next line
+ */
+struct camif_dma_offset {
+ int initial;
+ int line;
+};
+
+/**
+ * struct camif_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ * @dma_offset: DMA offset configuration
+ */
+struct camif_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+ struct camif_dma_offset dma_offset;
+};
+
+/* CAMIF clocks enumeration */
+enum {
+ CLK_GATE,
+ CLK_CAM,
+ CLK_MAX_NUM,
+};
+
+struct vp_pix_limits {
+ u16 max_out_width;
+ u16 max_sc_out_width;
+ u16 out_width_align;
+ u16 max_height;
+ u8 min_out_width;
+ u16 out_hor_offset_align;
+};
+
+struct camif_pix_limits {
+ u16 win_hor_offset_align;
+};
+
+/**
+ * struct s3c_camif_variant - CAMIF variant structure
+ * @vp_pix_limits: pixel limits for the codec and preview paths
+ * @pix_limits: pixel limits for the camera input interface
+ * @ip_revision: the CAMIF IP revision: 0x20 for s3c244x, 0x32 for s3c6410
+ * @has_img_effect: set if this IP revision supports the image effects block
+ * @vp_offset: additional register address offset used on this variant
+ */
+struct s3c_camif_variant {
+ struct vp_pix_limits vp_pix_limits[2];
+ struct camif_pix_limits pix_limits;
+ u8 ip_revision;
+ u8 has_img_effect;
+ unsigned int vp_offset;
+};
+
+struct s3c_camif_drvdata {
+ const struct s3c_camif_variant *variant;
+ unsigned long bus_clk_freq;
+};
+
+struct camif_scaler {
+ u8 scaleup_h;
+ u8 scaleup_v;
+ u8 copy;
+ u8 enable;
+ u32 h_shift;
+ u32 v_shift;
+ u32 pre_h_ratio;
+ u32 pre_v_ratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 main_h_ratio;
+ u32 main_v_ratio;
+};
+
+struct camif_dev;
+
+/**
+ * struct camif_vp - CAMIF data processing path structure (codec/preview)
+ * @irq_queue: interrupt handling waitqueue
+ * @irq: interrupt number for this data path
+ * @camif: pointer to the camif structure
+ * @pad: media pad for the video node
+ * @vdev: video device
+ * @ctrl_handler: video node controls handler
+ * @owner: file handle that owns the streaming
+ * @vb_queue: vb2 buffer queue
+ * @pending_buf_q: pending (empty) buffers queue head
+ * @active_buf_q: active (being written) buffers queue head
+ * @active_buffers: counter of buffers set up at the DMA engine
+ * @buf_index: identifier of the last empty buffer set up in H/W
+ * @frame_sequence: image frame sequence counter
+ * @reqbufs_count: the number of buffers requested
+ * @scaler: the scaler structure
+ * @out_fmt: pixel format at this video path output
+ * @payload: the output data frame payload size
+ * @out_frame: the output frame parameters (size, crop rectangle, DMA offsets)
+ * @state: the video path's state
+ * @fmt_flags: flags determining supported pixel formats
+ * @id: CAMIF id, 0 - codec, 1 - preview
+ * @rotation: current image rotation value
+ * @hflip: apply horizontal flip if set
+ * @vflip: apply vertical flip if set
+ */
+struct camif_vp {
+ wait_queue_head_t irq_queue;
+ int irq;
+ struct camif_dev *camif;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_fh *owner;
+ struct vb2_queue vb_queue;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ unsigned int active_buffers;
+ unsigned int buf_index;
+ unsigned int frame_sequence;
+ unsigned int reqbufs_count;
+ struct camif_scaler scaler;
+ const struct camif_fmt *out_fmt;
+ unsigned int payload;
+ struct camif_frame out_frame;
+ unsigned int state;
+ u16 fmt_flags;
+ u8 id;
+ u8 rotation;
+ u8 hflip;
+ u8 vflip;
+ unsigned int offset;
+};
+
+/* Video processing path enumeration */
+#define VP_CODEC 0
+#define VP_PREVIEW 1
+#define CAMIF_VP_NUM 2
+
+/**
+ * struct camif_dev - the CAMIF driver private data structure
+ * @media_dev: top-level media device structure
+ * @v4l2_dev: root v4l2_device
+ * @subdev: camera interface ("catchcam") subdev
+ * @mbus_fmt: camera input media bus format
+ * @camif_crop: camera input interface crop rectangle
+ * @pads: the camif subdev's media pads
+ * @stream_count: the camera interface streaming reference counter
+ * @sensor: image sensor data structure
+ * @m_pipeline: video entity pipeline description
+ * @ctrl_handler: v4l2 control handler (owned by @subdev)
+ * @test_pattern: test pattern controls
+ * @vp: video path (DMA) description (codec/preview)
+ * @alloc_ctx: memory buffer allocator context
+ * @variant: variant information for this device
+ * @dev: pointer to the CAMIF device struct
+ * @pdata: a copy of the driver's platform data
+ * @clock: clocks required for the CAMIF operation
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting CAMIF registers
+ * @io_base: start address of the mmaped CAMIF registers
+ */
+struct camif_dev {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_rect camif_crop;
+ struct media_pad pads[CAMIF_SD_PADS_NUM];
+ int stream_count;
+
+ struct cam_sensor {
+ struct v4l2_subdev *sd;
+ short power_count;
+ short stream_count;
+ } sensor;
+ struct media_pipeline *m_pipeline;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_test_pattern;
+ struct {
+ struct v4l2_ctrl *ctrl_colorfx;
+ struct v4l2_ctrl *ctrl_colorfx_cbcr;
+ };
+ u8 test_pattern;
+ u8 colorfx;
+ u8 colorfx_cb;
+ u8 colorfx_cr;
+
+ struct camif_vp vp[CAMIF_VP_NUM];
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ const struct s3c_camif_variant *variant;
+ struct device *dev;
+ struct s3c_camif_plat_data pdata;
+ struct clk *clock[CLK_MAX_NUM];
+ struct mutex lock;
+ spinlock_t slock;
+ void __iomem *io_base;
+};
+
+/**
+ * struct camif_addr - Y/Cb/Cr DMA start address structure
+ * @y: luminance plane dma address
+ * @cb: Cb plane dma address
+ * @cr: Cr plane dma address
+ */
+struct camif_addr {
+ dma_addr_t y;
+ dma_addr_t cb;
+ dma_addr_t cr;
+};
+
+/**
+ * struct camif_buffer - the camif video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: DMA start addresses
+ * @index: an identifier of this buffer at the DMA engine
+ */
+struct camif_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ struct camif_addr paddr;
+ unsigned int index;
+};
+
+const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
+ const u32 *pixelformat, int index);
+int s3c_camif_register_video_node(struct camif_dev *camif, int idx);
+void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx);
+irqreturn_t s3c_camif_irq_handler(int irq, void *priv);
+int s3c_camif_create_subdev(struct camif_dev *camif);
+void s3c_camif_unregister_subdev(struct camif_dev *camif);
+int s3c_camif_set_defaults(struct camif_dev *camif);
+int s3c_camif_get_scaler_config(struct camif_vp *vp,
+ struct camif_scaler *scaler);
+
+static inline void camif_active_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->active_buf_q);
+ vp->active_buffers++;
+}
+
+static inline struct camif_buffer *camif_active_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->active_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+}
+
+static inline struct camif_buffer *camif_active_queue_peek(
+ struct camif_vp *vp, int index)
+{
+ struct camif_buffer *tmp, *buf;
+
+ if (WARN_ON(list_empty(&vp->active_buf_q)))
+ return NULL;
+
+ list_for_each_entry_safe(buf, tmp, &vp->active_buf_q, list) {
+ if (buf->index == index) {
+ list_del(&buf->list);
+ vp->active_buffers--;
+ return buf;
+ }
+ }
+
+ return NULL;
+}
+
+static inline void camif_pending_queue_add(struct camif_vp *vp,
+ struct camif_buffer *buf)
+{
+ list_add_tail(&buf->list, &vp->pending_buf_q);
+}
+
+static inline struct camif_buffer *camif_pending_queue_pop(
+ struct camif_vp *vp)
+{
+ struct camif_buffer *buf = list_first_entry(&vp->pending_buf_q,
+ struct camif_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* CAMIF_CORE_H_ */
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
new file mode 100644
index 000000000000..1a3b4fc05ec6
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -0,0 +1,606 @@
+/*
+ * Samsung s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include "camif-regs.h"
+
+#define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off))
+#define camif_read(_camif, _off) readl((_camif)->io_base + (_off))
+
+void camif_hw_reset(struct camif_dev *camif)
+{
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg |= CISRCFMT_ITU601_8BIT;
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+
+ /* S/W reset */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_SWRST;
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIGCTRL_IRQ_LEVEL;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_SWRST;
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+ udelay(10);
+}
+
+void camif_hw_clear_pending_irq(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg |= CIGCTRL_IRQ_CLR(vp->id);
+ camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+/*
+ * Sets video test pattern (off, color bar, horizontal or vertical gradient).
+ * External sensor pixel clock must be active for the test pattern to work.
+ */
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern)
+{
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+ cfg &= ~CIGCTRL_TESTPATTERN_MASK;
+ cfg |= (pattern << 27);
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb)
+{
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS },
+ { V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY },
+ { V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE },
+ { V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE },
+ { V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING },
+ { V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE },
+ { V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY },
+ };
+ unsigned int i, cfg;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++)
+ if (colorfx[i].id == effect)
+ break;
+
+ if (i == ARRAY_SIZE(colorfx))
+ return;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset));
+ /* Set effect */
+ cfg &= ~CIIMGEFF_FIN_MASK;
+ cfg |= colorfx[i].value;
+ /* Set both paths */
+ if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) {
+ if (effect == V4L2_COLORFX_NONE)
+ cfg &= ~CIIMGEFF_IE_ENABLE_MASK;
+ else
+ cfg |= CIIMGEFF_IE_ENABLE_MASK;
+ }
+ cfg &= ~CIIMGEFF_PAT_CBCR_MASK;
+ cfg |= cr | (cb << 13);
+ camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg);
+}
+
+static const u32 src_pixfmt_map[][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
+};
+
+/* Set camera input pixel format and resolution */
+void camif_hw_set_source_format(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+	unsigned int i;
+	u32 cfg;
+
+	for (i = 0; i < ARRAY_SIZE(src_pixfmt_map); i++) {
+		if (src_pixfmt_map[i][0] == mf->code)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(src_pixfmt_map)) {
+		/* Unknown media bus code, fall back to the first entry */
+		i = 0;
+		dev_err(camif->dev,
+			"Unsupported pixel code, falling back to %#08x\n",
+			src_pixfmt_map[i][0]);
+	}
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
+ cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK);
+ cfg |= (mf->width << 16) | mf->height;
+ cfg |= src_pixfmt_map[i][1];
+ camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void camif_hw_set_camera_crop(struct camif_dev *camif)
+{
+ struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
+ struct v4l2_rect *crop = &camif->camif_crop;
+ u32 hoff2, voff2;
+ u32 cfg;
+
+	/* Note: s3c244x requirement: left = (f_width - rect.width) / 2 */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN);
+ cfg |= (crop->left << 16) | crop->top;
+ if (crop->left != 0 || crop->top != 0)
+ cfg |= CIWDOFST_WINOFSEN;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ hoff2 = mf->width - crop->width - crop->left;
+ voff2 = mf->height - crop->height - crop->top;
+ cfg = (hoff2 << 16) | voff2;
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg);
+ }
+}
+
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
+ if (vp->id == 0)
+ cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB |
+ CIWDOFST_CLROVCOFICR);
+ else
+ cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB |
+ CIWDOFST_CLROVPRFICR);
+ camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
+}
+
+/* Set video bus signals polarity */
+void camif_hw_set_camera_bus(struct camif_dev *camif)
+{
+ unsigned int flags = camif->pdata.sensor.flags;
+
+ u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
+
+ cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC |
+ CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= CIGCTRL_INVPOLVSYNC;
+ /*
+ * HREF is normally high during frame active data
+ * transmission and low during horizontal synchronization
+ * period. Thus HREF active high means HSYNC active low.
+ */
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CIGCTRL_INVPOLFIELD;
+ cfg |= CIGCTRL_FIELDMODE;
+ }
+
+ pr_debug("Setting CIGCTRL to: %#x\n", cfg);
+
+ camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
+}
+
+void camif_hw_set_output_addr(struct camif_vp *vp,
+ struct camif_addr *paddr, int i)
+{
+ struct camif_dev *camif = vp->camif;
+
+ camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y);
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV
+ || vp->id == VP_CODEC) {
+ camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i),
+ paddr->cb);
+ camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i),
+ paddr->cr);
+ }
+
+ pr_debug("dst_buf[%d]: %#X, cb: %#X, cr: %#X\n",
+ i, paddr->y, paddr->cb, paddr->cr);
+}
+
+static void camif_hw_set_out_dma_size(struct camif_vp *vp)
+{
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
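+/*
+ * Select the main and trailing DMA burst lengths for one line of 'width'
+ * pixels with 'ybpp' luminance bytes per pixel. The line length is
+ * expressed in 4-byte words (nwords = width * ybpp / 4) and the largest
+ * burst (16, 8, 4 or 2 words) for which the line is a whole multiple, or
+ * leaves a half/quarter remainder, is chosen; the remainder becomes the
+ * trailing burst. E.g. a 640-pixel YUV line (ybpp = 1) gives nwords = 160
+ * and bursts 16/16, while a 720-pixel line gives bursts 16/4.
+ */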
+static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst)
+{
+ unsigned int nwords = width * ybpp / 4;
+ unsigned int div, rem;
+
+ if (WARN_ON(width < 8 || (width * ybpp) & 7))
+ return;
+
+ for (div = 16; div >= 2; div /= 2) {
+ if (nwords < div)
+ continue;
+
+ rem = nwords & (div - 1);
+ if (rem == 0) {
+ *mburst = div;
+ *rburst = div;
+ break;
+ }
+ if (rem == div / 2 || rem == div / 4) {
+ *mburst = div;
+ *rburst = rem;
+ break;
+ }
+ }
+}
+
+void camif_hw_set_output_dma(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ const struct camif_fmt *fmt = vp->out_fmt;
+ unsigned int ymburst = 0, yrburst = 0;
+ u32 cfg;
+
+ camif_hw_set_out_dma_size(vp);
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
+ struct camif_dma_offset *offset = &frame->dma_offset;
+ /* Set the input dma offsets. */
+ cfg = S3C_CISS_OFFS_INITIAL(offset->initial);
+ cfg |= S3C_CISS_OFFS_LINE(offset->line);
+ camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg);
+ camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg);
+ }
+
+ /* Configure DMA burst values */
+ camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset));
+ cfg &= ~CICTRL_BURST_MASK;
+
+ cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst);
+ cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2);
+
+ camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst);
+}
+
+void camif_hw_set_input_path(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id));
+ cfg &= ~MSCTRL_SEL_DMA_CAM;
+ camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg);
+}
+
+void camif_hw_set_target_format(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_frame *frame = &vp->out_frame;
+ u32 cfg;
+
+ pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width,
+ frame->f_height, vp->out_fmt->color);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+ cfg &= ~CITRGFMT_TARGETSIZE_MASK;
+
+ if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
+ /* We currently support only YCbCr 4:2:2 at the camera input */
+ cfg |= CITRGFMT_IN422;
+ cfg &= ~CITRGFMT_OUT422;
+ if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
+ cfg |= CITRGFMT_OUT422;
+ } else {
+ cfg &= ~CITRGFMT_OUTFORMAT_MASK;
+ switch (vp->out_fmt->color) {
+ case IMG_FMT_RGB565...IMG_FMT_XRGB8888:
+ cfg |= CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ case IMG_FMT_YCBCR422P:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422:
+ cfg |= CITRGFMT_OUTFORMAT_YCBCR422I;
+ break;
+ }
+ }
+
+ /* Rotation is only supported by s3c64xx */
+ if (vp->rotation == 90 || vp->rotation == 270)
+ cfg |= (frame->f_height << 16) | frame->f_width;
+ else
+ cfg |= (frame->f_width << 16) | frame->f_height;
+ camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+
+ /* Target area, output pixel width * height */
+ cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset));
+ cfg &= ~CITAREA_MASK;
+ cfg |= (frame->f_width * frame->f_height);
+ camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg);
+}
+
+void camif_hw_set_flip(struct camif_vp *vp)
+{
+ u32 cfg = camif_read(vp->camif,
+ S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
+
+ cfg &= ~CITRGFMT_FLIP_MASK;
+
+ if (vp->hflip)
+ cfg |= CITRGFMT_FLIP_Y_MIRROR;
+ if (vp->vflip)
+ cfg |= CITRGFMT_FLIP_X_MIRROR;
+
+ camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
+}
+
+static void camif_hw_set_prescaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *sc = &vp->scaler;
+ u32 cfg, shfactor, addr;
+
+ addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset);
+
+ shfactor = 10 - (sc->h_shift + sc->v_shift);
+ cfg = shfactor << 28;
+
+ cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio;
+ camif_write(camif, addr, cfg);
+
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg);
+}
+
+void camif_s3c244x_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS |
+ CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT);
+
+ if (scaler->enable) {
+ if (scaler->scaleup_h) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_H;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_H;
+ }
+ if (scaler->scaleup_v) {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALEUP_V;
+ else
+ cfg |= CIPRSCCTRL_SCALEUP_V;
+ }
+ } else {
+ if (vp->id == VP_CODEC)
+ cfg |= CISCCTRL_SCALERBYPASS;
+ }
+
+ cfg |= ((scaler->main_h_ratio & 0x1ff) << 16);
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ if (vp->id == VP_PREVIEW) {
+ if (color == IMG_FMT_XRGB8888)
+ cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT;
+ cfg |= CIPRSCCTRL_SAMPLE;
+ }
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ struct camif_scaler *scaler = &vp->scaler;
+ unsigned int color = vp->out_fmt->color;
+ u32 cfg;
+
+ camif_hw_set_prescaler(vp);
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
+
+ cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE
+ | CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V
+ | CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE
+ | CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK
+ | CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION
+ | CISCCTRL_MAIN_RATIO_MASK);
+
+ cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE);
+
+ if (!scaler->enable) {
+ cfg |= CISCCTRL_SCALERBYPASS;
+ } else {
+ if (scaler->scaleup_h)
+ cfg |= CISCCTRL_SCALEUP_H;
+ if (scaler->scaleup_v)
+ cfg |= CISCCTRL_SCALEUP_V;
+ if (scaler->copy)
+ cfg |= CISCCTRL_ONE2ONE;
+ }
+
+ switch (color) {
+ case IMG_FMT_RGB666:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB666;
+ break;
+ case IMG_FMT_XRGB8888:
+ cfg |= CISCCTRL_OUTRGB_FMT_RGB888;
+ break;
+ }
+
+ cfg |= (scaler->main_h_ratio & 0x1ff) << 16;
+ cfg |= scaler->main_v_ratio & 0x1ff;
+
+ camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
+
+ pr_debug("main: h_ratio: %#x, v_ratio: %#x",
+ scaler->main_h_ratio, scaler->main_v_ratio);
+}
+
+void camif_hw_set_scaler(struct camif_vp *vp)
+{
+ unsigned int ip_rev = vp->camif->variant->ip_revision;
+
+ if (ip_rev == S3C244X_CAMIF_IP_REV)
+ camif_s3c244x_hw_set_scaler(vp);
+ else
+ camif_s3c64xx_hw_set_scaler(vp);
+}
+
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on)
+{
+ u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (on)
+ cfg |= CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~CISCCTRL_SCALERSTART;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable)
+{
+ u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset);
+ u32 cfg;
+
+ cfg = camif_read(vp->camif, addr);
+ if (enable)
+ cfg |= CICTRL_LASTIRQ_ENABLE;
+ else
+ cfg &= ~CICTRL_LASTIRQ_ENABLE;
+ camif_write(vp->camif, addr, cfg);
+}
+
+void camif_hw_enable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ camif->stream_count++;
+
+ if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
+ cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id);
+
+ if (vp->scaler.enable)
+ cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id);
+
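+	/* The global capture enable bit is set when the first path starts */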
+ if (camif->stream_count == 1)
+ cfg |= CIIMGCPT_IMGCPTEN;
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+}
+
+void camif_hw_disable_capture(struct camif_vp *vp)
+{
+ struct camif_dev *camif = vp->camif;
+ u32 cfg;
+
+ cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
+ cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id);
+
+ if (WARN_ON(--(camif->stream_count) < 0))
+ camif->stream_count = 0;
+
+ if (camif->stream_count == 0)
+ cfg &= ~CIIMGCPT_IMGCPTEN;
+
+ pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
+ cfg, camif->stream_count);
+
+ camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
+}
+
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" },
+ { S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" },
+ { S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" },
+ { S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" },
+ { S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" },
+ { S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" },
+ { S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" },
+ { S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" },
+ { S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" },
+ { S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" },
+ { S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" },
+ { S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" },
+ { S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" },
+ { S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" },
+ { S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" },
+ { S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" },
+ { S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" },
+ { S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" },
+ { S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" },
+ { S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" },
+ { S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" },
+ { S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" },
+ { S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" },
+ { S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" },
+ { S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" },
+ { S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" },
+ { S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" },
+ { S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" },
+ { S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" },
+ { S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" },
+ { S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" },
+ { S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" },
+ { S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" },
+ { S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(camif->io_base + registers[i].offset);
+ printk(KERN_INFO "%s:\t0x%08x\n", registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/platform/s3c-camif/camif-regs.h b/drivers/media/platform/s3c-camif/camif-regs.h
new file mode 100644
index 000000000000..af2d472ea1dd
--- /dev/null
+++ b/drivers/media/platform/s3c-camif/camif-regs.h
@@ -0,0 +1,269 @@
+/*
+ * Register definition file for s3c24xx/s3c64xx SoC CAMIF driver
+ *
+ * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+ * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef CAMIF_REGS_H_
+#define CAMIF_REGS_H_
+
+#include "camif-core.h"
+#include <media/s3c_camif.h>
+
+/*
+ * The id argument indicates the processing path:
+ * id = 0 - codec (FIMC C), 1 - preview (FIMC P).
+ */
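+
+/*
+ * Most per-path registers below are laid out as base + id * (0x34 + _offs),
+ * where _offs is the variant's vp_offset (0 on s3c244x, 0x20 on s3c6410);
+ * e.g. CIPRTRGFMT on s3c6410 sits at 0x48 + 1 * (0x34 + 0x20) = 0x9c.
+ */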
+
+/* Camera input format */
+#define S3C_CAMIF_REG_CISRCFMT 0x00
+#define CISRCFMT_ITU601_8BIT (1 << 31)
+#define CISRCFMT_ITU656_8BIT (0 << 31)
+#define CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define CISRCFMT_ORDER422_CRYCBY (3 << 14)
+#define CISRCFMT_ORDER422_MASK (3 << 14)
+#define CISRCFMT_SIZE_CAM_MASK (0x1fff << 16 | 0x1fff)
+
+/* Window offset */
+#define S3C_CAMIF_REG_CIWDOFST 0x04
+#define CIWDOFST_WINOFSEN (1 << 31)
+#define CIWDOFST_CLROVCOFIY (1 << 30)
+#define CIWDOFST_CLROVRLB_PR (1 << 28)
+/* #define CIWDOFST_CLROVPRFIY (1 << 27) */
+#define CIWDOFST_CLROVCOFICB (1 << 15)
+#define CIWDOFST_CLROVCOFICR (1 << 14)
+#define CIWDOFST_CLROVPRFICB (1 << 13)
+#define CIWDOFST_CLROVPRFICR (1 << 12)
+#define CIWDOFST_OFST_MASK (0x7ff << 16 | 0x7ff)
+
+/* Window offset 2 */
+#define S3C_CAMIF_REG_CIWDOFST2 0x14
+#define CIWDOFST2_OFST2_MASK (0xfff << 16 | 0xfff)
+
+/* Global control */
+#define S3C_CAMIF_REG_CIGCTRL 0x08
+#define CIGCTRL_SWRST (1 << 31)
+#define CIGCTRL_CAMRST (1 << 30)
+#define CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define CIGCTRL_INVPOLPCLK (1 << 26)
+#define CIGCTRL_INVPOLVSYNC (1 << 25)
+#define CIGCTRL_INVPOLHREF (1 << 24)
+#define CIGCTRL_IRQ_OVFEN (1 << 22)
+#define CIGCTRL_HREF_MASK (1 << 21)
+#define CIGCTRL_IRQ_LEVEL (1 << 20)
+/* IRQ_CLR_C, IRQ_CLR_P */
+#define CIGCTRL_IRQ_CLR(id) (1 << (19 - (id)))
+#define CIGCTRL_FIELDMODE (1 << 2)
+#define CIGCTRL_INVPOLFIELD (1 << 1)
+#define CIGCTRL_CAM_INTERLACE (1 << 0)
+
+/* Y DMA output frame start address. n = 0..3. */
+#define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4)
+/* Cb plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICBSA(id, n) (0x28 + (id) * 0x54 + (n) * 4)
+/* Cr plane output DMA start address. n = 0..3. Only codec path. */
+#define S3C_CAMIF_REG_CICRSA(id, n) (0x38 + (id) * 0x54 + (n) * 4)
+
+/* CICOTRGFMT, CIPRTRGFMT - Target format */
+#define S3C_CAMIF_REG_CITRGFMT(id, _offs) (0x48 + (id) * (0x34 + (_offs)))
+#define CITRGFMT_IN422 (1 << 31) /* only for s3c24xx */
+#define CITRGFMT_OUT422 (1 << 30) /* only for s3c24xx */
+#define CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_YCBCR422I (2 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_RGB (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_OUTFORMAT_MASK (3 << 29) /* only for s3c6410 */
+#define CITRGFMT_TARGETHSIZE(x) ((x) << 16)
+#define CITRGFMT_FLIP_NORMAL (0 << 14)
+#define CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define CITRGFMT_FLIP_180 (3 << 14)
+#define CITRGFMT_FLIP_MASK (3 << 14)
+/* Preview path only */
+#define CITRGFMT_ROT90_PR (1 << 13)
+#define CITRGFMT_TARGETVSIZE(x) ((x) << 0)
+#define CITRGFMT_TARGETSIZE_MASK ((0x1fff << 16) | 0x1fff)
+
+/* CICOCTRL, CIPRCTRL. Output DMA control. */
+#define S3C_CAMIF_REG_CICTRL(id, _offs) (0x4c + (id) * (0x34 + (_offs)))
+#define CICTRL_BURST_MASK (0xfffff << 4)
+/* xBURSTn - 5-bits width */
+#define CICTRL_YBURST1(x) ((x) << 19)
+#define CICTRL_YBURST2(x) ((x) << 14)
+#define CICTRL_RGBBURST1(x) ((x) << 19)
+#define CICTRL_RGBBURST2(x) ((x) << 14)
+#define CICTRL_CBURST1(x) ((x) << 9)
+#define CICTRL_CBURST2(x) ((x) << 4)
+#define CICTRL_LASTIRQ_ENABLE (1 << 2)
+#define CICTRL_ORDER422_MASK (3 << 0)
+
+/* CICOSCPRERATIO, CIPRSCPRERATIO. Pre-scaler control 1. */
+#define S3C_CAMIF_REG_CISCPRERATIO(id, _offs) (0x50 + (id) * (0x34 + (_offs)))
+
+/* CICOSCPREDST, CIPRSCPREDST. Pre-scaler control 2. */
+#define S3C_CAMIF_REG_CISCPREDST(id, _offs) (0x54 + (id) * (0x34 + (_offs)))
+
+/* CICOSCCTRL, CIPRSCCTRL. Main scaler control. */
+#define S3C_CAMIF_REG_CISCCTRL(id, _offs) (0x58 + (id) * (0x34 + (_offs)))
+#define CISCCTRL_SCALERBYPASS (1 << 31)
+/* s3c244x preview path only, s3c64xx both */
+#define CIPRSCCTRL_SAMPLE (1 << 31)
+/* 0 - 16-bit RGB, 1 - 24-bit RGB */
+#define CIPRSCCTRL_RGB_FORMAT_24BIT (1 << 30) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_H (1 << 29) /* only for s3c244x */
+#define CIPRSCCTRL_SCALEUP_V (1 << 28) /* only for s3c244x */
+/* s3c64xx */
+#define CISCCTRL_SCALEUP_H (1 << 30)
+#define CISCCTRL_SCALEUP_V (1 << 29)
+#define CISCCTRL_SCALEUP_MASK (0x3 << 29)
+#define CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define CISCCTRL_INTERLACE (1 << 25)
+#define CISCCTRL_SCALERSTART (1 << 15)
+#define CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define CISCCTRL_ONE2ONE (1 << 9)
+#define CISCCTRL_MAIN_RATIO_MASK (0x1ff << 16 | 0x1ff)
+
+/* CICOTAREA, CIPRTAREA. Target area for DMA (Hsize x Vsize). */
+#define S3C_CAMIF_REG_CITAREA(id, _offs) (0x5c + (id) * (0x34 + (_offs)))
+#define CITAREA_MASK 0xfffffff
+
+/* Codec (id = 0) or preview (id = 1) path status. */
+#define S3C_CAMIF_REG_CISTATUS(id, _offs) (0x64 + (id) * (0x34 + (_offs)))
+#define CISTATUS_OVFIY_STATUS (1 << 31)
+#define CISTATUS_OVFICB_STATUS (1 << 30)
+#define CISTATUS_OVFICR_STATUS (1 << 29)
+#define CISTATUS_OVF_MASK (0x7 << 29)
+#define CIPRSTATUS_OVF_MASK (0x3 << 30)
+#define CISTATUS_VSYNC_STATUS (1 << 28)
+#define CISTATUS_FRAMECNT_MASK (3 << 26)
+#define CISTATUS_FRAMECNT(__reg) (((__reg) >> 26) & 0x3)
+#define CISTATUS_WINOFSTEN_STATUS (1 << 25)
+#define CISTATUS_IMGCPTEN_STATUS (1 << 22)
+#define CISTATUS_IMGCPTENSC_STATUS (1 << 21)
+#define CISTATUS_VSYNC_A_STATUS (1 << 20)
+#define CISTATUS_FRAMEEND_STATUS (1 << 19) /* 17 on s3c64xx */
+
+/* Image capture enable */
+#define S3C_CAMIF_REG_CIIMGCPT(_offs) (0xa0 + (_offs))
+#define CIIMGCPT_IMGCPTEN (1 << 31)
+#define CIIMGCPT_IMGCPTEN_SC(id) (1 << (30 - (id)))
+/* Frame control: 1 - one-shot, 0 - free run */
+#define CIIMGCPT_CPT_FREN_ENABLE(id) (1 << (25 - (id)))
+#define CIIMGCPT_CPT_FRMOD_ENABLE (0 << 18)
+#define CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Capture sequence */
+#define S3C_CAMIF_REG_CICPTSEQ 0xc4
+
+/* Image effects */
+#define S3C_CAMIF_REG_CIIMGEFF(_offs) (0xb0 + (_offs))
+#define CIIMGEFF_IE_ENABLE(id) (1 << (30 + (id)))
+#define CIIMGEFF_IE_ENABLE_MASK (3 << 30)
+/* Image effect: 1 - after scaler, 0 - before scaler */
+#define CIIMGEFF_IE_AFTER_SC (1 << 29)
+#define CIIMGEFF_FIN_MASK (7 << 26)
+#define CIIMGEFF_FIN_BYPASS (0 << 26)
+#define CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+#define CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define CIIMGEFF_PAT_CR(x) (x)
+
+/* MSCOY0SA, MSPRY0SA. Y/Cb/Cr frame start address for input DMA. */
+#define S3C_CAMIF_REG_MSY0SA(id) (0xd4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0SA(id) (0xd8 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0SA(id) (0xdc + ((id) * 0x2c))
+
+/* MSCOY0END, MSPRY0END. Y/Cb/Cr frame end address for input DMA. */
+#define S3C_CAMIF_REG_MSY0END(id) (0xe0 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCB0END(id) (0xe4 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCR0END(id) (0xe8 + ((id) * 0x2c))
+
+/* MSCOYOFF, MSPRYOFF. Y/Cb/Cr offset. n: 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSYOFF(id) (0x118 + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCBOFF(id) (0x11c + ((id) * 0x2c))
+#define S3C_CAMIF_REG_MSCROFF(id) (0x120 + ((id) * 0x2c))
+
+/* Real input DMA data size. n = 0 - codec, 1 - preview. */
+#define S3C_CAMIF_REG_MSWIDTH(id) (0xf8 + (id) * 0x2c)
+#define AUTOLOAD_ENABLE (1 << 31)
+#define ADDR_CH_DIS (1 << 30)
+#define MSHEIGHT(x) (((x) & 0x3ff) << 16)
+#define MSWIDTH(x) ((x) & 0x3ff)
+
+/* Input DMA control. n = 0 - codec, 1 - preview */
+#define S3C_CAMIF_REG_MSCTRL(id) (0xfc + (id) * 0x2c)
+#define MSCTRL_ORDER422_M_YCBYCR (0 << 4)
+#define MSCTRL_ORDER422_M_YCRYCB (1 << 4)
+#define MSCTRL_ORDER422_M_CBYCRY (2 << 4)
+#define MSCTRL_ORDER422_M_CRYCBY (3 << 4)
+/* 0 - camera, 1 - DMA */
+#define MSCTRL_SEL_DMA_CAM (1 << 3)
+#define MSCTRL_INFORMAT_M_YCBCR420 (0 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422 (1 << 1)
+#define MSCTRL_INFORMAT_M_YCBCR422I (2 << 1)
+#define MSCTRL_INFORMAT_M_RGB (3 << 1)
+#define MSCTRL_ENVID_M (1 << 0)
+
+/* CICOSCOSY, CIPRSCOSY. Scan line Y/Cb/Cr offset. */
+#define S3C_CAMIF_REG_CISSY(id) (0x12c + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCB(id) (0x130 + (id) * 0x0c)
+#define S3C_CAMIF_REG_CISSCR(id) (0x134 + (id) * 0x0c)
+#define S3C_CISS_OFFS_INITIAL(x) ((x) << 16)
+#define S3C_CISS_OFFS_LINE(x) ((x) << 0)
+
+/* ------------------------------------------------------------------ */
+
+void camif_hw_reset(struct camif_dev *camif);
+void camif_hw_clear_pending_irq(struct camif_vp *vp);
+void camif_hw_clear_fifo_overflow(struct camif_vp *vp);
+void camif_hw_set_lastirq(struct camif_vp *vp, int enable);
+void camif_hw_set_input_path(struct camif_vp *vp);
+void camif_hw_enable_scaler(struct camif_vp *vp, bool on);
+void camif_hw_enable_capture(struct camif_vp *vp);
+void camif_hw_disable_capture(struct camif_vp *vp);
+void camif_hw_set_camera_bus(struct camif_dev *camif);
+void camif_hw_set_source_format(struct camif_dev *camif);
+void camif_hw_set_camera_crop(struct camif_dev *camif);
+void camif_hw_set_scaler(struct camif_vp *vp);
+void camif_hw_set_flip(struct camif_vp *vp);
+void camif_hw_set_output_dma(struct camif_vp *vp);
+void camif_hw_set_target_format(struct camif_vp *vp);
+void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern);
+void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
+ unsigned int cr, unsigned int cb);
+void camif_hw_set_output_addr(struct camif_vp *vp, struct camif_addr *paddr,
+ int index);
+void camif_hw_dump_regs(struct camif_dev *camif, const char *label);
+
+static inline u32 camif_hw_get_status(struct camif_vp *vp)
+{
+ return readl(vp->camif->io_base + S3C_CAMIF_REG_CISTATUS(vp->id,
+ vp->offset));
+}
+
+#endif /* CAMIF_REGS_H_ */
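
An illustrative aside, not part of the patch: every codec/preview register pair in this header is addressed through one macro whose (id) argument selects the path, so with _offs == 0 the CISTATUS macro resolves to 0x64 for the codec path (id = 0) and 0x98 for the preview path (id = 1). A minimal sketch of a caller testing the FIFO overflow bits via camif_hw_get_status(); the helper itself is defined in the header above, the wrapper below is hypothetical:

static inline bool camif_example_fifo_overflow(struct camif_vp *vp)
{
	/* vp->id: 0 - codec path, 1 - preview path */
	u32 status = camif_hw_get_status(vp);

	return status & (vp->id ? CIPRSTATUS_OVF_MASK : CISTATUS_OVF_MASK);
}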
diff --git a/drivers/media/platform/s5p-fimc/fimc-capture.c b/drivers/media/platform/s5p-fimc/fimc-capture.c
index 891ee873c62b..fdb6740248a7 100644
--- a/drivers/media/platform/s5p-fimc/fimc-capture.c
+++ b/drivers/media/platform/s5p-fimc/fimc-capture.c
@@ -1230,6 +1230,14 @@ static int fimc_cap_qbuf(struct file *file, void *priv,
return vb2_qbuf(&fimc->vid_cap.vbq, buf);
}
+static int fimc_cap_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+
+ return vb2_expbuf(&fimc->vid_cap.vbq, eb);
+}
+
static int fimc_cap_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
@@ -1354,6 +1362,7 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_qbuf = fimc_cap_qbuf,
.vidioc_dqbuf = fimc_cap_dqbuf,
+ .vidioc_expbuf = fimc_cap_expbuf,
.vidioc_prepare_buf = fimc_cap_prepare_buf,
.vidioc_create_bufs = fimc_cap_create_bufs,
@@ -1729,7 +1738,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
q = &fimc->vid_cap.vbq;
memset(q, 0, sizeof(*q));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->drv_priv = fimc->vid_cap.ctx;
q->ops = &fimc_capture_qops;
q->mem_ops = &vb2_dma_contig_memops;
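
An illustrative aside, not part of the patch: the new .vidioc_expbuf handler together with the VB2_DMABUF io mode lets userspace export an MMAP capture buffer as a DMABUF file descriptor. A minimal userspace sketch of the ioctl call, assuming a previously REQBUFS'd MMAP buffer; the video-node descriptor and the index/plane values are hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int export_capture_buffer(int video_fd, unsigned int index,
				 unsigned int plane)
{
	struct v4l2_exportbuffer eb;

	memset(&eb, 0, sizeof(eb));
	eb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	eb.index = index;	/* buffer obtained earlier via VIDIOC_REQBUFS */
	eb.plane = plane;

	if (ioctl(video_fd, VIDIOC_EXPBUF, &eb) < 0)
		return -1;

	return eb.fd;		/* DMABUF fd, shareable with other devices */
}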
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c
index 8d0d2b94a135..545b46ae12a1 100644
--- a/drivers/media/platform/s5p-fimc/fimc-core.c
+++ b/drivers/media/platform/s5p-fimc/fimc-core.c
@@ -1035,7 +1035,7 @@ static int fimc_suspend(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static int __devexit fimc_remove(struct platform_device *pdev)
+static int fimc_remove(struct platform_device *pdev)
{
struct fimc_dev *fimc = platform_get_drvdata(pdev);
@@ -1234,7 +1234,7 @@ static const struct dev_pm_ops fimc_pm_ops = {
static struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = __devexit_p(fimc_remove),
+ .remove = fimc_remove,
.id_table = fimc_driver_ids,
.driver = {
.name = FIMC_MODULE_NAME,
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c
index 1b309a72f09f..ed67220d0a64 100644
--- a/drivers/media/platform/s5p-fimc/fimc-lite.c
+++ b/drivers/media/platform/s5p-fimc/fimc-lite.c
@@ -1406,7 +1406,7 @@ static int fimc_lite_clk_get(struct fimc_lite *fimc)
return ret;
}
-static int __devinit fimc_lite_probe(struct platform_device *pdev)
+static int fimc_lite_probe(struct platform_device *pdev)
{
struct flite_drvdata *drv_data = fimc_lite_get_drvdata(pdev);
struct fimc_lite *fimc;
@@ -1547,7 +1547,7 @@ static int fimc_lite_suspend(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static int __devexit fimc_lite_remove(struct platform_device *pdev)
+static int fimc_lite_remove(struct platform_device *pdev)
{
struct fimc_lite *fimc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -1595,7 +1595,7 @@ static const struct dev_pm_ops fimc_lite_pm_ops = {
static struct platform_driver fimc_lite_driver = {
.probe = fimc_lite_probe,
- .remove = __devexit_p(fimc_lite_remove),
+ .remove = fimc_lite_remove,
.id_table = fimc_lite_driver_ids,
.driver = {
.name = FIMC_LITE_DRV_NAME,
diff --git a/drivers/media/platform/s5p-fimc/fimc-m2m.c b/drivers/media/platform/s5p-fimc/fimc-m2m.c
index 62afed3162ea..1d21da4bd24b 100644
--- a/drivers/media/platform/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/platform/s5p-fimc/fimc-m2m.c
@@ -105,7 +105,7 @@ static void fimc_device_run(void *priv)
struct fimc_frame *sf, *df;
struct fimc_dev *fimc;
unsigned long flags;
- u32 ret;
+ int ret;
if (WARN(!ctx, "Null context\n"))
return;
@@ -439,6 +439,15 @@ static int fimc_m2m_dqbuf(struct file *file, void *fh,
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
+static int fimc_m2m_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
+}
+
+
static int fimc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
@@ -607,6 +616,7 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_querybuf = fimc_m2m_querybuf,
.vidioc_qbuf = fimc_m2m_qbuf,
.vidioc_dqbuf = fimc_m2m_dqbuf,
+ .vidioc_expbuf = fimc_m2m_expbuf,
.vidioc_streamon = fimc_m2m_streamon,
.vidioc_streamoff = fimc_m2m_streamoff,
.vidioc_g_crop = fimc_m2m_g_crop,
@@ -622,7 +632,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &fimc_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
@@ -633,7 +643,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &fimc_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
index 0531ab70a94c..b4a68ecf0ca7 100644
--- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c
@@ -213,7 +213,7 @@ static int fimc_pipeline_close(struct fimc_pipeline *p)
* @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
+static int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
int i, ret;
@@ -547,7 +547,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (ret)
break;
- v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
if (flags == 0 || sensor == NULL)
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
{
struct media_entity *source, *sink;
unsigned int flags = MEDIA_LNK_FL_ENABLED;
- int i, ret;
+ int i, ret = 0;
for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
struct fimc_lite *fimc = fmd->fimc_lite[i];
@@ -1000,7 +1000,7 @@ err_md:
return ret;
}
-static int __devexit fimc_md_remove(struct platform_device *pdev)
+static int fimc_md_remove(struct platform_device *pdev)
{
struct fimc_md *fmd = platform_get_drvdata(pdev);
@@ -1015,7 +1015,7 @@ static int __devexit fimc_md_remove(struct platform_device *pdev)
static struct platform_driver fimc_md_driver = {
.probe = fimc_md_probe,
- .remove = __devexit_p(fimc_md_remove),
+ .remove = fimc_md_remove,
.driver = {
.name = "s5p-fimc-md",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-fimc/mipi-csis.c b/drivers/media/platform/s5p-fimc/mipi-csis.c
index 4c961b1b68e6..ec3fa7d75306 100644
--- a/drivers/media/platform/s5p-fimc/mipi-csis.c
+++ b/drivers/media/platform/s5p-fimc/mipi-csis.c
@@ -654,7 +654,7 @@ static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit s5pcsis_probe(struct platform_device *pdev)
+static int s5pcsis_probe(struct platform_device *pdev)
{
struct s5p_platform_mipi_csis *pdata;
struct resource *mem_res;
@@ -851,7 +851,7 @@ static int s5pcsis_runtime_resume(struct device *dev)
}
#endif
-static int __devexit s5pcsis_remove(struct platform_device *pdev)
+static int s5pcsis_remove(struct platform_device *pdev)
{
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csis_state *state = sd_to_csis_state(sd);
@@ -876,7 +876,7 @@ static const struct dev_pm_ops s5pcsis_pm_ops = {
static struct platform_driver s5pcsis_driver = {
.probe = s5pcsis_probe,
- .remove = __devexit_p(s5pcsis_remove),
+ .remove = s5pcsis_remove,
.driver = {
.name = CSIS_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 3afe879d54d7..681bc6ba149d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -412,62 +412,48 @@ leave_handle_frame:
}
/* Error handling for interrupt */
-static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
- unsigned int reason, unsigned int err)
+static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
{
- struct s5p_mfc_dev *dev;
unsigned long flags;
- /* If no context is available then all necessary
- * processing has been done. */
- if (ctx == NULL)
- return;
-
- dev = ctx->dev;
mfc_err("Interrupt Error: %08x\n", err);
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
- wake_up_dev(dev, reason, err);
- /* Error recovery is dependent on the state of context */
- switch (ctx->state) {
- case MFCINST_INIT:
- /* This error had to happen while acquireing instance */
- case MFCINST_GOT_INST:
- /* This error had to happen while parsing the header */
- case MFCINST_HEAD_PARSED:
- /* This error had to happen while setting dst buffers */
- case MFCINST_RETURN_INST:
- /* This error had to happen while releasing instance */
- clear_work_bit(ctx);
- wake_up_ctx(ctx, reason, err);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
- s5p_mfc_clock_off();
- ctx->state = MFCINST_ERROR;
- break;
- case MFCINST_FINISHING:
- case MFCINST_FINISHED:
- case MFCINST_RUNNING:
- /* It is higly probable that an error occured
- * while decoding a frame */
- clear_work_bit(ctx);
- ctx->state = MFCINST_ERROR;
- /* Mark all dst buffers as having an error */
- spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
- &ctx->vq_dst);
- /* Mark all src buffers as having an error */
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
- &ctx->vq_src);
- spin_unlock_irqrestore(&dev->irqlock, flags);
- if (test_and_clear_bit(0, &dev->hw_lock) == 0)
- BUG();
- s5p_mfc_clock_off();
- break;
- default:
- mfc_err("Encountered an error interrupt which had not been handled\n");
- break;
+ if (ctx != NULL) {
+ /* Error recovery is dependent on the state of context */
+ switch (ctx->state) {
+ case MFCINST_RES_CHANGE_INIT:
+ case MFCINST_RES_CHANGE_FLUSH:
+ case MFCINST_RES_CHANGE_END:
+ case MFCINST_FINISHING:
+ case MFCINST_FINISHED:
+ case MFCINST_RUNNING:
+ /* It is highly probable that an error occurred
+ * while decoding a frame */
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ /* Mark all dst buffers as having an error */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ &ctx->dst_queue, &ctx->vq_dst);
+ /* Mark all src buffers as having an error */
+ s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ &ctx->src_queue, &ctx->vq_src);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ wake_up_ctx(ctx, reason, err);
+ break;
+ default:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ wake_up_ctx(ctx, reason, err);
+ break;
+ }
}
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_clock_off();
+ wake_up_dev(dev, reason, err);
return;
}
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
dev->warn_start)
s5p_mfc_handle_frame(ctx, reason, err);
else
- s5p_mfc_handle_error(ctx, reason, err);
+ s5p_mfc_handle_error(dev, ctx, reason, err);
clear_bit(0, &dev->enter_suspend);
break;
@@ -1203,7 +1189,7 @@ err_res:
}
/* Remove the driver */
-static int __devexit s5p_mfc_remove(struct platform_device *pdev)
+static int s5p_mfc_remove(struct platform_device *pdev)
{
struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
@@ -1368,7 +1354,7 @@ MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
- .remove = __devexit_p(s5p_mfc_remove),
+ .remove = s5p_mfc_remove,
.id_table = mfc_driver_ids,
.driver = {
.name = S5P_MFC_NAME,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index eb6a70b0f821..6dad9a74f61c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -636,6 +636,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EINVAL;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -813,6 +826,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_crop = vidioc_g_crop,
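
An illustrative aside, not part of the patch: vidioc_expbuf above routes the request to vq_src or vq_dst depending on the buffer type, and the exported fd is typically consumed by another queue that was set up with V4L2_MEMORY_DMABUF. A minimal import-side sketch; the sink descriptor, buffer index and plane length are hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_dmabuf(int sink_fd, unsigned int index, int dmabuf_fd,
			unsigned int length)
{
	struct v4l2_plane plane;
	struct v4l2_buffer buf;

	memset(&plane, 0, sizeof(plane));
	plane.m.fd = dmabuf_fd;		/* fd obtained via VIDIOC_EXPBUF */
	plane.length = length;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	buf.memory = V4L2_MEMORY_DMABUF;
	buf.index = index;
	buf.m.planes = &plane;
	buf.length = 1;			/* single plane */

	return ioctl(sink_fd, VIDIOC_QBUF, &buf);
}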
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2af6d522f4ac..f92f6ddd739f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1165,6 +1165,19 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return ret;
}
+/* Export DMA buffer */
+static int vidioc_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_expbuf(&ctx->vq_src, eb);
+ if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_expbuf(&ctx->vq_dst, eb);
+ return -EINVAL;
+}
+
/* Stream on */
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
@@ -1542,7 +1555,7 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
}
static int vidioc_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
+ const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
@@ -1568,6 +1581,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_s_parm = vidioc_s_parm,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 367db7552289..2895333866fc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -28,7 +28,7 @@ static struct s5p_mfc_pm *pm;
static struct s5p_mfc_dev *p_dev;
#ifdef CLK_DEBUG
-atomic_t clk_ref;
+static atomic_t clk_ref;
#endif
int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 8a9cf43018f6..7c1116c73bf3 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -830,7 +830,7 @@ fail:
return -ENODEV;
}
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -979,7 +979,7 @@ fail:
return ret;
}
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_subdev *sd = dev_get_drvdata(dev);
@@ -997,7 +997,7 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
static struct platform_driver hdmi_driver __refdata = {
.probe = hdmi_probe,
- .remove = __devexit_p(hdmi_remove),
+ .remove = hdmi_remove,
.id_table = hdmi_driver_types,
.driver = {
.name = "s5p-hdmi",
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
index f67b38631801..06b5d2dbb2d9 100644
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
@@ -279,8 +279,8 @@ static const struct v4l2_subdev_ops hdmiphy_ops = {
.video = &hdmiphy_video_ops,
};
-static int __devinit hdmiphy_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct hdmiphy_ctx *ctx;
@@ -295,7 +295,7 @@ static int __devinit hdmiphy_probe(struct i2c_client *client,
return 0;
}
-static int __devexit hdmiphy_remove(struct i2c_client *client)
+static int hdmiphy_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
@@ -322,7 +322,7 @@ static struct i2c_driver hdmiphy_driver = {
.owner = THIS_MODULE,
},
.probe = hdmiphy_probe,
- .remove = __devexit_p(hdmiphy_remove),
+ .remove = hdmiphy_remove,
.id_table = hdmiphy_id,
};
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index ddb422e23550..b671e20e9318 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -290,7 +290,7 @@ static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
struct mxr_platform_data;
/** acquiring common video resources */
-int __devinit mxr_acquire_video(struct mxr_device *mdev,
+int mxr_acquire_video(struct mxr_device *mdev,
struct mxr_output_conf *output_cont, int output_count);
/** releasing common video resources */
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index ca0f29717448..02faea03aa7d 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -151,8 +151,8 @@ void mxr_power_put(struct mxr_device *mdev)
/* --------- RESOURCE MANAGEMENT -------------*/
-static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
+static int mxr_acquire_plat_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
{
struct resource *res;
int ret;
@@ -271,8 +271,8 @@ fail:
return -ENODEV;
}
-static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
+static int mxr_acquire_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
{
int ret;
ret = mxr_acquire_plat_resources(mdev, pdev);
@@ -310,8 +310,8 @@ static void mxr_release_layers(struct mxr_device *mdev)
mxr_layer_release(mdev->layer[i]);
}
-static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
- struct mxr_platform_data *pdata)
+static int mxr_acquire_layers(struct mxr_device *mdev,
+ struct mxr_platform_data *pdata)
{
mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
@@ -372,7 +372,7 @@ static const struct dev_pm_ops mxr_pm_ops = {
/* --------- DRIVER INITIALIZATION ---------- */
-static int __devinit mxr_probe(struct platform_device *pdev)
+static int mxr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxr_platform_data *pdata = dev->platform_data;
@@ -431,7 +431,7 @@ fail:
return ret;
}
-static int __devexit mxr_remove(struct platform_device *pdev)
+static int mxr_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxr_device *mdev = to_mdev(dev);
@@ -450,7 +450,7 @@ static int __devexit mxr_remove(struct platform_device *pdev)
static struct platform_driver mxr_driver __refdata = {
.probe = mxr_probe,
- .remove = __devexit_p(mxr_remove),
+ .remove = mxr_remove,
.driver = {
.name = MXR_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 0c1cd895ff66..1f3b7436511c 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -19,7 +19,6 @@
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -63,8 +62,8 @@ done:
return sd;
}
-int __devinit mxr_acquire_video(struct mxr_device *mdev,
- struct mxr_output_conf *output_conf, int output_count)
+int mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_conf, int output_count)
{
struct device *dev = mdev->dev;
struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
@@ -698,6 +697,15 @@ static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}
+static int mxr_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_expbuf(&layer->vb_queue, eb);
+}
+
static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct mxr_layer *layer = video_drvdata(file);
@@ -725,6 +733,7 @@ static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
.vidioc_querybuf = mxr_querybuf,
.vidioc_qbuf = mxr_qbuf,
.vidioc_dqbuf = mxr_dqbuf,
+ .vidioc_expbuf = mxr_expbuf,
/* Streaming control */
.vidioc_streamon = mxr_streamon,
.vidioc_streamoff = mxr_streamoff,
@@ -1093,7 +1102,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
layer->vb_queue = (struct vb2_queue) {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- .io_modes = VB2_MMAP | VB2_USERPTR,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
.drv_priv = layer,
.buf_struct_size = sizeof(struct mxr_buffer),
.ops = &mxr_video_qops,
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index ad68bbed014e..91a6939a270a 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -292,7 +292,7 @@ static const struct dev_pm_ops sdo_pm_ops = {
.runtime_resume = sdo_runtime_resume,
};
-static int __devinit sdo_probe(struct platform_device *pdev)
+static int sdo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdo_device *sdev;
@@ -419,7 +419,7 @@ fail:
return ret;
}
-static int __devexit sdo_remove(struct platform_device *pdev)
+static int sdo_remove(struct platform_device *pdev)
{
struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
struct sdo_device *sdev = sd_to_sdev(sd);
@@ -437,7 +437,7 @@ static int __devexit sdo_remove(struct platform_device *pdev)
static struct platform_driver sdo_driver __refdata = {
.probe = sdo_probe,
- .remove = __devexit_p(sdo_remove),
+ .remove = sdo_remove,
.driver = {
.name = "s5p-sdo",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index 716d4846f8bd..49191aac9634 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -315,8 +315,8 @@ static const struct v4l2_subdev_ops sii9234_ops = {
.video = &sii9234_video_ops,
};
-static int __devinit sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii9234_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct sii9234_platform_data *pdata = dev->platform_data;
@@ -378,7 +378,7 @@ fail:
return ret;
}
-static int __devexit sii9234_remove(struct i2c_client *client)
+static int sii9234_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -406,7 +406,7 @@ static struct i2c_driver sii9234_driver = {
.pm = &sii9234_pm_ops,
},
.probe = sii9234_probe,
- .remove = __devexit_p(sii9234_remove),
+ .remove = sii9234_remove,
.id_table = sii9234_id,
};
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index a1c87f0ceaab..f3c4571ac01e 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1326,7 +1326,7 @@ static const struct video_device sh_vou_video_template = {
.vfl_dir = VFL_DIR_TX,
};
-static int __devinit sh_vou_probe(struct platform_device *pdev)
+static int sh_vou_probe(struct platform_device *pdev)
{
struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
struct v4l2_rect *rect;
@@ -1461,7 +1461,7 @@ ereqmemreg:
return ret;
}
-static int __devexit sh_vou_remove(struct platform_device *pdev)
+static int sh_vou_remove(struct platform_device *pdev)
{
int irq = platform_get_irq(pdev, 0);
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
@@ -1487,7 +1487,7 @@ static int __devexit sh_vou_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_vou = {
- .remove = __devexit_p(sh_vou_remove),
+ .remove = sh_vou_remove,
.driver = {
.name = "sh-vou",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 9afe1e7bde74..cb6791e62bd4 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -19,6 +19,7 @@ config MX1_VIDEO
config VIDEO_MX1
tristate "i.MX1/i.MXL CMOS Sensor Interface driver"
+ depends on BROKEN
depends on VIDEO_DEV && ARCH_MX1 && SOC_CAMERA
select FIQ
select VIDEOBUF_DMA_CONTIG
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 6274a91c25c7..d96c8c7e01d9 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -897,7 +897,7 @@ static struct soc_camera_host_ops isi_soc_camera_host_ops = {
};
/* -----------------------------------------------------------------------*/
-static int __devexit atmel_isi_remove(struct platform_device *pdev)
+static int atmel_isi_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct atmel_isi *isi = container_of(soc_host,
@@ -921,7 +921,7 @@ static int __devexit atmel_isi_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit atmel_isi_probe(struct platform_device *pdev)
+static int atmel_isi_probe(struct platform_device *pdev)
{
unsigned int irq;
struct atmel_isi *isi;
@@ -1074,7 +1074,7 @@ err_clk_prepare_pclk:
static struct platform_driver atmel_isi_driver = {
.probe = atmel_isi_probe,
- .remove = __devexit_p(atmel_isi_remove),
+ .remove = atmel_isi_remove,
.driver = {
.name = "atmel_isi",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index 9a55f4c4c7f4..8bda2c908aba 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -41,7 +41,6 @@
#include <linux/videodev2.h>
#include <linux/platform_data/camera-mx2.h>
-#include <mach/hardware.h>
#include <asm/dma.h>
@@ -121,11 +120,13 @@
#define CSICR1 0x00
#define CSICR2 0x04
-#define CSISR (cpu_is_mx27() ? 0x08 : 0x18)
+#define CSISR_IMX25 0x18
+#define CSISR_IMX27 0x08
#define CSISTATFIFO 0x0c
#define CSIRFIFO 0x10
#define CSIRXCNT 0x14
-#define CSICR3 (cpu_is_mx27() ? 0x1C : 0x08)
+#define CSICR3_IMX25 0x08
+#define CSICR3_IMX27 0x1c
#define CSIDMASA_STATFIFO 0x20
#define CSIDMATA_STATFIFO 0x24
#define CSIDMASA_FB1 0x28
@@ -268,11 +269,17 @@ struct mx2_buffer {
struct mx2_buf_internal internal;
};
+enum mx2_camera_type {
+ IMX25_CAMERA,
+ IMX27_CAMERA,
+};
+
struct mx2_camera_dev {
struct device *dev;
struct soc_camera_host soc_host;
struct soc_camera_device *icd;
- struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg;
+ struct clk *clk_emma_ahb, *clk_emma_ipg;
+ struct clk *clk_csi_ahb, *clk_csi_per;
void __iomem *base_csi, *base_emma;
@@ -291,6 +298,9 @@ struct mx2_camera_dev {
struct mx2_buffer *fb2_active;
u32 csicr1;
+ u32 reg_csisr;
+ u32 reg_csicr3;
+ enum mx2_camera_type devtype;
struct mx2_buf_internal buf_discard[2];
void *discard_buffer;
@@ -303,6 +313,29 @@ struct mx2_camera_dev {
struct vb2_alloc_ctx *alloc_ctx;
};
+static struct platform_device_id mx2_camera_devtype[] = {
+ {
+ .name = "imx25-camera",
+ .driver_data = IMX25_CAMERA,
+ }, {
+ .name = "imx27-camera",
+ .driver_data = IMX27_CAMERA,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mx2_camera_devtype);
+
+static inline int is_imx25_camera(struct mx2_camera_dev *pcdev)
+{
+ return pcdev->devtype == IMX25_CAMERA;
+}
+
+static inline int is_imx27_camera(struct mx2_camera_dev *pcdev)
+{
+ return pcdev->devtype == IMX27_CAMERA;
+}
+
static struct mx2_buffer *mx2_ibuf_to_buf(struct mx2_buf_internal *int_buf)
{
return container_of(int_buf, struct mx2_buffer, internal);
@@ -432,11 +465,12 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
{
unsigned long flags;
- clk_disable_unprepare(pcdev->clk_csi);
+ clk_disable_unprepare(pcdev->clk_csi_ahb);
+ clk_disable_unprepare(pcdev->clk_csi_per);
writel(0, pcdev->base_csi + CSICR1);
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
writel(0, pcdev->base_emma + PRP_CNTL);
- } else if (cpu_is_mx25()) {
+ } else if (is_imx25_camera(pcdev)) {
spin_lock_irqsave(&pcdev->lock, flags);
pcdev->fb1_active = NULL;
pcdev->fb2_active = NULL;
@@ -460,13 +494,17 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
if (pcdev->icd)
return -EBUSY;
- ret = clk_prepare_enable(pcdev->clk_csi);
+ ret = clk_prepare_enable(pcdev->clk_csi_ahb);
if (ret < 0)
return ret;
+ ret = clk_prepare_enable(pcdev->clk_csi_per);
+ if (ret < 0)
+ goto exit_csi_ahb;
+
csicr1 = CSICR1_MCLKEN;
- if (cpu_is_mx27())
+ if (is_imx27_camera(pcdev))
csicr1 |= CSICR1_PRP_IF_EN | CSICR1_FCC |
CSICR1_RXFF_LEVEL(0);
@@ -480,6 +518,11 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
icd->devnum);
return 0;
+
+exit_csi_ahb:
+ clk_disable_unprepare(pcdev->clk_csi_ahb);
+
+ return ret;
}
static void mx2_camera_remove_device(struct soc_camera_device *icd)
@@ -542,7 +585,7 @@ out:
static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
{
struct mx2_camera_dev *pcdev = data;
- u32 status = readl(pcdev->base_csi + CSISR);
+ u32 status = readl(pcdev->base_csi + pcdev->reg_csisr);
if (status & CSISR_DMA_TSF_FB1_INT)
mx25_camera_frame_done(pcdev, 1, MX2_STATE_DONE);
@@ -551,7 +594,7 @@ static irqreturn_t mx25_camera_irq(int irq_csi, void *data)
/* FIXME: handle CSISR_RFF_OR_INT */
- writel(status, pcdev->base_csi + CSISR);
+ writel(status, pcdev->base_csi + pcdev->reg_csisr);
return IRQ_HANDLED;
}
@@ -636,7 +679,7 @@ static void mx2_videobuf_queue(struct vb2_buffer *vb)
buf->state = MX2_STATE_QUEUED;
list_add_tail(&buf->internal.queue, &pcdev->capture);
- if (cpu_is_mx25()) {
+ if (is_imx25_camera(pcdev)) {
u32 csicr3, dma_inten = 0;
if (pcdev->fb1_active == NULL) {
@@ -655,20 +698,20 @@ static void mx2_videobuf_queue(struct vb2_buffer *vb)
list_del(&buf->internal.queue);
buf->state = MX2_STATE_ACTIVE;
- csicr3 = readl(pcdev->base_csi + CSICR3);
+ csicr3 = readl(pcdev->base_csi + pcdev->reg_csicr3);
/* Reflash DMA */
writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
- pcdev->base_csi + CSICR3);
+ pcdev->base_csi + pcdev->reg_csicr3);
/* clear & enable interrupts */
- writel(dma_inten, pcdev->base_csi + CSISR);
+ writel(dma_inten, pcdev->base_csi + pcdev->reg_csisr);
pcdev->csicr1 |= dma_inten;
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
/* enable DMA */
csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
- writel(csicr3, pcdev->base_csi + CSICR3);
+ writel(csicr3, pcdev->base_csi + pcdev->reg_csicr3);
}
}
@@ -712,7 +755,7 @@ static void mx2_videobuf_release(struct vb2_buffer *vb)
*/
spin_lock_irqsave(&pcdev->lock, flags);
- if (cpu_is_mx25() && buf->state == MX2_STATE_ACTIVE) {
+ if (is_imx25_camera(pcdev) && buf->state == MX2_STATE_ACTIVE) {
if (pcdev->fb1_active == buf) {
pcdev->csicr1 &= ~CSICR1_FB1_DMA_INTEN;
writel(0, pcdev->base_csi + CSIDMASA_FB1);
@@ -835,7 +878,7 @@ static int mx2_start_streaming(struct vb2_queue *q, unsigned int count)
unsigned long phys;
int bytesperline;
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
unsigned long flags;
if (count < 2)
return -EINVAL;
@@ -934,7 +977,7 @@ static int mx2_stop_streaming(struct vb2_queue *q)
void *b;
u32 cntl;
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
spin_lock_irqsave(&pcdev->lock, flags);
cntl = readl(pcdev->base_emma + PRP_CNTL);
@@ -1086,11 +1129,11 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
if (bytesperline < 0)
return bytesperline;
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
ret = mx27_camera_emma_prp_reset(pcdev);
if (ret)
return ret;
- } else if (cpu_is_mx25()) {
+ } else if (is_imx25_camera(pcdev)) {
writel((bytesperline * icd->user_height) >> 2,
pcdev->base_csi + CSIRXCNT);
writel((bytesperline << 16) | icd->user_height,
@@ -1397,7 +1440,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
/* FIXME: implement MX27 limits */
/* limit to MX25 hardware capabilities */
- if (cpu_is_mx25()) {
+ if (is_imx25_camera(pcdev)) {
if (xlate->host_fmt->bits_per_sample <= 8)
width_limit = 0xffff * 4;
else
@@ -1649,7 +1692,7 @@ static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
return IRQ_HANDLED;
}
-static int __devinit mx27_camera_emma_init(struct platform_device *pdev)
+static int mx27_camera_emma_init(struct platform_device *pdev)
{
struct mx2_camera_dev *pcdev = platform_get_drvdata(pdev);
struct resource *res_emma;
@@ -1707,7 +1750,7 @@ out:
return err;
}
-static int __devinit mx2_camera_probe(struct platform_device *pdev)
+static int mx2_camera_probe(struct platform_device *pdev)
{
struct mx2_camera_dev *pcdev;
struct resource *res_csi;
@@ -1731,10 +1774,31 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
goto exit;
}
- pcdev->clk_csi = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(pcdev->clk_csi)) {
- dev_err(&pdev->dev, "Could not get csi clock\n");
- err = PTR_ERR(pcdev->clk_csi);
+ pcdev->devtype = pdev->id_entry->driver_data;
+ switch (pcdev->devtype) {
+ case IMX25_CAMERA:
+ pcdev->reg_csisr = CSISR_IMX25;
+ pcdev->reg_csicr3 = CSICR3_IMX25;
+ break;
+ case IMX27_CAMERA:
+ pcdev->reg_csisr = CSISR_IMX27;
+ pcdev->reg_csicr3 = CSICR3_IMX27;
+ break;
+ default:
+ break;
+ }
+
+ pcdev->clk_csi_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(pcdev->clk_csi_ahb)) {
+ dev_err(&pdev->dev, "Could not get csi ahb clock\n");
+ err = PTR_ERR(pcdev->clk_csi_ahb);
+ goto exit;
+ }
+
+ pcdev->clk_csi_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(pcdev->clk_csi_per)) {
+ dev_err(&pdev->dev, "Could not get csi per clock\n");
+ err = PTR_ERR(pcdev->clk_csi_per);
goto exit;
}
@@ -1744,12 +1808,13 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
pcdev->platform_flags = pcdev->pdata->flags;
- rate = clk_round_rate(pcdev->clk_csi, pcdev->pdata->clk * 2);
+ rate = clk_round_rate(pcdev->clk_csi_per,
+ pcdev->pdata->clk * 2);
if (rate <= 0) {
err = -ENODEV;
goto exit;
}
- err = clk_set_rate(pcdev->clk_csi, rate);
+ err = clk_set_rate(pcdev->clk_csi_per, rate);
if (err < 0)
goto exit;
}
@@ -1768,7 +1833,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
pcdev->dev = &pdev->dev;
platform_set_drvdata(pdev, pcdev);
- if (cpu_is_mx25()) {
+ if (is_imx25_camera(pcdev)) {
err = devm_request_irq(&pdev->dev, irq_csi, mx25_camera_irq, 0,
MX2_CAM_DRV_NAME, pcdev);
if (err) {
@@ -1777,7 +1842,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
}
}
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
err = mx27_camera_emma_init(pdev);
if (err)
goto exit;
@@ -1794,7 +1859,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
- if (cpu_is_mx25())
+ if (is_imx25_camera(pcdev))
pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
@@ -1807,14 +1872,14 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
goto exit_free_emma;
dev_info(&pdev->dev, "MX2 Camera (CSI) driver probed, clock frequency: %ld\n",
- clk_get_rate(pcdev->clk_csi));
+ clk_get_rate(pcdev->clk_csi_per));
return 0;
exit_free_emma:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
eallocctx:
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
clk_disable_unprepare(pcdev->clk_emma_ipg);
clk_disable_unprepare(pcdev->clk_emma_ahb);
}
@@ -1822,7 +1887,7 @@ exit:
return err;
}
-static int __devexit mx2_camera_remove(struct platform_device *pdev)
+static int mx2_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct mx2_camera_dev *pcdev = container_of(soc_host,
@@ -1832,7 +1897,7 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
- if (cpu_is_mx27()) {
+ if (is_imx27_camera(pcdev)) {
clk_disable_unprepare(pcdev->clk_emma_ipg);
clk_disable_unprepare(pcdev->clk_emma_ahb);
}
@@ -1846,7 +1911,8 @@ static struct platform_driver mx2_camera_driver = {
.driver = {
.name = MX2_CAM_DRV_NAME,
},
- .remove = __devexit_p(mx2_camera_remove),
+ .id_table = mx2_camera_devtype,
+ .remove = mx2_camera_remove,
};
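
An illustrative aside, not part of the patch: the new mx2_camera_devtype[] id table replaces the cpu_is_mx25()/cpu_is_mx27() runtime checks, so the hardware variant is now fixed by the platform device name. A minimal sketch of the registering side under that assumption; every field other than the name is hypothetical:

static struct platform_device imx27_camera_device = {
	.name = "imx27-camera",	/* matched against mx2_camera_devtype[] */
	.id   = 0,
	/* resources and platform data supplied by the SoC support code */
};

/*
 * After platform_device_register(&imx27_camera_device) the driver core
 * stores the matching table entry, so in mx2_camera_probe()
 * pdev->id_entry->driver_data == IMX27_CAMERA and the i.MX27 CSISR/CSICR3
 * register offsets are selected without any runtime CPU check.
 */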
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 261f6e9e1b17..45aef1053a49 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -17,6 +17,7 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/dma/ipu-dma.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
@@ -24,7 +25,6 @@
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
-#include <mach/ipu.h>
#include <linux/platform_data/camera-mx3.h>
#include <linux/platform_data/dma-imx.h>
@@ -1143,7 +1143,7 @@ static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
.set_bus_param = mx3_camera_set_bus_param,
};
-static int __devinit mx3_camera_probe(struct platform_device *pdev)
+static int mx3_camera_probe(struct platform_device *pdev)
{
struct mx3_camera_dev *mx3_cam;
struct resource *res;
@@ -1246,7 +1246,7 @@ egetres:
return err;
}
-static int __devexit mx3_camera_remove(struct platform_device *pdev)
+static int mx3_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct mx3_camera_dev *mx3_cam = container_of(soc_host,
@@ -1279,7 +1279,7 @@ static struct platform_driver mx3_camera_driver = {
.name = MX3_CAM_DRV_NAME,
},
.probe = mx3_camera_probe,
- .remove = __devexit_p(mx3_camera_remove),
+ .remove = mx3_camera_remove,
};
module_platform_driver(mx3_camera_driver);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 13636a585106..39a77f0b8860 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -34,12 +34,13 @@
#include <media/videobuf-dma-contig.h>
#include <media/videobuf-dma-sg.h>
-#include <plat/dma.h>
+#include <linux/omap-dma.h>
#define DRIVER_NAME "omap1-camera"
#define DRIVER_VERSION "0.0.2"
+#define OMAP_DMA_CAMERA_IF_RX 20
/*
* ---------------------------------------------------------------------------
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 3434ffe79c6e..523330d00dee 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1651,7 +1651,7 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
.set_bus_param = pxa_camera_set_bus_param,
};
-static int __devinit pxa_camera_probe(struct platform_device *pdev)
+static int pxa_camera_probe(struct platform_device *pdev)
{
struct pxa_camera_dev *pcdev;
struct resource *res;
@@ -1801,7 +1801,7 @@ exit:
return err;
}
-static int __devexit pxa_camera_remove(struct platform_device *pdev)
+static int pxa_camera_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct pxa_camera_dev *pcdev = container_of(soc_host,
@@ -1840,7 +1840,7 @@ static struct platform_driver pxa_camera_driver = {
.pm = &pxa_camera_pm,
},
.probe = pxa_camera_probe,
- .remove = __devexit_p(pxa_camera_remove),
+ .remove = pxa_camera_remove,
};
module_platform_driver(pxa_camera_driver);
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 2d8861c0e8f2..ebbc126e71a6 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -2071,7 +2071,7 @@ static int bus_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
+static int sh_mobile_ceu_probe(struct platform_device *pdev)
{
struct sh_mobile_ceu_dev *pcdev;
struct resource *res;
@@ -2258,7 +2258,7 @@ exit:
return err;
}
-static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
+static int sh_mobile_ceu_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
@@ -2307,7 +2307,7 @@ static struct platform_driver sh_mobile_ceu_driver = {
.pm = &sh_mobile_ceu_dev_pm_ops,
},
.probe = sh_mobile_ceu_probe,
- .remove = __devexit_p(sh_mobile_ceu_remove),
+ .remove = sh_mobile_ceu_remove,
};
static int __init sh_mobile_ceu_init(void)
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
index 05286500b4d4..a17aba9a0104 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
@@ -294,7 +294,7 @@ static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
.video = &sh_csi2_subdev_video_ops,
};
-static __devinit int sh_csi2_probe(struct platform_device *pdev)
+static int sh_csi2_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int irq;
@@ -366,7 +366,7 @@ ereqreg:
return ret;
}
-static __devexit int sh_csi2_remove(struct platform_device *pdev)
+static int sh_csi2_remove(struct platform_device *pdev)
{
struct sh_csi2 *priv = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -382,7 +382,7 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_csi2_pdrv = {
- .remove = __devexit_p(sh_csi2_remove),
+ .remove = sh_csi2_remove,
.probe = sh_csi2_probe,
.driver = {
.name = "sh-mobile-csi2",
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index d3f0b84e2d70..2ec90eae6ba0 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -645,11 +645,17 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct soc_camera_device *icd = file->private_data;
- int err = -EINVAL;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ dev_dbg(icd->pdev, "read called, buf %p\n", buf);
+
+ if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ)
+ return vb2_read(&icd->vb2_vidq, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
dev_err(icd->pdev, "camera device read not implemented\n");
- return err;
+ return -EINVAL;
}
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1048,10 +1054,8 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
- int ret;
-
icd->parent = ici->v4l2_dev.dev;
- ret = soc_camera_probe(icd);
+ soc_camera_probe(icd);
}
}
@@ -1526,7 +1530,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
return 0;
}
-static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
+static int soc_camera_pdrv_probe(struct platform_device *pdev)
{
struct soc_camera_link *icl = pdev->dev.platform_data;
struct soc_camera_device *icd;
@@ -1554,7 +1558,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
* hot-pluggable. Now we know, that all our users - hosts and devices have
* been unloaded already
*/
-static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
+static int soc_camera_pdrv_remove(struct platform_device *pdev)
{
struct soc_camera_device *icd = platform_get_drvdata(pdev);
@@ -1568,7 +1572,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
static struct platform_driver __refdata soc_camera_pdrv = {
.probe = soc_camera_pdrv_probe,
- .remove = __devexit_p(soc_camera_pdrv_remove),
+ .remove = soc_camera_pdrv_remove,
.driver = {
.name = "soc-camera-pdrv",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
index 02194c056b00..d854d08a6c7f 100644
--- a/drivers/media/platform/timblogiw.c
+++ b/drivers/media/platform/timblogiw.c
@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
/* Platform device functions */
-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
.vidioc_querycap = timblogiw_querycap,
.vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
.vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
.vidioc_enum_framesizes = timblogiw_enum_framesizes,
};
-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
+static struct v4l2_file_operations timblogiw_fops = {
.owner = THIS_MODULE,
.open = timblogiw_open,
.release = timblogiw_close,
@@ -777,7 +777,7 @@ static __devinitconst struct v4l2_file_operations timblogiw_fops = {
.poll = timblogiw_poll,
};
-static __devinitconst struct video_device timblogiw_template = {
+static struct video_device timblogiw_template = {
.name = TIMBLOGIWIN_NAME,
.fops = &timblogiw_fops,
.ioctl_ops = &timblogiw_ioctl_ops,
@@ -786,7 +786,7 @@ static __devinitconst struct video_device timblogiw_template = {
.tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
};
-static int __devinit timblogiw_probe(struct platform_device *pdev)
+static int timblogiw_probe(struct platform_device *pdev)
{
int err;
struct timblogiw *lw = NULL;
@@ -848,7 +848,7 @@ err:
return err;
}
-static int __devexit timblogiw_remove(struct platform_device *pdev)
+static int timblogiw_remove(struct platform_device *pdev)
{
struct timblogiw *lw = platform_get_drvdata(pdev);
@@ -869,7 +869,7 @@ static struct platform_driver timblogiw_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timblogiw_probe,
- .remove = __devexit_p(timblogiw_remove),
+ .remove = timblogiw_remove,
};
module_platform_driver(timblogiw_platform_driver);
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index eb404c2ce270..63e8c3461239 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -1324,7 +1324,7 @@ static struct video_device viacam_v4l_template = {
#define VIACAM_SERIAL_CREG 0x46
#define VIACAM_SERIAL_BIT 0x40
-static __devinit bool viacam_serial_is_enabled(void)
+static bool viacam_serial_is_enabled(void)
{
struct pci_bus *pbus = pci_find_bus(0, 0);
u8 cbyte;
@@ -1353,7 +1353,7 @@ static struct ov7670_config sensor_cfg = {
.clock_speed = 90,
};
-static __devinit int viacam_probe(struct platform_device *pdev)
+static int viacam_probe(struct platform_device *pdev)
{
int ret;
struct i2c_adapter *sensor_adapter;
@@ -1490,7 +1490,7 @@ out_unregister:
return ret;
}
-static __devexit int viacam_remove(struct platform_device *pdev)
+static int viacam_remove(struct platform_device *pdev)
{
struct via_camera *cam = via_cam_info;
struct viafb_dev *viadev = pdev->dev.platform_data;
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index b366b050a3dd..0d59b9db83cb 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -39,7 +39,6 @@
/* Wake up at about 30 fps */
#define WAKE_NUMERATOR 30
#define WAKE_DENOMINATOR 1001
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
@@ -352,11 +351,6 @@ static void precalculate_bars(struct vivi_dev *dev)
}
}
-#define TSTAMP_MIN_Y 24
-#define TSTAMP_MAX_Y (TSTAMP_MIN_Y + 15)
-#define TSTAMP_INPUT_X 10
-#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-
/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
{
@@ -1308,7 +1302,7 @@ static int __init vivi_create_instance(int inst)
/* initialize queue */
q = &dev->vb_vidq;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivi_buffer);
q->ops = &vivi_video_qops;
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 12c70e876f58..a739ad492e7b 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -82,7 +82,7 @@ static struct radio_isa_card *rtrack_alloc(void)
#define AIMS_BIT_VOL_UP (1 << 6) /* active low */
#define AIMS_BIT_VOL_DN (1 << 7) /* active low */
-void rtrack_set_pins(void *handle, u8 pins)
+static void rtrack_set_pins(void *handle, u8 pins)
{
struct radio_isa_card *isa = handle;
struct rtrack *rt = container_of(isa, struct rtrack, isa);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 697a421c9940..643d80ac28fb 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -645,7 +645,8 @@ static int __init cadet_init(void)
set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+ res = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr);
+ if (res < 0)
goto err_hdl;
v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io);
return 0;
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 3c0067de4324..84b7b9f4385e 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -191,7 +191,7 @@ static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io)
return false;
}
-struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
+static struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
struct device *pdev)
{
struct v4l2_device *v4l2_dev;
@@ -207,8 +207,9 @@ struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
return isa;
}
-int radio_isa_common_probe(struct radio_isa_card *isa, struct device *pdev,
- int radio_nr, unsigned region_size)
+static int radio_isa_common_probe(struct radio_isa_card *isa,
+ struct device *pdev,
+ int radio_nr, unsigned region_size)
{
const struct radio_isa_driver *drv = isa->drv;
const struct radio_isa_ops *ops = drv->ops;
@@ -287,7 +288,8 @@ err_dev_reg:
return res;
}
-int radio_isa_common_remove(struct radio_isa_card *isa, unsigned region_size)
+static int radio_isa_common_remove(struct radio_isa_card *isa,
+ unsigned region_size)
{
const struct radio_isa_ops *ops = isa->drv->ops;
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index e10e525f33e5..296941a9ae25 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->vdev.vfl_dir = VFL_DIR_TX;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index b415211d0c4b..bd4d3a7cdadd 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -114,7 +114,8 @@ static struct snd_tea575x_ops maxiradio_tea_ops = {
.set_direction = maxiradio_tea575x_set_direction,
};
-static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int maxiradio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct maxiradio *dev;
struct v4l2_device *v4l2_dev;
@@ -172,7 +173,7 @@ errfr:
return retval;
}
-static void __devexit maxiradio_remove(struct pci_dev *pdev)
+static void maxiradio_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct maxiradio *dev = to_maxiradio(v4l2_dev);
@@ -196,7 +197,7 @@ static struct pci_driver maxiradio_driver = {
.name = "radio-maxiradio",
.id_table = maxiradio_pci_tbl,
.probe = maxiradio_probe,
- .remove = __devexit_p(maxiradio_remove),
+ .remove = maxiradio_remove,
};
static int __init maxiradio_init(void)
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 227dcdb54df3..637a55564958 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -64,7 +64,7 @@ bool pnp_attached;
#define FMI_BIT_VOL_SW (1 << 3)
#define FMI_BIT_TUN_STRQ (1 << 4)
-void fmi_set_pins(void *handle, u8 pins)
+static void fmi_set_pins(void *handle, u8 pins)
{
struct fmi *fmi = handle;
u8 bits = FMI_BIT_TUN_STRQ;
@@ -265,7 +265,7 @@ static const struct v4l2_ioctl_ops fmi_ioctl_ops = {
};
/* ladis: this is my card. does any other types exist? */
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
/* SF16-FMI */
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad10), 0},
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 4efcbec74c52..9c0990457a7c 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -197,13 +197,13 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
return 0;
}
-static struct pnp_device_id fmr2_pnp_ids[] __devinitdata = {
+static struct pnp_device_id fmr2_pnp_ids[] = {
{ .id = "MFRad13" }, /* tuner subdevice of SF16-FMD2 */
{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp, fmr2_pnp_ids);
-static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
+static int fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
{
int err, i;
char *card_name = fmr2->is_fmd2 ? "SF16-FMD2" : "SF16-FMR2";
@@ -249,7 +249,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
return 0;
}
-static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
+static int fmr2_isa_match(struct device *pdev, unsigned int ndev)
{
struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
if (!fmr2)
@@ -265,8 +265,7 @@ static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
return 1;
}
-static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int fmr2_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
int ret;
struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
@@ -285,7 +284,7 @@ static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
return 0;
}
-static void __devexit fmr2_remove(struct fmr2 *fmr2)
+static void fmr2_remove(struct fmr2 *fmr2)
{
snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
@@ -293,7 +292,7 @@ static void __devexit fmr2_remove(struct fmr2 *fmr2)
kfree(fmr2);
}
-static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
+static int fmr2_isa_remove(struct device *pdev, unsigned int ndev)
{
fmr2_remove(dev_get_drvdata(pdev));
dev_set_drvdata(pdev, NULL);
@@ -301,7 +300,7 @@ static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
return 0;
}
-static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
+static void fmr2_pnp_remove(struct pnp_dev *pdev)
{
fmr2_remove(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -309,7 +308,7 @@ static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
struct isa_driver fmr2_isa_driver = {
.match = fmr2_isa_match,
- .remove = __devexit_p(fmr2_isa_remove),
+ .remove = fmr2_isa_remove,
.driver = {
.name = "radio-sf16fmr2",
},
@@ -319,7 +318,7 @@ struct pnp_driver fmr2_pnp_driver = {
.name = "radio-sf16fmr2",
.id_table = fmr2_pnp_ids,
.probe = fmr2_pnp_probe,
- .remove = __devexit_p(fmr2_pnp_remove),
+ .remove = fmr2_pnp_remove,
};
static int __init fmr2_init(void)
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index a082e400ed0f..1507c9d508d7 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
.name = "radio-si4713",
.release = video_device_release,
.ioctl_ops = &radio_si4713_ioctl_ops,
+ .vfl_dir = VFL_DIR_TX,
};
/* Platform driver interface */
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index d0c905310071..1978516af67e 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -145,7 +145,7 @@ struct tea5764_device {
};
/* I2C code related */
-int tea5764_i2c_read(struct tea5764_device *radio)
+static int tea5764_i2c_read(struct tea5764_device *radio)
{
int i;
u16 *p = (u16 *) &radio->regs;
@@ -165,7 +165,7 @@ int tea5764_i2c_read(struct tea5764_device *radio)
return 0;
}
-int tea5764_i2c_write(struct tea5764_device *radio)
+static int tea5764_i2c_write(struct tea5764_device *radio)
{
struct tea5764_write_regs wr;
struct tea5764_regs *r = &radio->regs;
@@ -493,8 +493,8 @@ static struct video_device tea5764_radio_template = {
};
/* I2C probe: check if the device exists and register with v4l if it is */
-static int __devinit tea5764_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tea5764_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tea5764_device *radio;
struct tea5764_regs *r;
@@ -552,7 +552,7 @@ errfr:
return ret;
}
-static int __devexit tea5764_i2c_remove(struct i2c_client *client)
+static int tea5764_i2c_remove(struct i2c_client *client)
{
struct tea5764_device *radio = i2c_get_clientdata(client);
@@ -578,7 +578,7 @@ static struct i2c_driver tea5764_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = tea5764_i2c_probe,
- .remove = __devexit_p(tea5764_i2c_remove),
+ .remove = tea5764_i2c_remove,
.id_table = tea5764_id,
};
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 5cf07779f4bb..b87effeb5dc6 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -145,7 +145,7 @@ static const struct v4l2_file_operations timbradio_fops = {
.unlocked_ioctl = video_ioctl2,
};
-static int __devinit timbradio_probe(struct platform_device *pdev)
+static int timbradio_probe(struct platform_device *pdev)
{
struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
struct timbradio *tr;
@@ -201,7 +201,7 @@ err:
return err;
}
-static int __devexit timbradio_remove(struct platform_device *pdev)
+static int timbradio_remove(struct platform_device *pdev)
{
struct timbradio *tr = platform_get_drvdata(pdev);
@@ -219,7 +219,7 @@ static struct platform_driver timbradio_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timbradio_probe,
- .remove = __devexit_p(timbradio_remove),
+ .remove = timbradio_remove,
};
module_platform_driver(timbradio_platform_driver);
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 9b0c9fa0beb8..cabbe3adf435 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
.ioctl_ops = &wl1273_ioctl_ops,
.name = WL1273_FM_DRIVER_NAME,
.release = wl1273_vdev_release,
+ .vfl_dir = VFL_DIR_TX,
};
static int wl1273_fm_radio_remove(struct platform_device *pdev)
@@ -1990,7 +1991,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
+static int wl1273_fm_radio_probe(struct platform_device *pdev)
{
struct wl1273_core **core = pdev->dev.platform_data;
struct wl1273_device *radio;
@@ -2145,7 +2146,7 @@ pdata_err:
static struct platform_driver wl1273_fm_radio_driver = {
.probe = wl1273_fm_radio_probe,
- .remove = __devexit_p(wl1273_fm_radio_remove),
+ .remove = wl1273_fm_radio_remove,
.driver = {
.name = "wl1273_fm_radio",
.owner = THIS_MODULE,
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 54db36ccb9ee..06c06cc9ff25 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -373,8 +373,8 @@ static const struct v4l2_subdev_ops saa7706h_ops = {
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
-static int __devinit saa7706h_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int saa7706h_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct saa7706h_state *state;
struct v4l2_subdev *sd;
@@ -418,7 +418,7 @@ err:
return err;
}
-static int __devexit saa7706h_remove(struct i2c_client *client)
+static int saa7706h_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -441,7 +441,7 @@ static struct i2c_driver saa7706h_driver = {
.name = DRIVER_NAME,
},
.probe = saa7706h_probe,
- .remove = __devexit_p(saa7706h_remove),
+ .remove = saa7706h_remove,
.id_table = saa7706h_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 4ef55ec8045e..e5fc9acd0c4f 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -347,8 +347,8 @@ end:
/*
* si470x_i2c_probe - probe for the device
*/
-static int __devinit si470x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int si470x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct si470x_device *radio;
int retval = 0;
@@ -451,7 +451,7 @@ err_initial:
/*
* si470x_i2c_remove - remove the device
*/
-static __devexit int si470x_i2c_remove(struct i2c_client *client)
+static int si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
@@ -514,7 +514,7 @@ static struct i2c_driver si470x_i2c_driver = {
#endif
},
.probe = si470x_i2c_probe,
- .remove = __devexit_p(si470x_i2c_remove),
+ .remove = si470x_i2c_remove,
.id_table = si470x_i2c_id,
};
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index e3079c142c5f..bd61b3bd0ca3 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1769,7 +1769,7 @@ exit:
}
/* si4713_ioctl - deal with private ioctls (only rnl for now) */
-long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct si4713_device *sdev = to_si4713_device(sd);
struct si4713_rnl *rnl = arg;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 06d47e5cce9f..b18c2dc268ba 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -165,8 +165,8 @@ static const struct v4l2_subdev_ops tef6862_ops = {
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
-static int __devinit tef6862_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tef6862_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tef6862_state *state;
struct v4l2_subdev *sd;
@@ -189,7 +189,7 @@ static int __devinit tef6862_probe(struct i2c_client *client,
return 0;
}
-static int __devexit tef6862_remove(struct i2c_client *client)
+static int tef6862_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -211,7 +211,7 @@ static struct i2c_driver tef6862_driver = {
.name = DRIVER_NAME,
},
.probe = tef6862_probe,
- .remove = __devexit_p(tef6862_remove),
+ .remove = tef6862_remove,
.id_table = tef6862_id,
};
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index d84ad9dad323..aac0f025f767 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -60,7 +60,7 @@
#define fmdbg(format, ...) \
printk(KERN_DEBUG "fmdrv: " format, ## __VA_ARGS__)
#else /* DEBUG */
-#define fmdbg(format, ...)
+#define fmdbg(format, ...) do {} while(0)
#endif
enum {
FM_MODE_OFF,
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index bf867a6b5ea0..602ef7ac8c24 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -742,7 +742,7 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
break;
- if (blk_idx < FM_RDS_BLK_IDX_A || blk_idx > FM_RDS_BLK_IDX_D) {
+ if (blk_idx > FM_RDS_BLK_IDX_D) {
fmdbg("Block sequence mismatch\n");
rds->last_blk_idx = -1;
break;
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index 3dd9fc097c47..ebf09a3927de 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -305,7 +305,7 @@ int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
- if (vol_to_set < FM_RX_VOLUME_MIN || vol_to_set > FM_RX_VOLUME_MAX) {
+ if (vol_to_set > FM_RX_VOLUME_MAX) {
fmerr("Volume is not within(%d-%d) range\n",
FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX);
return -EINVAL;
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 048de4536036..0a8ee8fab924 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
.ioctl_ops = &fm_drv_ioctl_ops,
.name = FM_DRV_NAME,
.release = video_device_release,
+ /*
+ * To ensure both the tuner and modulator ioctls are accessible we
+ * set the vfl_dir to M2M to indicate this.
+ *
+ * It is not really a mem2mem device of course, but it can both receive
+ * and transmit using the same radio device. It's the only radio driver
+ * that does this and it should really be split in two radio devices,
+ * but that would affect applications using this driver.
+ */
+ .vfl_dir = VFL_DIR_M2M,
};
int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 49bb356ed14c..2d6fb26a0170 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -784,7 +784,7 @@ static void ati_remote_rc_init(struct ati_remote *ati_remote)
rdev->priv = ati_remote;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER;
+ rdev->allowed_protos = RC_BIT_OTHER;
rdev->driver_name = "ati_remote";
rdev->open = ati_remote_rc_open;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d05ac15b5de4..cef04786b52f 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -329,7 +329,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
}
/* Sense current received carrier */
-void ene_rx_sense_carrier(struct ene_device *dev)
+static void ene_rx_sense_carrier(struct ene_device *dev)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -1003,7 +1003,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
- goto error1;
+ goto failure;
/* validate resources */
error = -ENODEV;
@@ -1014,10 +1014,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
- goto error;
+ goto failure;
if (!pnp_irq_valid(pnp_dev, 0))
- goto error;
+ goto failure;
spin_lock_init(&dev->hw_lock);
@@ -1033,7 +1033,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
- goto error;
+ goto failure;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
@@ -1046,7 +1046,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
learning_mode_force = false;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
@@ -1078,30 +1078,27 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
- dev->hw_io = -1;
- dev->irq = -1;
- goto error;
+ goto failure;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
- dev->irq = -1;
- goto error;
+ goto failure2;
}
error = rc_register_device(rdev);
if (error < 0)
- goto error;
+ goto failure3;
pr_notice("driver has been successfully loaded\n");
return 0;
-error:
- if (dev && dev->irq >= 0)
- free_irq(dev->irq, dev);
- if (dev && dev->hw_io >= 0)
- release_region(dev->hw_io, ENE_IO_SIZE);
-error1:
+
+failure3:
+ free_irq(dev->irq, dev);
+failure2:
+ release_region(dev->hw_io, ENE_IO_SIZE);
+failure:
rc_free_device(rdev);
kfree(dev);
return error;
@@ -1175,7 +1172,7 @@ static struct pnp_driver ene_driver = {
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = ene_probe,
- .remove = __devexit_p(ene_remove),
+ .remove = ene_remove,
#ifdef CONFIG_PM
.suspend = ene_suspend,
.resume = ene_resume,
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 52fd7696b1ba..1df410e13688 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -541,7 +541,7 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
/* Set up the rc device */
rdev->priv = fintek;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = fintek_open;
rdev->close = fintek_close;
rdev->input_name = FINTEK_DESCRIPTION;
@@ -590,7 +590,7 @@ failure:
return ret;
}
-static void __devexit fintek_remove(struct pnp_dev *pdev)
+static void fintek_remove(struct pnp_dev *pdev)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -678,18 +678,18 @@ static struct pnp_driver fintek_driver = {
.id_table = fintek_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = fintek_probe,
- .remove = __devexit_p(fintek_remove),
+ .remove = fintek_remove,
.suspend = fintek_suspend,
.resume = fintek_resume,
.shutdown = fintek_shutdown,
};
-int fintek_init(void)
+static int fintek_init(void)
{
return pnp_register_driver(&fintek_driver);
}
-void fintek_exit(void)
+static void fintek_exit(void)
{
pnp_unregister_driver(&fintek_driver);
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 04cb272db16a..4f71a7d1f019 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -58,7 +58,7 @@ err_get_value:
return IRQ_HANDLED;
}
-static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
+static int gpio_ir_recv_probe(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
@@ -95,7 +95,7 @@ static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
if (pdata->allowed_protos)
rcdev->allowed_protos = pdata->allowed_protos;
else
- rcdev->allowed_protos = RC_TYPE_ALL;
+ rcdev->allowed_protos = RC_BIT_ALL;
rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
@@ -140,7 +140,7 @@ err_allocate_device:
return rc;
}
-static int __devexit gpio_ir_recv_remove(struct platform_device *pdev)
+static int gpio_ir_recv_remove(struct platform_device *pdev)
{
struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
@@ -188,7 +188,7 @@ static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
- .remove = __devexit_p(gpio_ir_recv_remove),
+ .remove = gpio_ir_recv_remove,
.driver = {
.name = GPIO_IR_DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 51d7057aca04..b99b096d8a8f 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -425,8 +425,8 @@ static void iguanair_close(struct rc_dev *rdev)
mutex_unlock(&ir->lock);
}
-static int __devinit iguanair_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int iguanair_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct iguanair *ir;
@@ -499,7 +499,7 @@ static int __devinit iguanair_probe(struct usb_interface *intf,
usb_to_input_id(ir->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = ir;
rc->open = iguanair_open;
rc->close = iguanair_close;
@@ -538,7 +538,7 @@ out:
return ret;
}
-static void __devexit iguanair_disconnect(struct usb_interface *intf)
+static void iguanair_disconnect(struct usb_interface *intf)
{
struct iguanair *ir = usb_get_intfdata(intf);
@@ -604,7 +604,7 @@ static const struct usb_device_id iguanair_table[] = {
static struct usb_driver iguanair_driver = {
.name = DRIVER_NAME,
.probe = iguanair_probe,
- .disconnect = __devexit_p(iguanair_disconnect),
+ .disconnect = iguanair_disconnect,
.suspend = iguanair_suspend,
.resume = iguanair_resume,
.reset_resume = iguanair_resume,
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 5dd0386604f0..78d109b978dd 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -255,7 +255,7 @@ static struct usb_device_id imon_usb_id_table[] = {
static struct usb_driver imon_driver = {
.name = MOD_NAME,
.probe = imon_probe,
- .disconnect = __devexit_p(imon_disconnect),
+ .disconnect = imon_disconnect,
.suspend = imon_suspend,
.resume = imon_resume,
.id_table = imon_usb_id_table,
@@ -1001,7 +1001,7 @@ static void imon_touch_display_timeout(unsigned long data)
* it is not, so we must acquire it prior to calling send_packet, which
* requires that the lock is held.
*/
-static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
int retval;
struct imon_context *ictx = rc->priv;
@@ -1010,31 +1010,27 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
- if (rc_type && !(rc_type & rc->allowed_protos))
+ if (*rc_type && !(*rc_type & rc->allowed_protos))
dev_warn(dev, "Looks like you're trying to use an IR protocol "
"this device does not support\n");
- switch (rc_type) {
- case RC_TYPE_RC6:
+ if (*rc_type & RC_BIT_RC6_MCE) {
dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
ir_proto_packet[0] = 0x01;
- break;
- case RC_TYPE_UNKNOWN:
- case RC_TYPE_OTHER:
+ *rc_type = RC_BIT_RC6_MCE;
+ } else if (*rc_type & RC_BIT_OTHER) {
dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
- default:
+ *rc_type = RC_BIT_OTHER;
+ } else {
dev_warn(dev, "Unsupported IR protocol specified, overriding "
"to iMON IR protocol\n");
if (!pad_stabilize)
dev_dbg(dev, "PAD stabilize functionality disabled\n");
/* ir_proto_packet[0] = 0x00; // already the default */
- rc_type = RC_TYPE_OTHER;
- break;
+ *rc_type = RC_BIT_OTHER;
}
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
@@ -1048,7 +1044,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
if (retval)
goto out;
- ictx->rc_type = rc_type;
+ ictx->rc_type = *rc_type;
ictx->pad_mouse = false;
out:
@@ -1323,7 +1319,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
rel_x = buf[2];
rel_y = buf[3];
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
@@ -1390,7 +1386,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
buf[0] = 0x01;
buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
- if (ictx->rc_type == RC_TYPE_OTHER && pad_stabilize) {
+ if (ictx->rc_type == RC_BIT_OTHER && pad_stabilize) {
dir = stabilize((int)rel_x, (int)rel_y,
timeout, threshold);
if (!dir) {
@@ -1511,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
kc = imon_panel_key_lookup(scancode);
} else {
scancode = be32_to_cpu(*((u32 *)buf));
- if (ictx->rc_type == RC_TYPE_RC6) {
+ if (ictx->rc_type == RC_BIT_RC6_MCE) {
ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
ktype = IMON_KEY_MCE;
@@ -1744,7 +1740,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
{
u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
- u64 allowed_protos = RC_TYPE_OTHER;
+ u64 allowed_protos = RC_BIT_OTHER;
switch (ffdc_cfg_byte) {
/* iMON Knob, no display, iMON IR + vol knob */
@@ -1775,13 +1771,13 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
/* iMON LCD, MCE IR */
case 0x9f:
dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_LCD;
- allowed_protos = RC_TYPE_RC6;
+ allowed_protos = RC_BIT_RC6_MCE;
break;
default:
dev_info(ictx->dev, "Unknown 0xffdc device, "
@@ -1789,7 +1785,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
/* We don't know which one it is, allow user to set the
* RC6 one from userspace if OTHER wasn't correct. */
- allowed_protos |= RC_TYPE_RC6;
+ allowed_protos |= RC_BIT_RC6_MCE;
break;
}
@@ -1875,7 +1871,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
rdev->priv = ictx;
rdev->driver_type = RC_DRIVER_SCANCODE;
- rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
+ rdev->allowed_protos = RC_BIT_OTHER | RC_BIT_RC6_MCE; /* iMON PAD or MCE */
rdev->change_protocol = imon_ir_change_protocol;
rdev->driver_name = MOD_NAME;
@@ -1893,7 +1889,7 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
imon_set_display_type(ictx);
- if (ictx->rc_type == RC_TYPE_RC6)
+ if (ictx->rc_type == RC_BIT_RC6_MCE)
rdev->map_name = RC_MAP_IMON_MCE;
else
rdev->map_name = RC_MAP_IMON_PAD;
@@ -2292,8 +2288,8 @@ static void imon_init_display(struct imon_context *ictx,
/**
* Callback function for USB core API: Probe
*/
-static int __devinit imon_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+static int imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_device *usbdev = NULL;
struct usb_host_interface *iface_desc = NULL;
@@ -2376,7 +2372,7 @@ fail:
/**
* Callback function for USB core API: disconnect
*/
-static void __devexit imon_disconnect(struct usb_interface *interface)
+static void imon_disconnect(struct usb_interface *interface)
{
struct imon_context *ictx;
struct device *dev;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 035668e27f6b..69edffb9fe9a 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -47,7 +47,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
- if (!(dev->raw->enabled_protocols & RC_TYPE_JVC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_JVC))
return 0;
if (!is_timing_event(ev)) {
@@ -174,7 +174,7 @@ out:
}
static struct ir_raw_handler jvc_handler = {
- .protocols = RC_TYPE_JVC,
+ .protocols = RC_BIT_JVC,
.decode = ir_jvc_decode,
};
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 870c93052fd0..9945e5e7f61a 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!(dev->raw->enabled_protocols & RC_TYPE_LIRC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_LIRC))
return 0;
if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
@@ -408,7 +408,7 @@ static int ir_lirc_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler lirc_handler = {
- .protocols = RC_TYPE_LIRC,
+ .protocols = RC_BIT_LIRC,
.decode = ir_lirc_decode,
.raw_register = ir_lirc_register,
.raw_unregister = ir_lirc_unregister,
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 3784ebf80ec7..33fafa4cf7cb 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -216,7 +216,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
unsigned long delay;
- if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
+ if (!(dev->raw->enabled_protocols & RC_BIT_MCE_KBD))
return 0;
if (!is_timing_event(ev)) {
@@ -422,7 +422,7 @@ static int ir_mce_kbd_unregister(struct rc_dev *dev)
}
static struct ir_raw_handler mce_kbd_handler = {
- .protocols = RC_TYPE_MCE_KBD,
+ .protocols = RC_BIT_MCE_KBD,
.decode = ir_mce_kbd_decode,
.raw_register = ir_mce_kbd_register,
.raw_unregister = ir_mce_kbd_unregister,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 2ca509e6e16b..a47ee3634969 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -52,7 +52,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 address, not_address, command, not_command;
bool send_32bits = false;
- if (!(dev->raw->enabled_protocols & RC_TYPE_NEC))
+ if (!(dev->raw->enabled_protocols & RC_BIT_NEC))
return 0;
if (!is_timing_event(ev)) {
@@ -201,7 +201,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler nec_handler = {
- .protocols = RC_TYPE_NEC,
+ .protocols = RC_BIT_NEC,
.decode = ir_nec_decode,
};
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 9ab663a507a4..5b4d1ddeac4e 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -52,8 +52,8 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5))
- return 0;
+ if (!(dev->raw->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X)))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,6 +128,10 @@ again:
if (data->wanted_bits == RC5X_NBITS) {
/* RC5X */
u8 xdata, command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5X)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
xdata = (data->bits & 0x0003F) >> 0;
command = (data->bits & 0x00FC0) >> 6;
system = (data->bits & 0x1F000) >> 12;
@@ -141,6 +145,10 @@ again:
} else {
/* RC5 */
u8 command, system;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
command = (data->bits & 0x0003F) >> 0;
system = (data->bits & 0x007C0) >> 6;
toggle = (data->bits & 0x00800) ? 1 : 0;
@@ -164,7 +172,7 @@ out:
}
static struct ir_raw_handler rc5_handler = {
- .protocols = RC_TYPE_RC5,
+ .protocols = RC_BIT_RC5 | RC_BIT_RC5X,
.decode = ir_rc5_decode,
};
diff --git a/drivers/media/rc/ir-rc5-sz-decoder.c b/drivers/media/rc/ir-rc5-sz-decoder.c
index ec8d4a2e2c5a..fd807a8308d8 100644
--- a/drivers/media/rc/ir-rc5-sz-decoder.c
+++ b/drivers/media/rc/ir-rc5-sz-decoder.c
@@ -48,8 +48,8 @@ static int ir_rc5_sz_decode(struct rc_dev *dev, struct ir_raw_event ev)
u8 toggle, command, system;
u32 scancode;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC5_SZ))
- return 0;
+ if (!(dev->raw->enabled_protocols & RC_BIT_RC5_SZ))
+ return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
@@ -128,7 +128,7 @@ out:
}
static struct ir_raw_handler rc5_sz_handler = {
- .protocols = RC_TYPE_RC5_SZ,
+ .protocols = RC_BIT_RC5_SZ,
.decode = ir_rc5_sz_decode,
};
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 4cfdd7fa4bbd..e19072ffb36c 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -89,7 +89,9 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 toggle;
- if (!(dev->raw->enabled_protocols & RC_TYPE_RC6))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE)))
return 0;
if (!is_timing_event(ev)) {
@@ -271,7 +273,9 @@ out:
}
static struct ir_raw_handler rc6_handler = {
- .protocols = RC_TYPE_RC6,
+ .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
};
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 546199e9ccc7..8ead492d03aa 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -28,7 +28,6 @@
#include <plat/dmtimer.h>
#include <plat/clock.h>
-#include <plat/omap-pm.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
@@ -444,7 +443,7 @@ static int lirc_rx51_resume(struct platform_device *dev)
#endif /* CONFIG_PM */
-static int __devinit lirc_rx51_probe(struct platform_device *dev)
+static int lirc_rx51_probe(struct platform_device *dev)
{
lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES;
lirc_rx51.pdata = dev->dev.platform_data;
@@ -480,18 +479,7 @@ struct platform_driver lirc_rx51_platform_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init lirc_rx51_init(void)
-{
- return platform_driver_register(&lirc_rx51_platform_driver);
-}
-module_init(lirc_rx51_init);
-
-static void __exit lirc_rx51_exit(void)
-{
- platform_driver_unregister(&lirc_rx51_platform_driver);
-}
-module_exit(lirc_rx51_exit);
+module_platform_driver(lirc_rx51_platform_driver);
MODULE_DESCRIPTION("LIRC TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7e54ec57bcf9..7e69a3b65370 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -58,7 +58,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 address, command, not_command;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
+ if (!(dev->raw->enabled_protocols & RC_BIT_SANYO))
return 0;
if (!is_timing_event(ev)) {
@@ -179,7 +179,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler sanyo_handler = {
- .protocols = RC_TYPE_SANYO,
+ .protocols = RC_BIT_SANYO,
.decode = ir_sanyo_decode,
};
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index dab98b37621a..fb914342cf4d 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -45,7 +45,8 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
u32 scancode;
u8 device, subdevice, function;
- if (!(dev->raw->enabled_protocols & RC_TYPE_SONY))
+ if (!(dev->raw->enabled_protocols &
+ (RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20)))
return 0;
if (!is_timing_event(ev)) {
@@ -123,16 +124,28 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
switch (data->count) {
case 12:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY12)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits << 3) & 0xF8);
subdevice = 0;
function = bitrev8((data->bits >> 4) & 0xFE);
break;
case 15:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY15)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 0) & 0xFF);
subdevice = 0;
function = bitrev8((data->bits >> 7) & 0xFE);
break;
case 20:
+ if (!(dev->raw->enabled_protocols & RC_BIT_SONY20)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
device = bitrev8((data->bits >> 5) & 0xF8);
subdevice = bitrev8((data->bits >> 0) & 0xFF);
function = bitrev8((data->bits >> 12) & 0xFE);
@@ -157,7 +170,7 @@ out:
}
static struct ir_raw_handler sony_handler = {
- .protocols = RC_TYPE_SONY,
+ .protocols = RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20,
.decode = ir_sony_decode,
};
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 24c77a42fc36..1b8669b6d042 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1563,7 +1563,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* set up ir-core props */
rdev->priv = itdev;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = ite_open;
rdev->close = ite_close;
rdev->s_idle = ite_s_idle;
@@ -1620,7 +1620,7 @@ failure:
return ret;
}
-static void __devexit ite_remove(struct pnp_dev *pdev)
+static void ite_remove(struct pnp_dev *pdev)
{
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -1702,18 +1702,18 @@ static struct pnp_driver ite_driver = {
.name = ITE_DRIVER_NAME,
.id_table = ite_ids,
.probe = ite_probe,
- .remove = __devexit_p(ite_remove),
+ .remove = ite_remove,
.suspend = ite_suspend,
.resume = ite_resume,
.shutdown = ite_shutdown,
};
-int ite_init(void)
+static int ite_init(void)
{
return pnp_register_driver(&ite_driver);
}
-void ite_exit(void)
+static void ite_exit(void)
{
pnp_unregister_driver(&ite_driver);
}
diff --git a/drivers/media/rc/keymaps/rc-imon-mce.c b/drivers/media/rc/keymaps/rc-imon-mce.c
index 124c7228ba8c..f0da960560b0 100644
--- a/drivers/media/rc/keymaps/rc-imon-mce.c
+++ b/drivers/media/rc/keymaps/rc-imon-mce.c
@@ -121,7 +121,7 @@ static struct rc_map_list imon_mce_map = {
.scan = imon_mce,
.size = ARRAY_SIZE(imon_mce),
/* its RC6, but w/a hardware decoder */
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_IMON_MCE,
}
};
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 753e43ec787b..ef4006fe4de0 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -97,7 +97,7 @@ static struct rc_map_list rc6_mce_map = {
.map = {
.scan = rc6_mce,
.size = ARRAY_SIZE(rc6_mce),
- .rc_type = RC_TYPE_RC6,
+ .rc_type = RC_TYPE_RC6_MCE,
.name = RC_MAP_RC6_MCE,
}
};
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 850547fe711c..9afb9331217d 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1205,7 +1205,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
@@ -1229,8 +1229,8 @@ out:
return NULL;
}
-static int __devinit mceusb_dev_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int mceusb_dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *idesc;
@@ -1393,7 +1393,7 @@ mem_alloc_fail:
}
-static void __devexit mceusb_dev_disconnect(struct usb_interface *intf)
+static void mceusb_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct mceusb_dev *ir = usb_get_intfdata(intf);
@@ -1432,7 +1432,7 @@ static int mceusb_dev_resume(struct usb_interface *intf)
static struct usb_driver mceusb_dev_driver = {
.name = DRIVER_NAME,
.probe = mceusb_dev_probe,
- .disconnect = __devexit_p(mceusb_dev_disconnect),
+ .disconnect = mceusb_dev_disconnect,
.suspend = mceusb_dev_suspend,
.resume = mceusb_dev_resume,
.reset_resume = mceusb_dev_resume,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 2ea913a44ae8..b8aa9abb31ff 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -472,6 +472,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}
+#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
@@ -504,7 +505,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
return carrier;
}
-
+#endif
/*
* set carrier frequency
*
@@ -620,7 +621,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
DEFINE_IR_RAW_EVENT(rawir);
- u32 carrier;
u8 sample;
int i;
@@ -629,9 +629,6 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
if (debug)
nvt_dump_rx_buf(nvt);
- if (nvt->carrier_detect_enabled)
- carrier = nvt_rx_carrier_detect(nvt);
-
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
init_ir_raw_event(&rawir);
@@ -1045,7 +1042,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
@@ -1116,7 +1113,7 @@ failure:
return ret;
}
-static void __devexit nvt_remove(struct pnp_dev *pdev)
+static void nvt_remove(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -1214,18 +1211,18 @@ static struct pnp_driver nvt_driver = {
.id_table = nvt_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = nvt_probe,
- .remove = __devexit_p(nvt_remove),
+ .remove = nvt_remove,
.suspend = nvt_suspend,
.resume = nvt_resume,
.shutdown = nvt_shutdown,
};
-int nvt_init(void)
+static int nvt_init(void)
{
return pnp_register_driver(&nvt_driver);
}
-void nvt_exit(void)
+static void nvt_exit(void)
{
pnp_unregister_driver(&nvt_driver);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 0d5e0872a2ea..7c3674ff5ea2 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -103,7 +103,6 @@ struct nvt_dev {
/* rx settings */
bool learning_enabled;
- bool carrier_detect_enabled;
/* track cir wake state */
u8 wake_state;
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index f9be68132c67..53d02827a472 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -195,7 +195,7 @@ static int __init loop_init(void)
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index cabc19c10515..601d1ac1c688 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -725,25 +725,36 @@ static struct class ir_input_class = {
.devnode = ir_devnode,
};
+/*
+ * These are the protocol textual descriptions that are
+ * used by the sysfs protocols file. Note that the order
+ * of the entries is relevant.
+ */
static struct {
u64 type;
char *name;
} proto_names[] = {
- { RC_TYPE_UNKNOWN, "unknown" },
- { RC_TYPE_RC5, "rc-5" },
- { RC_TYPE_NEC, "nec" },
- { RC_TYPE_RC6, "rc-6" },
- { RC_TYPE_JVC, "jvc" },
- { RC_TYPE_SONY, "sony" },
- { RC_TYPE_RC5_SZ, "rc-5-sz" },
- { RC_TYPE_SANYO, "sanyo" },
- { RC_TYPE_MCE_KBD, "mce_kbd" },
- { RC_TYPE_LIRC, "lirc" },
- { RC_TYPE_OTHER, "other" },
+ { RC_BIT_NONE, "none" },
+ { RC_BIT_OTHER, "other" },
+ { RC_BIT_UNKNOWN, "unknown" },
+ { RC_BIT_RC5 |
+ RC_BIT_RC5X, "rc-5" },
+ { RC_BIT_NEC, "nec" },
+ { RC_BIT_RC6_0 |
+ RC_BIT_RC6_6A_20 |
+ RC_BIT_RC6_6A_24 |
+ RC_BIT_RC6_6A_32 |
+ RC_BIT_RC6_MCE, "rc-6" },
+ { RC_BIT_JVC, "jvc" },
+ { RC_BIT_SONY12 |
+ RC_BIT_SONY15 |
+ RC_BIT_SONY20, "sony" },
+ { RC_BIT_RC5_SZ, "rc-5-sz" },
+ { RC_BIT_SANYO, "sanyo" },
+ { RC_BIT_MCE_KBD, "mce_kbd" },
+ { RC_BIT_LIRC, "lirc" },
};
-#define PROTO_NONE "none"
-
/**
* show_protocols() - shows the current IR protocol(s)
* @device: the device descriptor
@@ -790,6 +801,9 @@ static ssize_t show_protocols(struct device *device,
tmp += sprintf(tmp, "[%s] ", proto_names[i].name);
else if (allowed & proto_names[i].type)
tmp += sprintf(tmp, "%s ", proto_names[i].name);
+
+ if (allowed & proto_names[i].type)
+ allowed &= ~proto_names[i].type;
}
if (tmp != buf)
@@ -867,26 +881,20 @@ static ssize_t store_protocols(struct device *device,
disable = false;
}
- if (!enable && !disable && !strncasecmp(tmp, PROTO_NONE, sizeof(PROTO_NONE))) {
- tmp += sizeof(PROTO_NONE);
- mask = 0;
- count++;
- } else {
- for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
- if (!strcasecmp(tmp, proto_names[i].name)) {
- tmp += strlen(proto_names[i].name);
- mask = proto_names[i].type;
- break;
- }
- }
- if (i == ARRAY_SIZE(proto_names)) {
- IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
- ret = -EINVAL;
- goto out;
+ for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
+ if (!strcasecmp(tmp, proto_names[i].name)) {
+ mask = proto_names[i].type;
+ break;
}
- count++;
}
+ if (i == ARRAY_SIZE(proto_names)) {
+ IR_dprintk(1, "Unknown protocol: '%s'\n", tmp);
+ return -EINVAL;
+ }
+
+ count++;
+
if (enable)
type |= mask;
else if (disable)
@@ -902,7 +910,7 @@ static ssize_t store_protocols(struct device *device,
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, type);
+ rc = dev->change_protocol(dev, &type);
if (rc < 0) {
IR_dprintk(1, "Error setting protocols to 0x%llx\n",
(long long)type);
@@ -1117,7 +1125,8 @@ int rc_register_device(struct rc_dev *dev)
}
if (dev->change_protocol) {
- rc = dev->change_protocol(dev, rc_map->rc_type);
+ u64 rc_type = (1 << rc_map->rc_type);
+ rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_raw;
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 9f5a17bb5ef5..1800326f93e6 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1082,7 +1082,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->dev.parent = dev;
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
@@ -1102,8 +1102,8 @@ out:
return NULL;
}
-static int __devinit redrat3_dev_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int redrat3_dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct device *dev = &intf->dev;
@@ -1241,7 +1241,7 @@ no_endpoints:
return retval;
}
-static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
+static void redrat3_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
@@ -1281,7 +1281,7 @@ static int redrat3_dev_resume(struct usb_interface *intf)
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
- .disconnect = __devexit_p(redrat3_dev_disconnect),
+ .disconnect = redrat3_dev_disconnect,
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index d6f4bfe09391..d7b11e6a9982 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -322,7 +322,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->dev.parent = dev;
rdev->priv = sz;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->allowed_protos = RC_TYPE_ALL;
+ rdev->allowed_protos = RC_BIT_ALL;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
@@ -346,8 +346,8 @@ out:
* On any failure the return value is the ERROR
* On success return 0
*/
-static int __devinit streamzap_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int streamzap_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
struct usb_host_interface *iface_host;
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index fef05235234a..78be8a914225 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -194,8 +194,8 @@ static void ttusbir_urb_complete(struct urb *urb)
dev_warn(tt->dev, "failed to resubmit urb: %d\n", rc);
}
-static int __devinit ttusbir_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int ttusbir_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct ttusbir *tt;
struct usb_interface_descriptor *idesc;
@@ -316,7 +316,7 @@ static int __devinit ttusbir_probe(struct usb_interface *intf,
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protos = RC_TYPE_ALL;
+ rc->allowed_protos = RC_BIT_ALL;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
@@ -367,7 +367,7 @@ out:
return ret;
}
-static void __devexit ttusbir_disconnect(struct usb_interface *intf)
+static void ttusbir_disconnect(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
struct usb_device *udev = tt->udev;
@@ -435,7 +435,7 @@ static struct usb_driver ttusbir_driver = {
.suspend = ttusbir_suspend,
.resume = ttusbir_resume,
.reset_resume = ttusbir_resume,
- .disconnect = __devexit_p(ttusbir_disconnect)
+ .disconnect = ttusbir_disconnect,
};
module_usb_driver(ttusbir_driver);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 7c9b5f33113b..930c61499037 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -7,6 +7,7 @@
* with minor modifications.
*
* Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2012 Sean Young <sean@mess.org>
* Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
*
* Dedicated to my daughter Matilda, without whose loving attention this
@@ -22,9 +23,7 @@
* o IR Receive
* o IR Transmit
* o Wake-On-CIR functionality
- *
- * To do:
- * o Learning
+ * o Carrier detection
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -149,6 +148,12 @@
#define WBCIR_REGSEL_MASK 0x20
/* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */
#define WBCIR_REG_ADDR0 0x00
+/* Enable carrier counter */
+#define WBCIR_CNTR_EN 0x01
+/* Reset carrier counter */
+#define WBCIR_CNTR_R 0x02
+/* Invert TX */
+#define WBCIR_IRTX_INV 0x04
/* Valid banks for the SP3 UART */
enum wbcir_bank {
@@ -184,7 +189,7 @@ enum wbcir_txstate {
};
/* Misc */
-#define WBCIR_NAME "winbond-cir"
+#define WBCIR_NAME "Winbond CIR"
#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
@@ -207,7 +212,8 @@ struct wbcir_data {
/* RX state */
enum wbcir_rxstate rxstate;
struct led_trigger *rxtrigger;
- struct ir_raw_event rxev;
+ int carrier_report_enabled;
+ u32 pulse_duration;
/* TX state */
enum wbcir_txstate txstate;
@@ -330,6 +336,30 @@ wbcir_to_rc6cells(u8 val)
*****************************************************************************/
static void
+wbcir_carrier_report(struct wbcir_data *data)
+{
+ unsigned counter = inb(data->ebase + WBCIR_REG_ECEIR_CNT_LO) |
+ inb(data->ebase + WBCIR_REG_ECEIR_CNT_HI) << 8;
+
+ if (counter > 0 && counter < 0xffff) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ ev.carrier_report = 1;
+ ev.carrier = DIV_ROUND_CLOSEST(counter * 1000000u,
+ data->pulse_duration);
+
+ ir_raw_event_store(data->dev, &ev);
+ }
+
+ /* reset and restart the counter */
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_EN,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+}
+
+static void
wbcir_idle_rx(struct rc_dev *dev, bool idle)
{
struct wbcir_data *data = dev->priv;
@@ -339,9 +369,16 @@ wbcir_idle_rx(struct rc_dev *dev, bool idle)
led_trigger_event(data->rxtrigger, LED_FULL);
}
- if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE)
+ if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) {
+ data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ led_trigger_event(data->rxtrigger, LED_OFF);
+
+ if (data->carrier_report_enabled)
+ wbcir_carrier_report(data);
+
/* Tell hardware to go idle by setting RXINACTIVE */
outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR);
+ }
}
static void
@@ -349,21 +386,22 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
{
u8 irdata;
DEFINE_IR_RAW_EVENT(rawir);
+ unsigned duration;
/* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) {
irdata = inb(data->sbase + WBCIR_REG_SP3_RXDATA);
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
+
+ duration = ((irdata & 0x7F) + 1) * 2;
rawir.pulse = irdata & 0x80 ? false : true;
- rawir.duration = US_TO_NS(((irdata & 0x7F) + 1) * 10);
- ir_raw_event_store_with_filter(data->dev, &rawir);
- }
+ rawir.duration = US_TO_NS(duration);
- /* Check if we should go idle */
- if (data->dev->idle) {
- led_trigger_event(data->rxtrigger, LED_OFF);
- data->rxstate = WBCIR_RXSTATE_INACTIVE;
+ if (rawir.pulse)
+ data->pulse_duration += duration;
+
+ ir_raw_event_store_with_filter(data->dev, &rawir);
}
ir_raw_event_handle(data->dev);
@@ -492,6 +530,33 @@ wbcir_irq_handler(int irqno, void *cookie)
*****************************************************************************/
static int
+wbcir_set_carrier_report(struct rc_dev *dev, int enable)
+{
+ struct wbcir_data *data = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->spinlock, flags);
+
+ if (data->carrier_report_enabled == enable) {
+ spin_unlock_irqrestore(&data->spinlock, flags);
+ return 0;
+ }
+
+ data->pulse_duration = 0;
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL, WBCIR_CNTR_R,
+ WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ if (enable && data->dev->idle)
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CCTL,
+ WBCIR_CNTR_EN, WBCIR_CNTR_EN | WBCIR_CNTR_R);
+
+ data->carrier_report_enabled = enable;
+ spin_unlock_irqrestore(&data->spinlock, flags);
+
+ return 0;
+}
+
+static int
wbcir_txcarrier(struct rc_dev *dev, u32 carrier)
{
struct wbcir_data *data = dev->priv;
@@ -837,7 +902,7 @@ wbcir_init_hw(struct wbcir_data *data)
/* Set IRTX_INV */
if (invert)
- outb(0x04, data->ebase + WBCIR_REG_ECEIR_CCTL);
+ outb(WBCIR_IRTX_INV, data->ebase + WBCIR_REG_ECEIR_CCTL);
else
outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL);
@@ -866,8 +931,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to sample every 10 us */
- outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
+	/* Set baud divisor to sample every 2 us */
+ outb(0x03, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -876,9 +941,12 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /* Disable RX demod, enable run-length enc/dec, set freq span */
+ /*
+ * Disable RX demod, enable run-length enc/dec, set freq span and
+ * enable over-sampling
+ */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0xd0, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
@@ -915,9 +983,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* Clear RX state */
data->rxstate = WBCIR_RXSTATE_INACTIVE;
- data->rxev.duration = 0;
ir_raw_event_reset(data->dev);
- ir_raw_event_handle(data->dev);
+ ir_raw_event_set_idle(data->dev, true);
/* Clear TX state */
if (data->txstate == WBCIR_TXSTATE_ACTIVE) {
@@ -941,7 +1008,7 @@ wbcir_resume(struct pnp_dev *device)
return 0;
}
-static int __devinit
+static int
wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
{
struct device *dev = &device->dev;
@@ -1007,7 +1074,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
}
data->dev->driver_type = RC_DRIVER_IR_RAW;
- data->dev->driver_name = WBCIR_NAME;
+ data->dev->driver_name = DRVNAME;
data->dev->input_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
data->dev->input_id.bustype = BUS_HOST;
@@ -1016,13 +1083,15 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->input_id.version = WBCIR_ID_CHIP;
data->dev->map_name = RC_MAP_RC6_MCE;
data->dev->s_idle = wbcir_idle_rx;
+ data->dev->s_carrier_report = wbcir_set_carrier_report;
data->dev->s_tx_mask = wbcir_txmask;
data->dev->s_tx_carrier = wbcir_txcarrier;
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
data->dev->timeout = MS_TO_NS(100);
- data->dev->allowed_protos = RC_TYPE_ALL;
+ data->dev->rx_resolution = US_TO_NS(2);
+ data->dev->allowed_protos = RC_BIT_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
@@ -1086,7 +1155,7 @@ exit:
return err;
}
-static void __devexit
+static void
wbcir_remove(struct pnp_dev *device)
{
struct wbcir_data *data = pnp_get_drvdata(device);
@@ -1132,7 +1201,7 @@ static struct pnp_driver wbcir_driver = {
.name = WBCIR_NAME,
.id_table = wbcir_ids,
.probe = wbcir_probe,
- .remove = __devexit_p(wbcir_remove),
+ .remove = wbcir_remove,
.suspend = wbcir_suspend,
.resume = wbcir_resume,
.shutdown = wbcir_shutdown
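The RX path above now treats each FIFO byte as a run-length sample on the new 2 us clock: bit 7 selects space vs. pulse and the low seven bits give the run length. A minimal standalone sketch of that decoding, mirroring the arithmetic in wbcir_irq_rx(); the function name and printf harness are illustrative only:

```c
#include <stdbool.h>
#include <stdio.h>

/* Decode one wbcir RX FIFO byte: bit 7 set = space, clear = pulse;
 * bits 0-6 hold the run length, one step per 2 us sample. */
static void decode_rx_byte(unsigned char irdata)
{
	bool pulse = !(irdata & 0x80);
	unsigned duration_us = ((irdata & 0x7F) + 1) * 2;

	printf("%s for %u us\n", pulse ? "pulse" : "space", duration_us);
}

int main(void)
{
	decode_rx_byte(0x00);	/* shortest pulse: 2 us */
	decode_rx_byte(0xFF);	/* longest space: 256 us */
	return 0;
}
```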
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index aff39ae457a0..81f38aae9c66 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -35,8 +35,6 @@
* Currently it blind writes bunch of static registers from the
* fc2580_freq_regs_lut[] when fc2580_set_params() is called. Add some
* logic to reduce unneeded register writes.
- * There is also don't-care registers, initialized with value 0xff, and those
- * are also written to the chip currently (yes, not wise).
*/
/* write multiple registers */
@@ -111,6 +109,17 @@ static int fc2580_rd_reg(struct fc2580_priv *priv, u8 reg, u8 *val)
return fc2580_rd_regs(priv, reg, val, 1);
}
+/* Write a single register, but only when the value differs from 0xff.
+ * XXX: This is a special routine meant only for writing the
+ * fc2580_freq_regs_lut[] values. Do not use it for other purposes. */
+static int fc2580_wr_reg_ff(struct fc2580_priv *priv, u8 reg, u8 val)
+{
+ if (val == 0xff)
+ return 0;
+ else
+ return fc2580_wr_regs(priv, reg, &val, 1);
+}
+
static int fc2580_set_params(struct dvb_frontend *fe)
{
struct fc2580_priv *priv = fe->tuner_priv;
@@ -213,99 +222,99 @@ static int fc2580_set_params(struct dvb_frontend *fe)
if (i == ARRAY_SIZE(fc2580_freq_regs_lut))
goto err;
- ret = fc2580_wr_reg(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
+ ret = fc2580_wr_reg_ff(priv, 0x25, fc2580_freq_regs_lut[i].r25_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
+ ret = fc2580_wr_reg_ff(priv, 0x27, fc2580_freq_regs_lut[i].r27_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
+ ret = fc2580_wr_reg_ff(priv, 0x28, fc2580_freq_regs_lut[i].r28_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
+ ret = fc2580_wr_reg_ff(priv, 0x29, fc2580_freq_regs_lut[i].r29_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2b, fc2580_freq_regs_lut[i].r2b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2c, fc2580_freq_regs_lut[i].r2c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x2d, fc2580_freq_regs_lut[i].r2d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
+ ret = fc2580_wr_reg_ff(priv, 0x30, fc2580_freq_regs_lut[i].r30_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
+ ret = fc2580_wr_reg_ff(priv, 0x44, fc2580_freq_regs_lut[i].r44_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
+ ret = fc2580_wr_reg_ff(priv, 0x50, fc2580_freq_regs_lut[i].r50_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
+ ret = fc2580_wr_reg_ff(priv, 0x53, fc2580_freq_regs_lut[i].r53_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x5f, fc2580_freq_regs_lut[i].r5f_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
+ ret = fc2580_wr_reg_ff(priv, 0x61, fc2580_freq_regs_lut[i].r61_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
+ ret = fc2580_wr_reg_ff(priv, 0x62, fc2580_freq_regs_lut[i].r62_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
+ ret = fc2580_wr_reg_ff(priv, 0x63, fc2580_freq_regs_lut[i].r63_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
+ ret = fc2580_wr_reg_ff(priv, 0x67, fc2580_freq_regs_lut[i].r67_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
+ ret = fc2580_wr_reg_ff(priv, 0x68, fc2580_freq_regs_lut[i].r68_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
+ ret = fc2580_wr_reg_ff(priv, 0x69, fc2580_freq_regs_lut[i].r69_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6a, fc2580_freq_regs_lut[i].r6a_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6b, fc2580_freq_regs_lut[i].r6b_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6c, fc2580_freq_regs_lut[i].r6c_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6d, fc2580_freq_regs_lut[i].r6d_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6e, fc2580_freq_regs_lut[i].r6e_val);
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
+ ret = fc2580_wr_reg_ff(priv, 0x6f, fc2580_freq_regs_lut[i].r6f_val);
if (ret < 0)
goto err;
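The new helper treats 0xff as a don't-care sentinel and silently skips the bus write. A hedged sketch of the same pattern in isolation, assuming a hypothetical register/value table and write callback rather than the driver's actual types:

```c
#include <stdio.h>

struct reg_val { unsigned char reg, val; };

/* Skip writes for "don't care" entries marked with the 0xff sentinel. */
static int write_filtered(const struct reg_val *tbl, int n,
			  int (*wr)(unsigned char reg, unsigned char val))
{
	int i, ret;

	for (i = 0; i < n; i++) {
		if (tbl[i].val == 0xff)		/* don't care: leave register as-is */
			continue;
		ret = wr(tbl[i].reg, tbl[i].val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int fake_write(unsigned char reg, unsigned char val)
{
	printf("write 0x%02x = 0x%02x\n", reg, val);
	return 0;
}

int main(void)
{
	const struct reg_val tbl[] = { {0x25, 0xf0}, {0x27, 0xff}, {0x28, 0x09} };

	return write_filtered(tbl, 3, fake_write);	/* the 0x27 entry is skipped */
}
```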
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index ba84936aafd6..95ed46f2cd26 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -161,7 +161,7 @@ static int max2165_set_bandwidth(struct max2165_priv *priv, u32 bw)
return 0;
}
-int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
+static int fixpt_div32(u32 dividend, u32 divisor, u32 *quotient, u32 *fraction)
{
u32 remainder;
u32 q, f = 0;
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index 389668474070..83a6240f64d3 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -136,7 +136,7 @@ static int tua9001_set_params(struct dvb_frontend *fe)
{
struct tua9001_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
+ int ret = 0, i;
u16 val;
u32 frequency;
struct reg_val data[2];
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 4937712278f6..5c0fd787cc8f 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -934,7 +934,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
int rc = 0, is_retry = 0;
u16 hwmodel;
v4l2_std_id std0;
- u8 hw_major, hw_minor, fw_major, fw_minor;
+ u8 hw_major = 0, hw_minor = 0, fw_major = 0, fw_minor = 0;
dprintk(1, "%s called\n", __func__);
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 448361c6a13e..0cb7c28dcb17 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -25,7 +25,7 @@
#include "media/tuner.h"
#include "media/v4l2-common.h"
-void hvr950q_cs5340_audio(void *priv, int enable)
+static void hvr950q_cs5340_audio(void *priv, int enable)
{
/* Because the HVR-950q shares an i2s bus between the cs5340 and the
au8522, we need to hold cs5340 in reset when using the au8522 */
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index b328f6550d0b..9a6f15613a38 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -272,7 +272,6 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
struct au0828_dev *dev = container_of(work, struct au0828_dev,
restart_streaming);
struct au0828_dvb *dvb = &dev->dvb;
- int ret;
if (dev->urb_streaming == 0)
return;
@@ -282,7 +281,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
mutex_lock(&dvb->lock);
/* Stop transport */
- ret = stop_urb_transfer(dev);
+ stop_urb_transfer(dev);
au0828_write(dev, 0x608, 0x00);
au0828_write(dev, 0x609, 0x00);
au0828_write(dev, 0x60a, 0x00);
@@ -293,7 +292,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
au0828_write(dev, 0x609, 0x72);
au0828_write(dev, 0x60a, 0x71);
au0828_write(dev, 0x60b, 0x01);
- ret = start_urb_transfer(dev);
+ start_urb_transfer(dev);
mutex_unlock(&dvb->lock);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 870585570571..45387aab10c7 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -158,7 +158,7 @@ static void au0828_irq_callback(struct urb *urb)
/*
* Stop and Deallocate URBs
*/
-void au0828_uninit_isoc(struct au0828_dev *dev)
+static void au0828_uninit_isoc(struct au0828_dev *dev)
{
struct urb *urb;
int i;
@@ -197,9 +197,9 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
/*
* Allocate URBs and start IRQ
*/
-int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
- int num_bufs, int max_pkt_size,
- int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
+static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
+ int num_bufs, int max_pkt_size,
+ int (*isoc_copy) (struct au0828_dev *dev, struct urb *urb))
{
struct au0828_dmaqueue *dma_q = &dev->vidq;
int i;
@@ -783,7 +783,7 @@ static int au0828_i2s_init(struct au0828_dev *dev)
* Auvitek au0828 analog stream enable
* Please set interface0 to AS5 before enable the stream
*/
-int au0828_analog_stream_enable(struct au0828_dev *d)
+static int au0828_analog_stream_enable(struct au0828_dev *d)
{
dprintk(1, "au0828_analog_stream_enable called\n");
au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
@@ -810,7 +810,7 @@ int au0828_analog_stream_disable(struct au0828_dev *d)
return 0;
}
-void au0828_analog_stream_reset(struct au0828_dev *dev)
+static void au0828_analog_stream_reset(struct au0828_dev *dev)
{
dprintk(1, "au0828_analog_stream_reset called\n");
au0828_writereg(dev, AU0828_SENSORCTRL_100, 0x0);
@@ -913,7 +913,7 @@ static int get_ressource(struct au0828_fh *fh)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vidq;
@@ -937,7 +937,7 @@ void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(unsigned long data)
{
struct au0828_dev *dev = (struct au0828_dev *) data;
struct au0828_dmaqueue *dma_q = &dev->vbiq;
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 447148eff958..722207913740 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1068,12 +1068,12 @@ int cx231xx_unmute_audio(struct cx231xx *dev)
}
EXPORT_SYMBOL_GPL(cx231xx_unmute_audio);
-int stopAudioFirmware(struct cx231xx *dev)
+static int stopAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03);
}
-int restartAudioFirmware(struct cx231xx *dev)
+static int restartAudioFirmware(struct cx231xx *dev)
{
return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13);
}
@@ -2631,11 +2631,6 @@ int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
rc = cx231xx_stop_stream(dev, ep_mask);
}
- if (dev->mode == CX231XX_ANALOG_MODE)
- ;/* do any in Analog mode */
- else
- ;/* do any in digital mode */
-
return rc;
}
EXPORT_SYMBOL_GPL(cx231xx_capture_start);
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index b84ebc54d91b..bbed1e40eeda 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -686,7 +686,7 @@ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg)
}
EXPORT_SYMBOL_GPL(cx231xx_tuner_callback);
-void cx231xx_reset_out(struct cx231xx *dev)
+static void cx231xx_reset_out(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
msleep(200);
@@ -694,11 +694,13 @@ void cx231xx_reset_out(struct cx231xx *dev)
msleep(200);
cx231xx_set_gpio_value(dev, CX23417_RESET, 1);
}
-void cx231xx_enable_OSC(struct cx231xx *dev)
+
+static void cx231xx_enable_OSC(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1);
}
-void cx231xx_sleep_s5h1432(struct cx231xx *dev)
+
+static void cx231xx_sleep_s5h1432(struct cx231xx *dev)
{
cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0);
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 781feed406f7..96a5a0965399 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -72,8 +72,8 @@ static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus,
/*
* cx231xx_i2c_send_bytes()
*/
-int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
- const struct i2c_msg *msg)
+static int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap,
+ const struct i2c_msg *msg)
{
struct cx231xx_i2c *bus = i2c_adap->algo_data;
struct cx231xx *dev = bus->dev;
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 96176e9db5a2..0f7b42446826 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -99,7 +99,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
/* The i2c micro-controller only outputs the cmd part of NEC protocol */
dev->init_data.rc_dev->scanmask = 0xff;
dev->init_data.rc_dev->driver_name = "cx231xx";
- dev->init_data.type = RC_TYPE_NEC;
+ dev->init_data.type = RC_BIT_NEC;
info.addr = 0x30;
/* Load and bind ir-kbd-i2c */
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 3d7526e28d42..943d93423705 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1306,7 +1306,7 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = af9015_rc_query;
rc->interval = 500;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index ea27eaff4e34..61ae7f9d0b27 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1023,10 +1023,10 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
switch (tmp) {
case 0: /* NEC */
default:
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
break;
case 1: /* RC6 */
- rc->allowed_protos = RC_TYPE_RC6;
+ rc->allowed_protos = RC_BIT_RC6_MCE;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index ec540140c810..d05c5b563dac 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -1048,7 +1048,7 @@ static int anysee_rc_query(struct dvb_usb_device *d)
static int anysee_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = anysee_rc_query;
rc->interval = 250; /* windows driver uses 500ms */
@@ -1170,7 +1170,7 @@ static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot,
struct dvb_usb_device *d = ci->data;
struct anysee_state *state = d_to_priv(d);
int ret;
- u8 tmp;
+ u8 tmp = 0;
ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40);
if (ret)
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 54f1221d930d..d75dbf27e99e 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -826,7 +826,7 @@ static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
pr_debug("Getting az6007 Remote Control properties\n");
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = az6007_rc_query;
rc->interval = 400;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index bae16a1189d6..059291b892b8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -137,7 +137,7 @@ struct dvb_usb_driver_info {
struct dvb_usb_rc {
const char *map_name;
u64 allowed_protos;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
int (*query) (struct dvb_usb_device *d);
unsigned int interval;
const enum rc_driver_type driver_type;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index ba51f65204de..671b4fa232b4 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -224,7 +224,7 @@ static void dvb_usb_data_complete_raw(struct usb_data_stream *stream, u8 *buf,
dvb_dmx_swfilter_raw(&adap->demux, buf, len);
}
-int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -236,7 +236,7 @@ int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
return usb_urb_initv2(&adap->stream, &adap->props->stream);
}
-int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -368,7 +368,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return dvb_usb_ctrl_feed(dvbdmxfeed, -1);
}
-int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -440,7 +440,7 @@ err_dvb_register_adapter:
return ret;
}
-int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
@@ -456,7 +456,7 @@ int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
return 0;
}
-int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
+static int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
@@ -553,7 +553,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
{
int ret, i, count_registered = 0;
struct dvb_usb_device *d = adap_to_d(adap);
@@ -622,7 +622,7 @@ err:
return ret;
}
-int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
+static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
int i;
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
diff --git a/drivers/media/usb/dvb-usb-v2/it913x.c b/drivers/media/usb/dvb-usb-v2/it913x.c
index 695f9106bc54..47204280b8b3 100644
--- a/drivers/media/usb/dvb-usb-v2/it913x.c
+++ b/drivers/media/usb/dvb-usb-v2/it913x.c
@@ -659,13 +659,19 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
it913x_wr_reg(d, DEV_0_DMOD, MP2IF2_SW_RST, 0x1);
it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x0f);
it913x_wr_reg(d, DEV_0, EP0_TX_NAK, 0x1b);
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
+ if (st->proprietary_ir == false) /* Enable endpoint 3 */
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x3f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x2f);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
ret = it913x_wr_reg(d, DEV_0, EP4_MAX_PKT, pkt_size);
} else if (adap->id == 1 && adap->fe[0]) {
- it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
+ if (st->proprietary_ir == false)
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x7f);
+ else
+ it913x_wr_reg(d, DEV_0, EP0_TX_EN, 0x6f);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_LSB,
ep_size & 0xff);
it913x_wr_reg(d, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
@@ -698,7 +704,7 @@ static int it913x_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
}
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = it913x_rc_query;
rc->interval = 250;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index c41d9d9ec7b5..6427ac359f21 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -799,7 +799,7 @@ static const char fw_c_rs2000[] = LME2510_C_RS2000;
static const char fw_lg[] = LME2510_LG;
static const char fw_s0194[] = LME2510_S0194;
-const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
+static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
{
struct lme2510_state *st = d->priv;
struct usb_device *udev = d->udev;
@@ -1253,7 +1253,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
static int lme2510_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 093f1acce403..a4c302d0aa37 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1197,7 +1197,7 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2831u_rc_query;
rc->interval = 400;
@@ -1269,7 +1269,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_NEC;
rc->query = rtl2832u_rc_query;
rc->interval = 400;
@@ -1338,6 +1338,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
&rtl2832u_props, "G-Tek Electronics Group Lifeview LV5TDLX DVB-T", NULL) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK,
&rtl2832u_props, "NOXON DAB/DAB+ USB dongle", NULL) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK_REV2,
+ &rtl2832u_props, "NOXON DAB/DAB+ USB dongle (rev 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_TREKSTOR_TERRES_2_0,
&rtl2832u_props, "Trekstor DVB-T Stick Terres 2.0", NULL) },
{ DVB_USB_DEVICE(USB_VID_DEXATEK, 0x1101,
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 5989b6590377..7346f85f3f2f 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -112,7 +112,7 @@ int usb_urb_submitv2(struct usb_data_stream *stream,
return 0;
}
-int usb_urb_free_urbs(struct usb_data_stream *stream)
+static int usb_urb_free_urbs(struct usb_data_stream *stream)
{
int i;
@@ -205,7 +205,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
return 0;
}
-int usb_free_stream_buffers(struct usb_data_stream *stream)
+static int usb_free_stream_buffers(struct usb_data_stream *stream)
{
if (stream->state & USB_STATE_URB_BUF) {
while (stream->buf_num) {
@@ -223,8 +223,8 @@ int usb_free_stream_buffers(struct usb_data_stream *stream)
return 0;
}
-int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
- unsigned long size)
+static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
+ unsigned long size)
{
stream->buf_num = 0;
stream->buf_size = size;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 5e45ae605427..91e0119e8a87 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -298,7 +298,8 @@ struct stb6100_config az6027_stb6100_config = {
/* check for mutex FIXME */
-int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret = -1;
if (mutex_lock_interruptible(&d->usb_mutex))
@@ -1051,10 +1052,10 @@ static struct i2c_algorithm az6027_i2c_algo = {
.functionality = az6027_i2c_func,
};
-int az6027_identify_state(struct usb_device *udev,
- struct dvb_usb_device_properties *props,
- struct dvb_usb_device_description **desc,
- int *cold)
+static int az6027_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
{
u8 *b;
s16 ret;
diff --git a/drivers/media/usb/dvb-usb/dib0700.h b/drivers/media/usb/dvb-usb/dib0700.h
index 7de125c0b36f..637b6123f391 100644
--- a/drivers/media/usb/dvb-usb/dib0700.h
+++ b/drivers/media/usb/dvb-usb/dib0700.h
@@ -64,7 +64,7 @@ extern int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
extern struct i2c_algorithm dib0700_i2c_algo;
extern int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc, int *cold);
-extern int dib0700_change_protocol(struct rc_dev *dev, u64 rc_type);
+extern int dib0700_change_protocol(struct rc_dev *dev, u64 *rc_type);
extern int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz);
extern int dib0700_device_count;
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index ef87229de6af..19b5ed2825d7 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -605,7 +605,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
-int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
+int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct dvb_usb_device *d = rc->priv;
struct dib0700_state *st = d->priv;
@@ -621,17 +621,19 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
st->buf[2] = 0;
/* Set the IR mode */
- if (rc_type == RC_TYPE_RC5)
+ if (*rc_type & RC_BIT_RC5) {
new_proto = 1;
- else if (rc_type == RC_TYPE_NEC)
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
new_proto = 0;
- else if (rc_type == RC_TYPE_RC6) {
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type & RC_BIT_RC6_MCE) {
if (st->fw_version < 0x10200) {
ret = -EINVAL;
goto out;
}
-
new_proto = 2;
+ *rc_type = RC_BIT_RC6_MCE;
} else {
ret = -EINVAL;
goto out;
@@ -645,7 +647,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
goto out;
}
- d->props.rc.core.protocol = rc_type;
+ d->props.rc.core.protocol = *rc_type;
out:
mutex_unlock(&d->usb_mutex);
@@ -707,7 +709,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
purb->actual_length);
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
toggle = 0;
/* NEC protocol sends repeat code as 0 0 0 FF */
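Across this series the change_protocol callback moves from receiving a single RC_TYPE_* value to receiving a pointer to an RC_BIT_* mask; the driver picks one protocol it supports from the mask and writes the selection back, as dib0700_change_protocol() does above. A minimal sketch of that contract with the hardware programming left out; this is an illustration, not a driver in the tree, and assumes the rc-core header of that era:

```c
#include <media/rc-core.h>	/* struct rc_dev, RC_BIT_* */

static int example_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
	/* Choose one protocol from the requested mask... */
	if (*rc_type & RC_BIT_RC5)
		*rc_type = RC_BIT_RC5;
	else if (*rc_type & RC_BIT_NEC)
		*rc_type = RC_BIT_NEC;
	else
		return -EINVAL;

	/* ...report the choice back through the pointer, then program
	 * the decoder hardware for the selected protocol here. */
	return 0;
}
```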
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 510001da6e83..11798426fa88 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -518,7 +518,7 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
d->last_event = 0;
switch (d->props.rc.core.protocol) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
(key[3] == 0xff))
@@ -3658,9 +3658,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3698,9 +3698,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3763,9 +3763,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_interval = DEFAULT_RC_INTERVAL,
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3808,9 +3808,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3890,9 +3890,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3936,9 +3936,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -3987,9 +3987,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4055,9 +4055,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4106,9 +4106,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4177,9 +4177,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4215,9 +4215,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4295,9 +4295,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4341,9 +4341,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_NEC_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4394,9 +4394,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4433,9 +4433,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4472,9 +4472,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4511,9 +4511,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4550,9 +4550,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4589,9 +4589,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4644,9 +4644,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4681,9 +4681,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4721,9 +4721,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4761,9 +4761,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -4802,9 +4802,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
.module_name = "dib0700",
.rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
},
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index aab0f99bc892..ce4c4e3b58bb 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -202,7 +202,7 @@ struct dvb_rc {
u64 protocol;
u64 allowed_protos;
enum rc_driver_type driver_type;
- int (*change_protocol)(struct rc_dev *dev, u64 rc_type);
+ int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
char *module_name;
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 02e878577c3d..d1ddfa13de86 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -927,7 +927,7 @@ static struct dvb_usb_device_properties pctv452e_properties = {
.rc.core = {
.rc_codes = RC_MAP_DIB0700_RC5_TABLE,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
@@ -980,7 +980,7 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
.rc.core = {
.rc_codes = RC_MAP_TT_1500,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
.rc_query = pctv452e_rc_query,
.rc_interval = 100,
},
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 7a8c8c18590f..40832a1aef6c 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -732,7 +732,7 @@ static struct dvb_usb_device_properties technisat_usb2_devices = {
.rc_codes = RC_MAP_TECHNISAT_USB2,
.module_name = "technisat-usb2",
.rc_query = technisat_usb2_rc_query,
- .allowed_protos = RC_TYPE_ALL,
+ .allowed_protos = RC_BIT_ALL,
.driver_type = RC_DRIVER_IR_RAW,
}
};
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index 6a50cdea3bce..bcdac225ebe1 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -741,7 +741,7 @@ static struct dvb_usb_device_properties ttusb2_properties_ct3650 = {
.rc_interval = 150, /* Less than IR_KEYPRESS_TIMEOUT */
.rc_codes = RC_MAP_TT_1500,
.rc_query = tt3650_rc_query,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.num_adapters = 1,
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 07c673a6e764..22cf9f96cb9e 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -56,7 +56,7 @@ static int vp702x_usb_in_op_unlocked(struct dvb_usb_device *d, u8 req,
}
int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+ u16 index, u8 *b, int blen)
{
int ret;
@@ -67,8 +67,8 @@ int vp702x_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
-int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen)
+static int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req,
+ u16 value, u16 index, u8 *b, int blen)
{
int ret;
deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ",req,value,index);
@@ -86,7 +86,7 @@ int vp702x_usb_out_op_unlocked(struct dvb_usb_device *d, u8 req, u16 value,
return 0;
}
-int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int vp702x_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
int ret;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 16a84f9f46d8..619bffbab3bc 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1979,6 +1979,15 @@ struct em28xx_board em28xx_boards[] = {
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
},
+ [EM2884_BOARD_TERRATEC_HTC_USB_XS] = {
+ .name = "Terratec Cinergy HTC USB XS",
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ .tuner_type = TUNER_ABSENT,
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -2057,9 +2066,9 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0043),
.driver_info = EM2870_BOARD_TERRATEC_XS },
{ USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */
- .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ .driver_info = EM2884_BOARD_TERRATEC_HTC_USB_XS },
{ USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
{ USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
@@ -3297,7 +3306,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->num_alt = interface->num_altsetting;
- if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
+ if ((unsigned)card[nr] < em28xx_bcount)
dev->model = card[nr];
/* save our data pointer in this interface device */
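The probe change replaces two signed comparisons with one unsigned test: casting card[nr] to unsigned makes any negative value wrap to a huge number, so a single `<` check rejects both negative and out-of-range indices. A small standalone illustration:

```c
#include <stdio.h>

static int in_range(int idx, unsigned count)
{
	/* A negative idx becomes a huge unsigned value and fails the test. */
	return (unsigned)idx < count;
}

int main(void)
{
	printf("%d %d %d\n", in_range(-1, 10), in_range(5, 10), in_range(10, 10));
	/* prints: 0 1 0 */
	return 0;
}
```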
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 13ae821949e9..63f2e7070c00 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -331,7 +331,7 @@ static struct drxk_config hauppauge_930c_drxk = {
.load_firmware_sync = true,
};
-struct drxk_config terratec_htc_stick_drxk = {
+static struct drxk_config terratec_htc_stick_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
@@ -520,7 +520,10 @@ static void terratec_htc_stick_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- /* Init the analog decoder? */
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
struct {
unsigned char r[4];
int len;
@@ -547,6 +550,64 @@ static void terratec_htc_stick_init(struct em28xx *dev)
em28xx_gpio_set(dev, terratec_htc_stick_end);
};
+static void terratec_htc_usb_xs_init(struct em28xx *dev)
+{
+ int i;
+
+ struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 100},
+ {EM2874_R80_GPIO, 0xb2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xb6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ {EM2874_R80_GPIO, 0xa6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+
+ /*
+ * Init the analog decoder (not yet supported), but
+ * it's probably still a good idea.
+ */
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_init);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+
+ em28xx_gpio_set(dev, terratec_htc_usb_xs_end);
+};
+
static void pctv_520e_init(struct em28xx *dev)
{
/*
@@ -1155,6 +1216,25 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2884_BOARD_TERRATEC_HTC_USB_XS:
+ terratec_htc_usb_xs_init(dev);
+
+ /* attach demodulator */
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
+ &dev->i2c_adap);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+		/* Attach the tuner. */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap,
+ &em28xx_cxd2820r_tda18271_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 97d36b4f19db..660bf803c9e4 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -345,7 +345,7 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
{
int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
@@ -354,14 +354,16 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
/* Adjust xclk based on IR table for RC5/NEC tables */
- if (rc_type == RC_TYPE_RC5) {
+ if (*rc_type & RC_BIT_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
- } else if (rc_type == RC_TYPE_NEC) {
+ *rc_type = RC_BIT_RC5;
+ } else if (*rc_type & RC_BIT_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else if (rc_type != RC_TYPE_UNKNOWN)
+ *rc_type = RC_BIT_NEC;
+ } else if (*rc_type != RC_BIT_UNKNOWN)
rc = -EINVAL;
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
@@ -524,6 +526,7 @@ static int em28xx_ir_init(struct em28xx *dev)
struct em28xx_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (dev->board.ir_codes == NULL) {
/* No remote control support */
@@ -546,14 +549,15 @@ static int em28xx_ir_init(struct em28xx *dev)
* em2874 supports more protocols. For now, let's just announce
* the two protocols that were already tested
*/
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
rc->priv = ir;
rc->change_protocol = em28xx_ir_change_protocol;
rc->open = em28xx_ir_start;
rc->close = em28xx_ir_stop;
/* By default, keep protocol field untouched */
- err = em28xx_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ err = em28xx_ir_change_protocol(rc, &rc_type);
if (err)
goto err_out_free;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 8757523e6863..86e90d86da6d 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -128,6 +128,7 @@
#define EM2874_BOARD_MAXMEDIA_UB425_TC 84
#define EM2884_BOARD_PCTV_510E 85
#define EM2884_BOARD_PCTV_520E 86
+#define EM2884_BOARD_TERRATEC_HTC_USB_XS 87
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index a2b934146ebf..e0a431bb0d42 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1586,8 +1586,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
- if (v4l2_buf->index < 0
- || v4l2_buf->index >= gspca_dev->nframes)
+ if (v4l2_buf->index >= gspca_dev->nframes)
return -EINVAL;
frame = &gspca_dev->frame[v4l2_buf->index];
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index e3eab82cd4e5..352317d7acdb 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -32,7 +32,7 @@ do { \
#define D_USBO 0x00
#define D_V4L2 0x0100
#else
-#define PDEBUG(level, fmt, ...)
+#define PDEBUG(level, fmt, ...) do {} while(0)
#endif
#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
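Defining the disabled variant as do {} while (0) instead of as nothing keeps PDEBUG() expanding to a real statement in both configurations, which avoids empty-statement diagnostics (e.g. gcc's -Wempty-body) and keeps un-braced if/else call sites parsing the same way whether debugging is compiled in or out. A small illustration of the difference, assuming gcc-style warnings; the macro names are hypothetical:

```c
#define DBG_OFF(fmt, ...)			/* disabled: expands to nothing */
#define DBG_SAFE(fmt, ...)	do {} while (0)	/* disabled: still a statement */

void report(int err)
{
	if (err)
		DBG_OFF("error %d\n", err);	/* becomes "if (err) ;" -> -Wempty-body warning */

	if (err)
		DBG_SAFE("error %d\n", err);	/* remains one well-formed statement */
}
```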
diff --git a/drivers/media/usb/gspca/jeilinj.c b/drivers/media/usb/gspca/jeilinj.c
index b897aa86f315..1ba29fe7fada 100644
--- a/drivers/media/usb/gspca/jeilinj.c
+++ b/drivers/media/usb/gspca/jeilinj.c
@@ -114,7 +114,7 @@ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command)
}
/* Responses are one byte only */
-static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
+static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char *response)
{
int retval;
@@ -123,7 +123,7 @@ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
retval = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x84),
gspca_dev->usb_buf, 1, NULL, 500);
- response = gspca_dev->usb_buf[0];
+ *response = gspca_dev->usb_buf[0];
if (retval < 0) {
pr_err("read command [%02x] error %d\n",
gspca_dev->usb_buf[0], retval);
@@ -260,7 +260,7 @@ static int jlj_start(struct gspca_dev *gspca_dev)
if (start_commands[i].delay)
msleep(start_commands[i].delay);
if (start_commands[i].ack_wanted)
- jlj_read1(gspca_dev, response);
+ jlj_read1(gspca_dev, &response);
}
setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
msleep(2);
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 40ad6687ee5d..3773a8a745df 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x02ae)},
+ {USB_DEVICE(0x045e, 0x02bf)},
{}
};
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
index cc8ec3f7e8dc..c8e1572eb502 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
@@ -74,6 +74,12 @@ static
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2548")
}
}, {
+ .ident = "Fujitsu-Siemens Amilo Pi 2530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 2530")
+ }
+ }, {
.ident = "MSI GX700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2d5c6d8343a0..4f5869a98082 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -29,14 +29,13 @@
* Register page 0:
*
* Address Description
- * 0x02 Red balance control
- * 0x03 Green balance control
- * 0x04 Blue balance control
- * Valus are inverted (0=max, 255=min).
+ * 0x01 Red balance control
+ * 0x02 Green balance control
+ * 0x03 Blue balance control
* The Windows driver uses a quadratic approach to map
* the settable values (0-200) on register values:
- * min=0x80, default=0x40, max=0x20
- * 0x0f-0x20 Colors, saturation and exposure control
+ * min=0x20, default=0x40, max=0x80
+ * 0x0f-0x20 Color and saturation control
* 0xa2-0xab Brightness, contrast and gamma control
* 0xb6 Sharpness control (bits 0-4)
*
@@ -78,12 +77,12 @@
*
* Page | Register | Function
* -----+------------+---------------------------------------------------
+ * 0 | 0x01 | setredbalance()
+ * 0 | 0x03 | setbluebalance()
* 0 | 0x0f..0x20 | setcolors()
* 0 | 0xa2..0xab | setbrightcont()
* 0 | 0xb6 | setsharpness()
- * 0 | 0xc5 | setredbalance()
* 0 | 0xc6 | setwhitebalance()
- * 0 | 0xc7 | setbluebalance()
* 0 | 0xdc | setbrightcont(), setcolors()
* 3 | 0x02 | setexposure()
* 3 | 0x10, 0x12 | setgain()
@@ -99,10 +98,13 @@
/* Include pac common sof detection functions */
#include "pac_common.h"
-#define PAC7302_GAIN_DEFAULT 15
-#define PAC7302_GAIN_KNEE 42
-#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
-#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
+#define PAC7302_RGB_BALANCE_MIN 0
+#define PAC7302_RGB_BALANCE_MAX 200
+#define PAC7302_RGB_BALANCE_DEFAULT 100
+#define PAC7302_GAIN_DEFAULT 15
+#define PAC7302_GAIN_KNEE 42
+#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
+#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
"Thomas Kaiser thomas@kaiser-linux.li");
@@ -439,12 +441,31 @@ static void setwhitebalance(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xdc, 0x01);
}
+static u8 rgbbalance_ctrl_to_reg_value(s32 rgb_ctrl_val)
+{
+ const unsigned int k = 1000; /* precision factor */
+ unsigned int norm;
+
+ /* Normed value [0...k] */
+ norm = k * (rgb_ctrl_val - PAC7302_RGB_BALANCE_MIN)
+ / (PAC7302_RGB_BALANCE_MAX - PAC7302_RGB_BALANCE_MIN);
+	/* Quadratic approach improves control at small (register) values: */
+ return 64 * norm * norm / (k*k) + 32 * norm / k + 32;
+ /* Y = 64*X*X + 32*X + 32
+ * => register values 0x20-0x80; Windows driver uses these limits */
+
+ /* NOTE: for full value range (0x00-0xff) use
+ * Y = 254*X*X + X
+ * => 254 * norm * norm / (k*k) + 1 * norm / k */
+}
+
static void setredbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc5, sd->red_balance->val);
+ reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
+ reg_w(gspca_dev, 0x01,
+ rgbbalance_ctrl_to_reg_value(sd->red_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -454,7 +475,8 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc7, sd->blue_balance->val);
+ reg_w(gspca_dev, 0x03,
+ rgbbalance_ctrl_to_reg_value(sd->blue_balance->val));
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -643,9 +665,15 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_WHITE_BALANCE_TEMPERATURE,
0, 255, 1, 55);
sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_RED_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 3, 1, 1);
+ V4L2_CID_BLUE_BALANCE,
+ PAC7302_RGB_BALANCE_MIN,
+ PAC7302_RGB_BALANCE_MAX,
+ 1, PAC7302_RGB_BALANCE_DEFAULT);
gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
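The quadratic mapping can be checked against the register limits quoted in the updated header comment (min 0x20, default 0x40, max 0x80): with the 0..200 control range normalised to X in [0, 1], Y = 64*X^2 + 32*X + 32 gives 32, 64 and 128 at the minimum, the default of 100 and the maximum. A small check of the integer version, with the constants copied from the patch:

```c
#include <stdio.h>

#define RGB_MIN	0
#define RGB_MAX	200

static unsigned char ctrl_to_reg(int val)
{
	const unsigned int k = 1000;	/* precision factor, as in the patch */
	unsigned int norm = k * (val - RGB_MIN) / (RGB_MAX - RGB_MIN);

	/* Y = 64*X^2 + 32*X + 32, X = norm/k in [0, 1] */
	return 64 * norm * norm / (k * k) + 32 * norm / k + 32;
}

int main(void)
{
	/* Expect 0x20, 0x40, 0x80 for min, default and max control values. */
	printf("0x%02x 0x%02x 0x%02x\n",
	       ctrl_to_reg(0), ctrl_to_reg(100), ctrl_to_reg(200));
	return 0;
}
```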
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index fd1f8d2d3b0b..1220340e7602 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
}
}
-static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
+static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
{
int retry = 60;
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
return;
/* is i2c ready */
- reg_w(gspca_dev, 0x08, buffer, 8);
+ reg_w(gspca_dev, 0x08, buf, 8);
while (retry--) {
if (gspca_dev->usb_err < 0)
return;
- msleep(10);
+ msleep(1);
reg_r(gspca_dev, 0x08);
if (gspca_dev->usb_buf[0] & 0x04) {
if (gspca_dev->usb_buf[0] & 0x08) {
dev_err(gspca_dev->v4l2_dev.dev,
- "i2c write error\n");
+ "i2c error writing %02x %02x %02x %02x"
+ " %02x %02x %02x %02x\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7]);
gspca_dev->usb_err = -EIO;
}
return;
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
for (;;) {
if (gspca_dev->usb_err < 0)
return;
- reg_w(gspca_dev, 0x08, *buffer, 8);
+ i2c_w(gspca_dev, *buffer);
len -= 8;
if (len <= 0)
break;
@@ -1449,6 +1452,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
#endif
+ {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
{USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
{USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
{USB_DEVICE(0x0c45, 0x602a), SB(HV7131D, 102)},
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index 5a86047b846f..36307a9028a9 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
0,
gspca_dev->usb_buf, 8,
500);
+ msleep(2);
if (ret < 0) {
pr_err("i2c_w1 err %d\n", ret);
gspca_dev->usb_err = ret;
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index bab01c86c315..bcd2c04c770e 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -590,8 +590,7 @@ static const struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 304f43ef59eb..84dc26fe80ee 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -401,12 +401,14 @@ static int hdpvr_probe(struct usb_interface *interface,
client = hdpvr_register_ir_rx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
client = hdpvr_register_ir_tx_i2c(dev);
if (!client) {
v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ retval = -ENODEV;
goto reg_fail;
}
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 82e819fa91c0..031cf024304c 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -55,7 +55,7 @@ struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = "HD-PVR";
init_data->polling_interval = 405; /* ms, duplicated from Windows */
hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index fb828ba1dbbe..299751a8b06b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3563,9 +3563,9 @@ void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,
enum pvr2_v4l_type index,int v)
{
switch (index) {
- case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v;
- case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v;
- case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v;
+ case pvr2_v4l_type_video: hdw->v4l_minor_number_video = v;break;
+ case pvr2_v4l_type_vbi: hdw->v4l_minor_number_vbi = v;break;
+ case pvr2_v4l_type_radio: hdw->v4l_minor_number_radio = v;break;
default: break;
}
}
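
[Editor's note: the three added break statements fix a classic switch fall-through; before the patch, storing the video minor also overwrote the vbi and radio minors. A minimal standalone illustration of the broken behaviour follows, with a hypothetical struct and values chosen only to show the effect.]

#include <stdio.h>

enum kind { KIND_VIDEO, KIND_VBI, KIND_RADIO };

struct minors { int video, vbi, radio; };

/* Buggy variant: without break, a KIND_VIDEO store falls through and
 * overwrites all three fields, which is exactly what the patch fixes. */
static void store_buggy(struct minors *m, enum kind k, int v)
{
    switch (k) {
    case KIND_VIDEO: m->video = v;
    case KIND_VBI:   m->vbi = v;
    case KIND_RADIO: m->radio = v;
    default: break;
    }
}

int main(void)
{
    struct minors m = { 1, 2, 3 };

    store_buggy(&m, KIND_VIDEO, 9);
    /* Prints "9 9 9" instead of the intended "9 2 3" */
    printf("%d %d %d\n", m.video, m.vbi, m.radio);
    return 0;
}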
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 885ce11f222d..9ab596c78a4e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -581,7 +581,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
/* IR Receiver */
@@ -596,7 +596,7 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_TYPE_RC5;
+ init_data->type = RC_BIT_RC5;
init_data->name = hdw->hdw_desc->description;
/* IR Receiver */
info.addr = 0x71;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index db249cad3cd9..6930676051e7 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -196,7 +196,7 @@ static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
return ret;
}
-int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
+static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
@@ -365,7 +365,7 @@ static int pvr2_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
vt->audmode);
}
-int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+static int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
diff --git a/drivers/media/usb/pwc/pwc-ctrl.c b/drivers/media/usb/pwc/pwc-ctrl.c
index 1f506fde97d0..3a1618580ed6 100644
--- a/drivers/media/usb/pwc/pwc-ctrl.c
+++ b/drivers/media/usb/pwc/pwc-ctrl.c
@@ -179,6 +179,8 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
return -EINVAL;
if (frames < 4)
frames = 4;
+ else if (size > PSZ_QCIF && frames > 15)
+ frames = 15;
else if (frames > 25)
frames = 25;
frames = frames2frames[frames];
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 42e36bac4d72..5210239cbaee 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -155,7 +155,7 @@ static struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
-struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
+static struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
unsigned long flags = 0;
struct pwc_frame_buf *buf = NULL;
@@ -1000,7 +1000,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
pdev->vb_queue.ops = &pwc_vb_queue_ops;
pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- vb2_queue_init(&pdev->vb_queue);
+ rc = vb2_queue_init(&pdev->vb_queue);
+ if (rc < 0) {
+ PWC_ERROR("Oops, could not initialize vb2 queue.\n");
+ goto err_free_mem;
+ }
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 2191f6ddf9e7..8ebec0d7bf59 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1651,7 +1651,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
int is_ntsc = 0;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
- if (fe->index < 0 || fe->index >= NUM_FRAME_ENUMS)
+ if (fe->index >= NUM_FRAME_ENUMS)
return -EINVAL;
switch (fe->width) {
case 640:
diff --git a/drivers/media/usb/siano/Kconfig b/drivers/media/usb/siano/Kconfig
index 3c76e62d820d..5afbd9a4b55c 100644
--- a/drivers/media/usb/siano/Kconfig
+++ b/drivers/media/usb/siano/Kconfig
@@ -4,7 +4,8 @@
config SMS_USB_DRV
tristate "Siano SMS1xxx based MDTV receiver"
- depends on DVB_CORE && RC_CORE && HAS_DMA
+ depends on DVB_CORE && HAS_DMA
+ select MEDIA_COMMON_OPTIONS
---help---
Choose if you would like to have Siano's support for USB interface
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index aac622200e99..de2c10289eec 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -389,7 +389,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
return rc;
}
-static int __devinit smsusb_probe(struct usb_interface *intf,
+static int smsusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
diff --git a/drivers/media/usb/sn9c102/sn9c102_core.c b/drivers/media/usb/sn9c102/sn9c102_core.c
index 5bfc8e2f018f..73605864fffa 100644
--- a/drivers/media/usb/sn9c102/sn9c102_core.c
+++ b/drivers/media/usb/sn9c102/sn9c102_core.c
@@ -2481,11 +2481,13 @@ sn9c102_vidioc_enum_framesizes(struct sn9c102_device* cam, void __user * arg)
if (frmsize.pixel_format != V4L2_PIX_FMT_SN9C10X &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
case BRIDGE_SN9C105:
case BRIDGE_SN9C120:
if (frmsize.pixel_format != V4L2_PIX_FMT_JPEG &&
frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
return -EINVAL;
+ break;
}
frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 176ac937306b..850cf285ada8 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -116,7 +116,7 @@ static int stk1160_i2c_read_reg(struct stk1160 *dev, u8 addr,
if (rc < 0)
return rc;
- stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
+ rc = stk1160_read_reg(dev, STK1160_SBUSR_RD, value);
if (rc < 0)
return rc;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 8bdfb0275313..fa3671de02aa 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -475,7 +475,11 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
if (!dev->isoc_ctl.transfer_buffer[i]) {
stk1160_err("cannot alloc %d bytes for tx[%d] buffer\n",
sb_size, i);
- goto free_i_bufs;
+
+ /* Not enough transfer buffers, so just give up */
+ if (i < STK1160_MIN_BUFS)
+ goto free_i_bufs;
+ goto nomore_tx_bufs;
}
memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
@@ -506,13 +510,28 @@ int stk1160_alloc_isoc(struct stk1160 *dev)
}
}
- stk1160_dbg("urbs allocated\n");
+ stk1160_dbg("%d urbs allocated\n", num_bufs);
/* At last we can say we have some buffers */
dev->isoc_ctl.num_bufs = num_bufs;
return 0;
+nomore_tx_bufs:
+ /*
+ * Failed to allocate desired buffer count. However, we may have
+ * enough to work fine, so we just free the extra urb,
+ * store the allocated count and keep going, fingers crossed!
+ */
+ usb_free_urb(dev->isoc_ctl.urb[i]);
+ dev->isoc_ctl.urb[i] = NULL;
+
+ stk1160_warn("%d urbs allocated. Trying to continue...\n", i - 1);
+
+ dev->isoc_ctl.num_bufs = i - 1;
+
+ return 0;
+
free_i_bufs:
/* Save the allocated buffers so far, so we can properly free them */
dev->isoc_ctl.num_bufs = i+1;
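
[Editor's note: the new nomore_tx_bufs path lets the driver continue with fewer isochronous URBs as long as at least STK1160_MIN_BUFS were allocated, instead of failing outright. A driver-independent sketch of that allocation pattern follows; names are hypothetical and plain malloc stands in for the USB buffer allocation.]

#include <stdio.h>
#include <stdlib.h>

#define WANTED_BUFS 16
#define MIN_BUFS    1

/* Allocate up to WANTED_BUFS buffers; give up only if we cannot even
 * reach MIN_BUFS, otherwise continue with what we got. */
static int alloc_bufs(void *bufs[], size_t size, int *num_bufs)
{
    int i;

    for (i = 0; i < WANTED_BUFS; i++) {
        bufs[i] = malloc(size);
        if (!bufs[i]) {
            if (i < MIN_BUFS)
                goto free_all;        /* hard failure */
            fprintf(stderr, "only %d buffers, continuing\n", i);
            break;                    /* soft failure */
        }
    }
    *num_bufs = i;
    return 0;

free_all:
    while (i-- > 0)
        free(bufs[i]);
    *num_bufs = 0;
    return -1;
}

int main(void)
{
    void *bufs[WANTED_BUFS];
    int n = 0;

    if (alloc_bufs(bufs, 4096, &n) == 0)
        printf("working with %d buffers\n", n);
    while (n-- > 0)
        free(bufs[n]);
    return 0;
}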
diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 68c8707d36ab..05b05b160e1e 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -30,11 +30,12 @@
#define STK1160_VERSION "0.9.5"
#define STK1160_VERSION_NUM 0x000905
-/* TODO: Decide on number of packets for each buffer */
+/* Decide on number of packets for each buffer */
#define STK1160_NUM_PACKETS 64
/* Number of buffers for isoc transfers */
-#define STK1160_NUM_BUFS 16 /* TODO */
+#define STK1160_NUM_BUFS 16
+#define STK1160_MIN_BUFS 1
/* TODO: This endpoint address should be retrieved */
#define STK1160_EP_VIDEO 0x82
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 86a0fc56c330..5d3c032d733c 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -54,10 +54,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jaime Velasco Juan <jsagarribay@gmail.com> and Nicolas VIVIEN");
MODULE_DESCRIPTION("Syntek DC1125 webcam driver");
-
-/* bool for webcam LED management */
-int first_init = 1;
-
/* Some cameras have audio interfaces, we aren't interested in those */
static struct usb_device_id stkwebcam_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x174f, 0xa311, 0xff, 0xff, 0xff) },
@@ -554,6 +550,7 @@ static void stk_free_buffers(struct stk_camera *dev)
static int v4l_stk_open(struct file *fp)
{
+ static int first_init = 1; /* webcam LED management */
struct stk_camera *dev;
struct video_device *vdev;
diff --git a/drivers/media/usb/tlg2300/pd-dvb.c b/drivers/media/usb/tlg2300/pd-dvb.c
index 30fcb117e898..ca4994a5190c 100644
--- a/drivers/media/usb/tlg2300/pd-dvb.c
+++ b/drivers/media/usb/tlg2300/pd-dvb.c
@@ -1,6 +1,7 @@
#include "pd-common.h"
#include <linux/kernel.h>
#include <linux/usb.h>
+#include <linux/time.h>
#include <linux/dvb/dmx.h>
#include <linux/delay.h>
#include <linux/gfp.h>
diff --git a/drivers/media/usb/tlg2300/pd-video.c b/drivers/media/usb/tlg2300/pd-video.c
index 1f448ac7a496..3082bfa9b2c5 100644
--- a/drivers/media/usb/tlg2300/pd-video.c
+++ b/drivers/media/usb/tlg2300/pd-video.c
@@ -888,7 +888,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *in)
{
struct front_face *front = fh;
- if (in->index < 0 || in->index >= POSEIDON_INPUTS)
+ if (in->index >= POSEIDON_INPUTS)
return -EINVAL;
strcpy(in->name, pd_inputs[in->index].name);
in->type = V4L2_INPUT_TYPE_TUNER;
@@ -923,7 +923,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
struct poseidon *pd = front->pd;
s32 ret, cmd_status;
- if (i < 0 || i >= POSEIDON_INPUTS)
+ if (i >= POSEIDON_INPUTS)
return -EINVAL;
ret = send_set_req(pd, SGNL_SRC_SEL,
pd_inputs[i].tlg_src, &cmd_status);
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index dffbd4bd47b1..8a6bbf1d80e1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -109,12 +109,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
*/
switch (ir->rc_type) {
- case RC_TYPE_NEC:
+ case RC_BIT_NEC:
leader = 900; /* ms */
pulse = 700; /* ms - the actual value would be 562 */
break;
default:
- case RC_TYPE_RC5:
+ case RC_BIT_RC5:
leader = 900; /* ms - from the NEC decoding */
pulse = 1780; /* ms - The actual value would be 1776 */
break;
@@ -122,12 +122,12 @@ static int tm6000_ir_config(struct tm6000_IR *ir)
pulse = ir_clock_mhz * pulse;
leader = ir_clock_mhz * leader;
- if (ir->rc_type == RC_TYPE_NEC)
+ if (ir->rc_type == RC_BIT_NEC)
leader = leader | 0x8000;
dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n",
__func__,
- (ir->rc_type == RC_TYPE_NEC) ? "NEC" : "RC-5",
+ (ir->rc_type == RC_BIT_NEC) ? "NEC" : "RC-5",
ir_clock_mhz, leader, pulse);
/* Remote WAKEUP = enable, normal mode, from IR decoder output */
@@ -297,7 +297,7 @@ static void tm6000_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
+static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
{
struct tm6000_IR *ir = rc->priv;
@@ -306,10 +306,10 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
dprintk(2, "%s\n",__func__);
- if ((rc->rc_map.scan) && (rc_type == RC_TYPE_NEC))
+ if ((rc->rc_map.scan) && (*rc_type == RC_BIT_NEC))
ir->key_addr = ((rc->rc_map.scan[0].scancode >> 8) & 0xffff);
- ir->rc_type = rc_type;
+ ir->rc_type = *rc_type;
tm6000_ir_config(ir);
/* TODO */
@@ -398,6 +398,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
struct tm6000_IR *ir;
struct rc_dev *rc;
int err = -ENOMEM;
+ u64 rc_type;
if (!enable_ir)
return -ENODEV;
@@ -421,7 +422,7 @@ int tm6000_ir_init(struct tm6000_core *dev)
ir->rc = rc;
/* input setup */
- rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ rc->allowed_protos = RC_BIT_RC5 | RC_BIT_NEC;
/* Needed, in order to support NEC remotes with 24 or 32 bits */
rc->scanmask = 0xffff;
rc->priv = ir;
@@ -444,7 +445,8 @@ int tm6000_ir_init(struct tm6000_core *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- tm6000_ir_change_protocol(rc, RC_TYPE_UNKNOWN);
+ rc_type = RC_BIT_UNKNOWN;
+ tm6000_ir_change_protocol(rc, &rc_type);
rc->input_name = ir->name;
rc->input_phys = ir->phys;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 4342cd4f5c8a..f656fd7a39a2 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1802,6 +1802,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
if (!dev->radio_dev) {
printk(KERN_INFO "%s: can't register radio device\n",
dev->name);
+ ret = -ENXIO;
return ret; /* FIXME release resource */
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 5c36a57e6590..ad7f7448072e 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1363,7 +1363,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
}
/* register video4linux devices */
-static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
+static int usbvision_register_video(struct usb_usbvision *usbvision)
{
/* Video Device: */
usbvision->vdev = usbvision_vdev_init(usbvision,
@@ -1510,8 +1510,8 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
* if it looks like USBVISION video device
*
*/
-static int __devinit usbvision_probe(struct usb_interface *intf,
- const struct usb_device_id *devid)
+static int usbvision_probe(struct usb_interface *intf,
+ const struct usb_device_id *devid)
{
struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf));
struct usb_interface *uif;
@@ -1619,7 +1619,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
* with no ill consequences.
*
*/
-static void __devexit usbvision_disconnect(struct usb_interface *intf)
+static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
@@ -1664,7 +1664,7 @@ static struct usb_driver usbvision_driver = {
.name = "usbvision",
.id_table = usbvision_table,
.probe = usbvision_probe,
- .disconnect = __devexit_p(usbvision_disconnect),
+ .disconnect = usbvision_disconnect,
};
/*
diff --git a/drivers/media/usb/usbvision/usbvision.h b/drivers/media/usb/usbvision/usbvision.h
index 43cf61fe4943..8a25876d72c6 100644
--- a/drivers/media/usb/usbvision/usbvision.h
+++ b/drivers/media/usb/usbvision/usbvision.h
@@ -167,7 +167,7 @@ enum {
/* This macro restricts an int variable to an inclusive range */
#define RESTRICT_TO_RANGE(v, mi, ma) \
- { if ((v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
+ { if (((int)v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); }
/*
* We use macros to do YUV -> RGB conversion because this is
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index f7061a5ef1d2..d5baab17a5ef 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -927,7 +927,7 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
int ret;
if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
- return -EINVAL;
+ return -EACCES;
if (!ctrl->loaded) {
ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0)
+ if (ctrl == NULL)
return -EINVAL;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ return -EACCES;
/* Clamp out of range values. */
switch (mapping->v4l2_type) {
@@ -1452,8 +1454,12 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
if (step == 0)
step = 1;
- xctrl->value = min + (xctrl->value - min + step/2) / step * step;
- xctrl->value = clamp(xctrl->value, min, max);
+ xctrl->value = min + ((u32)(xctrl->value - min) + step / 2)
+ / step * step;
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
+ xctrl->value = clamp(xctrl->value, min, max);
+ else
+ xctrl->value = clamp_t(u32, xctrl->value, min, max);
value = xctrl->value;
break;
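
[Editor's note: the clamp change matters because clamp() compares in the (signed) type of its arguments; for controls whose data type is unsigned, limits stored in s32 fields can look negative and a signed comparison then mangles perfectly valid values. A standalone sketch of the difference follows, using simplified stand-ins for clamp() and clamp_t(); the limits are hypothetical.]

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins: the kernel macros compare in the type of their
 * arguments; clamp_t(u32, ...) forces an unsigned comparison. */
static int32_t clamp_s32(int32_t v, int32_t lo, int32_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    /* Hypothetical unsigned control: min = 0, max = 0xC0000000.
     * Stored in s32 fields the max is negative, so a signed clamp
     * rejects an in-range value while the unsigned clamp keeps it. */
    int32_t min = 0, max = (int32_t)0xC0000000, val = (int32_t)0x80000000;

    printf("signed   clamp: 0x%08x\n", (unsigned)clamp_s32(val, min, max));
    printf("unsigned clamp: 0x%08x\n", clamp_u32((uint32_t)val,
                                                 (uint32_t)min,
                                                 (uint32_t)max));
    return 0;
}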
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5967081747ce..5dbefa68b1d2 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1562,6 +1562,9 @@ static int uvc_scan_device(struct uvc_device *dev)
INIT_LIST_HEAD(&chain->entities);
mutex_init(&chain->ctrl_mutex);
chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
kfree(chain);
@@ -1722,6 +1725,8 @@ static int uvc_register_video(struct uvc_device *dev,
vdev->v4l2_dev = &dev->vdev;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
+ vdev->prio = &stream->chain->prio;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags);
if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vdev->vfl_dir = VFL_DIR_TX;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1741,6 +1746,11 @@ static int uvc_register_video(struct uvc_device *dev,
return ret;
}
+ if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ stream->chain->caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else
+ stream->chain->caps |= V4L2_CAP_VIDEO_OUTPUT;
+
atomic_inc(&dev->nstreams);
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 29e239911d0e..dc56a59ecadc 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -93,6 +93,8 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
} else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ if (entity->flags & UVC_ENTITY_FLAG_DEFAULT)
+ entity->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
} else
ret = 0;
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 18a91fae6bc1..778addc5caff 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -128,7 +128,7 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
int ret;
queue->queue.type = type;
- queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
+ queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue->queue.drv_priv = queue;
queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue->queue.ops = &uvc_queue_qops;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f00db3060e0e..68d59b527492 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -165,17 +165,18 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fcc[0], fcc[1], fcc[2], fcc[3],
fmt->fmt.pix.width, fmt->fmt.pix.height);
- /* Check if the hardware supports the requested format. */
+ /* Check if the hardware supports the requested format, use the default
+ * format otherwise.
+ */
for (i = 0; i < stream->nformats; ++i) {
format = &stream->format[i];
if (format->fcc == fmt->fmt.pix.pixelformat)
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
- uvc_trace(UVC_TRACE_FORMAT, "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
- return -EINVAL;
+ if (i == stream->nformats) {
+ format = stream->def_format;
+ fmt->fmt.pix.pixelformat = format->fcc;
}
/* Find the closest image size. The distance between image sizes is
@@ -564,15 +565,30 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
usb_make_path(stream->dev->udev,
cap->bus_info, sizeof(cap->bus_info));
cap->version = LINUX_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | chain->caps;
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
break;
}
+ /* Priority */
+ case VIDIOC_G_PRIORITY:
+ *(u32 *)arg = v4l2_prio_max(vdev->prio);
+ break;
+
+ case VIDIOC_S_PRIORITY:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_prio_change(vdev->prio, &handle->vfh.prio,
+ *(u32 *)arg);
+
/* Get, Set & Query control */
case VIDIOC_QUERYCTRL:
return uvc_query_v4l2_ctrl(chain, arg);
@@ -601,6 +617,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
struct v4l2_control *ctrl = arg;
struct v4l2_ext_control xctrl;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
memset(&xctrl, 0, sizeof xctrl);
xctrl.id = ctrl->id;
xctrl.value = ctrl->value;
@@ -647,6 +667,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_EXT_CTRLS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+ /* Fall through */
case VIDIOC_TRY_EXT_CTRLS:
{
struct v4l2_ext_controls *ctrls = arg;
@@ -661,7 +685,8 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = i;
+ ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
+ ? ctrls->count : i;
return ret;
}
}
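
[Editor's note: after this change a failing VIDIOC_S_EXT_CTRLS reports error_idx equal to count (the request was rolled back, no single control is singled out), while VIDIOC_TRY_EXT_CTRLS still reports the index of the offending control. A hedged userspace sketch of telling the two outcomes apart follows; the device path, control id and value are just examples.]

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
    struct v4l2_ext_control ctrl = {
        .id = V4L2_CID_BRIGHTNESS,
        .value = 128,
    };
    struct v4l2_ext_controls ctrls = {
        .ctrl_class = V4L2_CTRL_CLASS_USER,
        .count = 1,
        .controls = &ctrl,
    };
    int fd = open("/dev/video0", O_RDWR);

    if (fd < 0)
        return 1;

    if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
        if (ctrls.error_idx == ctrls.count)
            /* Failure while applying: state was rolled back,
             * no single control index to blame. */
            fprintf(stderr, "set failed in hardware: %s\n",
                    strerror(errno));
        else
            fprintf(stderr, "control %u rejected: %s\n",
                    ctrls.error_idx, strerror(errno));
    }
    close(fd);
    return 0;
}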
@@ -739,6 +764,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
u32 input = *(u32 *)arg + 1;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -792,6 +821,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
}
case VIDIOC_S_FMT:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -894,6 +927,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_v4l2_get_streamparm(stream, arg);
case VIDIOC_S_PARM:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -924,10 +961,14 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_CROP:
case VIDIOC_S_CROP:
- return -EINVAL;
+ return -ENOTTY;
/* Buffers & streaming */
case VIDIOC_REQBUFS:
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
@@ -973,6 +1014,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -991,6 +1036,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (*type != stream->type)
return -EINVAL;
+ ret = v4l2_prio_check(vdev->prio, handle->vfh.prio);
+ if (ret < 0)
+ return ret;
+
if (!uvc_has_privileges(handle))
return -EBUSY;
@@ -1030,7 +1079,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_ENUMOUTPUT:
uvc_trace(UVC_TRACE_IOCTL, "Unsupported ioctl 0x%08x\n", cmd);
- return -EINVAL;
+ return -ENOTTY;
case UVCIOC_CTRL_MAP:
return uvc_ioctl_ctrl_map(chain, arg);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 57c3076a4625..3394c3432011 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1812,6 +1812,7 @@ int uvc_video_init(struct uvc_streaming *stream)
probe->bFormatIndex = format->index;
probe->bFrameIndex = frame->bFrameIndex;
+ stream->def_format = format;
stream->cur_format = format;
stream->cur_frame = frame;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index af216ec45e39..af505fdd9b3f 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -225,10 +225,14 @@ struct uvc_format_desc {
* always be accessed with the UVC_ENTITY_* macros and never directly.
*/
+#define UVC_ENTITY_FLAG_DEFAULT (1 << 0)
+
struct uvc_entity {
struct list_head list; /* Entity as part of a UVC device. */
struct list_head chain; /* Entity as part of a video device
* chain. */
+ unsigned int flags;
+
__u8 id;
__u16 type;
char name[64];
@@ -371,6 +375,9 @@ struct uvc_video_chain {
struct uvc_entity *selector; /* Selector unit */
struct mutex ctrl_mutex; /* Protects ctrl.info */
+
+ struct v4l2_prio_state prio; /* V4L2 priority state */
+ u32 caps; /* V4L2 chain-wide caps */
};
struct uvc_stats_frame {
@@ -436,6 +443,7 @@ struct uvc_streaming {
struct uvc_format *format;
struct uvc_streaming_control ctrl;
+ struct uvc_format *def_format;
struct uvc_format *cur_format;
struct uvc_frame *cur_frame;
/* Protect access to ctrl, cur_format, cur_frame and hardware video
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 9afab35878b4..39edd4442932 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1007,8 +1007,7 @@ static void read_pipe_completion(struct urb *purb)
return;
}
- if (purb->actual_length < 0 ||
- purb->actual_length > pipe_info->transfer_size) {
+ if (purb->actual_length > pipe_info->transfer_size) {
dev_err(&cam->udev->dev, "wrong number of bytes\n");
return;
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 0c54e19d9944..65875c3aba1b 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -59,6 +59,7 @@ config VIDEOBUF_DVB
# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
tristate
config VIDEOBUF2_MEMOPS
@@ -68,11 +69,13 @@ config VIDEOBUF2_DMA_CONTIG
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_VMALLOC
tristate
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
config VIDEOBUF2_DMA_SG
tristate
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index f995dd31151d..380ddd89fa4c 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -837,7 +837,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
struct v4l2_dv_timings *fmt)
{
int pix_clk;
- int v_fp, v_bp, h_fp, h_bp, hsync;
+ int v_fp, v_bp, h_fp, hsync;
int frame_width, image_height, image_width;
bool default_gtf;
int h_blank;
@@ -885,7 +885,6 @@ bool v4l2_detect_gtf(unsigned frame_height,
hsync = hsync - hsync % GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
- h_bp = h_blank / 2;
fmt->bt.polarities = polarities;
fmt->bt.width = image_width;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 83ffb6436baf..7157af301b14 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -297,6 +297,7 @@ struct v4l2_plane32 {
union {
__u32 mem_offset;
compat_long_t userptr;
+ __s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
@@ -318,6 +319,7 @@ struct v4l2_buffer32 {
__u32 offset;
compat_long_t userptr;
compat_caddr_t planes;
+ __s32 fd;
} m;
__u32 length;
__u32 reserved2;
@@ -341,6 +343,9 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
up_pln = compat_ptr(p);
if (put_user((unsigned long)up_pln, &up->m.userptr))
return -EFAULT;
+ } else if (memory == V4L2_MEMORY_DMABUF) {
+ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
+ return -EFAULT;
} else {
if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
sizeof(__u32)))
@@ -364,6 +369,11 @@ static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
sizeof(__u32)))
return -EFAULT;
+ /* For DMABUF, driver might've set up the fd, so copy it back. */
+ if (memory == V4L2_MEMORY_DMABUF)
+ if (copy_in_user(&up32->m.fd, &up->m.fd,
+ sizeof(int)))
+ return -EFAULT;
return 0;
}
@@ -446,6 +456,10 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (get_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -510,6 +524,10 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
+ case V4L2_MEMORY_DMABUF:
+ if (put_user(kp->m.fd, &up->m.fd))
+ return -EFAULT;
+ break;
}
}
@@ -1000,6 +1018,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_FBUF32:
case VIDIOC_OVERLAY32:
case VIDIOC_QBUF32:
+ case VIDIOC_EXPBUF:
case VIDIOC_DQBUF32:
case VIDIOC_STREAMON32:
case VIDIOC_STREAMOFF32:
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index a2df842e5100..98dcad9c8a3b 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -571,6 +571,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 18a040b935a3..c72009218152 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 9e3fc040ea20..e57c002b4150 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
- * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8f388ff31ebb..aa6e7c788db2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ static const char *v4l2_memory_names[] = {
[V4L2_MEMORY_MMAP] = "mmap",
[V4L2_MEMORY_USERPTR] = "userptr",
[V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
};
#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
@@ -453,6 +454,15 @@ static void v4l_print_buffer(const void *arg, bool write_only)
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
@@ -1960,6 +1970,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_EXPBUF, vidioc_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 3ac83583ad7a..438ea45d1074 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -369,6 +369,19 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+/**
* v4l2_m2m_streamon() - turn on streaming for a video queue
*/
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -510,12 +523,10 @@ struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
- if (!m2m_ops)
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
+ WARN_ON(!m2m_ops->job_abort))
return ERR_PTR(-EINVAL);
- BUG_ON(!m2m_ops->device_run);
- BUG_ON(!m2m_ops->job_abort);
-
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index dced41c1d993..996c248dea42 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,20 +412,20 @@ static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{
- switch (media_entity_type(pad->entity)) {
- case MEDIA_ENT_T_V4L2_SUBDEV:
+ if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt->pad = pad->index;
- return v4l2_subdev_call(media_entity_to_v4l2_subdev(
- pad->entity),
- pad, get_fmt, NULL, fmt);
- default:
- WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
- media_entity_type(pad->entity), pad->entity->name);
- /* Fall through */
- case MEDIA_ENT_T_DEVNODE_V4L:
- return -EINVAL;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}
+
+ WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->type, pad->entity->name);
+
+ return -EINVAL;
}
int v4l2_subdev_link_validate(struct media_link *link)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index bf7a326b1cdc..5449e8aa984a 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -335,6 +335,9 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case V4L2_MEMORY_OVERLAY:
b->m.offset = vb->boff;
break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in videobuf framework */
+ break;
}
b->flags = 0;
@@ -405,6 +408,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
break;
case V4L2_MEMORY_USERPTR:
case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
/* nothing */
break;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 432df119af27..e02c4797b1c6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -109,6 +109,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
}
/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_memop(q, unmap_dmabuf, p->mem_priv);
+
+ call_memop(q, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -230,6 +260,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
/* Free MMAP buffers or release USERPTR buffers */
if (q->memory == V4L2_MEMORY_MMAP)
__vb2_buf_mem_free(vb);
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
else
__vb2_buf_userptr_put(vb);
}
@@ -362,6 +394,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
b->m.offset = vb->v4l2_planes[0].m.mem_offset;
else if (q->memory == V4L2_MEMORY_USERPTR)
b->m.userptr = vb->v4l2_planes[0].m.userptr;
+ else if (q->memory == V4L2_MEMORY_DMABUF)
+ b->m.fd = vb->v4l2_planes[0].m.fd;
}
/*
@@ -454,13 +488,28 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
* __verify_memory_type() - Check whether the memory type and buffer type
* passed to a buffer operation are compatible with the queue.
*/
static int __verify_memory_type(struct vb2_queue *q,
enum v4l2_memory memory, enum v4l2_buf_type type)
{
- if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
+ memory != V4L2_MEMORY_DMABUF) {
dprintk(1, "reqbufs: unsupported memory type\n");
return -EINVAL;
}
@@ -484,6 +533,11 @@ static int __verify_memory_type(struct vb2_queue *q,
return -EINVAL;
}
+ if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
/*
* Place the busy tests at the end: -EBUSY can be ignored when
* create_bufs is called with count == 0, but count == 0 should still
@@ -790,6 +844,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
+ unsigned int plane;
if (vb->state != VB2_BUF_STATE_ACTIVE)
return;
@@ -800,6 +855,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
dprintk(4, "Done processing on buffer %d, state: %d\n",
vb->v4l2_buf.index, vb->state);
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, finish, vb->planes[plane].mem_priv);
+
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
@@ -845,6 +904,16 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
b->m.planes[plane].length;
}
}
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ v4l2_planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ v4l2_planes[plane].length =
+ b->m.planes[plane].length;
+ v4l2_planes[plane].data_offset =
+ b->m.planes[plane].data_offset;
+ }
+ }
} else {
/*
* Single-planar buffers do not use planes array,
@@ -852,13 +921,22 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
* In videobuf we use our internal V4l2_planes struct for
* single-planar buffers as well, for simplicity.
*/
- if (V4L2_TYPE_IS_OUTPUT(b->type))
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
v4l2_planes[0].bytesused = b->bytesused;
+ v4l2_planes[0].data_offset = 0;
+ }
if (b->memory == V4L2_MEMORY_USERPTR) {
v4l2_planes[0].m.userptr = b->m.userptr;
v4l2_planes[0].length = b->length;
}
+
+ if (b->memory == V4L2_MEMORY_DMABUF) {
+ v4l2_planes[0].m.fd = b->m.fd;
+ v4l2_planes[0].length = b->length;
+ v4l2_planes[0].data_offset = 0;
+ }
+
}
vb->v4l2_buf.field = b->field;
@@ -959,14 +1037,121 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret;
+ int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+ /* Verify and copy relevant information provided by the userspace */
+ __fill_vb2_buffer(vb, b, planes);
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < planes[plane].data_offset +
+ q->plane_sizes[plane]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->v4l2_planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
+
+ /* Acquire each plane's memory */
+ mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ dbuf, planes[plane].length, write);
+ if (IS_ERR(mem_priv)) {
+ dprintk(1, "qbuf: failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
+ * really we want to do this just before the DMA, not while queueing
+ * the buffer(s)..
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_qop(q, buf_init, vb);
+ if (ret) {
+ dprintk(1, "qbuf: buffer initialization failed\n");
+ goto err;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane] = planes[plane];
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
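
[Editor's note: from userspace the import path added here is driven by requesting V4L2_MEMORY_DMABUF buffers and queueing one dmabuf fd per buffer (or per plane in the multi-planar case). A minimal single-planar sketch follows; the dmabuf fd is assumed to come from another device or allocator, e.g. via VIDIOC_EXPBUF.]

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue one imported dmabuf on a single-planar capture queue.
 * 'vid_fd' is the video device, 'dmabuf_fd' the fd to import. */
static int import_one_dmabuf(int vid_fd, int dmabuf_fd, unsigned int index)
{
    struct v4l2_requestbuffers req;
    struct v4l2_buffer buf;

    /* Normally done once at setup time */
    memset(&req, 0, sizeof(req));
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_DMABUF;
    if (ioctl(vid_fd, VIDIOC_REQBUFS, &req) < 0)
        return -1;

    memset(&buf, 0, sizeof(buf));
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_DMABUF;
    buf.index = index;
    buf.m.fd = dmabuf_fd;    /* length left 0: vb2 uses the dmabuf size */
    return ioctl(vid_fd, VIDIOC_QBUF, &buf);
}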
+
+/**
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->queued_count);
+
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_memop(q, prepare, vb->planes[plane].mem_priv);
+
q->ops->buf_queue(vb);
}
@@ -982,6 +1167,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
case V4L2_MEMORY_USERPTR:
ret = __qbuf_userptr(vb, b);
break;
+ case V4L2_MEMORY_DMABUF:
+ ret = __qbuf_dmabuf(vb, b);
+ break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
@@ -1303,6 +1491,30 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int i;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ /* unmap DMABUF buffer */
+ if (q->memory == V4L2_MEMORY_DMABUF)
+ for (i = 0; i < vb->num_planes; ++i) {
+ if (!vb->planes[i].dbuf_mapped)
+ continue;
+ call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ vb->planes[i].dbuf_mapped = 0;
+ }
+}
+
+/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
* @q: videobuf2 queue
* @b: buffer structure passed from userspace to vidioc_dqbuf handler
@@ -1363,11 +1575,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
dprintk(1, "dqbuf of buffer %d, with state %d\n",
vb->v4l2_buf.index, vb->state);
- vb->state = VB2_BUF_STATE_DEQUEUED;
return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1406,7 +1619,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Reinitialize all buffers for next use.
*/
for (i = 0; i < q->num_buffers; ++i)
- q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+ __vb2_dqbuf(q->bufs[i]);
}
/**
@@ -1540,6 +1753,79 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
}
/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to vidioc_expbuf
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != V4L2_MEMORY_MMAP) {
+ dprintk(1, "Queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(1, "Queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (eb->flags & ~O_CLOEXEC) {
+ dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ return -EINVAL;
+ }
+
+ if (eb->type != q->type) {
+ dprintk(1, "qbuf: invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (eb->index >= q->num_buffers) {
+ dprintk(1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[eb->index];
+
+ if (eb->plane >= vb->num_planes) {
+ dprintk(1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ vb_plane = &vb->planes[eb->plane];
+
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(1, "Failed to export buffer %d, plane %d\n",
+ eb->index, eb->plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, eb->flags);
+ if (ret < 0) {
+ dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+ eb->index, eb->plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
+ eb->index, eb->plane, ret);
+ eb->fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
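
[Editor's note: the exporting side is driven by the new VIDIOC_EXPBUF ioctl on a queue set up with V4L2_MEMORY_MMAP. A minimal userspace sketch matching the checks vb2_expbuf() performs above follows; single-planar capture, plane 0, fd assumed open.]

#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export plane 0 of MMAP buffer 'index' as a dmabuf fd, or -1 on error.
 * The queue must already be set up with VIDIOC_REQBUFS(V4L2_MEMORY_MMAP). */
static int export_buffer(int vid_fd, unsigned int index)
{
    struct v4l2_exportbuffer expbuf;

    memset(&expbuf, 0, sizeof(expbuf));
    expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    expbuf.index = index;
    expbuf.plane = 0;
    expbuf.flags = O_CLOEXEC;    /* the only flag vb2 accepts here */

    if (ioctl(vid_fd, VIDIOC_EXPBUF, &expbuf) < 0)
        return -1;

    return expbuf.fd;    /* ready to pass to another device or driver */
}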
+
+/**
* vb2_mmap() - map video buffers into application address space
* @q: videobuf2 queue
* @vma: vma passed to the mmap file operation handler in the driver
@@ -2245,6 +2531,16 @@ int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
+
/* v4l2_file_operations helpers */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 4b7132660a93..10beaee7f0ae 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -10,7 +10,10 @@
* the Free Software Foundation.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -23,40 +26,158 @@ struct vb2_dc_conf {
};
struct vb2_dc_buf {
- struct vb2_dc_conf *conf;
+ struct device *dev;
void *vaddr;
- dma_addr_t dma_addr;
unsigned long size;
- struct vm_area_struct *vma;
- atomic_t refcount;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+
+ /* MMAP related */
struct vb2_vmarea_handler handler;
+ atomic_t refcount;
+ struct sg_table *sgt_base;
+
+ /* USERPTR related */
+ struct vm_area_struct *vma;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
};
-static void vb2_dma_contig_put(void *buf_priv);
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+
+static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
+ void (*cb)(struct page *pg))
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ struct page *page = sg_page(s);
+ unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
+ >> PAGE_SHIFT;
+ unsigned int j;
+
+ for (j = 0; j < n_pages; ++j, ++page)
+ cb(page);
+ }
+}
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
+
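
[Editor's note: vb2_dc_get_contiguous_size() stops at the first gap in the mapped scatterlist, so only the leading contiguous run is usable as a DMA-contig buffer. A standalone sketch of the same walk follows, with made-up segment addresses.]

#include <stdio.h>

struct seg { unsigned long dma_addr, len; };

/* Same idea as vb2_dc_get_contiguous_size(): sum segment lengths until
 * the first segment that does not start where the previous one ended. */
static unsigned long contiguous_size(const struct seg *s, int n)
{
    unsigned long expected = s[0].dma_addr, size = 0;
    int i;

    for (i = 0; i < n; i++) {
        if (s[i].dma_addr != expected)
            break;
        expected = s[i].dma_addr + s[i].len;
        size += s[i].len;
    }
    return size;
}

int main(void)
{
    /* Two contiguous 4 KiB pages, then a gap: only 8 KiB counts. */
    struct seg segs[] = {
        { 0x10000000, 0x1000 },
        { 0x10001000, 0x1000 },
        { 0x10003000, 0x1000 },
    };

    printf("contiguous: %lu bytes\n", contiguous_size(segs, 3));
    return 0;
}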
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+static void *vb2_dc_vaddr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return atomic_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* DMABUF exporter will flush the cache for us */
+ if (!sgt || buf->db_attach)
+ return;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!atomic_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ put_device(buf->dev);
+ kfree(buf);
+}
-static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
struct vb2_dc_conf *conf = alloc_ctx;
+ struct device *dev = conf->dev;
struct vb2_dc_buf *buf;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(conf->dev, size, &buf->dma_addr,
- GFP_KERNEL);
+ /* align image size to PAGE_SIZE */
+ size = PAGE_ALIGN(size);
+
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
if (!buf->vaddr) {
- dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
- size);
+ dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- buf->conf = conf;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
buf->size = size;
buf->handler.refcount = &buf->refcount;
- buf->handler.put = vb2_dma_contig_put;
+ buf->handler.put = vb2_dc_put;
buf->handler.arg = buf;
atomic_inc(&buf->refcount);
@@ -64,100 +185,569 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
return buf;
}
-static void vb2_dma_contig_put(void *buf_priv)
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
struct vb2_dc_buf *buf = buf_priv;
+ int ret;
- if (atomic_dec_and_test(&buf->refcount)) {
- dma_free_coherent(buf->conf->dev, buf->size, buf->vaddr,
- buf->dma_addr);
- kfree(buf);
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ /*
+ * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
+ * map the whole buffer
+ */
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
+ buf->dma_addr, buf->size);
+
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
}
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
}
-static void *vb2_dma_contig_cookie(void *buf_priv)
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
- return &buf->dma_addr;
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
}
-static void *vb2_dma_contig_vaddr(void *buf_priv)
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
{
- struct vb2_dc_buf *buf = buf_priv;
- if (!buf)
- return NULL;
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dir == dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dir);
+ attach->dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dir = dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_dc_buf *buf = dbuf->priv;
return buf->vaddr;
}
-static unsigned int vb2_dma_contig_num_users(void *buf_priv)
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
{
- struct vb2_dc_buf *buf = buf_priv;
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
- return atomic_read(&buf->refcount);
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .kmap = vb2_dc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+ buf->size);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
}
-static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
- if (!buf) {
- printk(KERN_ERR "No buffer to map\n");
- return -EINVAL;
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
+ int n_pages, struct vm_area_struct *vma, int write)
+{
+ if (vma_is_io(vma)) {
+ unsigned int i;
+
+ for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
+ unsigned long pfn;
+ int ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ pages[i] = pfn_to_page(pfn);
+ }
+ } else {
+ int n;
+
+ n = get_user_pages(current, current->mm, start & PAGE_MASK,
+ n_pages, write, 1, pages, NULL);
+ /* negative error means that no page was pinned */
+ n = max(n, 0);
+ if (n != n_pages) {
+ pr_err("got only %d of %d user pages\n", n, n_pages);
+ while (n)
+ put_page(pages[--n]);
+ return -EFAULT;
+ }
}
- return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
- &vb2_common_vm_ops, &buf->handler);
+ return 0;
}
-static void *vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+static void vb2_dc_put_dirty_page(struct page *page)
{
+ set_page_dirty_lock(page);
+ put_page(page);
+}
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+ vb2_put_vma(buf->vma);
+ kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
+ unsigned long start;
+ unsigned long end;
+ unsigned long offset;
+ struct page **pages;
+ int n_pages;
+ int ret = 0;
struct vm_area_struct *vma;
- dma_addr_t dma_addr = 0;
- int ret;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+
+ /* Only cache aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- ret = vb2_get_contig_userptr(vaddr, size, &vma, &dma_addr);
+ buf->dev = conf->dev;
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ start = vaddr & PAGE_MASK;
+ offset = vaddr & ~PAGE_MASK;
+ end = PAGE_ALIGN(vaddr + size);
+ n_pages = (end - start) >> PAGE_SHIFT;
+
+ pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ pr_err("failed to allocate pages table\n");
+ goto fail_buf;
+ }
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ pr_err("no vma for address %lu\n", vaddr);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
+ ret = -EFAULT;
+ goto fail_pages;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ pr_err("failed to copy vma\n");
+ ret = -ENOMEM;
+ goto fail_pages;
+ }
+
+ /* extract page list from userspace mapping */
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
- printk(KERN_ERR "Failed acquiring VMA for vaddr 0x%08lx\n",
- vaddr);
- kfree(buf);
- return ERR_PTR(ret);
+ pr_err("failed to get user pages\n");
+ goto fail_vma;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_get_user_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
}
+ /* pages are no longer needed */
+ kfree(pages);
+ pages = NULL;
+
+ sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
+ if (sgt->nents <= 0) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
buf->size = size;
- buf->dma_addr = dma_addr;
- buf->vma = vma;
+ buf->dma_sgt = sgt;
return buf;
+
+fail_map_sg:
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+
+fail_sgt_init:
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, put_page);
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_get_user_pages:
+ if (pages && !vma_is_io(buf->vma))
+ while (n_pages)
+ put_page(pages[--n_pages]);
+
+fail_vma:
+ vb2_put_vma(buf->vma);
+
+fail_pages:
+ kfree(pages); /* kfree is NULL-proof */
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
}
-static void vb2_dma_contig_put_userptr(void *mem_priv)
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
- if (!buf)
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR_OR_NULL(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* check if the dmabuf is big enough to store a contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu b\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
return;
+ }
- vb2_put_vma(buf->vma);
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
kfree(buf);
}
+static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_dc_conf *conf = alloc_ctx;
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = conf->dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
const struct vb2_mem_ops vb2_dma_contig_memops = {
- .alloc = vb2_dma_contig_alloc,
- .put = vb2_dma_contig_put,
- .cookie = vb2_dma_contig_cookie,
- .vaddr = vb2_dma_contig_vaddr,
- .mmap = vb2_dma_contig_mmap,
- .get_userptr = vb2_dma_contig_get_userptr,
- .put_userptr = vb2_dma_contig_put_userptr,
- .num_users = vb2_dma_contig_num_users,
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
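For orientation, a minimal sketch (not part of the patch) of how a capture driver might opt into the DMABUF paths added above by selecting these memops at queue-setup time. The my_dev, my_init_queue and my_qops names are hypothetical; only struct vb2_queue, the VB2_DMABUF io_mode and vb2_dma_contig_memops come from the videobuf2 API touched by this series.

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

struct my_dev {					/* hypothetical driver state */
	struct vb2_queue queue;
	void *alloc_ctx;			/* e.g. from vb2_dma_contig_init_ctx() */
};

static int my_init_queue(struct my_dev *dev, const struct vb2_ops *my_qops)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* VB2_DMABUF makes vb2 call the attach/map_dmabuf ops added above */
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct vb2_buffer);
	q->ops = my_qops;			/* driver's queue_setup/buf_queue/... */
	q->mem_ops = &vb2_dma_contig_memops;

	return vb2_queue_init(q);
}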
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 051ea3571b20..81c1ad8b2cf1 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -137,46 +137,6 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
/**
- * vb2_mmap_pfn_range() - map physical pages to userspace
- * @vma: virtual memory region for the mapping
- * @paddr: starting physical address of the memory to be mapped
- * @size: size of the memory to be mapped
- * @vm_ops: vm operations to be assigned to the created area
- * @priv: private data to be associated with the area
- *
- * Returns 0 on success.
- */
-int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
- unsigned long size,
- const struct vm_operations_struct *vm_ops,
- void *priv)
-{
- int ret;
-
- size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (ret) {
- printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
- return ret;
- }
-
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = priv;
- vma->vm_ops = vm_ops;
-
- vma->vm_ops->open(vma);
-
- pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
- __func__, paddr, vma->vm_start, size);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);
-
-/**
* vb2_common_vm_open() - increase refcount of the vma
* @vma: virtual memory region for the mapping
*
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 94efa04d8d55..a47fd4f589a1 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -30,6 +30,7 @@ struct vb2_vmalloc_buf {
unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
};
static void vb2_vmalloc_put(void *buf_priv);
@@ -207,11 +208,66 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
return 0;
}
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ buf->vaddr = dma_buf_vmap(buf->dbuf);
+
+ return buf->vaddr ? 0 : -EFAULT;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, buf->vaddr);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+ unsigned long size, int write)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->write = write;
+ buf->size = size;
+
+ return buf;
+}
+
+
const struct vb2_mem_ops vb2_vmalloc_memops = {
.alloc = vb2_vmalloc_alloc,
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
.vaddr = vb2_vmalloc_vaddr,
.mmap = vb2_vmalloc_mmap,
.num_users = vb2_vmalloc_num_users,
diff --git a/drivers/memory/tegra20-mc.c b/drivers/memory/tegra20-mc.c
index e6764bb41cb9..186f27d9e5f1 100644
--- a/drivers/memory/tegra20-mc.c
+++ b/drivers/memory/tegra20-mc.c
@@ -177,7 +177,7 @@ static void tegra20_mc_decode(struct tegra20_mc *mc, int n)
"carveout" : "trustzone") : "");
}
-static const struct of_device_id tegra20_mc_of_match[] __devinitconst = {
+static const struct of_device_id tegra20_mc_of_match[] = {
{ .compatible = "nvidia,tegra20-mc", },
{},
};
@@ -198,7 +198,7 @@ static irqreturn_t tegra20_mc_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit tegra20_mc_probe(struct platform_device *pdev)
+static int tegra20_mc_probe(struct platform_device *pdev)
{
struct resource *irq;
struct tegra20_mc *mc;
diff --git a/drivers/memory/tegra30-mc.c b/drivers/memory/tegra30-mc.c
index 802b9ea431fa..0b7ab9332a18 100644
--- a/drivers/memory/tegra30-mc.c
+++ b/drivers/memory/tegra30-mc.c
@@ -295,7 +295,7 @@ static UNIVERSAL_DEV_PM_OPS(tegra30_mc_pm,
tegra30_mc_suspend,
tegra30_mc_resume, NULL);
-static const struct of_device_id tegra30_mc_of_match[] __devinitconst = {
+static const struct of_device_id tegra30_mc_of_match[] = {
{ .compatible = "nvidia,tegra30-mc", },
{},
};
@@ -316,7 +316,7 @@ static irqreturn_t tegra30_mc_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit tegra30_mc_probe(struct platform_device *pdev)
+static int tegra30_mc_probe(struct platform_device *pdev)
{
struct resource *irq;
struct tegra30_mc *mc;
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index cc0997a05171..4f7a17fd1aa7 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -42,3 +42,13 @@ config MEMSTICK_R592
To compile this driver as a module, choose M here: the module will
be called r592.
+
+config MEMSTICK_REALTEK_PCI
+ tristate "Realtek PCI-E Memstick Card Interface Driver"
+ depends on MFD_RTSX_PCI
+ help
+ Say Y here to include driver code to support the Memstick card
+ interface of Realtek PCI-E card readers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rtsx_pci_ms.
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index 31ba8d378e46..af3459d7686e 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
obj-$(CONFIG_MEMSTICK_R592) += r592.o
+obj-$(CONFIG_MEMSTICK_REALTEK_PCI) += rtsx_pci_ms.o
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
new file mode 100644
index 000000000000..f5ddb82dadb7
--- /dev/null
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -0,0 +1,641 @@
+/* Realtek PCI-Express Memstick Card Interface driver
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/memstick.h>
+#include <linux/mfd/rtsx_pci.h>
+#include <asm/unaligned.h>
+
+struct realtek_pci_ms {
+ struct platform_device *pdev;
+ struct rtsx_pcr *pcr;
+ struct memstick_host *msh;
+ struct memstick_request *req;
+
+ struct mutex host_mutex;
+ struct work_struct handle_req;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ unsigned char ifmode;
+ bool eject;
+};
+
+static inline struct device *ms_dev(struct realtek_pci_ms *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void ms_clear_error(struct realtek_pci_ms *host)
+{
+ rtsx_pci_write_register(host->pcr, CARD_STOP,
+ MS_STOP | MS_CLR_ERR, MS_STOP | MS_CLR_ERR);
+}
+
+#ifdef DEBUG
+
+static void ms_print_debug_regs(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ u16 i;
+ u8 *ptr;
+
+ /* Print MS host internal registers */
+ rtsx_pci_init_cmd(pcr);
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ rtsx_pci_send_cmd(pcr, 100);
+
+ ptr = rtsx_pci_get_cmd_data(pcr);
+ for (i = 0xFD40; i <= 0xFD44; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ dev_dbg(ms_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+}
+
+#else
+
+#define ms_print_debug_regs(host)
+
+#endif
+
+static int ms_power_on(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, MS_MOD_SEL);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_MS);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
+ MS_CLK_EN, MS_CLK_EN);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_on(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+ /* Wait for MS power to stabilize */
+ msleep(150);
+
+ err = rtsx_pci_write_register(pcr, CARD_OE,
+ MS_OUTPUT_EN, MS_OUTPUT_EN);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int ms_power_off(struct realtek_pci_ms *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+}
+
+static int ms_transfer_data(struct realtek_pci_ms *host, unsigned char data_dir,
+ u8 tpc, u8 cfg, struct scatterlist *sg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ unsigned int length = sg->length;
+ u16 sec_cnt = (u16)(length / 512);
+ u8 val, trans_mode, dma_dir;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x, data_dir = %s, length = %d\n",
+ __func__, tpc, (data_dir == READ) ? "READ" : "WRITE",
+ length);
+
+ if (data_dir == READ) {
+ dma_dir = DMA_DIR_FROM_CARD;
+ trans_mode = MS_TM_AUTO_READ;
+ } else {
+ dma_dir = DMA_DIR_TO_CARD;
+ trans_mode = MS_TM_AUTO_WRITE;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_H,
+ 0xFF, (u8)(sec_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_SECTOR_CNT_L,
+ 0xFF, (u8)sec_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, 0xFF, (u8)(length >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, 0xFF, (u8)(length >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, 0xFF, (u8)(length >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)length);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK, dma_dir | DMA_EN | DMA_512);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | trans_mode);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+
+ rtsx_pci_send_cmd_no_wait(pcr);
+
+ err = rtsx_pci_transfer_data(pcr, sg, 1, data_dir == READ, 10000);
+ if (err < 0) {
+ ms_clear_error(host);
+ return err;
+ }
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT))
+ return -EIO;
+
+ return 0;
+}
+
+static int ms_write_bytes(struct realtek_pci_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ if (!data)
+ return -EINVAL;
+
+ rtsx_pci_init_cmd(pcr);
+
+ for (i = 0; i < cnt; i++)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, data[i]);
+ if (cnt % 2)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ PPBUF_BASE2 + i, 0xFF, 0xFF);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ if (int_reg)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 5000);
+ if (err < 0) {
+ u8 val;
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg)
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ if (int_reg) {
+ u8 *ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+ *int_reg = *ptr & 0x0F;
+ }
+
+ return 0;
+}
+
+static int ms_read_bytes(struct realtek_pci_ms *host, u8 tpc,
+ u8 cfg, u8 cnt, u8 *data, u8 *int_reg)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+ u8 *ptr;
+
+ dev_dbg(ms_dev(host), "%s: tpc = 0x%02x\n", __func__, tpc);
+
+ if (!data)
+ return -EINVAL;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, MS_TRANSFER,
+ 0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, MS_TRANSFER,
+ MS_TRANSFER_END, MS_TRANSFER_END);
+ for (i = 0; i < cnt - 1; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
+ if (cnt % 2)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PPBUF_BASE2 + cnt, 0, 0);
+ else
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD,
+ PPBUF_BASE2 + cnt - 1, 0, 0);
+ if (int_reg)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, MS_TRANS_CFG, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 5000);
+ if (err < 0) {
+ u8 val;
+
+ rtsx_pci_read_register(pcr, MS_TRANS_CFG, &val);
+ dev_dbg(ms_dev(host), "MS_TRANS_CFG: 0x%02x\n", val);
+
+ if (int_reg)
+ *int_reg = val & 0x0F;
+
+ ms_print_debug_regs(host);
+
+ ms_clear_error(host);
+
+ if (!(tpc & 0x08)) {
+ if (val & MS_CRC16_ERR)
+ return -EIO;
+ } else {
+ if (!(val & 0x80)) {
+ if (val & (MS_INT_ERR | MS_INT_CMDNK))
+ return -EIO;
+ }
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+ for (i = 0; i < cnt; i++)
+ data[i] = *ptr++;
+
+ if (int_reg)
+ *int_reg = *ptr & 0x0F;
+
+ return 0;
+}
+
+static int rtsx_pci_ms_issue_cmd(struct realtek_pci_ms *host)
+{
+ struct memstick_request *req = host->req;
+ int err = 0;
+ u8 cfg = 0, int_reg;
+
+ dev_dbg(ms_dev(host), "%s\n", __func__);
+
+ if (req->need_card_int) {
+ if (host->ifmode != MEMSTICK_SERIAL)
+ cfg = WAIT_INT;
+ }
+
+ if (req->long_data) {
+ err = ms_transfer_data(host, req->data_dir,
+ req->tpc, cfg, &(req->sg));
+ } else {
+ if (req->data_dir == READ) {
+ err = ms_read_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ } else {
+ err = ms_write_bytes(host, req->tpc, cfg,
+ req->data_len, req->data, &int_reg);
+ }
+ }
+ if (err < 0)
+ return err;
+
+ if (req->need_card_int && (host->ifmode == MEMSTICK_SERIAL)) {
+ err = ms_read_bytes(host, MS_TPC_GET_INT,
+ NO_WAIT_INT, 1, &int_reg, NULL);
+ if (err < 0)
+ return err;
+ }
+
+ if (req->need_card_int) {
+ dev_dbg(ms_dev(host), "int_reg: 0x%02x\n", int_reg);
+
+ if (int_reg & MS_INT_CMDNK)
+ req->int_reg |= MEMSTICK_INT_CMDNAK;
+ if (int_reg & MS_INT_BREQ)
+ req->int_reg |= MEMSTICK_INT_BREQ;
+ if (int_reg & MS_INT_ERR)
+ req->int_reg |= MEMSTICK_INT_ERR;
+ if (int_reg & MS_INT_CED)
+ req->int_reg |= MEMSTICK_INT_CED;
+ }
+
+ return 0;
+}
+
+static void rtsx_pci_ms_handle_req(struct work_struct *work)
+{
+ struct realtek_pci_ms *host = container_of(work,
+ struct realtek_pci_ms, handle_req);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct memstick_host *msh = host->msh;
+ int rc;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ rtsx_pci_switch_clock(host->pcr, host->clock, host->ssc_depth,
+ false, true, false);
+ rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, MS_MOD_SEL);
+ rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_MS);
+
+ if (!host->req) {
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(ms_dev(host), "next req %d\n", rc);
+
+ if (!rc)
+ host->req->error = rtsx_pci_ms_issue_cmd(host);
+ } while (!rc);
+ }
+
+ mutex_unlock(&pcr->pcr_mutex);
+}
+
+static void rtsx_pci_ms_request(struct memstick_host *msh)
+{
+ struct realtek_pci_ms *host = memstick_priv(msh);
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ schedule_work(&host->handle_req);
+}
+
+static int rtsx_pci_ms_set_param(struct memstick_host *msh,
+ enum memstick_param param, int value)
+{
+ struct realtek_pci_ms *host = memstick_priv(msh);
+ struct rtsx_pcr *pcr = host->pcr;
+ unsigned int clock = 0;
+ u8 ssc_depth = 0;
+ int err;
+
+ dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
+ __func__, param, value);
+
+ switch (param) {
+ case MEMSTICK_POWER:
+ if (value == MEMSTICK_POWER_ON)
+ err = ms_power_on(host);
+ else if (value == MEMSTICK_POWER_OFF)
+ err = ms_power_off(host);
+ else
+ return -EINVAL;
+ break;
+
+ case MEMSTICK_INTERFACE:
+ if (value == MEMSTICK_SERIAL) {
+ clock = 19000000;
+ ssc_depth = RTSX_SSC_DEPTH_500K;
+
+ err = rtsx_pci_write_register(pcr, MS_CFG,
+ 0x18, MS_BUS_WIDTH_1);
+ if (err < 0)
+ return err;
+ } else if (value == MEMSTICK_PAR4) {
+ clock = 39000000;
+ ssc_depth = RTSX_SSC_DEPTH_1M;
+
+ err = rtsx_pci_write_register(pcr, MS_CFG,
+ 0x58, MS_BUS_WIDTH_4 | PUSH_TIME_ODD);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ err = rtsx_pci_switch_clock(pcr, clock,
+ ssc_depth, false, true, false);
+ if (err < 0)
+ return err;
+
+ host->ssc_depth = ssc_depth;
+ host->clock = clock;
+ host->ifmode = value;
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int rtsx_pci_ms_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct memstick_host *msh = host->msh;
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ memstick_suspend_host(msh);
+ return 0;
+}
+
+static int rtsx_pci_ms_resume(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct memstick_host *msh = host->msh;
+
+ dev_dbg(ms_dev(host), "--> %s\n", __func__);
+
+ memstick_resume_host(msh);
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_pci_ms_suspend NULL
+#define rtsx_pci_ms_resume NULL
+
+#endif /* CONFIG_PM */
+
+static void rtsx_pci_ms_card_event(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+
+ memstick_detect_change(host->msh);
+}
+
+static int rtsx_pci_ms_drv_probe(struct platform_device *pdev)
+{
+ struct memstick_host *msh;
+ struct realtek_pci_ms *host;
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pdev->dev.platform_data;
+ int rc;
+
+ if (!handle)
+ return -ENXIO;
+
+ pcr = handle->pcr;
+ if (!pcr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E Memstick controller found\n");
+
+ msh = memstick_alloc_host(sizeof(*host), &pdev->dev);
+ if (!msh)
+ return -ENOMEM;
+
+ host = memstick_priv(msh);
+ host->pcr = pcr;
+ host->msh = msh;
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+ pcr->slots[RTSX_MS_CARD].p_dev = pdev;
+ pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event;
+
+ mutex_init(&host->host_mutex);
+
+ INIT_WORK(&host->handle_req, rtsx_pci_ms_handle_req);
+ msh->request = rtsx_pci_ms_request;
+ msh->set_param = rtsx_pci_ms_set_param;
+ msh->caps = MEMSTICK_CAP_PAR4;
+
+ rc = memstick_add_host(msh);
+ if (rc) {
+ memstick_free_host(msh);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
+{
+ struct realtek_pci_ms *host = platform_get_drvdata(pdev);
+ struct rtsx_pcr *pcr;
+ struct memstick_host *msh;
+ int rc;
+
+ if (!host)
+ return 0;
+
+ pcr = host->pcr;
+ pcr->slots[RTSX_MS_CARD].p_dev = NULL;
+ pcr->slots[RTSX_MS_CARD].card_event = NULL;
+ msh = host->msh;
+ host->eject = true;
+
+ mutex_lock(&host->host_mutex);
+ if (host->req) {
+ dev_dbg(&(pdev->dev),
+ "%s: Controller removed during transfer\n",
+ dev_name(&msh->dev));
+
+ rtsx_pci_complete_unfinished_transfer(pcr);
+
+ host->req->error = -ENOMEDIUM;
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ if (!rc)
+ host->req->error = -ENOMEDIUM;
+ } while (!rc);
+ }
+ mutex_unlock(&host->host_mutex);
+
+ memstick_remove_host(msh);
+ memstick_free_host(msh);
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E Memstick controller has been removed\n");
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_pci_ms_ids[] = {
+ {
+ .name = DRV_NAME_RTSX_PCI_MS,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_pci_ms_ids);
+
+static struct platform_driver rtsx_pci_ms_driver = {
+ .probe = rtsx_pci_ms_drv_probe,
+ .remove = rtsx_pci_ms_drv_remove,
+ .id_table = rtsx_pci_ms_ids,
+ .suspend = rtsx_pci_ms_suspend,
+ .resume = rtsx_pci_ms_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME_RTSX_PCI_MS,
+ },
+};
+module_platform_driver(rtsx_pci_ms_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E Memstick Card Host Driver");
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d784c36707c0..c13cd9bc590b 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -100,7 +100,7 @@ static int mptfc_slave_alloc(struct scsi_device *sdev);
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
static void mptfc_target_destroy(struct scsi_target *starget);
static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
-static void __devexit mptfc_remove(struct pci_dev *pdev);
+static void mptfc_remove(struct pci_dev *pdev);
static int mptfc_abort(struct scsi_cmnd *SCpnt);
static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
@@ -1360,7 +1360,7 @@ static struct pci_driver mptfc_driver = {
.name = "mptfc",
.id_table = mptfc_pci_table,
.probe = mptfc_probe,
- .remove = __devexit_p(mptfc_remove),
+ .remove = mptfc_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
@@ -1496,8 +1496,7 @@ mptfc_init(void)
* @pdev: Pointer to pci_dev structure
*
*/
-static void __devexit
-mptfc_remove(struct pci_dev *pdev)
+static void mptfc_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptfc_rport_info *p, *n;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 551262e4b96e..fa43c391c8ed 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5332,7 +5332,7 @@ mptsas_shutdown(struct pci_dev *pdev)
mptsas_cleanup_fw_event_q(ioc);
}
-static void __devexit mptsas_remove(struct pci_dev *pdev)
+static void mptsas_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptsas_portinfo *p, *n;
@@ -5387,7 +5387,7 @@ static struct pci_driver mptsas_driver = {
.name = "mptsas",
.id_table = mptsas_pci_table,
.probe = mptsas_probe,
- .remove = __devexit_p(mptsas_remove),
+ .remove = mptsas_remove,
.shutdown = mptsas_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 0c3ced70707b..164afa71bba7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -792,6 +792,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* than an unsolicited DID_ABORT.
*/
sc->result = DID_RESET << 16;
+ break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
if (ioc->bus_type == FC)
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 8f61ba6aac23..c3aabde2dc4f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1550,7 +1550,7 @@ static struct pci_driver mptspi_driver = {
.name = "mptspi",
.id_table = mptspi_pci_table,
.probe = mptspi_probe,
- .remove = __devexit_p(mptscsih_remove),
+ .remove = mptscsih_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
index 5fb195af43e2..4a7d2ebdfc97 100644
--- a/drivers/message/i2o/README.ioctl
+++ b/drivers/message/i2o/README.ioctl
@@ -138,7 +138,7 @@ VI. Setting Parameters
The return value is the size in bytes of the data written into
ops->resbuf if no errors occur. If an error occurs, -1 is returned
- and errno is set appropriatly:
+ and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -222,7 +222,7 @@ VIII. Downloading Software
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -264,7 +264,7 @@ IX. Uploading Software
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -301,7 +301,7 @@ X. Removing Software
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
@@ -325,7 +325,7 @@ X. Validating Configuration
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1 is
- returned and errno is set appropriatly:
+ returned and errno is set appropriately:
ETIMEDOUT Timeout waiting for reply message
ENXIO Invalid IOP number
@@ -360,7 +360,7 @@ XI. Configuration Dialog
RETURNS
This function returns 0 if no errors occur. If an error occurs, -1
- is returned and errno is set appropriatly:
+ is returned and errno is set appropriately:
EFAULT Invalid user space pointer was passed
ENXIO Invalid IOP number
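Since the corrected README text above keeps repeating the same convention (0 or a byte count on success, -1 with errno set on failure), a short userspace sketch of that pattern may help; the request code and argument struct are deliberately left as placeholders rather than naming a specific i2o_config ioctl.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Sketch of the error-handling convention described above.  "request" and
 * "args" stand in for one of the documented requests and its argument
 * structure; fd is an open handle to the I2O control device.
 */
static int issue_i2o_request(int fd, unsigned long request, void *args)
{
	int ret = ioctl(fd, request, args);

	if (ret < 0) {
		/* errno is EFAULT, ENXIO, ETIMEDOUT, ... as listed above */
		fprintf(stderr, "i2o ioctl failed: %s\n", strerror(errno));
		return -1;
	}

	/*
	 * For the parameter ioctls the return value is the number of bytes
	 * written into the result buffer; the others simply return 0.
	 */
	return ret;
}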
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 4796bbf0ae4e..49e86aed2bc4 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -609,7 +609,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode)
u8 operation;
/*
- * This is to deail with the case of an application
+ * This is to deal with the case of an application
* opening a device and then the device disappears while
* it's in use, and then the application tries to release
* it. ex: Unmounting a deleted RAID volume at reboot.
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 9a49c243a6ac..5451beff183f 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -189,7 +189,7 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
return -ENXIO;
/*
- * Stop users being able to try and allocate arbitary amounts
+ * Stop users being able to try and allocate arbitrary amounts
* of DMA space. 64K is way more than sufficient for this.
*/
if (kcmd.oplen > 65536)
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 7190d5239b4f..0f9f3e1a2b6b 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -37,7 +37,7 @@
#define OSM_DESCRIPTION "I2O-subsystem"
/* PCI device id table for all I2O controllers */
-static struct pci_device_id __devinitdata i2o_pci_ids[] = {
+static struct pci_device_id i2o_pci_ids[] = {
{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
{.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962,
@@ -84,7 +84,7 @@ static void i2o_pci_free(struct i2o_controller *c)
*
* Returns 0 on success or negative error code on failure.
*/
-static int __devinit i2o_pci_alloc(struct i2o_controller *c)
+static int i2o_pci_alloc(struct i2o_controller *c)
{
struct pci_dev *pdev = c->pdev;
struct device *dev = &pdev->dev;
@@ -315,8 +315,7 @@ static void i2o_pci_irq_disable(struct i2o_controller *c)
*
* Returns 0 on success or negative error code on failure.
*/
-static int __devinit i2o_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int i2o_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct i2o_controller *c;
int rc;
@@ -453,7 +452,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
* Reset the I2O controller, disable interrupts and remove all allocated
* resources.
*/
-static void __devexit i2o_pci_remove(struct pci_dev *pdev)
+static void i2o_pci_remove(struct pci_dev *pdev)
{
struct i2o_controller *c;
c = pci_get_drvdata(pdev);
@@ -474,7 +473,7 @@ static struct pci_driver i2o_pci_driver = {
.name = "PCI_I2O",
.id_table = i2o_pci_ids,
.probe = i2o_pci_probe,
- .remove = __devexit_p(i2o_pci_remove),
+ .remove = i2o_pci_remove,
};
/**
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index ce229ea933d1..391e23e6a647 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -248,7 +248,7 @@ static const struct regmap_irq pm800_irqs[] = {
},
};
-static int __devinit device_gpadc_init(struct pm80x_chip *chip,
+static int device_gpadc_init(struct pm80x_chip *chip,
struct pm80x_platform_data *pdata)
{
struct pm80x_subchip *subchip = chip->subchip;
@@ -315,7 +315,7 @@ out:
return ret;
}
-static int __devinit device_irq_init_800(struct pm80x_chip *chip)
+static int device_irq_init_800(struct pm80x_chip *chip)
{
struct regmap *map = chip->regmap;
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
@@ -415,7 +415,7 @@ static void pm800_pages_exit(struct pm80x_chip *chip)
}
}
-static int __devinit device_800_init(struct pm80x_chip *chip,
+static int device_800_init(struct pm80x_chip *chip,
struct pm80x_platform_data *pdata)
{
int ret, pmic_id;
@@ -499,7 +499,7 @@ out:
return ret;
}
-static int __devinit pm800_probe(struct i2c_client *client,
+static int pm800_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0;
@@ -554,7 +554,7 @@ out_init:
return ret;
}
-static int __devexit pm800_remove(struct i2c_client *client)
+static int pm800_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -576,7 +576,7 @@ static struct i2c_driver pm800_driver = {
.pm = &pm80x_pm_ops,
},
.probe = pm800_probe,
- .remove = __devexit_p(pm800_remove),
+ .remove = pm800_remove,
.id_table = pm80x_id_table,
};
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index c20a31136f04..e671230be2b1 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -135,7 +135,7 @@ static struct regmap_irq pm805_irqs[] = {
},
};
-static int __devinit device_irq_init_805(struct pm80x_chip *chip)
+static int device_irq_init_805(struct pm80x_chip *chip)
{
struct regmap *map = chip->regmap;
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
@@ -189,7 +189,7 @@ static struct regmap_irq_chip pm805_irq_chip = {
.ack_base = PM805_INT_STATUS1,
};
-static int __devinit device_805_init(struct pm80x_chip *chip)
+static int device_805_init(struct pm80x_chip *chip)
{
int ret = 0;
unsigned int val;
@@ -232,7 +232,7 @@ out_irq_init:
return ret;
}
-static int __devinit pm805_probe(struct i2c_client *client,
+static int pm805_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0;
@@ -262,7 +262,7 @@ out_init:
return ret;
}
-static int __devexit pm805_remove(struct i2c_client *client)
+static int pm805_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -281,7 +281,7 @@ static struct i2c_driver pm805_driver = {
.pm = &pm80x_pm_ops,
},
.probe = pm805_probe,
- .remove = __devexit_p(pm805_remove),
+ .remove = pm805_remove,
.id_table = pm80x_id_table,
};
diff --git a/drivers/mfd/88pm80x.c b/drivers/mfd/88pm80x.c
index cd0bf527d764..1adb355d86d1 100644
--- a/drivers/mfd/88pm80x.c
+++ b/drivers/mfd/88pm80x.c
@@ -31,7 +31,7 @@ const struct regmap_config pm80x_regmap_config = {
};
EXPORT_SYMBOL_GPL(pm80x_regmap_config);
-int __devinit pm80x_init(struct i2c_client *client,
+int pm80x_init(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pm80x_chip *chip;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 8fa86edf70d4..893fc1ba6ead 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -28,111 +28,111 @@
#define INT_STATUS_NUM 3
-static struct resource bk0_resources[] __devinitdata = {
+static struct resource bk0_resources[] = {
{2, 2, "duty cycle", IORESOURCE_REG, },
{3, 3, "always on", IORESOURCE_REG, },
{3, 3, "current", IORESOURCE_REG, },
};
-static struct resource bk1_resources[] __devinitdata = {
+static struct resource bk1_resources[] = {
{4, 4, "duty cycle", IORESOURCE_REG, },
{5, 5, "always on", IORESOURCE_REG, },
{5, 5, "current", IORESOURCE_REG, },
};
-static struct resource bk2_resources[] __devinitdata = {
+static struct resource bk2_resources[] = {
{6, 6, "duty cycle", IORESOURCE_REG, },
{7, 7, "always on", IORESOURCE_REG, },
{5, 5, "current", IORESOURCE_REG, },
};
-static struct resource led0_resources[] __devinitdata = {
+static struct resource led0_resources[] = {
/* RGB1 Red LED */
{0xd, 0xd, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led1_resources[] __devinitdata = {
+static struct resource led1_resources[] = {
/* RGB1 Green LED */
{0xe, 0xe, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led2_resources[] __devinitdata = {
+static struct resource led2_resources[] = {
/* RGB1 Blue LED */
{0xf, 0xf, "control", IORESOURCE_REG, },
{0xc, 0xc, "blink", IORESOURCE_REG, },
};
-static struct resource led3_resources[] __devinitdata = {
+static struct resource led3_resources[] = {
/* RGB2 Red LED */
{0x9, 0x9, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource led4_resources[] __devinitdata = {
+static struct resource led4_resources[] = {
/* RGB2 Green LED */
{0xa, 0xa, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource led5_resources[] __devinitdata = {
+static struct resource led5_resources[] = {
/* RGB2 Blue LED */
{0xb, 0xb, "control", IORESOURCE_REG, },
{0x8, 0x8, "blink", IORESOURCE_REG, },
};
-static struct resource buck1_resources[] __devinitdata = {
+static struct resource buck1_resources[] = {
{0x24, 0x24, "buck set", IORESOURCE_REG, },
};
-static struct resource buck2_resources[] __devinitdata = {
+static struct resource buck2_resources[] = {
{0x25, 0x25, "buck set", IORESOURCE_REG, },
};
-static struct resource buck3_resources[] __devinitdata = {
+static struct resource buck3_resources[] = {
{0x26, 0x26, "buck set", IORESOURCE_REG, },
};
-static struct resource ldo1_resources[] __devinitdata = {
+static struct resource ldo1_resources[] = {
{0x10, 0x10, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo2_resources[] __devinitdata = {
+static struct resource ldo2_resources[] = {
{0x11, 0x11, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo3_resources[] __devinitdata = {
+static struct resource ldo3_resources[] = {
{0x12, 0x12, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo4_resources[] __devinitdata = {
+static struct resource ldo4_resources[] = {
{0x13, 0x13, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo5_resources[] __devinitdata = {
+static struct resource ldo5_resources[] = {
{0x14, 0x14, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo6_resources[] __devinitdata = {
+static struct resource ldo6_resources[] = {
{0x15, 0x15, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo7_resources[] __devinitdata = {
+static struct resource ldo7_resources[] = {
{0x16, 0x16, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo8_resources[] __devinitdata = {
+static struct resource ldo8_resources[] = {
{0x17, 0x17, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo9_resources[] __devinitdata = {
+static struct resource ldo9_resources[] = {
{0x18, 0x18, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo10_resources[] __devinitdata = {
+static struct resource ldo10_resources[] = {
{0x19, 0x19, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo12_resources[] __devinitdata = {
+static struct resource ldo12_resources[] = {
{0x1a, 0x1a, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo_vibrator_resources[] __devinitdata = {
+static struct resource ldo_vibrator_resources[] = {
{0x28, 0x28, "ldo set", IORESOURCE_REG, },
};
-static struct resource ldo14_resources[] __devinitdata = {
+static struct resource ldo14_resources[] = {
{0x1b, 0x1b, "ldo set", IORESOURCE_REG, },
};
-static struct resource touch_resources[] __devinitdata = {
+static struct resource touch_resources[] = {
{PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,},
};
-static struct resource onkey_resources[] __devinitdata = {
+static struct resource onkey_resources[] = {
{PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,},
};
-static struct resource codec_resources[] __devinitdata = {
+static struct resource codec_resources[] = {
/* Headset microphone insertion or removal */
{PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,},
/* Hook-switch press or release */
@@ -143,12 +143,12 @@ static struct resource codec_resources[] __devinitdata = {
{PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
};
-static struct resource battery_resources[] __devinitdata = {
+static struct resource battery_resources[] = {
{PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,},
{PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,},
};
-static struct resource charger_resources[] __devinitdata = {
+static struct resource charger_resources[] = {
{PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,},
{PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,},
{PM8607_IRQ_CHG_FAIL, PM8607_IRQ_CHG_FAIL, "charging timeout", IORESOURCE_IRQ,},
@@ -158,11 +158,11 @@ static struct resource charger_resources[] __devinitdata = {
{PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
};
-static struct resource rtc_resources[] __devinitdata = {
+static struct resource rtc_resources[] = {
{PM8607_IRQ_RTC, PM8607_IRQ_RTC, "rtc", IORESOURCE_IRQ,},
};
-static struct mfd_cell bk_devs[] __devinitdata = {
+static struct mfd_cell bk_devs[] = {
{
.name = "88pm860x-backlight",
.id = 0,
@@ -181,7 +181,7 @@ static struct mfd_cell bk_devs[] __devinitdata = {
},
};
-static struct mfd_cell led_devs[] __devinitdata = {
+static struct mfd_cell led_devs[] = {
{
.name = "88pm860x-led",
.id = 0,
@@ -215,7 +215,7 @@ static struct mfd_cell led_devs[] __devinitdata = {
},
};
-static struct mfd_cell reg_devs[] __devinitdata = {
+static struct mfd_cell reg_devs[] = {
{
.name = "88pm860x-regulator",
.id = 0,
@@ -565,7 +565,7 @@ static struct irq_domain_ops pm860x_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static int __devinit device_irq_init(struct pm860x_chip *chip,
+static int device_irq_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
@@ -730,7 +730,7 @@ out:
}
EXPORT_SYMBOL(pm8606_osc_disable);
-static void __devinit device_osc_init(struct i2c_client *i2c)
+static void device_osc_init(struct i2c_client *i2c)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
@@ -745,7 +745,7 @@ static void __devinit device_osc_init(struct i2c_client *i2c)
chip->osc_status = PM8606_REF_GP_OSC_OFF;
}
-static void __devinit device_bk_init(struct pm860x_chip *chip,
+static void device_bk_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret, i;
@@ -765,7 +765,7 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to add backlight subdev\n");
}
-static void __devinit device_led_init(struct pm860x_chip *chip,
+static void device_led_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret, i;
@@ -787,7 +787,7 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
}
}
-static void __devinit device_regulator_init(struct pm860x_chip *chip,
+static void device_regulator_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -866,7 +866,7 @@ static void __devinit device_regulator_init(struct pm860x_chip *chip,
}
}
-static void __devinit device_rtc_init(struct pm860x_chip *chip,
+static void device_rtc_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -885,7 +885,7 @@ static void __devinit device_rtc_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to add rtc subdev\n");
}
-static void __devinit device_touch_init(struct pm860x_chip *chip,
+static void device_touch_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -904,7 +904,7 @@ static void __devinit device_touch_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to add touch subdev\n");
}
-static void __devinit device_power_init(struct pm860x_chip *chip,
+static void device_power_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -951,7 +951,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
}
}
-static void __devinit device_onkey_init(struct pm860x_chip *chip,
+static void device_onkey_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -965,7 +965,7 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to add onkey subdev\n");
}
-static void __devinit device_codec_init(struct pm860x_chip *chip,
+static void device_codec_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
int ret;
@@ -979,7 +979,7 @@ static void __devinit device_codec_init(struct pm860x_chip *chip,
dev_err(chip->dev, "Failed to add codec subdev\n");
}
-static void __devinit device_8607_init(struct pm860x_chip *chip,
+static void device_8607_init(struct pm860x_chip *chip,
struct i2c_client *i2c,
struct pm860x_platform_data *pdata)
{
@@ -1040,7 +1040,7 @@ out:
return;
}
-static void __devinit device_8606_init(struct pm860x_chip *chip,
+static void device_8606_init(struct pm860x_chip *chip,
struct i2c_client *i2c,
struct pm860x_platform_data *pdata)
{
@@ -1049,7 +1049,7 @@ static void __devinit device_8606_init(struct pm860x_chip *chip,
device_led_init(chip, pdata);
}
-static int __devinit pm860x_device_init(struct pm860x_chip *chip,
+static int pm860x_device_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
chip->core_irq = 0;
@@ -1077,7 +1077,7 @@ static int __devinit pm860x_device_init(struct pm860x_chip *chip,
return 0;
}
-static void __devexit pm860x_device_exit(struct pm860x_chip *chip)
+static void pm860x_device_exit(struct pm860x_chip *chip)
{
device_irq_exit(chip);
mfd_remove_devices(chip->dev);
@@ -1109,7 +1109,7 @@ static struct regmap_config pm860x_regmap_config = {
.val_bits = 8,
};
-static int __devinit pm860x_dt_init(struct device_node *np,
+static int pm860x_dt_init(struct device_node *np,
struct device *dev,
struct pm860x_platform_data *pdata)
{
@@ -1127,7 +1127,7 @@ static int __devinit pm860x_dt_init(struct device_node *np,
return 0;
}
-static int __devinit pm860x_probe(struct i2c_client *client,
+static int pm860x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pm860x_platform_data *pdata = client->dev.platform_data;
@@ -1200,7 +1200,7 @@ err:
return ret;
}
-static int __devexit pm860x_remove(struct i2c_client *client)
+static int pm860x_remove(struct i2c_client *client)
{
struct pm860x_chip *chip = i2c_get_clientdata(client);
@@ -1258,7 +1258,7 @@ static struct i2c_driver pm860x_driver = {
.of_match_table = of_match_ptr(pm860x_dt_ids),
},
.probe = pm860x_probe,
- .remove = __devexit_p(pm860x_remove),
+ .remove = pm860x_remove,
.id_table = pm860x_id_table,
};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index acab3ef8a310..ff553babf455 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -63,6 +63,16 @@ config MFD_SM501_GPIO
lines on the SM501. The platform data is used to supply the
base number for the first GPIO line to register.
+config MFD_RTSX_PCI
+ tristate "Support for Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+ This adds support for Realtek PCI-Express card readers, including the
+ rts5209, rts5229, rtl8411, etc. The Realtek card reader supports access
+ to many types of memory cards, such as Memory Stick, Memory Stick Pro,
+ Secure Digital and MultiMediaCard.
+
config MFD_ASIC3
bool "Support for Compaq ASIC3"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -94,6 +104,17 @@ config MFD_TI_SSP
To compile this driver as a module, choose M here: the
module will be called ti-ssp.
+config MFD_TI_AM335X_TSCADC
+ tristate "TI ADC / Touch Screen chip support"
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Texas Instruments series
+ of Touch Screen / ADC chips.
+ To compile this driver as a module, choose M here: the
+ module will be called ti_am335x_tscadc.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -201,7 +222,6 @@ config MFD_TPS6586X
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
- depends on REGULATOR
help
If you say yes here you get support for the TPS6586X series of
Power Management chips.
@@ -217,6 +237,7 @@ config MFD_TPS65910
depends on I2C=y && GPIOLIB
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
select IRQ_DOMAIN
help
If you say yes here you get support for the TPS65910 series of
@@ -244,6 +265,20 @@ config MFD_TPS65912_SPI
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
+config MFD_TPS80031
+ bool "TI TPS80031/TPS80032 Power Management chips"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ If you say yes here you get support for the Texas Instruments
+ TPS80031/TPS80032 Fully Integrated Power Management with Power
+ Path and Battery Charger. The device provides five configurable
+ step-down converters, 11 general-purpose LDOs, a USB OTG module,
+ an ADC, an RTC, two PWMs, a system voltage regulator/battery charger
+ with power path from USB, and a 32K clock generator.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -258,6 +293,7 @@ config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
select IRQ_DOMAIN
+ select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
@@ -300,10 +336,10 @@ config MFD_TWL4030_AUDIO
config TWL6040_CORE
bool "Support for TWL6040 audio codec"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y
select MFD_CORE
select REGMAP_I2C
- select IRQ_DOMAIN
+ select REGMAP_IRQ
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
@@ -981,6 +1017,7 @@ config MFD_TPS65090
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
If you say yes here you get support for the TPS65090 series of
Power Management chips.
@@ -1025,6 +1062,7 @@ config MFD_STA2X11
bool "STA2X11 multi function device support"
depends on STA2X11
select MFD_CORE
+ select REGMAP_MMIO
config MFD_SYSCON
bool "System Controller Register R/W Based on Regmap"
@@ -1044,6 +1082,38 @@ config MFD_PALMAS
If you say yes here you get support for the Palmas
series of PMIC chips from Texas Instruments.
+config MFD_VIPERBOARD
+ tristate "Support for Nano River Technologies Viperboard"
+ select MFD_CORE
+ depends on USB
+ default n
+ help
+ Say yes here if you want support for Nano River Technologies
+ Viperboard.
+ There are mfd cell drivers available for i2c master, adc and
+ both gpios found on the board. The spi part does not yet
+ have a driver.
+ You need to select the mfd cell drivers separately.
+ The drivers do not support all features the board exposes.
+
+config MFD_RETU
+ tristate "Support for Retu multi-function device"
+ select MFD_CORE
+ depends on I2C
+ select REGMAP_IRQ
+ help
+ Retu is a multi-function device found on Nokia Internet Tablets
+ (770, N800 and N810).
+
+config MFD_AS3711
+ bool "Support for AS3711"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C=y
+ help
+ Support for the AS3711 PMIC from AMS.
+
endmenu
endif
@@ -1070,3 +1140,9 @@ config MCP_UCB1200_TS
depends on MCP_UCB1200 && INPUT
endmenu
+
+config VEXPRESS_CONFIG
+ bool
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d8ccb630ddb0..8b977f8045ae 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -9,6 +9,9 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o
+obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
+
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -16,6 +19,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
@@ -52,18 +56,19 @@ obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o
tps65912-objs := tps65912-core.o tps65912-irq.o
obj-$(CONFIG_MFD_TPS65912) += tps65912.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
+obj-$(CONFIG_MFD_TPS80031) += tps80031.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
-obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
+obj-$(CONFIG_TWL6040_CORE) += twl6040.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
@@ -86,6 +91,7 @@ obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+obj-$(CONFIG_PMIC_DA9052) += da9052-irq.o
obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
@@ -134,7 +140,11 @@ obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
+obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
+obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o
+obj-$(CONFIG_MFD_RETU) += retu-mfd.o
+obj-$(CONFIG_MFD_AS3711) += as3711.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 2b3dde571a50..2ec7725f4a08 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -661,8 +661,7 @@ struct ab3100_init_setting {
u8 setting;
};
-static const struct ab3100_init_setting __devinitconst
-ab3100_init_settings[] = {
+static const struct ab3100_init_setting ab3100_init_settings[] = {
{
.abreg = AB3100_MCA,
.setting = 0x01
@@ -708,7 +707,7 @@ ab3100_init_settings[] = {
},
};
-static int __devinit ab3100_setup(struct ab3100 *ab3100)
+static int ab3100_setup(struct ab3100 *ab3100)
{
int err = 0;
int i;
@@ -803,7 +802,7 @@ struct ab_family_id {
char *name;
};
-static const struct ab_family_id ids[] __devinitconst = {
+static const struct ab_family_id ids[] = {
/* AB3100 */
{
.id = 0xc0,
@@ -857,7 +856,7 @@ static const struct ab_family_id ids[] __devinitconst = {
},
};
-static int __devinit ab3100_probe(struct i2c_client *client,
+static int ab3100_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ab3100 *ab3100;
@@ -962,7 +961,7 @@ static int __devinit ab3100_probe(struct i2c_client *client,
return err;
}
-static int __devexit ab3100_remove(struct i2c_client *client)
+static int ab3100_remove(struct i2c_client *client)
{
struct ab3100 *ab3100 = i2c_get_clientdata(client);
@@ -986,7 +985,7 @@ static struct i2c_driver ab3100_driver = {
},
.id_table = ab3100_id,
.probe = ab3100_probe,
- .remove = __devexit_p(ab3100_remove),
+ .remove = ab3100_remove,
};
static int __init ab3100_i2c_init(void)
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1667c77b5cde..4778bb124efe 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -19,6 +19,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
#include <linux/of.h>
@@ -565,15 +566,10 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
else
num_irqs = AB8500_NR_IRQS;
- if (ab8500->irq_base) {
- ab8500->domain = irq_domain_add_legacy(
- NULL, num_irqs, ab8500->irq_base,
- 0, &ab8500_irq_ops, ab8500);
- }
- else {
- ab8500->domain = irq_domain_add_linear(
- np, num_irqs, &ab8500_irq_ops, ab8500);
- }
+ /* If ->irq_base is zero this will give a linear mapping */
+ ab8500->domain = irq_domain_add_simple(NULL,
+ num_irqs, ab8500->irq_base,
+ &ab8500_irq_ops, ab8500);
if (!ab8500->domain) {
dev_err(ab8500->dev, "Failed to create irqdomain\n");
@@ -591,39 +587,7 @@ int ab8500_suspend(struct ab8500 *ab8500)
return 0;
}
-/* AB8500 GPIO Resources */
-static struct resource __devinitdata ab8500_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-/* AB9540 GPIO Resources */
-static struct resource __devinitdata ab9540_gpio_resources[] = {
- {
- .name = "GPIO_INT6",
- .start = AB8500_INT_GPIO6R,
- .end = AB8500_INT_GPIO41F,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT14",
- .start = AB9540_INT_GPIO50R,
- .end = AB9540_INT_GPIO54R,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "GPIO_INT15",
- .start = AB9540_INT_GPIO50F,
- .end = AB9540_INT_GPIO54F,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource __devinitdata ab8500_gpadc_resources[] = {
+static struct resource ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
.start = AB8500_INT_GP_HW_ADC_CONV_END,
@@ -638,7 +602,7 @@ static struct resource __devinitdata ab8500_gpadc_resources[] = {
},
};
-static struct resource __devinitdata ab8500_rtc_resources[] = {
+static struct resource ab8500_rtc_resources[] = {
{
.name = "60S",
.start = AB8500_INT_RTC_60S,
@@ -653,7 +617,7 @@ static struct resource __devinitdata ab8500_rtc_resources[] = {
},
};
-static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
+static struct resource ab8500_poweronkey_db_resources[] = {
{
.name = "ONKEY_DBF",
.start = AB8500_INT_PON_KEY1DB_F,
@@ -668,7 +632,7 @@ static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
},
};
-static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
+static struct resource ab8500_av_acc_detect_resources[] = {
{
.name = "ACC_DETECT_1DB_F",
.start = AB8500_INT_ACC_DETECT_1DB_F,
@@ -707,7 +671,7 @@ static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
},
};
-static struct resource __devinitdata ab8500_charger_resources[] = {
+static struct resource ab8500_charger_resources[] = {
{
.name = "MAIN_CH_UNPLUG_DET",
.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
@@ -788,7 +752,7 @@ static struct resource __devinitdata ab8500_charger_resources[] = {
},
};
-static struct resource __devinitdata ab8500_btemp_resources[] = {
+static struct resource ab8500_btemp_resources[] = {
{
.name = "BAT_CTRL_INDB",
.start = AB8500_INT_BAT_CTRL_INDB,
@@ -821,7 +785,7 @@ static struct resource __devinitdata ab8500_btemp_resources[] = {
},
};
-static struct resource __devinitdata ab8500_fg_resources[] = {
+static struct resource ab8500_fg_resources[] = {
{
.name = "NCONV_ACCU",
.start = AB8500_INT_CCN_CONV_ACC,
@@ -860,10 +824,10 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
},
};
-static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+static struct resource ab8500_chargalg_resources[] = {};
#ifdef CONFIG_DEBUG_FS
-static struct resource __devinitdata ab8500_debug_resources[] = {
+static struct resource ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -879,7 +843,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
};
#endif
-static struct resource __devinitdata ab8500_usb_resources[] = {
+static struct resource ab8500_usb_resources[] = {
{
.name = "ID_WAKEUP_R",
.start = AB8500_INT_ID_WAKEUP_R,
@@ -924,7 +888,7 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
},
};
-static struct resource __devinitdata ab8505_iddet_resources[] = {
+static struct resource ab8505_iddet_resources[] = {
{
.name = "KeyDeglitch",
.start = AB8505_INT_KEYDEGLITCH,
@@ -957,7 +921,7 @@ static struct resource __devinitdata ab8505_iddet_resources[] = {
},
};
-static struct resource __devinitdata ab8500_temp_resources[] = {
+static struct resource ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
@@ -966,7 +930,7 @@ static struct resource __devinitdata ab8500_temp_resources[] = {
},
};
-static struct mfd_cell __devinitdata abx500_common_devs[] = {
+static struct mfd_cell abx500_common_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -984,6 +948,10 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.of_compatible = "stericsson,ab8500-regulator",
},
{
+ .name = "abx500-clk",
+ .of_compatible = "stericsson,abx500-clk",
+ },
+ {
.name = "ab8500-gpadc",
.of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
@@ -1038,35 +1006,53 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
},
};
-static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+static struct mfd_cell ab8500_bm_devs[] = {
{
.name = "ab8500-charger",
+ .of_compatible = "stericsson,ab8500-charger",
.num_resources = ARRAY_SIZE(ab8500_charger_resources),
.resources = ab8500_charger_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-btemp",
+ .of_compatible = "stericsson,ab8500-btemp",
.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
.resources = ab8500_btemp_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-fg",
+ .of_compatible = "stericsson,ab8500-fg",
.num_resources = ARRAY_SIZE(ab8500_fg_resources),
.resources = ab8500_fg_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
{
.name = "ab8500-chargalg",
+ .of_compatible = "stericsson,ab8500-chargalg",
.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
.resources = ab8500_chargalg_resources,
+#ifndef CONFIG_OF
+ .platform_data = &ab8500_bm_data,
+ .pdata_size = sizeof(ab8500_bm_data),
+#endif
},
};
-static struct mfd_cell __devinitdata ab8500_devs[] = {
+static struct mfd_cell ab8500_devs[] = {
{
.name = "ab8500-gpio",
.of_compatible = "stericsson,ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
- .resources = ab8500_gpio_resources,
},
{
.name = "ab8500-usb",
@@ -1080,11 +1066,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
},
};
-static struct mfd_cell __devinitdata ab9540_devs[] = {
+static struct mfd_cell ab9540_devs[] = {
{
.name = "ab8500-gpio",
- .num_resources = ARRAY_SIZE(ab9540_gpio_resources),
- .resources = ab9540_gpio_resources,
},
{
.name = "ab9540-usb",
@@ -1097,7 +1081,7 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
};
/* Device list common to ab9540 and ab8505 */
-static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+static struct mfd_cell ab9540_ab8505_devs[] = {
{
.name = "ab-iddet",
.num_resources = ARRAY_SIZE(ab8505_iddet_resources),
@@ -1248,7 +1232,7 @@ static struct attribute_group ab9540_attr_group = {
.attrs = ab9540_sysfs_entries,
};
-static int __devinit ab8500_probe(struct platform_device *pdev)
+static int ab8500_probe(struct platform_device *pdev)
{
static char *switch_off_status[] = {
"Swoff bit programming",
@@ -1269,7 +1253,7 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
int i;
u8 value;
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL);
if (!ab8500)
return -ENOMEM;
@@ -1279,10 +1263,8 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ab8500->dev = &pdev->dev;
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!resource) {
- ret = -ENODEV;
- goto out_free_ab8500;
- }
+ if (!resource)
+ return -ENODEV;
ab8500->irq = resource->start;
@@ -1305,7 +1287,7 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->version = value;
}
@@ -1313,7 +1295,7 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- goto out_free_ab8500;
+ return ret;
ab8500->chip_id = value;
@@ -1330,14 +1312,13 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ab8500->mask_size = AB8500_NUM_IRQ_REGS;
ab8500->irq_reg_offset = ab8500_irq_regoffset;
}
- ab8500->mask = kzalloc(ab8500->mask_size, GFP_KERNEL);
+ ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
if (!ab8500->mask)
return -ENOMEM;
- ab8500->oldmask = kzalloc(ab8500->mask_size, GFP_KERNEL);
- if (!ab8500->oldmask) {
- ret = -ENOMEM;
- goto out_freemask;
- }
+ ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+ if (!ab8500->oldmask)
+ return -ENOMEM;
+
/*
* ab8500 has switched off due to (SWITCH_OFF_STATUS):
* 0x01 Swoff bit programming
@@ -1391,37 +1372,37 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
if (ret)
- goto out_freeoldmask;
+ return ret;
for (i = 0; i < ab8500->mask_size; i++)
ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
ret = ab8500_irq_init(ab8500, np);
if (ret)
- goto out_freeoldmask;
+ return ret;
/* Activate this feature only in ab9540 */
/* till tests are done on ab8500 1p2 or later*/
if (is_ab9540(ab8500)) {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_hierarchical_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
}
else {
- ret = request_threaded_irq(ab8500->irq, NULL,
- ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
- goto out_freeoldmask;
+ return ret;
}
ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
ARRAY_SIZE(abx500_common_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
@@ -1432,14 +1413,14 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ARRAY_SIZE(ab8500_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (is_ab9540(ab8500) || is_ab8505(ab8500))
ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
ARRAY_SIZE(ab9540_ab8505_devs), NULL,
ab8500->irq_base, ab8500->domain);
if (ret)
- goto out_freeirq;
+ return ret;
if (!no_bm) {
/* Add battery management devices */
@@ -1460,20 +1441,9 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
dev_err(ab8500->dev, "error creating sysfs entries\n");
return ret;
-
-out_freeirq:
- free_irq(ab8500->irq, ab8500);
-out_freeoldmask:
- kfree(ab8500->oldmask);
-out_freemask:
- kfree(ab8500->mask);
-out_free_ab8500:
- kfree(ab8500);
-
- return ret;
}
-static int __devexit ab8500_remove(struct platform_device *pdev)
+static int ab8500_remove(struct platform_device *pdev)
{
struct ab8500 *ab8500 = platform_get_drvdata(pdev);
@@ -1483,11 +1453,6 @@ static int __devexit ab8500_remove(struct platform_device *pdev)
sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
mfd_remove_devices(ab8500->dev);
- free_irq(ab8500->irq, ab8500);
-
- kfree(ab8500->oldmask);
- kfree(ab8500->mask);
- kfree(ab8500);
return 0;
}
@@ -1506,7 +1471,7 @@ static struct platform_driver ab8500_core_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_probe,
- .remove = __devexit_p(ab8500_remove),
+ .remove = ab8500_remove,
.id_table = ab8500_id,
};
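Note: the ab8500_probe()/ab8500_remove() changes above replace kzalloc()/request_threaded_irq() and the goto-based error unwinding with device-managed (devm_*) allocations, so failing paths can simply return. A minimal sketch of that pattern follows, using a hypothetical example_chip platform driver rather than the ab8500 itself:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_chip {
	int irq;
};

static irqreturn_t example_irq_thread(int irq, void *data)
{
	/* service the hypothetical chip's interrupt */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_chip *chip;
	int ret;

	/* freed automatically on probe failure or driver unbind */
	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	/* the devres core releases the IRQ, so errors can just return */
	ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL,
					example_irq_thread, IRQF_ONESHOT,
					dev_name(&pdev->dev), chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}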
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c4cb806978ac..5a8e707bc038 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -552,7 +552,7 @@ static struct dentry *ab8500_bank_file;
static struct dentry *ab8500_address_file;
static struct dentry *ab8500_val_file;
-static int __devinit ab8500_debug_probe(struct platform_device *plf)
+static int ab8500_debug_probe(struct platform_device *plf)
{
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
@@ -597,7 +597,7 @@ exit_no_debugfs:
return -ENOMEM;
}
-static int __devexit ab8500_debug_remove(struct platform_device *plf)
+static int ab8500_debug_remove(struct platform_device *plf)
{
debugfs_remove(ab8500_val_file);
debugfs_remove(ab8500_address_file);
@@ -614,7 +614,7 @@ static struct platform_driver ab8500_debug_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_debug_probe,
- .remove = __devexit_p(ab8500_debug_remove)
+ .remove = ab8500_debug_remove
};
static int __init ab8500_debug_init(void)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 29d72a259c85..3fb1f40d6389 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -571,7 +571,7 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
gpadc->cal_data[ADC_INPUT_VBAT].offset);
}
-static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
+static int ab8500_gpadc_probe(struct platform_device *pdev)
{
int ret = 0;
struct ab8500_gpadc *gpadc;
@@ -634,7 +634,7 @@ fail:
return ret;
}
-static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
+static int ab8500_gpadc_remove(struct platform_device *pdev)
{
struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
@@ -651,7 +651,7 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
- .remove = __devexit_p(ab8500_gpadc_remove),
+ .remove = ab8500_gpadc_remove,
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff0..8a33b2c7eead 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -49,13 +49,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
(u8)(reg & 0xFF), mask, value);
}
-static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
+static int ab8500_sysctrl_probe(struct platform_device *pdev)
{
sysctrl_dev = &pdev->dev;
return 0;
}
-static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
+static int ab8500_sysctrl_remove(struct platform_device *pdev)
{
sysctrl_dev = NULL;
return 0;
@@ -67,7 +67,7 @@ static struct platform_driver ab8500_sysctrl_driver = {
.owner = THIS_MODULE,
},
.probe = ab8500_sysctrl_probe,
- .remove = __devexit_p(ab8500_sysctrl_remove),
+ .remove = ab8500_sysctrl_remove,
};
static int __init ab8500_sysctrl_init(void)
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index ea8b9475731d..210dd038bb5a 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -203,7 +203,7 @@ static int adp5520_remove_subdevs(struct adp5520_chip *chip)
return device_for_each_child(chip->dev, NULL, __remove_subdev);
}
-static int __devinit adp5520_probe(struct i2c_client *client,
+static int adp5520_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adp5520_platform_data *pdata = client->dev.platform_data;
@@ -307,7 +307,7 @@ out_free_chip:
return ret;
}
-static int __devexit adp5520_remove(struct i2c_client *client)
+static int adp5520_remove(struct i2c_client *client)
{
struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
@@ -356,7 +356,7 @@ static struct i2c_driver adp5520_driver = {
.pm = &adp5520_pm,
},
.probe = adp5520_probe,
- .remove = __devexit_p(adp5520_remove),
+ .remove = adp5520_remove,
.id_table = adp5520_id,
};
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index f4f9bf84bc7b..222c03a5ddc0 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)
return ret;
}
- regcache_sync(arizona->regmap);
+ ret = regcache_sync(arizona->regmap);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to restore register cache\n");
+ regulator_disable(arizona->dcvdd);
+ return ret;
+ }
return 0;
}
@@ -272,6 +277,7 @@ static struct mfd_cell early_devs[] = {
static struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
+ { .name = "arizona-haptics" },
{ .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5102-codec" },
@@ -280,16 +286,18 @@ static struct mfd_cell wm5102_devs[] = {
static struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-extcon" },
{ .name = "arizona-gpio" },
+ { .name = "arizona-haptics" },
{ .name = "arizona-micsupp" },
{ .name = "arizona-pwm" },
{ .name = "wm5110-codec" },
};
-int __devinit arizona_dev_init(struct arizona *arizona)
+int arizona_dev_init(struct arizona *arizona)
{
struct device *dev = arizona->dev;
const char *type_name;
unsigned int reg, val;
+ int (*apply_patch)(struct arizona *) = NULL;
int ret, i;
dev_set_drvdata(arizona->dev, arizona);
@@ -389,7 +397,7 @@ int __devinit arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5102;
}
- ret = wm5102_patch(arizona);
+ apply_patch = wm5102_patch;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -400,7 +408,7 @@ int __devinit arizona_dev_init(struct arizona *arizona)
arizona->type);
arizona->type = WM5110;
}
- ret = wm5110_patch(arizona);
+ apply_patch = wm5110_patch;
break;
#endif
default:
@@ -410,9 +418,6 @@ int __devinit arizona_dev_init(struct arizona *arizona)
dev_info(dev, "%s revision %c\n", type_name, arizona->rev + 'A');
- if (ret != 0)
- dev_err(arizona->dev, "Failed to apply patch: %d\n", ret);
-
/* If we have a /RESET GPIO we'll already be reset */
if (!arizona->pdata.reset) {
regcache_mark_dirty(arizona->regmap);
@@ -436,6 +441,15 @@ int __devinit arizona_dev_init(struct arizona *arizona)
goto err_reset;
}
+ if (apply_patch) {
+ ret = apply_patch(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to apply patch: %d\n",
+ ret);
+ goto err_reset;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
if (!arizona->pdata.gpio_defaults[i])
continue;
@@ -561,7 +575,7 @@ err_early:
}
EXPORT_SYMBOL_GPL(arizona_dev_init);
-int __devexit arizona_dev_exit(struct arizona *arizona)
+int arizona_dev_exit(struct arizona *arizona)
{
mfd_remove_devices(arizona->dev);
arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 570c4b438086..44a1bb969841 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -22,7 +22,7 @@
#include "arizona.h"
-static __devinit int arizona_i2c_probe(struct i2c_client *i2c,
+static int arizona_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct arizona *arizona;
@@ -65,7 +65,7 @@ static __devinit int arizona_i2c_probe(struct i2c_client *i2c,
return arizona_dev_init(arizona);
}
-static int __devexit arizona_i2c_remove(struct i2c_client *i2c)
+static int arizona_i2c_remove(struct i2c_client *i2c)
{
struct arizona *arizona = dev_get_drvdata(&i2c->dev);
arizona_dev_exit(arizona);
@@ -86,7 +86,7 @@ static struct i2c_driver arizona_i2c_driver = {
.pm = &arizona_pm_ops,
},
.probe = arizona_i2c_probe,
- .remove = __devexit_p(arizona_i2c_remove),
+ .remove = arizona_i2c_remove,
.id_table = arizona_i2c_id,
};
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index b1b009177405..2bec5f0db3ee 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5102_aod;
irq = &wm5102_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5110_aod;
irq = &wm5110_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
default:
@@ -224,6 +210,7 @@ int arizona_irq_init(struct arizona *arizona)
arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
arizona);
if (!arizona->virq) {
+ dev_err(arizona->dev, "Failed to add core IRQ domain\n");
ret = -EINVAL;
goto err;
}
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index df2e5a8bee28..1b9fdd698b03 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -22,7 +22,7 @@
#include "arizona.h"
-static int __devinit arizona_spi_probe(struct spi_device *spi)
+static int arizona_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct arizona *arizona;
@@ -65,7 +65,7 @@ static int __devinit arizona_spi_probe(struct spi_device *spi)
return arizona_dev_init(arizona);
}
-static int __devexit arizona_spi_remove(struct spi_device *spi)
+static int arizona_spi_remove(struct spi_device *spi)
{
struct arizona *arizona = dev_get_drvdata(&spi->dev);
arizona_dev_exit(arizona);
@@ -86,7 +86,7 @@ static struct spi_driver arizona_spi_driver = {
.pm = &arizona_pm_ops,
},
.probe = arizona_spi_probe,
- .remove = __devexit_p(arizona_spi_remove),
+ .remove = arizona_spi_remove,
.id_table = arizona_spi_ids,
};
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
new file mode 100644
index 000000000000..e994c9691124
--- /dev/null
+++ b/drivers/mfd/as3711.c
@@ -0,0 +1,217 @@
+/*
+ * AS3711 PMIC MFD driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ AS3711_REGULATOR,
+ AS3711_BACKLIGHT,
+};
+
+/*
+ * Ok to have it static: it is only used during probing and multiple I2C devices
+ * cannot be probed simultaneously. Just make sure to avoid stale data.
+ */
+static struct mfd_cell as3711_subdevs[] = {
+ [AS3711_REGULATOR] = {.name = "as3711-regulator",},
+ [AS3711_BACKLIGHT] = {.name = "as3711-backlight",},
+};
+
+static bool as3711_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_REG_STATUS:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_precious_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ return true;
+ }
+ return false;
+}
+
+static bool as3711_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AS3711_SD_1_VOLTAGE:
+ case AS3711_SD_2_VOLTAGE:
+ case AS3711_SD_3_VOLTAGE:
+ case AS3711_SD_4_VOLTAGE:
+ case AS3711_LDO_1_VOLTAGE:
+ case AS3711_LDO_2_VOLTAGE:
+ case AS3711_LDO_3_VOLTAGE:
+ case AS3711_LDO_4_VOLTAGE:
+ case AS3711_LDO_5_VOLTAGE:
+ case AS3711_LDO_6_VOLTAGE:
+ case AS3711_LDO_7_VOLTAGE:
+ case AS3711_LDO_8_VOLTAGE:
+ case AS3711_SD_CONTROL:
+ case AS3711_GPIO_SIGNAL_OUT:
+ case AS3711_GPIO_SIGNAL_IN:
+ case AS3711_SD_CONTROL_1:
+ case AS3711_SD_CONTROL_2:
+ case AS3711_CURR_CONTROL:
+ case AS3711_CURR1_VALUE:
+ case AS3711_CURR2_VALUE:
+ case AS3711_CURR3_VALUE:
+ case AS3711_STEPUP_CONTROL_1:
+ case AS3711_STEPUP_CONTROL_2:
+ case AS3711_STEPUP_CONTROL_4:
+ case AS3711_STEPUP_CONTROL_5:
+ case AS3711_REG_STATUS:
+ case AS3711_INTERRUPT_STATUS_1:
+ case AS3711_INTERRUPT_STATUS_2:
+ case AS3711_INTERRUPT_STATUS_3:
+ case AS3711_CHARGER_STATUS_1:
+ case AS3711_CHARGER_STATUS_2:
+ case AS3711_ASIC_ID_1:
+ case AS3711_ASIC_ID_2:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config as3711_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = as3711_volatile_reg,
+ .readable_reg = as3711_readable_reg,
+ .precious_reg = as3711_precious_reg,
+ .max_register = AS3711_MAX_REGS,
+ .num_reg_defaults_raw = AS3711_MAX_REGS,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int as3711_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct as3711 *as3711;
+ struct as3711_platform_data *pdata = client->dev.platform_data;
+ unsigned int id1, id2;
+ int ret;
+
+ if (!pdata)
+ dev_dbg(&client->dev, "Platform data not found\n");
+
+ as3711 = devm_kzalloc(&client->dev, sizeof(struct as3711), GFP_KERNEL);
+ if (!as3711) {
+ dev_err(&client->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ as3711->dev = &client->dev;
+ i2c_set_clientdata(client, as3711);
+
+ if (client->irq)
+ dev_notice(&client->dev, "IRQ not supported yet\n");
+
+ as3711->regmap = devm_regmap_init_i2c(client, &as3711_regmap_config);
+ if (IS_ERR(as3711->regmap)) {
+ ret = PTR_ERR(as3711->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_1, &id1);
+ if (!ret)
+ ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_2, &id2);
+ if (ret < 0) {
+ dev_err(&client->dev, "regmap_read() failed: %d\n", ret);
+ return ret;
+ }
+ if (id1 != 0x8b)
+ return -ENODEV;
+ dev_info(as3711->dev, "AS3711 detected: %x:%x\n", id1, id2);
+
+ /* We can reuse as3711_subdevs[]; it will be copied in mfd_add_devices() */
+ if (pdata) {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = &pdata->regulator;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = sizeof(pdata->regulator);
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = &pdata->backlight;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = sizeof(pdata->backlight);
+ } else {
+ as3711_subdevs[AS3711_REGULATOR].platform_data = NULL;
+ as3711_subdevs[AS3711_REGULATOR].pdata_size = 0;
+ as3711_subdevs[AS3711_BACKLIGHT].platform_data = NULL;
+ as3711_subdevs[AS3711_BACKLIGHT].pdata_size = 0;
+ }
+
+ ret = mfd_add_devices(as3711->dev, -1, as3711_subdevs,
+ ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
+ if (ret < 0)
+ dev_err(&client->dev, "add mfd devices failed: %d\n", ret);
+
+ return ret;
+}
+
+static int as3711_i2c_remove(struct i2c_client *client)
+{
+ struct as3711 *as3711 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(as3711->dev);
+ return 0;
+}
+
+static const struct i2c_device_id as3711_i2c_id[] = {
+ {.name = "as3711", .driver_data = 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, as3711_i2c_id);
+
+static struct i2c_driver as3711_i2c_driver = {
+ .driver = {
+ .name = "as3711",
+ .owner = THIS_MODULE,
+ },
+ .probe = as3711_i2c_probe,
+ .remove = as3711_i2c_remove,
+ .id_table = as3711_i2c_id,
+};
+
+static int __init as3711_i2c_init(void)
+{
+ return i2c_add_driver(&as3711_i2c_driver);
+}
+/* Initialise early */
+subsys_initcall(as3711_i2c_init);
+
+static void __exit as3711_i2c_exit(void)
+{
+ i2c_del_driver(&as3711_i2c_driver);
+}
+module_exit(as3711_i2c_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("AS3711 PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 62f0883a7630..1b15986c01e1 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -1039,7 +1039,7 @@ static int __init asic3_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit asic3_remove(struct platform_device *pdev)
+static int asic3_remove(struct platform_device *pdev)
{
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
@@ -1071,7 +1071,7 @@ static struct platform_driver asic3_device_driver = {
.driver = {
.name = "asic3",
},
- .remove = __devexit_p(asic3_remove),
+ .remove = asic3_remove,
.shutdown = asic3_shutdown,
};
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 2b282133c725..2e4752a9220a 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -71,9 +71,9 @@ static int cs5535_mfd_res_disable(struct platform_device *pdev)
return 0;
}
-static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
+static struct resource cs5535_mfd_resources[NR_BARS];
-static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
+static struct mfd_cell cs5535_mfd_cells[] = {
{
.id = SMB_BAR,
.name = "cs5535-smb",
@@ -113,7 +113,7 @@ static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
};
#ifdef CONFIG_OLPC
-static void __devinit cs5535_clone_olpc_cells(void)
+static void cs5535_clone_olpc_cells(void)
{
const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" };
@@ -126,7 +126,7 @@ static void __devinit cs5535_clone_olpc_cells(void)
static void cs5535_clone_olpc_cells(void) { }
#endif
-static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
+static int cs5535_mfd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int err, i;
@@ -166,7 +166,7 @@ err_disable:
return err;
}
-static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
+static void cs5535_mfd_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
pci_disable_device(pdev);
@@ -183,7 +183,7 @@ static struct pci_driver cs5535_mfd_driver = {
.name = DRV_NAME,
.id_table = cs5535_mfd_pci_tbl,
.probe = cs5535_mfd_probe,
- .remove = __devexit_p(cs5535_mfd_remove),
+ .remove = cs5535_mfd_remove,
};
module_pci_driver(cs5535_mfd_driver);
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1924b857a0fb..05176cd2862b 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -246,7 +246,7 @@ int da903x_query_status(struct device *dev, unsigned int sbits)
}
EXPORT_SYMBOL(da903x_query_status);
-static int __devinit da9030_init_chip(struct da903x_chip *chip)
+static int da9030_init_chip(struct da903x_chip *chip)
{
uint8_t chip_id;
int err;
@@ -459,7 +459,7 @@ static int da903x_remove_subdevs(struct da903x_chip *chip)
return device_for_each_child(chip->dev, NULL, __remove_subdev);
}
-static int __devinit da903x_add_subdevs(struct da903x_chip *chip,
+static int da903x_add_subdevs(struct da903x_chip *chip,
struct da903x_platform_data *pdata)
{
struct da903x_subdev_info *subdev;
@@ -491,7 +491,7 @@ failed:
return ret;
}
-static int __devinit da903x_probe(struct i2c_client *client,
+static int da903x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct da903x_platform_data *pdata = client->dev.platform_data;
@@ -544,7 +544,7 @@ out_free_chip:
return ret;
}
-static int __devexit da903x_remove(struct i2c_client *client)
+static int da903x_remove(struct i2c_client *client)
{
struct da903x_chip *chip = i2c_get_clientdata(client);
@@ -560,7 +560,7 @@ static struct i2c_driver da903x_driver = {
.owner = THIS_MODULE,
},
.probe = da903x_probe,
- .remove = __devexit_p(da903x_remove),
+ .remove = da903x_remove,
.id_table = da903x_id_table,
};
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index a0a62b24621b..a3c9613f9166 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -24,16 +23,6 @@
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/reg.h>
-#define DA9052_NUM_IRQ_REGS 4
-#define DA9052_IRQ_MASK_POS_1 0x01
-#define DA9052_IRQ_MASK_POS_2 0x02
-#define DA9052_IRQ_MASK_POS_3 0x04
-#define DA9052_IRQ_MASK_POS_4 0x08
-#define DA9052_IRQ_MASK_POS_5 0x10
-#define DA9052_IRQ_MASK_POS_6 0x20
-#define DA9052_IRQ_MASK_POS_7 0x40
-#define DA9052_IRQ_MASK_POS_8 0x80
-
static bool da9052_reg_readable(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -425,15 +414,6 @@ err:
}
EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
-static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
-{
- struct da9052 *da9052 = irq_data;
-
- complete(&da9052->done);
-
- return IRQ_HANDLED;
-}
-
int da9052_adc_read_temp(struct da9052 *da9052)
{
int tbat;
@@ -447,75 +427,7 @@ int da9052_adc_read_temp(struct da9052 *da9052)
}
EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
-static struct resource da9052_rtc_resource = {
- .name = "ALM",
- .start = DA9052_IRQ_ALARM,
- .end = DA9052_IRQ_ALARM,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_onkey_resource = {
- .name = "ONKEY",
- .start = DA9052_IRQ_NONKEY,
- .end = DA9052_IRQ_NONKEY,
- .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_bat_resources[] = {
- {
- .name = "BATT TEMP",
- .start = DA9052_IRQ_TBAT,
- .end = DA9052_IRQ_TBAT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN DET",
- .start = DA9052_IRQ_DCIN,
- .end = DA9052_IRQ_DCIN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "DCIN REM",
- .start = DA9052_IRQ_DCINREM,
- .end = DA9052_IRQ_DCINREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS DET",
- .start = DA9052_IRQ_VBUS,
- .end = DA9052_IRQ_VBUS,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "VBUS REM",
- .start = DA9052_IRQ_VBUSREM,
- .end = DA9052_IRQ_VBUSREM,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "CHG END",
- .start = DA9052_IRQ_CHGEND,
- .end = DA9052_IRQ_CHGEND,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource da9052_tsi_resources[] = {
- {
- .name = "PENDWN",
- .start = DA9052_IRQ_PENDOWN,
- .end = DA9052_IRQ_PENDOWN,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "TSIRDY",
- .start = DA9052_IRQ_TSIREADY,
- .end = DA9052_IRQ_TSIREADY,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mfd_cell __devinitdata da9052_subdev_info[] = {
+static struct mfd_cell da9052_subdev_info[] = {
{
.name = "da9052-regulator",
.id = 1,
@@ -574,13 +486,9 @@ static struct mfd_cell __devinitdata da9052_subdev_info[] = {
},
{
.name = "da9052-onkey",
- .resources = &da9052_onkey_resource,
- .num_resources = 1,
},
{
.name = "da9052-rtc",
- .resources = &da9052_rtc_resource,
- .num_resources = 1,
},
{
.name = "da9052-gpio",
@@ -602,160 +510,15 @@ static struct mfd_cell __devinitdata da9052_subdev_info[] = {
},
{
.name = "da9052-tsi",
- .resources = da9052_tsi_resources,
- .num_resources = ARRAY_SIZE(da9052_tsi_resources),
},
{
.name = "da9052-bat",
- .resources = da9052_bat_resources,
- .num_resources = ARRAY_SIZE(da9052_bat_resources),
},
{
.name = "da9052-watchdog",
},
};
-static struct regmap_irq da9052_irqs[] = {
- [DA9052_IRQ_DCIN] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_VBUS] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_DCINREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_VBUSREM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_VDDLOW] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ALARM] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_SEQRDY] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_COMP1V2] = {
- .reg_offset = 0,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_NONKEY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_IDFLOAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_IDGND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_CHGEND] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_TBAT] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_ADC_EOM] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_PENDOWN] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_TSIREADY] = {
- .reg_offset = 1,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI0] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI1] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI2] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI3] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI4] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI5] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI6] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI7] = {
- .reg_offset = 2,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
- [DA9052_IRQ_GPI8] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_1,
- },
- [DA9052_IRQ_GPI9] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_2,
- },
- [DA9052_IRQ_GPI10] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_3,
- },
- [DA9052_IRQ_GPI11] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_4,
- },
- [DA9052_IRQ_GPI12] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_5,
- },
- [DA9052_IRQ_GPI13] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_6,
- },
- [DA9052_IRQ_GPI14] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_7,
- },
- [DA9052_IRQ_GPI15] = {
- .reg_offset = 3,
- .mask = DA9052_IRQ_MASK_POS_8,
- },
-};
-
-static struct regmap_irq_chip da9052_regmap_irq_chip = {
- .name = "da9052_irq",
- .status_base = DA9052_EVENT_A_REG,
- .mask_base = DA9052_IRQ_MASK_A_REG,
- .ack_base = DA9052_EVENT_A_REG,
- .num_regs = DA9052_NUM_IRQ_REGS,
- .irqs = da9052_irqs,
- .num_irqs = ARRAY_SIZE(da9052_irqs),
-};
-
struct regmap_config da9052_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -769,7 +532,7 @@ struct regmap_config da9052_regmap_config = {
};
EXPORT_SYMBOL_GPL(da9052_regmap_config);
-int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
+int da9052_device_init(struct da9052 *da9052, u8 chip_id)
{
struct da9052_pdata *pdata = da9052->dev->platform_data;
int ret;
@@ -782,45 +545,31 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->chip_id = chip_id;
- if (!pdata || !pdata->irq_base)
- da9052->irq_base = -1;
- else
- da9052->irq_base = pdata->irq_base;
-
- ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- da9052->irq_base, &da9052_regmap_irq_chip,
- &da9052->irq_data);
- if (ret < 0)
- goto regmap_err;
-
- da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
-
- ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "adc irq", da9052);
- if (ret != 0)
- dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+ ret = da9052_irq_init(da9052);
+ if (ret != 0) {
+ dev_err(da9052->dev, "da9052_irq_init failed: %d\n", ret);
+ return ret;
+ }
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
- if (ret)
+ if (ret) {
+ dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
goto err;
+ }
return 0;
err:
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- mfd_remove_devices(da9052->dev);
-regmap_err:
+ da9052_irq_exit(da9052);
+
return ret;
}
void da9052_device_exit(struct da9052 *da9052)
{
- free_irq(DA9052_IRQ_ADC_EOM, da9052);
- regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
+ da9052_irq_exit(da9052);
}
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 352c58b5a90d..885e56780358 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -27,6 +27,66 @@
#include <linux/of_device.h>
#endif
+/* I2C safe register check */
+static inline bool i2c_safe_reg(unsigned char reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * There is an issue with the DA9052 and DA9053_AA/BA/BB PMICs where the PMIC
+ * gets locked up or fails to respond following a system reset.
+ * The fix is to follow any read or write with a dummy read to a safe
+ * register.
+ */
+int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+{
+ int val;
+
+ switch (da9052->chip_id) {
+ case DA9052:
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ /* A dummy read to a safe register address. */
+ if (!i2c_safe_reg(reg))
+ return regmap_read(da9052->regmap,
+ DA9052_PARK_REGISTER,
+ &val);
+ break;
+ default:
+ /*
+ * For other chips, parking the read at a safe
+ * register is not required.
+ */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(da9052_i2c_fix);
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -64,7 +124,7 @@ static const struct of_device_id dialog_dt_ids[] = {
};
#endif
-static int __devinit da9052_i2c_probe(struct i2c_client *client,
+static int da9052_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct da9052 *da9052;
@@ -83,6 +143,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
da9052->dev = &client->dev;
da9052->chip_irq = client->irq;
+ da9052->fix_io = da9052_i2c_fix;
i2c_set_clientdata(client, da9052);
@@ -121,7 +182,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
return 0;
}
-static int __devexit da9052_i2c_remove(struct i2c_client *client)
+static int da9052_i2c_remove(struct i2c_client *client)
{
struct da9052 *da9052 = i2c_get_clientdata(client);
@@ -131,7 +192,7 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
static struct i2c_driver da9052_i2c_driver = {
.probe = da9052_i2c_probe,
- .remove = __devexit_p(da9052_i2c_remove),
+ .remove = da9052_i2c_remove,
.id_table = da9052_i2c_id,
.driver = {
.name = "da9052",
diff --git a/drivers/mfd/da9052-irq.c b/drivers/mfd/da9052-irq.c
new file mode 100644
index 000000000000..57ae7841f536
--- /dev/null
+++ b/drivers/mfd/da9052-irq.c
@@ -0,0 +1,288 @@
+/*
+ * DA9052 interrupt support
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ * Based on arizona-irq.c, which is:
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+static int da9052_map_irq(struct da9052 *da9052, int irq)
+{
+ return regmap_irq_get_virq(da9052->irq_data, irq);
+}
+
+int da9052_enable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ enable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_enable_irq);
+
+int da9052_disable_irq(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq);
+
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ disable_irq_nosync(irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);
+
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ name, data);
+}
+EXPORT_SYMBOL_GPL(da9052_request_irq);
+
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
+{
+ irq = da9052_map_irq(da9052, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+EXPORT_SYMBOL_GPL(da9052_free_irq);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_irq_init(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ -1, &da9052_regmap_irq_chip,
+ &da9052->irq_data);
+ if (ret < 0) {
+ dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
+ goto regmap_err;
+ }
+
+ ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
+ da9052_auxadc_irq, da9052);
+
+ if (ret != 0) {
+ dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
+ goto request_irq_err;
+ }
+
+ return 0;
+
+request_irq_err:
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+regmap_err:
+ return ret;
+
+}
+
+int da9052_irq_exit(struct da9052 *da9052)
+{
+ da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
+ regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+
+ return 0;
+}
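
The helpers exported above (da9052_request_irq(), da9052_free_irq(), da9052_enable_irq(), da9052_disable_irq()) hide the regmap-irq virq lookup from the DA9052 sub-drivers. A minimal consumer sketch, assuming a hypothetical sub-driver that wants the DCIN detection interrupt; everything except the da9052_* helpers and DA9052_IRQ_DCIN is illustrative:

/* Sketch only: assumes <linux/interrupt.h> and <linux/mfd/da9052/da9052.h>. */
static irqreturn_t example_dcin_handler(int irq, void *data)
{
        struct da9052 *da9052 = data;

        /* Runs as a threaded handler, so sleeping I2C/SPI access is fine */
        dev_info(da9052->dev, "DCIN plug event\n");
        return IRQ_HANDLED;
}

static int example_subdev_init(struct da9052 *da9052)
{
        /* Maps DA9052_IRQ_DCIN to its virq and requests a threaded handler */
        return da9052_request_irq(da9052, DA9052_IRQ_DCIN, "dcin-detect",
                                  example_dcin_handler, da9052);
}

static void example_subdev_exit(struct da9052 *da9052)
{
        da9052_free_irq(da9052, DA9052_IRQ_DCIN, da9052);
}
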
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index dbeadc5a6436..61d63b93576c 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -21,7 +21,7 @@
#include <linux/mfd/da9052/da9052.h>
-static int __devinit da9052_spi_probe(struct spi_device *spi)
+static int da9052_spi_probe(struct spi_device *spi)
{
int ret;
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -58,7 +58,7 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
return 0;
}
-static int __devexit da9052_spi_remove(struct spi_device *spi)
+static int da9052_spi_remove(struct spi_device *spi)
{
struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
@@ -76,7 +76,7 @@ static struct spi_device_id da9052_spi_id[] = {
static struct spi_driver da9052_spi_driver = {
.probe = da9052_spi_probe,
- .remove = __devexit_p(da9052_spi_remove),
+ .remove = da9052_spi_remove,
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
diff --git a/drivers/mfd/da9055-core.c b/drivers/mfd/da9055-core.c
index ff6c77f392bd..f56a1a9f7777 100644
--- a/drivers/mfd/da9055-core.c
+++ b/drivers/mfd/da9055-core.c
@@ -377,7 +377,7 @@ static struct regmap_irq_chip da9055_regmap_irq_chip = {
.num_irqs = ARRAY_SIZE(da9055_irqs),
};
-int __devinit da9055_device_init(struct da9055 *da9055)
+int da9055_device_init(struct da9055 *da9055)
{
struct da9055_pdata *pdata = da9055->dev->platform_data;
int ret;
@@ -412,7 +412,7 @@ err:
return ret;
}
-void __devexit da9055_device_exit(struct da9055 *da9055)
+void da9055_device_exit(struct da9055 *da9055)
{
regmap_del_irq_chip(da9055->chip_irq, da9055->irq_data);
mfd_remove_devices(da9055->dev);
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 88f6dca53bac..607387ffe8ca 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -18,7 +18,7 @@
#include <linux/mfd/da9055/core.h>
-static int __devinit da9055_i2c_probe(struct i2c_client *i2c,
+static int da9055_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da9055 *da9055;
@@ -44,7 +44,7 @@ static int __devinit da9055_i2c_probe(struct i2c_client *i2c,
return da9055_device_init(da9055);
}
-static int __devexit da9055_i2c_remove(struct i2c_client *i2c)
+static int da9055_i2c_remove(struct i2c_client *i2c)
{
struct da9055 *da9055 = i2c_get_clientdata(i2c);
@@ -60,7 +60,7 @@ static struct i2c_device_id da9055_i2c_id[] = {
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
- .remove = __devexit_p(da9055_i2c_remove),
+ .remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
.name = "da9055",
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 45e83a68641b..c0bcc872af4e 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -151,7 +151,7 @@ fail1:
return ret;
}
-static int __devexit davinci_vc_remove(struct platform_device *pdev)
+static int davinci_vc_remove(struct platform_device *pdev)
{
struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
@@ -174,7 +174,7 @@ static struct platform_driver davinci_vc_driver = {
.name = "davinci_voicecodec",
.owner = THIS_MODULE,
},
- .remove = __devexit_p(davinci_vc_remove),
+ .remove = davinci_vc_remove,
};
static int __init davinci_vc_init(void)
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index b96661d453aa..268f45d42394 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2524,7 +2524,7 @@ static bool read_mailbox_0(void)
for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
if (ev & prcmu_irq_bit[n])
- generic_handle_irq(IRQ_PRCMU_BASE + n);
+ generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
}
r = true;
break;
@@ -2737,27 +2737,38 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
}
static struct irq_domain_ops db8500_irq_ops = {
- .map = db8500_irq_map,
- .xlate = irq_domain_xlate_twocell,
+ .map = db8500_irq_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int db8500_irq_init(struct device_node *np)
{
- db8500_irq_domain = irq_domain_add_legacy(
- np, NUM_PRCMU_WAKEUPS, IRQ_PRCMU_BASE,
- 0, &db8500_irq_ops, NULL);
+ int irq_base = 0;
+ int i;
+
+ /* In the device tree case, just take some IRQs */
+ if (!np)
+ irq_base = IRQ_PRCMU_BASE;
+
+ db8500_irq_domain = irq_domain_add_simple(
+ np, NUM_PRCMU_WAKEUPS, irq_base,
+ &db8500_irq_ops, NULL);
if (!db8500_irq_domain) {
pr_err("Failed to create irqdomain\n");
return -ENOSYS;
}
+ /* All wakeups will be used, so create mappings for all */
+ for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
+ irq_create_mapping(db8500_irq_domain, i);
+
return 0;
}
void __init db8500_prcmu_early_init(void)
{
- if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2() || cpu_is_u9540()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
@@ -2775,7 +2786,11 @@ void __init db8500_prcmu_early_init(void)
iounmap(tcpm_base);
}
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+ if (cpu_is_u9540())
+ tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
+ SZ_4K + SZ_8K) + SZ_8K;
+ else
+ tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
} else {
pr_err("prcmu: Unsupported chip version\n");
BUG();
@@ -3088,7 +3103,7 @@ static void db8500_prcmu_update_cpufreq(void)
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
+static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index db662e2dcfa5..b7a61f0f27a4 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -371,7 +371,7 @@ static int pcap_remove_subdev(struct device *dev, void *unused)
return 0;
}
-static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
+static int pcap_add_subdev(struct pcap_chip *pcap,
struct pcap_subdev *subdev)
{
struct platform_device *pdev;
@@ -391,7 +391,7 @@ static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
return ret;
}
-static int __devexit ezx_pcap_remove(struct spi_device *spi)
+static int ezx_pcap_remove(struct spi_device *spi)
{
struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
struct pcap_platform_data *pdata = spi->dev.platform_data;
@@ -420,7 +420,7 @@ static int __devexit ezx_pcap_remove(struct spi_device *spi)
return 0;
}
-static int __devinit ezx_pcap_probe(struct spi_device *spi)
+static int ezx_pcap_probe(struct spi_device *spi)
{
struct pcap_platform_data *pdata = spi->dev.platform_data;
struct pcap_chip *pcap;
@@ -525,7 +525,7 @@ ret:
static struct spi_driver ezxpcap_driver = {
.probe = ezx_pcap_probe,
- .remove = __devexit_p(ezx_pcap_remove),
+ .remove = ezx_pcap_remove,
.driver = {
.name = "ezx-pcap",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index d55065cc324c..324187c0c124 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -327,7 +327,7 @@ static void htcpld_chip_reset(struct i2c_client *client)
client, (chip_data->cache_out = chip_data->reset));
}
-static int __devinit htcpld_setup_chip_irq(
+static int htcpld_setup_chip_irq(
struct platform_device *pdev,
int chip_index)
{
@@ -361,7 +361,7 @@ static int __devinit htcpld_setup_chip_irq(
return ret;
}
-static int __devinit htcpld_register_chip_i2c(
+static int htcpld_register_chip_i2c(
struct platform_device *pdev,
int chip_index)
{
@@ -419,7 +419,7 @@ static int __devinit htcpld_register_chip_i2c(
return 0;
}
-static void __devinit htcpld_unregister_chip_i2c(
+static void htcpld_unregister_chip_i2c(
struct platform_device *pdev,
int chip_index)
{
@@ -434,7 +434,7 @@ static void __devinit htcpld_unregister_chip_i2c(
i2c_unregister_device(chip->client);
}
-static int __devinit htcpld_register_chip_gpio(
+static int htcpld_register_chip_gpio(
struct platform_device *pdev,
int chip_index)
{
@@ -501,7 +501,7 @@ static int __devinit htcpld_register_chip_gpio(
return 0;
}
-static int __devinit htcpld_setup_chips(struct platform_device *pdev)
+static int htcpld_setup_chips(struct platform_device *pdev)
{
struct htcpld_data *htcpld;
struct device *dev = &pdev->dev;
@@ -563,7 +563,7 @@ static int __devinit htcpld_setup_chips(struct platform_device *pdev)
return 0;
}
-static int __devinit htcpld_core_probe(struct platform_device *pdev)
+static int htcpld_core_probe(struct platform_device *pdev)
{
struct htcpld_data *htcpld;
struct device *dev = &pdev->dev;
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 266bdc5bd96d..ab8d0b2739b2 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -306,7 +306,7 @@ int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg, u8 *val)
}
EXPORT_SYMBOL_GPL(intel_msic_irq_read);
-static int __devinit intel_msic_init_devices(struct intel_msic *msic)
+static int intel_msic_init_devices(struct intel_msic *msic)
{
struct platform_device *pdev = msic->pdev;
struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
@@ -364,7 +364,7 @@ fail:
return ret;
}
-static void __devexit intel_msic_remove_devices(struct intel_msic *msic)
+static void intel_msic_remove_devices(struct intel_msic *msic)
{
struct platform_device *pdev = msic->pdev;
struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
@@ -375,7 +375,7 @@ static void __devexit intel_msic_remove_devices(struct intel_msic *msic)
gpio_free(pdata->ocd->gpio);
}
-static int __devinit intel_msic_probe(struct platform_device *pdev)
+static int intel_msic_probe(struct platform_device *pdev)
{
struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
struct intel_msic *msic;
@@ -445,7 +445,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit intel_msic_remove(struct platform_device *pdev)
+static int intel_msic_remove(struct platform_device *pdev)
{
struct intel_msic *msic = platform_get_drvdata(pdev);
@@ -457,7 +457,7 @@ static int __devexit intel_msic_remove(struct platform_device *pdev)
static struct platform_driver intel_msic_driver = {
.probe = intel_msic_probe,
- .remove = __devexit_p(intel_msic_remove),
+ .remove = intel_msic_remove,
.driver = {
.name = "intel_msic",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 965c4801df8a..45ece11cc27c 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -63,7 +63,7 @@ struct cmodio_device {
* Subdevices using the mfd-core API
*/
-static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
+static int cmodio_setup_subdevice(struct cmodio_device *priv,
char *name, unsigned int devno,
unsigned int modno)
{
@@ -120,7 +120,7 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
}
/* Probe each submodule using kernel parameters */
-static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
+static int cmodio_probe_submodules(struct cmodio_device *priv)
{
struct pci_dev *pdev = priv->pdev;
unsigned int num_probed = 0;
@@ -177,7 +177,7 @@ static const struct attribute_group cmodio_sysfs_attr_group = {
* PCI Driver
*/
-static int __devinit cmodio_pci_probe(struct pci_dev *dev,
+static int cmodio_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct cmodio_device *priv;
@@ -254,7 +254,7 @@ out_return:
return ret;
}
-static void __devexit cmodio_pci_remove(struct pci_dev *dev)
+static void cmodio_pci_remove(struct pci_dev *dev)
{
struct cmodio_device *priv = pci_get_drvdata(dev);
@@ -280,7 +280,7 @@ static struct pci_driver cmodio_pci_driver = {
.name = DRV_NAME,
.id_table = cmodio_pci_ids,
.probe = cmodio_pci_probe,
- .remove = __devexit_p(cmodio_pci_remove),
+ .remove = cmodio_pci_remove,
};
module_pci_driver(cmodio_pci_driver);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index c6b6d7dda517..e80587f1a792 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -202,7 +202,7 @@ static struct mfd_cell jz4740_adc_cells[] = {
},
};
-static int __devinit jz4740_adc_probe(struct platform_device *pdev)
+static int jz4740_adc_probe(struct platform_device *pdev)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
@@ -211,7 +211,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
int ret;
int irq_base;
- adc = kmalloc(sizeof(*adc), GFP_KERNEL);
+ adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
if (!adc) {
dev_err(&pdev->dev, "Failed to allocate driver structure\n");
return -ENOMEM;
@@ -221,30 +221,27 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
if (adc->irq < 0) {
ret = adc->irq;
dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
- goto err_free;
+ return ret;
}
irq_base = platform_get_irq(pdev, 1);
if (irq_base < 0) {
- ret = irq_base;
- dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
- goto err_free;
+ dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
+ return irq_base;
}
mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_base) {
- ret = -ENOENT;
dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
- goto err_free;
+ return -ENOENT;
}
/* Only request the shared registers for the MFD driver */
adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
pdev->name);
if (!adc->mem) {
- ret = -EBUSY;
dev_err(&pdev->dev, "Failed to request mmio memory region\n");
- goto err_free;
+ return -EBUSY;
}
adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
@@ -301,13 +298,10 @@ err_iounmap:
iounmap(adc->base);
err_release_mem_region:
release_mem_region(adc->mem->start, resource_size(adc->mem));
-err_free:
- kfree(adc);
-
return ret;
}
-static int __devexit jz4740_adc_remove(struct platform_device *pdev)
+static int jz4740_adc_remove(struct platform_device *pdev)
{
struct jz4740_adc *adc = platform_get_drvdata(pdev);
@@ -325,14 +319,12 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- kfree(adc);
-
return 0;
}
static struct platform_driver jz4740_adc_driver = {
.probe = jz4740_adc_probe,
- .remove = __devexit_p(jz4740_adc_remove),
+ .remove = jz4740_adc_remove,
.driver = {
.name = "jz4740-adc",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 24212f45b201..ceebf2c1ea97 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -382,7 +382,7 @@ static struct attribute_group lm3533_attribute_group = {
.attrs = lm3533_attributes
};
-static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+static int lm3533_device_als_init(struct lm3533 *lm3533)
{
struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
int ret;
@@ -405,7 +405,7 @@ static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
return 0;
}
-static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+static int lm3533_device_bl_init(struct lm3533 *lm3533)
{
struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
int i;
@@ -434,7 +434,7 @@ static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
return 0;
}
-static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+static int lm3533_device_led_init(struct lm3533 *lm3533)
{
struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
int i;
@@ -463,7 +463,7 @@ static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
return 0;
}
-static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+static int lm3533_device_setup(struct lm3533 *lm3533,
struct lm3533_platform_data *pdata)
{
int ret;
@@ -479,7 +479,7 @@ static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
return 0;
}
-static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+static int lm3533_device_init(struct lm3533 *lm3533)
{
struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
int ret;
@@ -534,7 +534,7 @@ err_disable:
return ret;
}
-static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+static void lm3533_device_exit(struct lm3533 *lm3533)
{
dev_dbg(lm3533->dev, "%s\n", __func__);
@@ -596,7 +596,7 @@ static struct regmap_config regmap_config = {
.precious_reg = lm3533_precious_register,
};
-static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+static int lm3533_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lm3533 *lm3533;
@@ -624,7 +624,7 @@ static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+static int lm3533_i2c_remove(struct i2c_client *i2c)
{
struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
@@ -648,7 +648,7 @@ static struct i2c_driver lm3533_i2c_driver = {
},
.id_table = lm3533_i2c_ids,
.probe = lm3533_i2c_probe,
- .remove = __devexit_p(lm3533_i2c_remove),
+ .remove = lm3533_i2c_remove,
};
static int __init lm3533_i2c_init(void)
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index 3e94a699833c..c3d3c9b4d3ad 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -203,7 +203,7 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
}
-static int __devexit lp8788_remove(struct i2c_client *cl)
+static int lp8788_remove(struct i2c_client *cl)
{
struct lp8788 *lp = i2c_get_clientdata(cl);
@@ -224,7 +224,7 @@ static struct i2c_driver lp8788_driver = {
.owner = THIS_MODULE,
},
.probe = lp8788_probe,
- .remove = __devexit_p(lp8788_remove),
+ .remove = lp8788_remove,
.id_table = lp8788_ids,
};
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index a22544fe5319..d9d930302e98 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -196,7 +196,7 @@ enum lpc_chipsets {
LPC_LPT_LP, /* Lynx Point-LP */
};
-struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+struct lpc_ich_info lpc_chipset_info[] = {
[LPC_ICH] = {
.name = "ICH",
.iTCO_version = 1,
@@ -672,7 +672,7 @@ static void lpc_ich_restore_config_space(struct pci_dev *dev)
}
}
-static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+static void lpc_ich_enable_acpi_space(struct pci_dev *dev)
{
u8 reg_save;
@@ -681,7 +681,7 @@ static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
lpc_ich_acpi_save = reg_save;
}
-static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
{
u8 reg_save;
@@ -690,7 +690,7 @@ static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
lpc_ich_gpio_save = reg_save;
}
-static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+static void lpc_ich_finalize_cell(struct mfd_cell *cell,
const struct pci_device_id *id)
{
cell->platform_data = &lpc_chipset_info[id->driver_data];
@@ -702,7 +702,7 @@ static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
* GPIO groups and it's enough to have access to one of these to instantiate
* the device.
*/
-static int __devinit lpc_ich_check_conflict_gpio(struct resource *res)
+static int lpc_ich_check_conflict_gpio(struct resource *res)
{
int ret;
u8 use_gpio = 0;
@@ -721,7 +721,7 @@ static int __devinit lpc_ich_check_conflict_gpio(struct resource *res)
return use_gpio ? use_gpio : ret;
}
-static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+static int lpc_ich_init_gpio(struct pci_dev *dev,
const struct pci_device_id *id)
{
u32 base_addr_cfg;
@@ -734,7 +734,7 @@ static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
lpc_ich_cells[LPC_GPIO].num_resources--;
goto gpe0_done;
}
@@ -760,7 +760,7 @@ gpe0_done:
pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
ret = -ENODEV;
goto gpio_done;
}
@@ -798,7 +798,7 @@ gpio_done:
return ret;
}
-static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+static int lpc_ich_init_wdt(struct pci_dev *dev,
const struct pci_device_id *id)
{
u32 base_addr_cfg;
@@ -810,7 +810,7 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0x0000ff80;
if (!base_addr) {
- dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -830,12 +830,15 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
* we have to read RCBA from PCI Config space 0xf0 and use
* it as base. GCS = RCBA + ICH6_GCS(0x3410).
*/
- if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+ /* Don't register iomem for TCO ver 1 */
+ lpc_ich_cells[LPC_WDT].num_resources--;
+ } else {
pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
base_addr = base_addr_cfg & 0xffffc000;
if (!(base_addr_cfg & 1)) {
- pr_err("RCBA is disabled by hardware/BIOS, "
- "device disabled\n");
+ dev_notice(&dev->dev, "RCBA is disabled by "
+ "hardware/BIOS, device disabled\n");
ret = -ENODEV;
goto wdt_done;
}
@@ -852,7 +855,7 @@ wdt_done:
return ret;
}
-static int __devinit lpc_ich_probe(struct pci_dev *dev,
+static int lpc_ich_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
int ret;
@@ -871,6 +874,7 @@ static int __devinit lpc_ich_probe(struct pci_dev *dev,
* successfully.
*/
if (!cell_added) {
+ dev_warn(&dev->dev, "No MFD cells added\n");
lpc_ich_restore_config_space(dev);
return -ENODEV;
}
@@ -878,7 +882,7 @@ static int __devinit lpc_ich_probe(struct pci_dev *dev,
return 0;
}
-static void __devexit lpc_ich_remove(struct pci_dev *dev)
+static void lpc_ich_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
lpc_ich_restore_config_space(dev);
@@ -888,7 +892,7 @@ static struct pci_driver lpc_ich_driver = {
.name = "lpc_ich",
.id_table = lpc_ich_ids,
.probe = lpc_ich_probe,
- .remove = __devexit_p(lpc_ich_remove),
+ .remove = lpc_ich_remove,
};
static int __init lpc_ich_init(void)
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index f6b9c5c96b24..5624fcbba69b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -83,7 +83,7 @@ static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
-static int __devinit lpc_sch_probe(struct pci_dev *dev,
+static int lpc_sch_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned int base_addr_cfg;
@@ -164,7 +164,7 @@ out_dev:
return ret;
}
-static void __devexit lpc_sch_remove(struct pci_dev *dev)
+static void lpc_sch_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
}
@@ -173,7 +173,7 @@ static struct pci_driver lpc_sch_driver = {
.name = "lpc_sch",
.id_table = lpc_sch_ids,
.probe = lpc_sch_probe,
- .remove = __devexit_p(lpc_sch_remove),
+ .remove = lpc_sch_remove,
};
module_pci_driver(lpc_sch_driver);
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index d9e24c849a00..4d73963cd8f0 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -45,7 +45,7 @@ static struct regmap_config max77686_regmap_config = {
};
#ifdef CONFIG_OF
-static struct of_device_id __devinitdata max77686_pmic_dt_match[] = {
+static struct of_device_id max77686_pmic_dt_match[] = {
{.compatible = "maxim,max77686", .data = 0},
{},
};
@@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
if (max77686 == NULL)
return -ENOMEM;
- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
- if (IS_ERR(max77686->regmap)) {
- ret = PTR_ERR(max77686->regmap);
- dev_err(max77686->dev, "Failed to allocate register map: %d\n",
- ret);
- kfree(max77686);
- return ret;
- }
-
i2c_set_clientdata(i2c, max77686);
max77686->dev = &i2c->dev;
max77686->i2c = i2c;
@@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
+ max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (IS_ERR(max77686->regmap)) {
+ ret = PTR_ERR(max77686->regmap);
+ dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(max77686);
+ return ret;
+ }
+
if (regmap_read(max77686->regmap,
MAX77686_REG_DEVICE_ID, &data) < 0) {
dev_err(max77686->dev,
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cc5155e20494..9e60fed5ff82 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
u8 reg_data;
int ret = 0;
+ if (!pdata) {
+ dev_err(&i2c->dev, "No platform data found.\n");
+ return -EINVAL;
+ }
+
max77693 = devm_kzalloc(&i2c->dev,
sizeof(struct max77693_dev), GFP_KERNEL);
if (max77693 == NULL)
return -ENOMEM;
- max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
- if (IS_ERR(max77693->regmap)) {
- ret = PTR_ERR(max77693->regmap);
- dev_err(max77693->dev,"failed to allocate register map: %d\n",
- ret);
- goto err_regmap;
- }
-
i2c_set_clientdata(i2c, max77693);
max77693->dev = &i2c->dev;
max77693->i2c = i2c;
max77693->irq = i2c->irq;
max77693->type = id->driver_data;
- if (!pdata)
- goto err_regmap;
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(max77693->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
max77693->wakeup = pdata->wakeup;
- if (max77693_read_reg(max77693->regmap,
- MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
+ &reg_data);
+ if (ret < 0) {
dev_err(max77693->dev, "device not found on this channel\n");
- ret = -ENODEV;
- goto err_regmap;
+ return ret;
} else
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
@@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ goto err_regmap_muic;
}
ret = max77693_irq_init(max77693);
@@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
err_mfd:
max77693_irq_exit(max77693);
err_irq:
+err_regmap_muic:
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
-err_regmap:
return ret;
}
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 17f2593d82b8..e9b1c93a3ade 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -183,7 +183,7 @@ static void max8907_power_off(void)
MAX8907_MASK_POWER_OFF, MAX8907_MASK_POWER_OFF);
}
-static __devinit int max8907_i2c_probe(struct i2c_client *i2c,
+static int max8907_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max8907 *max8907;
@@ -288,7 +288,7 @@ err_alloc_drvdata:
return ret;
}
-static __devexit int max8907_i2c_remove(struct i2c_client *i2c)
+static int max8907_i2c_remove(struct i2c_client *i2c)
{
struct max8907 *max8907 = i2c_get_clientdata(i2c);
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 9f54c04912f2..e32466e865b9 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -19,12 +19,12 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>
-static struct resource bk_resources[] __devinitdata = {
+static struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
{ 0x85, 0x85, "control", IORESOURCE_REG, },
};
-static struct mfd_cell bk_devs[] __devinitdata = {
+static struct mfd_cell bk_devs[] = {
{
.name = "max8925-backlight",
.num_resources = ARRAY_SIZE(bk_resources),
@@ -110,99 +110,99 @@ static struct mfd_cell onkey_devs[] = {
},
};
-static struct resource sd1_resources[] __devinitdata = {
+static struct resource sd1_resources[] = {
{0x06, 0x06, "sdv", IORESOURCE_REG, },
};
-static struct resource sd2_resources[] __devinitdata = {
+static struct resource sd2_resources[] = {
{0x09, 0x09, "sdv", IORESOURCE_REG, },
};
-static struct resource sd3_resources[] __devinitdata = {
+static struct resource sd3_resources[] = {
{0x0c, 0x0c, "sdv", IORESOURCE_REG, },
};
-static struct resource ldo1_resources[] __devinitdata = {
+static struct resource ldo1_resources[] = {
{0x1a, 0x1a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo2_resources[] __devinitdata = {
+static struct resource ldo2_resources[] = {
{0x1e, 0x1e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo3_resources[] __devinitdata = {
+static struct resource ldo3_resources[] = {
{0x22, 0x22, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo4_resources[] __devinitdata = {
+static struct resource ldo4_resources[] = {
{0x26, 0x26, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo5_resources[] __devinitdata = {
+static struct resource ldo5_resources[] = {
{0x2a, 0x2a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo6_resources[] __devinitdata = {
+static struct resource ldo6_resources[] = {
{0x2e, 0x2e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo7_resources[] __devinitdata = {
+static struct resource ldo7_resources[] = {
{0x32, 0x32, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo8_resources[] __devinitdata = {
+static struct resource ldo8_resources[] = {
{0x36, 0x36, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo9_resources[] __devinitdata = {
+static struct resource ldo9_resources[] = {
{0x3a, 0x3a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo10_resources[] __devinitdata = {
+static struct resource ldo10_resources[] = {
{0x3e, 0x3e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo11_resources[] __devinitdata = {
+static struct resource ldo11_resources[] = {
{0x42, 0x42, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo12_resources[] __devinitdata = {
+static struct resource ldo12_resources[] = {
{0x46, 0x46, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo13_resources[] __devinitdata = {
+static struct resource ldo13_resources[] = {
{0x4a, 0x4a, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo14_resources[] __devinitdata = {
+static struct resource ldo14_resources[] = {
{0x4e, 0x4e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo15_resources[] __devinitdata = {
+static struct resource ldo15_resources[] = {
{0x52, 0x52, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo16_resources[] __devinitdata = {
+static struct resource ldo16_resources[] = {
{0x12, 0x12, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo17_resources[] __devinitdata = {
+static struct resource ldo17_resources[] = {
{0x16, 0x16, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo18_resources[] __devinitdata = {
+static struct resource ldo18_resources[] = {
{0x74, 0x74, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo19_resources[] __devinitdata = {
+static struct resource ldo19_resources[] = {
{0x5e, 0x5e, "ldov", IORESOURCE_REG, },
};
-static struct resource ldo20_resources[] __devinitdata = {
+static struct resource ldo20_resources[] = {
{0x9e, 0x9e, "ldov", IORESOURCE_REG, },
};
-static struct mfd_cell reg_devs[] __devinitdata = {
+static struct mfd_cell reg_devs[] = {
{
.name = "max8925-regulator",
.id = 0,
@@ -714,7 +714,7 @@ tsc_irq:
return 0;
}
-static void __devinit init_regulator(struct max8925_chip *chip,
+static void init_regulator(struct max8925_chip *chip,
struct max8925_platform_data *pdata)
{
int ret;
@@ -821,7 +821,7 @@ static void __devinit init_regulator(struct max8925_chip *chip,
}
}
-int __devinit max8925_device_init(struct max8925_chip *chip,
+int max8925_device_init(struct max8925_chip *chip,
struct max8925_platform_data *pdata)
{
int ret;
@@ -901,7 +901,7 @@ out:
return ret;
}
-void __devexit max8925_device_exit(struct max8925_chip *chip)
+void max8925_device_exit(struct max8925_chip *chip)
{
if (chip->core_irq)
free_irq(chip->core_irq, chip);
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index d9e4b36edee9..00b5b456063d 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -135,7 +135,7 @@ static const struct i2c_device_id max8925_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, max8925_id_table);
-static int __devinit max8925_probe(struct i2c_client *client,
+static int max8925_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8925_platform_data *pdata = client->dev.platform_data;
@@ -168,7 +168,7 @@ static int __devinit max8925_probe(struct i2c_client *client,
return 0;
}
-static int __devexit max8925_remove(struct i2c_client *client)
+static int max8925_remove(struct i2c_client *client)
{
struct max8925_chip *chip = i2c_get_clientdata(client);
@@ -210,7 +210,7 @@ static struct i2c_driver max8925_driver = {
.pm = &max8925_pm_ops,
},
.probe = max8925_probe,
- .remove = __devexit_p(max8925_remove),
+ .remove = max8925_remove,
.id_table = max8925_id_table,
};
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index f123517065ec..14714058f2d2 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -21,8 +21,10 @@
* This driver is based on max8998.c
*/
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
@@ -47,6 +49,13 @@ static struct mfd_cell max8997_devs[] = {
{ .name = "max8997-led", .id = 2 },
};
+#ifdef CONFIG_OF
+static struct of_device_id max8997_pmic_dt_match[] = {
+ { .compatible = "maxim,max8997-pmic", .data = TYPE_MAX8997 },
+ {},
+};
+#endif
+
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
@@ -123,6 +132,58 @@ int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
}
EXPORT_SYMBOL_GPL(max8997_update_reg);
+#ifdef CONFIG_OF
+/*
+ * Only the common platform data elements for max8997 are parsed here from the
+ * device tree. Other sub-modules of max8997 such as pmic, rtc and others have
+ * to parse their own platform data elements from device tree.
+ *
+ * The max8997 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ struct max8997_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pd->ono = irq_of_parse_and_map(dev->of_node, 1);
+
+ /*
+ * ToDo: the 'wakeup' member in the platform data is more of a
+ * Linux-specific detail, so there is no binding for it yet and it is
+ * not parsed here.
+ */
+
+ return pd;
+}
+#else
+static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+#ifdef CONFIG_OF
+ if (i2c->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
+ return (int)match->data;
+ }
+#endif
+ return (int)id->driver_data;
+}
+
static int max8997_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -137,12 +198,21 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, max8997);
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
- max8997->type = id->driver_data;
+ max8997->type = max8997_i2c_get_driver_data(i2c, id);
max8997->irq = i2c->irq;
+ if (max8997->dev->of_node) {
+ pdata = max8997_i2c_parse_dt_pdata(max8997->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto err;
+ }
+ }
+
if (!pdata)
goto err;
+ max8997->pdata = pdata;
max8997->ono = pdata->ono;
mutex_init(&max8997->iolock);
@@ -434,6 +504,7 @@ static struct i2c_driver max8997_i2c_driver = {
.name = "max8997",
.owner = THIS_MODULE,
.pm = &max8997_pm,
+ .of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
.probe = max8997_i2c_probe,
.remove = max8997_i2c_remove,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 1aba0238f426..2a9b100c4825 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -119,6 +119,11 @@
#define MC13XXX_REVISION_FAB (0x03 << 11)
#define MC13XXX_REVISION_ICIDCODE (0x3f << 13)
+#define MC34708_REVISION_REVMETAL (0x07 << 0)
+#define MC34708_REVISION_REVFULL (0x07 << 3)
+#define MC34708_REVISION_FIN (0x07 << 6)
+#define MC34708_REVISION_FAB (0x07 << 9)
+
#define MC13XXX_ADC1 44
#define MC13XXX_ADC1_ADEN (1 << 0)
#define MC13XXX_ADC1_RAND (1 << 1)
@@ -410,62 +415,52 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-static const char *mc13xxx_chipname[] = {
- [MC13XXX_ID_MC13783] = "mc13783",
- [MC13XXX_ID_MC13892] = "mc13892",
-};
-
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx)
+static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
- u32 icid;
- u32 revision;
- int ret;
-
- /*
- * Get the generation ID from register 46, as apparently some older
- * IC revisions only have this info at this location. Newer ICs seem to
- * have both.
- */
- ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
- if (ret)
- return ret;
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
+ "fin: %d, fab: %d, icid: %d/%d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC13XXX_REVISION_REVFULL),
+ maskval(revision, MC13XXX_REVISION_REVMETAL),
+ maskval(revision, MC13XXX_REVISION_FIN),
+ maskval(revision, MC13XXX_REVISION_FAB),
+ maskval(revision, MC13XXX_REVISION_ICID),
+ maskval(revision, MC13XXX_REVISION_ICIDCODE));
+}
- icid = (icid >> 6) & 0x7;
+static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
+{
+ dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
+ mc13xxx->variant->name,
+ maskval(revision, MC34708_REVISION_REVFULL),
+ maskval(revision, MC34708_REVISION_REVMETAL),
+ maskval(revision, MC34708_REVISION_FIN),
+ maskval(revision, MC34708_REVISION_FAB));
+}
- switch (icid) {
- case 2:
- mc13xxx->ictype = MC13XXX_ID_MC13783;
- break;
- case 7:
- mc13xxx->ictype = MC13XXX_ID_MC13892;
- break;
- default:
- mc13xxx->ictype = MC13XXX_ID_INVALID;
- break;
- }
+/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
+struct mc13xxx_variant mc13xxx_variant_mc13783 = {
+ .name = "mc13783",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13783);
- if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
- mc13xxx->ictype == MC13XXX_ID_MC13892) {
- ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-
- dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
- "fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[mc13xxx->ictype],
- maskval(revision, MC13XXX_REVISION_REVFULL),
- maskval(revision, MC13XXX_REVISION_REVMETAL),
- maskval(revision, MC13XXX_REVISION_FIN),
- maskval(revision, MC13XXX_REVISION_FAB),
- maskval(revision, MC13XXX_REVISION_ICID),
- maskval(revision, MC13XXX_REVISION_ICIDCODE));
- }
+struct mc13xxx_variant mc13xxx_variant_mc13892 = {
+ .name = "mc13892",
+ .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13892);
- return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
-}
+struct mc13xxx_variant mc13xxx_variant_mc34708 = {
+ .name = "mc34708",
+ .print_revision = mc34708_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc34708);
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- return mc13xxx_chipname[mc13xxx->ictype];
+ return mc13xxx->variant->name;
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -653,13 +648,16 @@ int mc13xxx_common_init(struct mc13xxx *mc13xxx,
struct mc13xxx_platform_data *pdata, int irq)
{
int ret;
+ u32 revision;
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
if (ret)
goto err_revision;
+ mc13xxx->variant->print_revision(mc13xxx, revision);
+
/* mask all irqs */
ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
if (ret)
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 9d18dde3cd2a..f745e27ee874 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -24,7 +24,10 @@
static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
{
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -34,7 +37,10 @@ MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
{
.compatible = "fsl,mc13892",
- .data = (void *) &mc13xxx_i2c_device_id[0],
+ .data = &mc13xxx_variant_mc13892,
+ }, {
+ .compatible = "fsl,mc34708",
+ .data = &mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -76,16 +82,20 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+ if (client->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &client->dev);
+ mc13xxx->variant = of_id->data;
+ } else {
+ mc13xxx->variant = (void *)id->driver_data;
+ }
- if (ret == 0 && (id->driver_data != mc13xxx->ictype))
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
return ret;
}
-static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+static int mc13xxx_i2c_remove(struct i2c_client *client)
{
struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
@@ -102,7 +112,7 @@ static struct i2c_driver mc13xxx_i2c_driver = {
.of_match_table = mc13xxx_dt_ids,
},
.probe = mc13xxx_i2c_probe,
- .remove = __devexit_p(mc13xxx_i2c_remove),
+ .remove = mc13xxx_i2c_remove,
};
static int __init mc13xxx_i2c_init(void)
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 0bdb43a0aff0..3032bae20b62 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -28,10 +28,13 @@
static const struct spi_device_id mc13xxx_device_id[] = {
{
.name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13783,
}, {
.name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+ }, {
+ .name = "mc34708",
+ .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
}, {
/* sentinel */
}
@@ -39,8 +42,9 @@ static const struct spi_device_id mc13xxx_device_id[] = {
MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { .compatible = "fsl,mc13783", .data = &mc13xxx_variant_mc13783, },
+ { .compatible = "fsl,mc13892", .data = &mc13xxx_variant_mc13892, },
+ { .compatible = "fsl,mc34708", .data = &mc13xxx_variant_mc34708, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
@@ -144,22 +148,21 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
return ret;
}
- ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+ if (spi->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (ret) {
- dev_set_drvdata(&spi->dev, NULL);
+ mc13xxx->variant = of_id->data;
} else {
- const struct spi_device_id *devid =
- spi_get_device_id(spi);
- if (!devid || devid->driver_data != mc13xxx->ictype)
- dev_warn(mc13xxx->dev,
- "device id doesn't match auto detection!\n");
+ const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+ mc13xxx->variant = (void *)id_entry->driver_data;
}
- return ret;
+ return mc13xxx_common_init(mc13xxx, pdata, spi->irq);
}
-static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+static int mc13xxx_spi_remove(struct spi_device *spi)
{
struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
@@ -176,7 +179,7 @@ static struct spi_driver mc13xxx_spi_driver = {
.of_match_table = mc13xxx_dt_ids,
},
.probe = mc13xxx_spi_probe,
- .remove = __devexit_p(mc13xxx_spi_remove),
+ .remove = mc13xxx_spi_remove,
};
static int __init mc13xxx_init(void)
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
index bbba06feea06..460ec5c7b18c 100644
--- a/drivers/mfd/mc13xxx.h
+++ b/drivers/mfd/mc13xxx.h
@@ -13,19 +13,25 @@
#include <linux/regmap.h>
#include <linux/mfd/mc13xxx.h>
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx;
+
+struct mc13xxx_variant {
+ const char *name;
+ void (*print_revision)(struct mc13xxx *mc13xxx, u32 revision);
};
-#define MC13XXX_NUMREGS 0x3f
+extern struct mc13xxx_variant
+ mc13xxx_variant_mc13783,
+ mc13xxx_variant_mc13892,
+ mc13xxx_variant_mc34708;
struct mc13xxx {
struct regmap *regmap;
struct device *dev;
- enum mc13xxx_id ictype;
+ const struct mc13xxx_variant *variant;
struct mutex lock;
int irq;
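
The new struct mc13xxx_variant replaces the old enum mc13xxx_id: each supported PMIC now carries its name and a print_revision() callback, and the i2c/spi id tables point straight at the variant. An illustrative sketch of how a further, hypothetical chip would be wired into this scheme; the mc99999 names do not exist in the tree:

/* Illustrative only: in mc13xxx-core.c (or a variant-specific file). */
static void mc99999_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
        dev_info(mc13xxx->dev, "%s: raw revision register: 0x%06x\n",
                 mc13xxx->variant->name, revision);
}

struct mc13xxx_variant mc13xxx_variant_mc99999 = {
        .name = "mc99999",
        .print_revision = mc99999_print_revision,
};
EXPORT_SYMBOL_GPL(mc13xxx_variant_mc99999);

/* ...plus matching table entries in mc13xxx-spi.c / mc13xxx-i2c.c: */
/* { .name = "mc99999", .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc99999 }, */
/* { .compatible = "fsl,mc99999", .data = &mc13xxx_variant_mc99999 }, */
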
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 55d589981412..998ce8cb3065 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -41,11 +41,11 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/mfd/menelaus.h>
#include <asm/mach/irq.h>
#include <asm/gpio.h>
-#include <plat/menelaus.h>
#define DRIVER_NAME "menelaus"
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f8b77711ad2d..7604f4e5df40 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -21,6 +21,10 @@
#include <linux/irqdomain.h>
#include <linux/of.h>
+static struct device_type mfd_dev_type = {
+ .name = "mfd_device",
+};
+
int mfd_cell_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
@@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_device;
pdev->dev.parent = parent;
+ pdev->dev.type = &mfd_dev_type;
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
@@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
static int mfd_remove_devices_fn(struct device *dev, void *c)
{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct platform_device *pdev;
+ const struct mfd_cell *cell;
atomic_t **usage_count = c;
+ if (dev->type != &mfd_dev_type)
+ return 0;
+
+ pdev = to_platform_device(dev);
+ cell = mfd_get_cell(pdev);
+
/* find the base address of usage_count pointers (for freeing) */
if (!*usage_count || (cell->usage_count < *usage_count))
*usage_count = cell->usage_count;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 23cec57c02ba..05164d7f054b 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,10 +25,12 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
-#include <plat/cpu.h>
-#include <plat/usb.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/usb-omap.h>
#include <linux/pm_runtime.h>
+#include "omap-usb.h"
+
#define USBHS_DRIVER_NAME "usbhs_omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
#define OMAP_OHCI_DEVICE "ohci-omap3"
@@ -381,7 +383,7 @@ static void omap_usbhs_init(struct device *dev)
reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
/* Bypass the TLL module for PHY mode operation */
- if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ if (pdata->single_ulpi_bypass) {
dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
if (is_ehci_phy_mode(pdata->port_mode[0]) ||
is_ehci_phy_mode(pdata->port_mode[1]) ||
@@ -464,7 +466,7 @@ static void omap_usbhs_deinit(struct device *dev)
*
* Allocates basic resources for this USB host controller.
*/
-static int __devinit usbhs_omap_probe(struct platform_device *pdev)
+static int usbhs_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usbhs_omap_platform_data *pdata = dev->platform_data;
@@ -652,7 +654,7 @@ end_probe:
*
* Reverses the effect of usbhs_omap_probe().
*/
-static int __devexit usbhs_omap_remove(struct platform_device *pdev)
+static int usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 4b7757b84301..eb869153206d 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -25,8 +25,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
-#include <plat/usb.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_data/usb-omap.h>
#define USBTLL_DRIVER_NAME "usbhs_tll"
@@ -200,7 +200,7 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
*
* Allocates basic resources for this USB host controller.
*/
-static int __devinit usbtll_omap_probe(struct platform_device *pdev)
+static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usbtll_omap_platform_data *pdata = dev->platform_data;
@@ -348,7 +348,7 @@ end:
*
* Reverses the effect of usbtll_omap_probe().
*/
-static int __devexit usbtll_omap_remove(struct platform_device *pdev)
+static int usbtll_omap_remove(struct platform_device *pdev)
{
struct usbtll_omap *tll = platform_get_drvdata(pdev);
@@ -424,7 +424,7 @@ static struct platform_driver usbtll_omap_driver = {
.pm = &usbtllomap_dev_pm_ops,
},
.probe = usbtll_omap_probe,
- .remove = __devexit_p(usbtll_omap_remove),
+ .remove = usbtll_omap_remove,
};
int omap_tll_enable(void)
diff --git a/drivers/mfd/omap-usb.h b/drivers/mfd/omap-usb.h
new file mode 100644
index 000000000000..972aa961b064
--- /dev/null
+++ b/drivers/mfd/omap-usb.h
@@ -0,0 +1,2 @@
+extern int omap_tll_enable(void);
+extern int omap_tll_disable(void);
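
omap-usb.h only publishes the TLL enable/disable hooks to the host driver that now includes it. A sketch of the assumed call pattern, with the surrounding functions purely hypothetical:

/* Sketch only: the example_usbhs_* names are illustrative. */
static int example_usbhs_enable(struct device *dev)
{
        int ret;

        /* Bring up the TLL block before touching the host ports */
        ret = omap_tll_enable();
        if (ret)
                return ret;

        /* ... enable port clocks, configure PHY/TLL port modes, etc. ... */
        return 0;
}

static void example_usbhs_disable(struct device *dev)
{
        /* ... disable port clocks ... */
        omap_tll_disable();
}
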
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 4f8d6e6b19aa..6ffd7a2affdc 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -247,7 +247,7 @@ static struct regmap_irq_chip palmas_irq_chip = {
PALMAS_INT1_MASK),
};
-static void __devinit palmas_dt_to_pdata(struct device_node *node,
+static void palmas_dt_to_pdata(struct device_node *node,
struct palmas_platform_data *pdata)
{
int ret;
@@ -275,7 +275,7 @@ static void __devinit palmas_dt_to_pdata(struct device_node *node,
PALMAS_POWER_CTRL_ENABLE2_MASK;
}
-static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
+static int palmas_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct palmas *palmas;
@@ -492,7 +492,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
-static struct of_device_id __devinitdata of_palmas_match_tbl[] = {
+static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas", },
{ /* end */ }
};
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 3927c17e4175..18b53cb72fea 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -199,7 +199,7 @@ static void pcf50633_adc_irq(int irq, void *data)
kfree(req);
}
-static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
+static int pcf50633_adc_probe(struct platform_device *pdev)
{
struct pcf50633_adc *adc;
@@ -218,7 +218,7 @@ static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pcf50633_adc_remove(struct platform_device *pdev)
+static int pcf50633_adc_remove(struct platform_device *pdev)
{
struct pcf50633_adc *adc = platform_get_drvdata(pdev);
int i, head;
@@ -246,7 +246,7 @@ static struct platform_driver pcf50633_adc_driver = {
.name = "pcf50633-adc",
},
.probe = pcf50633_adc_probe,
- .remove = __devexit_p(pcf50633_adc_remove),
+ .remove = pcf50633_adc_remove,
};
module_platform_driver(pcf50633_adc_driver);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 45ce1fb5a549..d11567307fbe 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -191,7 +191,7 @@ static struct regmap_config pcf50633_regmap_config = {
.val_bits = 8,
};
-static int __devinit pcf50633_probe(struct i2c_client *client,
+static int pcf50633_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct pcf50633 *pcf;
@@ -208,6 +208,8 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (!pcf)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf);
+ pcf->dev = &client->dev;
pcf->pdata = pdata;
mutex_init(&pcf->lock);
@@ -219,9 +221,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return ret;
}
- i2c_set_clientdata(client, pcf);
- pcf->dev = &client->dev;
-
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
@@ -275,7 +274,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
}
-static int __devexit pcf50633_remove(struct i2c_client *client)
+static int pcf50633_remove(struct i2c_client *client)
{
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
@@ -308,7 +307,7 @@ static struct i2c_driver pcf50633_driver = {
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
- .remove = __devexit_p(pcf50633_remove),
+ .remove = pcf50633_remove,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index e873b15753d8..d4b297cbd801 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -80,7 +80,7 @@ static struct pm8xxx_drvdata pm8921_drvdata = {
.pmic_read_irq_stat = pm8921_read_irq_stat,
};
-static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data
+static int pm8921_add_subdevices(const struct pm8921_platform_data
*pdata,
struct pm8921 *pmic,
u32 rev)
@@ -104,7 +104,7 @@ static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data
return ret;
}
-static int __devinit pm8921_probe(struct platform_device *pdev)
+static int pm8921_probe(struct platform_device *pdev)
{
const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
struct pm8921 *pmic;
@@ -165,7 +165,7 @@ err_read_rev:
return rc;
}
-static int __devexit pm8921_remove(struct platform_device *pdev)
+static int pm8921_remove(struct platform_device *pdev)
{
struct pm8xxx_drvdata *drvdata;
struct pm8921 *pmic = NULL;
@@ -187,7 +187,7 @@ static int __devexit pm8921_remove(struct platform_device *pdev)
static struct platform_driver pm8921_driver = {
.probe = pm8921_probe,
- .remove = __devexit_p(pm8921_remove),
+ .remove = pm8921_remove,
.driver = {
.name = "pm8921-core",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c
index d452dd013081..1360e20adf11 100644
--- a/drivers/mfd/pm8xxx-irq.c
+++ b/drivers/mfd/pm8xxx-irq.c
@@ -309,7 +309,7 @@ bail_out:
}
EXPORT_SYMBOL_GPL(pm8xxx_get_irq_stat);
-struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
+struct pm_irq_chip * pm8xxx_irq_init(struct device *dev,
const struct pm8xxx_irq_platform_data *pdata)
{
struct pm_irq_chip *chip;
@@ -363,7 +363,7 @@ struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
return chip;
}
-int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip)
+int pm8xxx_irq_exit(struct pm_irq_chip *chip)
{
irq_set_chained_handler(chip->devirq, NULL);
kfree(chip);
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index fe00cdd6f83d..b41db5968706 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -345,7 +345,7 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
mutex_init(&rc5t583->irq_lock);
/* Initialize all interrupt registers to 0 */
- for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) {
+ for (i = 0; i < RC5T583_MAX_INTERRUPT_EN_REGS; i++) {
ret = rc5t583_write(rc5t583->dev, irq_en_add[i],
rc5t583->irq_en_reg[i]);
if (ret < 0)
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index f1a024ecdb1e..14bdaccefbec 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -246,7 +246,7 @@ static const struct regmap_config rc5t583_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
+static int rc5t583_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct rc5t583 *rc5t583;
@@ -303,7 +303,7 @@ err_add_devs:
return ret;
}
-static int __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
+static int rc5t583_i2c_remove(struct i2c_client *i2c)
{
struct rc5t583 *rc5t583 = i2c_get_clientdata(i2c);
@@ -325,7 +325,7 @@ static struct i2c_driver rc5t583_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = rc5t583_i2c_probe,
- .remove = __devexit_p(rc5t583_i2c_remove),
+ .remove = rc5t583_i2c_remove,
.id_table = rc5t583_i2c_id,
};
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index fbabc3cbe350..21b7bef73507 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -72,7 +72,7 @@ static struct mfd_cell rdc321x_sb_cells[] = {
},
};
-static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
+static int rdc321x_sb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
@@ -91,7 +91,7 @@ static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
NULL, 0, NULL);
}
-static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
+static void rdc321x_sb_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
}
@@ -106,7 +106,7 @@ static struct pci_driver rdc321x_sb_driver = {
.name = "RDC321x Southbridge",
.id_table = rdc321x_sb_table,
.probe = rdc321x_sb_probe,
- .remove = __devexit_p(rdc321x_sb_remove),
+ .remove = rdc321x_sb_remove,
};
module_pci_driver(rdc321x_sb_driver);
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
new file mode 100644
index 000000000000..3ba048655bf3
--- /dev/null
+++ b/drivers/mfd/retu-mfd.c
@@ -0,0 +1,263 @@
+/*
+ * Retu MFD driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall and Mikko Ylinen.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+/* Registers */
+#define RETU_REG_ASICR 0x00 /* ASIC ID and revision */
+#define RETU_REG_ASICR_VILMA (1 << 7) /* Bit indicating Vilma */
+#define RETU_REG_IDR 0x01 /* Interrupt ID */
+#define RETU_REG_IMR 0x02 /* Interrupt mask */
+
+/* Interrupt sources */
+#define RETU_INT_PWR 0 /* Power button */
+
+struct retu_dev {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex mutex;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static struct resource retu_pwrbutton_res[] = {
+ {
+ .name = "retu-pwrbutton",
+ .start = RETU_INT_PWR,
+ .end = RETU_INT_PWR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell retu_devs[] = {
+ {
+ .name = "retu-wdt"
+ },
+ {
+ .name = "retu-pwrbutton",
+ .resources = retu_pwrbutton_res,
+ .num_resources = ARRAY_SIZE(retu_pwrbutton_res),
+ }
+};
+
+static struct regmap_irq retu_irqs[] = {
+ [RETU_INT_PWR] = {
+ .mask = 1 << RETU_INT_PWR,
+ }
+};
+
+static struct regmap_irq_chip retu_irq_chip = {
+ .name = "RETU",
+ .irqs = retu_irqs,
+ .num_irqs = ARRAY_SIZE(retu_irqs),
+ .num_regs = 1,
+ .status_base = RETU_REG_IDR,
+ .mask_base = RETU_REG_IMR,
+ .ack_base = RETU_REG_IDR,
+};
+
+/* Retu device registered for the power off. */
+static struct retu_dev *retu_pm_power_off;
+
+int retu_read(struct retu_dev *rdev, u8 reg)
+{
+ int ret;
+ int value;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_read(rdev->regmap, reg, &value);
+ mutex_unlock(&rdev->mutex);
+
+ return ret ? ret : value;
+}
+EXPORT_SYMBOL_GPL(retu_read);
+
+int retu_write(struct retu_dev *rdev, u8 reg, u16 data)
+{
+ int ret;
+
+ mutex_lock(&rdev->mutex);
+ ret = regmap_write(rdev->regmap, reg, data);
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(retu_write);
+
+static void retu_power_off(void)
+{
+ struct retu_dev *rdev = retu_pm_power_off;
+ int reg;
+
+ mutex_lock(&retu_pm_power_off->mutex);
+
+ /* Ignore power button state */
+ regmap_read(rdev->regmap, RETU_REG_CC1, &reg);
+ regmap_write(rdev->regmap, RETU_REG_CC1, reg | 2);
+
+ /* Expire watchdog immediately */
+ regmap_write(rdev->regmap, RETU_REG_WATCHDOG, 0);
+
+ /* Wait for poweroff */
+ for (;;)
+ cpu_relax();
+
+ mutex_unlock(&retu_pm_power_off->mutex);
+}
+
+static int retu_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(reg_size != 1 || val_size != 2);
+
+ ret = i2c_smbus_read_word_data(i2c, *(u8 const *)reg);
+ if (ret < 0)
+ return ret;
+
+ *(u16 *)val = ret;
+ return 0;
+}
+
+static int retu_regmap_write(void *context, const void *data, size_t count)
+{
+ u8 reg;
+ u16 val;
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ BUG_ON(count != sizeof(reg) + sizeof(val));
+ memcpy(&reg, data, sizeof(reg));
+ memcpy(&val, data + sizeof(reg), sizeof(val));
+ return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus retu_bus = {
+ .read = retu_regmap_read,
+ .write = retu_regmap_write,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
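+
+/*
+ * Example (values chosen for illustration): retu_write(rdev, RETU_REG_IMR,
+ * 0xffff) is formatted by regmap as a one-byte register address followed
+ * by a native-endian 16-bit value, which retu_regmap_write() above unpacks
+ * and forwards as i2c_smbus_write_word_data(i2c, 0x02, 0xffff).
+ */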
+
+static struct regmap_config retu_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
+
+static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct retu_dev *rdev;
+ int ret;
+
+ rdev = devm_kzalloc(&i2c->dev, sizeof(*rdev), GFP_KERNEL);
+ if (rdev == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rdev);
+ rdev->dev = &i2c->dev;
+ mutex_init(&rdev->mutex);
+ rdev->regmap = devm_regmap_init(&i2c->dev, &retu_bus, &i2c->dev,
+ &retu_config);
+ if (IS_ERR(rdev->regmap))
+ return PTR_ERR(rdev->regmap);
+
+ ret = retu_read(rdev, RETU_REG_ASICR);
+ if (ret < 0) {
+ dev_err(rdev->dev, "could not read Retu revision: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(rdev->dev, "Retu%s v%d.%d found\n",
+ (ret & RETU_REG_ASICR_VILMA) ? " & Vilma" : "",
+ (ret >> 4) & 0x7, ret & 0xf);
+
+ /* Mask all RETU interrupts. */
+ ret = retu_write(rdev, RETU_REG_IMR, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_add_irq_chip(rdev->regmap, i2c->irq, IRQF_ONESHOT, -1,
+ &retu_irq_chip, &rdev->irq_data);
+ if (ret < 0)
+ return ret;
+
+ ret = mfd_add_devices(rdev->dev, -1, retu_devs, ARRAY_SIZE(retu_devs),
+ NULL, regmap_irq_chip_get_base(rdev->irq_data),
+ NULL);
+ if (ret < 0) {
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+ return ret;
+ }
+
+ if (!pm_power_off) {
+ retu_pm_power_off = rdev;
+ pm_power_off = retu_power_off;
+ }
+
+ return 0;
+}
+
+static int retu_remove(struct i2c_client *i2c)
+{
+ struct retu_dev *rdev = i2c_get_clientdata(i2c);
+
+ if (retu_pm_power_off == rdev) {
+ pm_power_off = NULL;
+ retu_pm_power_off = NULL;
+ }
+ mfd_remove_devices(rdev->dev);
+ regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id retu_id[] = {
+ { "retu-mfd", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, retu_id);
+
+static struct i2c_driver retu_driver = {
+ .driver = {
+ .name = "retu-mfd",
+ .owner = THIS_MODULE,
+ },
+ .probe = retu_probe,
+ .remove = retu_remove,
+ .id_table = retu_id,
+};
+module_i2c_driver(retu_driver);
+
+MODULE_DESCRIPTION("Retu MFD driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
new file mode 100644
index 000000000000..3d3b4addf81a
--- /dev/null
+++ b/drivers/mfd/rtl8411.c
@@ -0,0 +1,280 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rtl8411_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, SYS_VER, &val);
+ return val & 0x0F;
+}
+
+static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+}
+
+static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
+}
+
+static int rtl8411_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
+}
+
+static int rtl8411_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
+}
+
+static int rtl8411_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
+}
+
+static int rtl8411_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CTL,
+ BPP_LDO_POWB, BPP_LDO_SUSPEND);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* Delay to avoid excessive inrush current */
+ udelay(150);
+
+ err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_10_PERCENT_ON);
+ if (err < 0)
+ return err;
+
+ udelay(150);
+
+ err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_15_PERCENT_ON);
+ if (err < 0)
+ return err;
+
+ udelay(150);
+
+ err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_ON);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_register(pcr, LDO_CTL, BPP_LDO_POWB, BPP_LDO_ON);
+}
+
+static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_OFF);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_register(pcr, LDO_CTL,
+ BPP_LDO_POWB, BPP_LDO_SUSPEND);
+}
+
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 mask, val;
+
+ mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
+ if (voltage == OUTPUT_3V3)
+ val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
+ else if (voltage == OUTPUT_1V8)
+ val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
+ else
+ return -EINVAL;
+
+ return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
+}
+
+static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
+{
+ unsigned int card_exist;
+
+ card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
+ card_exist &= CARD_EXIST;
+ if (!card_exist) {
+ /* Enable card CD */
+ rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ CD_DISABLE_MASK, CD_ENABLE);
+ /* Enable card interrupt */
+ rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00);
+ return 0;
+ }
+
+ if (hweight32(card_exist) > 1) {
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
+ msleep(100);
+
+ card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
+ if (card_exist & MS_EXIST)
+ card_exist = MS_EXIST;
+ else if (card_exist & SD_EXIST)
+ card_exist = SD_EXIST;
+ else
+ card_exist = 0;
+
+ rtsx_pci_write_register(pcr, CARD_PWR_CTL,
+ BPP_POWER_MASK, BPP_POWER_OFF);
+
+ dev_dbg(&(pcr->pci->dev),
+ "After CD deglitch, card_exist = 0x%x\n",
+ card_exist);
+ }
+
+ if (card_exist & MS_EXIST) {
+ /* Disable SD interrupt */
+ rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40);
+ rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ CD_DISABLE_MASK, MS_CD_EN_ONLY);
+ } else if (card_exist & SD_EXIST) {
+ /* Disable MS interrupt */
+ rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80);
+ rtsx_pci_write_register(pcr, CD_PAD_CTL,
+ CD_DISABLE_MASK, SD_CD_EN_ONLY);
+ }
+
+ return card_exist;
+}
+
+static int rtl8411_conv_clk_and_div_n(int input, int dir)
+{
+ int output;
+
+ if (dir == CLK_TO_DIV_N)
+ output = input * 4 / 5 - 2;
+ else
+ output = (input + 2) * 5 / 4;
+
+ return output;
+}
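+
+/*
+ * Example (numbers chosen for illustration): converting a 100 MHz clock
+ * with CLK_TO_DIV_N gives 100 * 4 / 5 - 2 = 78, and converting 78 back
+ * with DIV_N_TO_CLK gives (78 + 2) * 5 / 4 = 100.
+ */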
+
+static const struct pcr_ops rtl8411_pcr_ops = {
+ .extra_init_hw = rtl8411_extra_init_hw,
+ .optimize_phy = NULL,
+ .turn_on_led = rtl8411_turn_on_led,
+ .turn_off_led = rtl8411_turn_off_led,
+ .enable_auto_blink = rtl8411_enable_auto_blink,
+ .disable_auto_blink = rtl8411_disable_auto_blink,
+ .card_power_on = rtl8411_card_power_on,
+ .card_power_off = rtl8411_card_power_off,
+ .switch_output_voltage = rtl8411_switch_output_voltage,
+ .cd_deglitch = rtl8411_cd_deglitch,
+ .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rtl8411_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xA9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x09),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rtl8411_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rtl8411_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rtl8411_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
+ 0,
+};
+
+void rtl8411_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+ pcr->ops = &rtl8411_pcr_ops;
+
+ pcr->ic_version = rtl8411_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rtl8411_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rtl8411_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rtl8411_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rtl8411_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
new file mode 100644
index 000000000000..98fe0f39463e
--- /dev/null
+++ b/drivers/mfd/rts5209.c
@@ -0,0 +1,244 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ val = rtsx_pci_readb(pcr, 0x1C);
+ return val & 0x0F;
+}
+
+static void rts5209_init_vendor_cfg(struct rtsx_pcr *pcr)
+{
+ u32 val;
+
+ rtsx_pci_read_config_dword(pcr, 0x724, &val);
+ dev_dbg(&(pcr->pci->dev), "Cfg 0x724: 0x%x\n", val);
+
+ if (!(val & 0x80)) {
+ if (val & 0x08)
+ pcr->ms_pmos = false;
+ else
+ pcr->ms_pmos = true;
+ }
+}
+
+static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_init_cmd(pcr);
+
+ /* Turn off LED */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
+
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5209_optimize_phy(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_phy_register(pcr, 0x00, 0xB966);
+}
+
+static int rts5209_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
+}
+
+static int rts5209_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
+}
+
+static int rts5209_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
+}
+
+static int rts5209_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
+}
+
+static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+ u8 pwr_mask, partial_pwr_on, pwr_on;
+
+ pwr_mask = SD_POWER_MASK;
+ partial_pwr_on = SD_PARTIAL_POWER_ON;
+ pwr_on = SD_POWER_ON;
+
+ if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+ pwr_mask = MS_POWER_MASK;
+ partial_pwr_on = MS_PARTIAL_POWER_ON;
+ pwr_on = MS_POWER_ON;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ pwr_mask, partial_pwr_on);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x04);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* Delay to avoid excessive inrush current */
+ udelay(150);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, pwr_mask, pwr_on);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x00);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ u8 pwr_mask, pwr_off;
+
+ pwr_mask = SD_POWER_MASK;
+ pwr_off = SD_POWER_OFF;
+
+ if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+ pwr_mask = MS_POWER_MASK;
+ pwr_off = MS_POWER_OFF;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x06);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pcr_ops rts5209_pcr_ops = {
+ .extra_init_hw = rts5209_extra_init_hw,
+ .optimize_phy = rts5209_optimize_phy,
+ .turn_on_led = rts5209_turn_on_led,
+ .turn_off_led = rts5209_turn_off_led,
+ .enable_auto_blink = rts5209_enable_auto_blink,
+ .disable_auto_blink = rts5209_disable_auto_blink,
+ .card_power_on = rts5209_card_power_on,
+ .card_power_off = rts5209_card_power_off,
+ .switch_output_voltage = rts5209_switch_output_voltage,
+ .cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5209_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5209_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5209_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5209_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+void rts5209_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 |
+ EXTRA_CAPS_SD_SDR104 | EXTRA_CAPS_MMC_8BIT;
+ pcr->num_slots = 2;
+ pcr->ops = &rts5209_pcr_ops;
+
+ rts5209_init_vendor_cfg(pcr);
+
+ pcr->ic_version = rts5209_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5209_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5209_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5209_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
new file mode 100644
index 000000000000..29d889cbb9c5
--- /dev/null
+++ b/drivers/mfd/rts5229.c
@@ -0,0 +1,226 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/rtsx_pci.h>
+
+#include "rtsx_pcr.h"
+
+static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & 0x0F;
+}
+
+static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_init_cmd(pcr);
+
+ /* Configure GPIO as output */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+ /* Switch LDO3318 source from DV33 to card_3v3 */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5229_optimize_phy(struct rtsx_pcr *pcr)
+{
+ /* Optimize RX sensitivity */
+ return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
+}
+
+static int rts5229_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
+}
+
+static int rts5229_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+}
+
+static int rts5229_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
+}
+
+static int rts5229_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
+}
+
+static int rts5229_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_PARTIAL_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x02);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* Delay to avoid excessive inrush current */
+ udelay(150);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK, SD_POWER_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x06);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
+ SD_POWER_MASK | PMOS_STRG_MASK,
+ SD_POWER_OFF | PMOS_STRG_400mA);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ LDO3318_PWR_MASK, 0x00);
+ return rtsx_pci_send_cmd(pcr, 100);
+}
+
+static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pcr_ops rts5229_pcr_ops = {
+ .extra_init_hw = rts5229_extra_init_hw,
+ .optimize_phy = rts5229_optimize_phy,
+ .turn_on_led = rts5229_turn_on_led,
+ .turn_off_led = rts5229_turn_off_led,
+ .enable_auto_blink = rts5229_enable_auto_blink,
+ .disable_auto_blink = rts5229_disable_auto_blink,
+ .card_power_on = rts5229_card_power_on,
+ .card_power_off = rts5229_card_power_off,
+ .switch_output_voltage = rts5229_switch_output_voltage,
+ .cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
+};
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5229_sd_pull_ctl_enable_tbl1[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* For RTS5229 version C */
+static const u32 rts5229_sd_pull_ctl_enable_tbl2[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5229_sd_pull_ctl_disable_tbl1[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+/* For RTS5229 version C */
+static const u32 rts5229_sd_pull_ctl_disable_tbl2[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE5),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5229_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5229_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+void rts5229_init_params(struct rtsx_pcr *pcr)
+{
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+ pcr->ops = &rts5229_pcr_ops;
+
+ pcr->ic_version = rts5229_get_ic_version(pcr);
+ if (pcr->ic_version == IC_VER_C) {
+ pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
+ pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl2;
+ } else {
+ pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl1;
+ pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl1;
+ }
+ pcr->ms_pull_ctl_enable_tbl = rts5229_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5229_ms_pull_ctl_disable_tbl;
+}
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
new file mode 100644
index 000000000000..9fc57009e228
--- /dev/null
+++ b/drivers/mfd/rtsx_pcr.c
@@ -0,0 +1,1271 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rtsx_pci.h>
+#include <asm/unaligned.h>
+
+#include "rtsx_pcr.h"
+
+static bool msi_en = true;
+module_param(msi_en, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msi_en, "Enable MSI");
+
+static DEFINE_IDR(rtsx_pci_idr);
+static DEFINE_SPINLOCK(rtsx_pci_lock);
+
+static struct mfd_cell rtsx_pcr_cells[] = {
+ [RTSX_SD_CARD] = {
+ .name = DRV_NAME_RTSX_PCI_SDMMC,
+ },
+ [RTSX_MS_CARD] = {
+ .name = DRV_NAME_RTSX_PCI_MS,
+ },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(rtsx_pci_ids) = {
+ { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
+
+void rtsx_pci_start_run(struct rtsx_pcr *pcr)
+{
+ /* If the PCI device has been removed, don't queue idle work anymore */
+ if (pcr->remove_pci)
+ return;
+
+ if (pcr->state != PDEV_STAT_RUN) {
+ pcr->state = PDEV_STAT_RUN;
+ if (pcr->ops->enable_auto_blink)
+ pcr->ops->enable_auto_blink(pcr);
+ }
+
+ mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
+
+int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
+{
+ int i;
+ u32 val = HAIMR_WRITE_START;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ rtsx_pci_writel(pcr, RTSX_HAIMR, val);
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_pci_readl(pcr, RTSX_HAIMR);
+ if ((val & HAIMR_TRANS_END) == 0) {
+ if (data != (u8)val)
+ return -EIO;
+ return 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
+
+int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
+{
+ u32 val = HAIMR_READ_START;
+ int i;
+
+ val |= (u32)(addr & 0x3FFF) << 16;
+ rtsx_pci_writel(pcr, RTSX_HAIMR, val);
+
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ val = rtsx_pci_readl(pcr, RTSX_HAIMR);
+ if ((val & HAIMR_TRANS_END) == 0)
+ break;
+ }
+
+ if (i >= MAX_RW_REG_CNT)
+ return -ETIMEDOUT;
+
+ if (data)
+ *data = (u8)(val & 0xFF);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
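+
+/*
+ * Example for the two HAIMR accessors above (address and data chosen for
+ * illustration): rtsx_pci_write_register(pcr, 0x0123, 0xFF, 0x5A) builds
+ * HAIMR_WRITE_START | (0x0123 << 16) | (0xFF << 8) | 0x5A, polls until
+ * HAIMR_TRANS_END clears and checks the data byte echoed back, while
+ * rtsx_pci_read_register() issues HAIMR_READ_START | (addr << 16) and
+ * returns the low byte of the completed transaction.
+ */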
+
+int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
+{
+ int err, i, finished = 0;
+ u8 tmp;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 100000; i++) {
+ err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
+ if (err < 0)
+ return err;
+
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
+
+int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
+{
+ int err, i, finished = 0;
+ u16 data;
+ u8 *ptr, tmp;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 100000; i++) {
+ err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
+ if (err < 0)
+ return err;
+
+ if (!(tmp & 0x80)) {
+ finished = 1;
+ break;
+ }
+ }
+
+ if (!finished)
+ return -ETIMEDOUT;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ ptr = rtsx_pci_get_cmd_data(pcr);
+ data = ((u16)ptr[1] << 8) | ptr[0];
+
+ if (val)
+ *val = data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
+
+void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+
+ rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
+ rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
+
+void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+{
+ unsigned long flags;
+ u32 val = 0;
+ u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
+
+ val |= (u32)(cmd_type & 0x03) << 30;
+ val |= (u32)(reg_addr & 0x3FFF) << 16;
+ val |= (u32)mask << 8;
+ val |= (u32)data;
+
+ spin_lock_irqsave(&pcr->lock, flags);
+ ptr += pcr->ci;
+ if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
+ put_unaligned_le32(val, ptr);
+ ptr++;
+ pcr->ci++;
+ }
+ spin_unlock_irqrestore(&pcr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
+
+void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
+{
+ u32 val = 1 << 31;
+
+ rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+ val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
+
+int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
+{
+ struct completion trans_done;
+ u32 val = 1 << 31;
+ long timeleft;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&pcr->lock, flags);
+
+ /* set up data structures for the wakeup system */
+ pcr->done = &trans_done;
+ pcr->trans_result = TRANS_NOT_READY;
+ init_completion(&trans_done);
+
+ rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+ val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
+ /* Hardware Auto Response */
+ val |= 0x40000000;
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ /* Wait for TRANS_OK_INT */
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, msecs_to_jiffies(timeout));
+ if (timeleft <= 0) {
+ dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
+ __func__, __LINE__);
+ err = -ETIMEDOUT;
+ goto finish_send_cmd;
+ }
+
+ spin_lock_irqsave(&pcr->lock, flags);
+ if (pcr->trans_result == TRANS_RESULT_FAIL)
+ err = -EINVAL;
+ else if (pcr->trans_result == TRANS_RESULT_OK)
+ err = 0;
+ else if (pcr->trans_result == TRANS_NO_DEVICE)
+ err = -ENODEV;
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+finish_send_cmd:
+ spin_lock_irqsave(&pcr->lock, flags);
+ pcr->done = NULL;
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ if ((err < 0) && (err != -ENODEV))
+ rtsx_pci_stop_cmd(pcr);
+
+ if (pcr->finish_me)
+ complete(pcr->finish_me);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
+
+static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
+ dma_addr_t addr, unsigned int len, int end)
+{
+ u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
+ u64 val;
+ u8 option = SG_VALID | SG_TRANS_DATA;
+
+ dev_dbg(&(pcr->pci->dev), "DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+
+ if (end)
+ option |= SG_END;
+ val = ((u64)addr << 32) | ((u64)len << 12) | option;
+
+ put_unaligned_le64(val, ptr);
+ ptr++;
+ pcr->sgi++;
+}
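+
+/*
+ * Each scatter/gather entry built above packs the DMA address into the
+ * upper 32 bits, the length starting at bit 12 and the option flags
+ * (SG_VALID, SG_TRANS_DATA and, for the final entry, SG_END) in the low
+ * bits, stored little-endian in the host SG table.
+ */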
+
+int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read, int timeout)
+{
+ struct completion trans_done;
+ u8 dir;
+ int err = 0, i, count;
+ long timeleft;
+ unsigned long flags;
+ struct scatterlist *sg;
+ enum dma_data_direction dma_dir;
+ u32 val;
+ dma_addr_t addr;
+ unsigned int len;
+
+ dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+
+ /* don't transfer data during abort processing */
+ if (pcr->remove_pci)
+ return -EINVAL;
+
+ if ((sglist == NULL) || (num_sg <= 0))
+ return -EINVAL;
+
+ if (read) {
+ dir = DEVICE_TO_HOST;
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ dir = HOST_TO_DEVICE;
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
+ if (count < 1) {
+ dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
+ return -EINVAL;
+ }
+ dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
+
+ val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+ pcr->sgi = 0;
+ for_each_sg(sglist, sg, count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+ }
+
+ spin_lock_irqsave(&pcr->lock, flags);
+
+ pcr->done = &trans_done;
+ pcr->trans_result = TRANS_NOT_READY;
+ init_completion(&trans_done);
+ rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ timeleft = wait_for_completion_interruptible_timeout(
+ &trans_done, msecs_to_jiffies(timeout));
+ if (timeleft <= 0) {
+ dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
+ __func__, __LINE__);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_irqsave(&pcr->lock, flags);
+
+ if (pcr->trans_result == TRANS_RESULT_FAIL)
+ err = -EINVAL;
+ else if (pcr->trans_result == TRANS_NO_DEVICE)
+ err = -ENODEV;
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+out:
+ spin_lock_irqsave(&pcr->lock, flags);
+ pcr->done = NULL;
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
+
+ if ((err < 0) && (err != -ENODEV))
+ rtsx_pci_stop_cmd(pcr);
+
+ if (pcr->finish_me)
+ complete(pcr->finish_me);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
+
+int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
+{
+ int err;
+ int i, j;
+ u16 reg;
+ u8 *ptr;
+
+ if (buf_len > 512)
+ buf_len = 512;
+
+ ptr = buf;
+ reg = PPBUF_BASE2;
+ for (i = 0; i < buf_len / 256; i++) {
+ rtsx_pci_init_cmd(pcr);
+
+ for (j = 0; j < 256; j++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 250);
+ if (err < 0)
+ return err;
+
+ memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
+ ptr += 256;
+ }
+
+ if (buf_len % 256) {
+ rtsx_pci_init_cmd(pcr);
+
+ for (j = 0; j < buf_len % 256; j++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 250);
+ if (err < 0)
+ return err;
+ }
+
+ memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
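+
+/*
+ * Example for rtsx_pci_read_ppbuf() above (length chosen for illustration):
+ * buf_len = 300 issues one batch of 256 READ_REG_CMD entries starting at
+ * PPBUF_BASE2 followed by a second batch of 44, copying 256 + 44 bytes out
+ * of the command data buffer.
+ */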
+
+int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
+{
+ int err;
+ int i, j;
+ u16 reg;
+ u8 *ptr;
+
+ if (buf_len > 512)
+ buf_len = 512;
+
+ ptr = buf;
+ reg = PPBUF_BASE2;
+ for (i = 0; i < buf_len / 256; i++) {
+ rtsx_pci_init_cmd(pcr);
+
+ for (j = 0; j < 256; j++) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ reg++, 0xFF, *ptr);
+ ptr++;
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 250);
+ if (err < 0)
+ return err;
+ }
+
+ if (buf_len % 256) {
+ rtsx_pci_init_cmd(pcr);
+
+ for (j = 0; j < buf_len % 256; j++) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ reg++, 0xFF, *ptr);
+ ptr++;
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 250);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
+
+static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
+{
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+
+ while (*tbl & 0xFFFF0000) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
+ tbl++;
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
+{
+ const u32 *tbl;
+
+ if (card == RTSX_SD_CARD)
+ tbl = pcr->sd_pull_ctl_enable_tbl;
+ else if (card == RTSX_MS_CARD)
+ tbl = pcr->ms_pull_ctl_enable_tbl;
+ else
+ return -EINVAL;
+
+ return rtsx_pci_set_pull_ctl(pcr, tbl);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
+
+int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
+{
+ const u32 *tbl;
+
+ if (card == RTSX_SD_CARD)
+ tbl = pcr->sd_pull_ctl_disable_tbl;
+ else if (card == RTSX_MS_CARD)
+ tbl = pcr->ms_pull_ctl_disable_tbl;
+ else
+ return -EINVAL;
+
+ return rtsx_pci_set_pull_ctl(pcr, tbl);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
+
+static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
+{
+ pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
+
+ if (pcr->num_slots > 1)
+ pcr->bier |= MS_INT_EN;
+
+ /* Enable Bus Interrupt */
+ rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
+
+ dev_dbg(&(pcr->pci->dev), "RTSX_BIER: 0x%08x\n", pcr->bier);
+}
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
+{
+ if (div > CLK_DIV_1) {
+ if (ssc_depth > (div - 1))
+ ssc_depth -= (div - 1);
+ else
+ ssc_depth = SSC_DEPTH_4M;
+ }
+
+ return ssc_depth;
+}
+
+int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u8 N, min_N, max_N, clk_divider;
+ u8 mcu_cnt, div, max_div;
+ u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
+ [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
+ };
+
+ if (initial_mode) {
+ /* Use roughly 250 kHz in the initial stage */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ dev_dbg(&(pcr->pci->dev), "Switch card clock to %dMHz\n", card_clock);
+
+ min_N = 80;
+ max_N = 208;
+ max_div = CLK_DIV_8;
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ dev_dbg(&(pcr->pci->dev),
+ "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ N = (u8)(clk - 2);
+ if ((clk <= 2) || (N > max_N))
+ return -EINVAL;
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ /* Make sure that the SSC clock div_n is equal to or greater than min_N */
+ div = CLK_DIV_1;
+ while ((N < min_N) && (div < max_div)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+ DIV_N_TO_CLK) * 2;
+ N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ N = (N + 2) * 2 - 2;
+ }
+ div++;
+ }
+ dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ ssc_depth = revise_ssc_depth(ssc_depth, div);
+ dev_dbg(&(pcr->pci->dev), "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, N);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait for the SSC clock to stabilize */
+ udelay(10);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
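+
+/*
+ * Worked example for rtsx_pci_switch_clock() (numbers chosen for
+ * illustration, default conversion without conv_clk_and_div_n):
+ * card_clock = 50 MHz with double_clk false gives N = 48; since 48 is
+ * below min_N (80), one pass of the loop yields N = (48 + 2) * 2 - 2 = 98
+ * with div = CLK_DIV_2, and mcu_cnt = 125 / 50 + 3 = 5.
+ */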
+
+int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ if (pcr->ops->card_power_on)
+ return pcr->ops->card_power_on(pcr, card);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
+
+int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ if (pcr->ops->card_power_off)
+ return pcr->ops->card_power_off(pcr, card);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ if (pcr->ops->switch_output_voltage)
+ return pcr->ops->switch_output_voltage(pcr, voltage);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
+
+unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
+{
+ unsigned int val;
+
+ val = rtsx_pci_readl(pcr, RTSX_BIPR);
+ if (pcr->ops->cd_deglitch)
+ val = pcr->ops->cd_deglitch(pcr);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
+
+void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
+{
+ struct completion finish;
+
+ pcr->finish_me = &finish;
+ init_completion(&finish);
+
+ if (pcr->done)
+ complete(pcr->done);
+
+ if (!pcr->remove_pci)
+ rtsx_pci_stop_cmd(pcr);
+
+ wait_for_completion_interruptible_timeout(&finish,
+ msecs_to_jiffies(2));
+ pcr->finish_me = NULL;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
+
+static void rtsx_pci_card_detect(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct rtsx_pcr *pcr;
+ unsigned long flags;
+ unsigned int card_detect = 0;
+ u32 irq_status;
+
+ dwork = to_delayed_work(work);
+ pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
+
+ dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+
+ spin_lock_irqsave(&pcr->lock, flags);
+
+ irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
+ dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status);
+
+ if (pcr->card_inserted || pcr->card_removed) {
+ dev_dbg(&(pcr->pci->dev),
+ "card_inserted: 0x%x, card_removed: 0x%x\n",
+ pcr->card_inserted, pcr->card_removed);
+
+ if (pcr->ops->cd_deglitch)
+ pcr->card_inserted = pcr->ops->cd_deglitch(pcr);
+
+ card_detect = pcr->card_inserted | pcr->card_removed;
+ pcr->card_inserted = 0;
+ pcr->card_removed = 0;
+ }
+
+ spin_unlock_irqrestore(&pcr->lock, flags);
+
+ if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
+ pcr->slots[RTSX_SD_CARD].card_event(
+ pcr->slots[RTSX_SD_CARD].p_dev);
+ if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
+ pcr->slots[RTSX_MS_CARD].card_event(
+ pcr->slots[RTSX_MS_CARD].p_dev);
+}
+
+static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
+{
+ struct rtsx_pcr *pcr = dev_id;
+ u32 int_reg;
+
+ if (!pcr)
+ return IRQ_NONE;
+
+ spin_lock(&pcr->lock);
+
+ int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
+ /* Clear interrupt flag */
+ rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
+ if ((int_reg & pcr->bier) == 0) {
+ spin_unlock(&pcr->lock);
+ return IRQ_NONE;
+ }
+ if (int_reg == 0xFFFFFFFF) {
+ spin_unlock(&pcr->lock);
+ return IRQ_HANDLED;
+ }
+
+ int_reg &= (pcr->bier | 0x7FFFFF);
+
+ if (int_reg & SD_INT) {
+ if (int_reg & SD_EXIST) {
+ pcr->card_inserted |= SD_EXIST;
+ } else {
+ pcr->card_removed |= SD_EXIST;
+ pcr->card_inserted &= ~SD_EXIST;
+ }
+ }
+
+ if (int_reg & MS_INT) {
+ if (int_reg & MS_EXIST) {
+ pcr->card_inserted |= MS_EXIST;
+ } else {
+ pcr->card_removed |= MS_EXIST;
+ pcr->card_inserted &= ~MS_EXIST;
+ }
+ }
+
+ if (pcr->card_inserted || pcr->card_removed)
+ schedule_delayed_work(&pcr->carddet_work,
+ msecs_to_jiffies(200));
+
+ if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
+ if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
+ pcr->trans_result = TRANS_RESULT_FAIL;
+ if (pcr->done)
+ complete(pcr->done);
+ } else if (int_reg & TRANS_OK_INT) {
+ pcr->trans_result = TRANS_RESULT_OK;
+ if (pcr->done)
+ complete(pcr->done);
+ }
+ }
+
+ spin_unlock(&pcr->lock);
+ return IRQ_HANDLED;
+}
+
+static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
+{
+ dev_info(&(pcr->pci->dev), "%s: pcr->msi_en = %d, pci->irq = %d\n",
+ __func__, pcr->msi_en, pcr->pci->irq);
+
+ if (request_irq(pcr->pci->irq, rtsx_pci_isr,
+ pcr->msi_en ? 0 : IRQF_SHARED,
+ DRV_NAME_RTSX_PCI, pcr)) {
+ dev_err(&(pcr->pci->dev),
+ "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
+ pcr->pci->irq);
+ return -1;
+ }
+
+ pcr->irq = pcr->pci->irq;
+ pci_intx(pcr->pci, !pcr->msi_en);
+
+ return 0;
+}
+
+static void rtsx_pci_idle_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
+
+ dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__);
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ pcr->state = PDEV_STAT_IDLE;
+
+ if (pcr->ops->disable_auto_blink)
+ pcr->ops->disable_auto_blink(pcr);
+ if (pcr->ops->turn_off_led)
+ pcr->ops->turn_off_led(pcr);
+
+ mutex_unlock(&pcr->pcr_mutex);
+}
+
+static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
+
+ rtsx_pci_enable_bus_int(pcr);
+
+ /* Power on SSC */
+ err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ if (err < 0)
+ return err;
+
+ /* Wait for SSC power to stabilize */
+ udelay(200);
+
+ if (pcr->ops->optimize_phy) {
+ err = pcr->ops->optimize_phy(pcr);
+ if (err < 0)
+ return err;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
+ /* Disable card clock */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
+ /* Reset ASPM state to default value */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+ /* Reset delink mode */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
+ /* Card driving select */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+ 0x07, DRIVER_TYPE_D);
+ /* Enable SSC Clock */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
+ 0xFF, SSC_8X_EN | SSC_SEL_4M);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+ /* Disable cd_pwr_save */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
+ /* Clear Link Ready Interrupt */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ LINK_RDY_INT, LINK_RDY_INT);
+ /* Enlarge the estimation window of PERST# glitch
+ * to reduce the chance of invalid card interrupt
+ */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
+ /* Update RC oscillator to 400k
+ * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
+ * 1: 2M 0: 400k
+ */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
+ /* Set interrupt write clear
+ * bit 1: U_elbi_if_rd_clr_en
+ * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
+ * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
+ */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
+ /* Force CLKREQ# PIN to drive 0 to request clock */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ /* Enable clk_request_n to enable clock power management */
+ rtsx_pci_write_config_byte(pcr, 0x81, 1);
+ /* Enter L1 when host tx idle */
+ rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
+
+ if (pcr->ops->extra_init_hw) {
+ err = pcr->ops->extra_init_hw(pcr);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ spin_lock_init(&pcr->lock);
+ mutex_init(&pcr->pcr_mutex);
+
+ switch (PCI_PID(pcr)) {
+ default:
+ case 0x5209:
+ rts5209_init_params(pcr);
+ break;
+
+ case 0x5229:
+ rts5229_init_params(pcr);
+ break;
+
+ case 0x5289:
+ rtl8411_init_params(pcr);
+ break;
+ }
+
+ dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n",
+ PCI_PID(pcr), pcr->ic_version);
+
+ pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
+ GFP_KERNEL);
+ if (!pcr->slots)
+ return -ENOMEM;
+
+ pcr->state = PDEV_STAT_IDLE;
+ err = rtsx_pci_init_hw(pcr);
+ if (err < 0) {
+ kfree(pcr->slots);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rtsx_pci_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle;
+ u32 base, len;
+ int ret, i;
+
+ dev_dbg(&(pcidev->dev),
+ ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
+ (int)pcidev->revision);
+
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
+ if (ret)
+ goto disable;
+
+ pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
+ if (!pcr) {
+ ret = -ENOMEM;
+ goto release_pci;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ ret = -ENOMEM;
+ goto free_pcr;
+ }
+ handle->pcr = pcr;
+
+ if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto free_handle;
+ }
+
+ spin_lock(&rtsx_pci_lock);
+ ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
+ spin_unlock(&rtsx_pci_lock);
+ if (ret)
+ goto free_handle;
+
+ pcr->pci = pcidev;
+ dev_set_drvdata(&pcidev->dev, handle);
+
+ len = pci_resource_len(pcidev, 0);
+ base = pci_resource_start(pcidev, 0);
+ pcr->remap_addr = ioremap_nocache(base, len);
+ if (!pcr->remap_addr) {
+ ret = -ENOMEM;
+ goto free_host;
+ }
+
+ pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
+ RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
+ GFP_KERNEL);
+ if (pcr->rtsx_resv_buf == NULL) {
+ ret = -ENXIO;
+ goto unmap;
+ }
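+ /* Carve the reserved DMA buffer into the host command buffer followed by the SG table */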
+ pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
+ pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
+ pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
+ pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
+
+ pcr->card_inserted = 0;
+ pcr->card_removed = 0;
+ INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
+ INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
+
+ pcr->msi_en = msi_en;
+ if (pcr->msi_en) {
+ ret = pci_enable_msi(pcidev);
+ if (ret < 0)
+ pcr->msi_en = false;
+ }
+
+ ret = rtsx_pci_acquire_irq(pcr);
+ if (ret < 0)
+ goto free_dma;
+
+ pci_set_master(pcidev);
+ synchronize_irq(pcr->irq);
+
+ ret = rtsx_pci_init_chip(pcr);
+ if (ret < 0)
+ goto disable_irq;
+
+ for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
+ rtsx_pcr_cells[i].platform_data = handle;
+ rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
+ }
+ ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
+ ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
+ if (ret < 0)
+ goto disable_irq;
+
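+ /* Kick the idle work so the LED is turned off once the reader goes idle */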
+ schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+ return 0;
+
+disable_irq:
+ free_irq(pcr->irq, (void *)pcr);
+free_dma:
+ dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
+ pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
+unmap:
+ iounmap(pcr->remap_addr);
+free_host:
+ dev_set_drvdata(&pcidev->dev, NULL);
+free_handle:
+ kfree(handle);
+free_pcr:
+ kfree(pcr);
+release_pci:
+ pci_release_regions(pcidev);
+disable:
+ pci_disable_device(pcidev);
+
+ return ret;
+}
+
+static void rtsx_pci_remove(struct pci_dev *pcidev)
+{
+ struct pcr_handle *handle = pci_get_drvdata(pcidev);
+ struct rtsx_pcr *pcr = handle->pcr;
+
+ pcr->remove_pci = true;
+
+ cancel_delayed_work(&pcr->carddet_work);
+ cancel_delayed_work(&pcr->idle_work);
+
+ mfd_remove_devices(&pcidev->dev);
+
+ dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
+ pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
+ free_irq(pcr->irq, (void *)pcr);
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
+ iounmap(pcr->remap_addr);
+
+ dev_set_drvdata(&pcidev->dev, NULL);
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+
+ spin_lock(&rtsx_pci_lock);
+ idr_remove(&rtsx_pci_idr, pcr->id);
+ spin_unlock(&rtsx_pci_lock);
+
+ kfree(pcr->slots);
+ kfree(pcr);
+ kfree(handle);
+
+ dev_dbg(&(pcidev->dev),
+ ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
+}
+
+#ifdef CONFIG_PM
+
+static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
+{
+ struct pcr_handle *handle;
+ struct rtsx_pcr *pcr;
+ int ret = 0;
+
+ dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+ handle = pci_get_drvdata(pcidev);
+ pcr = handle->pcr;
+
+ cancel_delayed_work(&pcr->carddet_work);
+ cancel_delayed_work(&pcr->idle_work);
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ if (pcr->ops->turn_off_led)
+ pcr->ops->turn_off_led(pcr);
+
+ rtsx_pci_writel(pcr, RTSX_BIER, 0);
+ pcr->bier = 0;
+
+ rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
+ rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x02);
+
+ pci_save_state(pcidev);
+ pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
+ pci_disable_device(pcidev);
+ pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+
+ mutex_unlock(&pcr->pcr_mutex);
+ return ret;
+}
+
+static int rtsx_pci_resume(struct pci_dev *pcidev)
+{
+ struct pcr_handle *handle;
+ struct rtsx_pcr *pcr;
+ int ret = 0;
+
+ dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+ handle = pci_get_drvdata(pcidev);
+ pcr = handle->pcr;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ pci_set_power_state(pcidev, PCI_D0);
+ pci_restore_state(pcidev);
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ goto out;
+ pci_set_master(pcidev);
+
+ ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
+ if (ret)
+ goto out;
+
+ ret = rtsx_pci_init_hw(pcr);
+ if (ret)
+ goto out;
+
+ schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+out:
+ mutex_unlock(&pcr->pcr_mutex);
+ return ret;
+}
+
+#else /* CONFIG_PM */
+
+#define rtsx_pci_suspend NULL
+#define rtsx_pci_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver rtsx_pci_driver = {
+ .name = DRV_NAME_RTSX_PCI,
+ .id_table = rtsx_pci_ids,
+ .probe = rtsx_pci_probe,
+ .remove = rtsx_pci_remove,
+ .suspend = rtsx_pci_suspend,
+ .resume = rtsx_pci_resume,
+};
+module_pci_driver(rtsx_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
diff --git a/drivers/staging/rts_pstor/general.h b/drivers/mfd/rtsx_pcr.h
index f17930d2e0c4..12462c1df1a9 100644
--- a/drivers/staging/rts_pstor/general.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -1,5 +1,4 @@
/* Driver for Realtek PCI-Express card reader
- * Header file
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
@@ -17,15 +16,17 @@
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
- * wwang (wei_wang@realsil.com.cn)
+ * Wei WANG <wei_wang@realsil.com.cn>
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
-#ifndef __RTSX_GENERAL_H
-#define __RTSX_GENERAL_H
+#ifndef __RTSX_PCR_H
+#define __RTSX_PCR_H
-#include "rtsx.h"
+#include <linux/mfd/rtsx_pci.h>
-int bit1cnt_long(u32 data);
+void rts5209_init_params(struct rtsx_pcr *pcr);
+void rts5229_init_params(struct rtsx_pcr *pcr);
+void rtl8411_init_params(struct rtsx_pcr *pcr);
-#endif /* __RTSX_GENERAL_H */
+#endif
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index c901fa50fea1..0dd84e99081e 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -24,67 +24,67 @@
static struct regmap_irq s2mps11_irqs[] = {
[S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONF_MASK,
},
[S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRONR_MASK,
},
[S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBF_MASK,
},
[S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_JIGONBR_MASK,
},
[S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBF_MASK,
},
[S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_ACOKBR_MASK,
},
[S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_PWRON1S_MASK,
},
[S2MPS11_IRQ_MRB] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S2MPS11_IRQ_MRB_MASK,
},
[S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC60S_MASK,
},
[S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA1_MASK,
},
[S2MPS11_IRQ_RTCA2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTCA2_MASK,
},
[S2MPS11_IRQ_SMPL] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_SMPL_MASK,
},
[S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_RTC1S_MASK,
},
[S2MPS11_IRQ_WTSR] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S2MPS11_IRQ_WTSR_MASK,
},
[S2MPS11_IRQ_INT120C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT120C_MASK,
},
[S2MPS11_IRQ_INT140C] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S2MPS11_IRQ_INT140C_MASK,
},
};
@@ -92,146 +92,146 @@ static struct regmap_irq s2mps11_irqs[] = {
static struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRR_MASK,
},
[S5M8767_IRQ_PWRF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWRF_MASK,
},
[S5M8767_IRQ_PWR1S] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_PWR1S_MASK,
},
[S5M8767_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGR_MASK,
},
[S5M8767_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_JIGF_MASK,
},
[S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT2_MASK,
},
[S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8767_IRQ_LOWBAT1_MASK,
},
[S5M8767_IRQ_MRB] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_MRB_MASK,
},
[S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK2_MASK,
},
[S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK3_MASK,
},
[S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8767_IRQ_DVSOK4_MASK,
},
[S5M8767_IRQ_RTC60S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC60S_MASK,
},
[S5M8767_IRQ_RTCA1] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA1_MASK,
},
[S5M8767_IRQ_RTCA2] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTCA2_MASK,
},
[S5M8767_IRQ_SMPL] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_SMPL_MASK,
},
[S5M8767_IRQ_RTC1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_RTC1S_MASK,
},
[S5M8767_IRQ_WTSR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8767_IRQ_WTSR_MASK,
},
};
static struct regmap_irq s5m8763_irqs[] = {
[S5M8763_IRQ_DCINF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINF_MASK,
},
[S5M8763_IRQ_DCINR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_DCINR_MASK,
},
[S5M8763_IRQ_JIGF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGF_MASK,
},
[S5M8763_IRQ_JIGR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_JIGR_MASK,
},
[S5M8763_IRQ_PWRONF] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONF_MASK,
},
[S5M8763_IRQ_PWRONR] = {
- .reg_offset = 1,
+ .reg_offset = 0,
.mask = S5M8763_IRQ_PWRONR_MASK,
},
[S5M8763_IRQ_WTSREVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_WTSREVNT_MASK,
},
[S5M8763_IRQ_SMPLEVNT] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_SMPLEVNT_MASK,
},
[S5M8763_IRQ_ALARM1] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM1_MASK,
},
[S5M8763_IRQ_ALARM0] = {
- .reg_offset = 2,
+ .reg_offset = 1,
.mask = S5M8763_IRQ_ALARM0_MASK,
},
[S5M8763_IRQ_ONKEY1S] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_ONKEY1S_MASK,
},
[S5M8763_IRQ_TOPOFFR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_TOPOFFR_MASK,
},
[S5M8763_IRQ_DCINOVPR] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DCINOVPR_MASK,
},
[S5M8763_IRQ_CHGRSTF] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGRSTF_MASK,
},
[S5M8763_IRQ_DONER] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_DONER_MASK,
},
[S5M8763_IRQ_CHGFAULT] = {
- .reg_offset = 3,
+ .reg_offset = 2,
.mask = S5M8763_IRQ_CHGFAULT_MASK,
},
[S5M8763_IRQ_LOBAT1] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT1_MASK,
},
[S5M8763_IRQ_LOBAT2] = {
- .reg_offset = 4,
+ .reg_offset = 3,
.mask = S5M8763_IRQ_LOBAT2_MASK,
},
};
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index d927dd49acb3..9816c232e583 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1014,7 +1014,7 @@ static struct gpio_chip gpio_chip_template = {
.get = sm501_gpio_get,
};
-static int __devinit sm501_gpio_register_chip(struct sm501_devdata *sm,
+static int sm501_gpio_register_chip(struct sm501_devdata *sm,
struct sm501_gpio *gpio,
struct sm501_gpio_chip *chip)
{
@@ -1042,7 +1042,7 @@ static int __devinit sm501_gpio_register_chip(struct sm501_devdata *sm,
return gpiochip_add(gchip);
}
-static int __devinit sm501_register_gpio(struct sm501_devdata *sm)
+static int sm501_register_gpio(struct sm501_devdata *sm)
{
struct sm501_gpio *gpio = &sm->gpio;
resource_size_t iobase = sm->io_res->start + SM501_GPIO;
@@ -1313,7 +1313,7 @@ static unsigned int sm501_mem_local[] = {
* Common init code for an SM501
*/
-static int __devinit sm501_init_dev(struct sm501_devdata *sm)
+static int sm501_init_dev(struct sm501_devdata *sm)
{
struct sm501_initdata *idata;
struct sm501_platdata *pdata;
@@ -1389,7 +1389,7 @@ static int __devinit sm501_init_dev(struct sm501_devdata *sm)
return 0;
}
-static int __devinit sm501_plat_probe(struct platform_device *dev)
+static int sm501_plat_probe(struct platform_device *dev)
{
struct sm501_devdata *sm;
int ret;
@@ -1578,7 +1578,7 @@ static struct sm501_platdata sm501_pci_platdata = {
.gpio_base = -1,
};
-static int __devinit sm501_pci_probe(struct pci_dev *dev,
+static int sm501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct sm501_devdata *sm;
@@ -1685,7 +1685,7 @@ static void sm501_dev_remove(struct sm501_devdata *sm)
sm501_gpio_remove(sm);
}
-static void __devexit sm501_pci_remove(struct pci_dev *dev)
+static void sm501_pci_remove(struct pci_dev *dev)
{
struct sm501_devdata *sm = pci_get_drvdata(dev);
@@ -1723,12 +1723,12 @@ static struct pci_driver sm501_pci_driver = {
.name = "sm501",
.id_table = sm501_pci_tbl,
.probe = sm501_pci_probe,
- .remove = __devexit_p(sm501_pci_remove),
+ .remove = sm501_pci_remove,
};
MODULE_ALIAS("platform:sm501");
-static struct of_device_id __devinitdata of_sm501_match_tbl[] = {
+static struct of_device_id of_sm501_match_tbl[] = {
{ .compatible = "smi,sm501", },
{ /* end */ }
};
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
index d35da6820bea..9bd33169a111 100644
--- a/drivers/mfd/sta2x11-mfd.c
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,21 +27,28 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sta2x11-mfd.h>
+#include <linux/regmap.h>
#include <asm/sta2x11.h>
+static inline int __reg_within_range(unsigned int r,
+ unsigned int start,
+ unsigned int end)
+{
+ return ((r >= start) && (r <= end));
+}
+
/* This describes STA2X11 MFD chip for us, we may have several */
struct sta2x11_mfd {
struct sta2x11_instance *instance;
- spinlock_t lock;
+ struct regmap *regmap[sta2x11_n_mfd_plat_devs];
+ spinlock_t lock[sta2x11_n_mfd_plat_devs];
struct list_head list;
- void __iomem *sctl_regs;
- void __iomem *apbreg_regs;
+ void __iomem *regs[sta2x11_n_mfd_plat_devs];
};
static LIST_HEAD(sta2x11_mfd_list);
@@ -69,8 +76,9 @@ static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
return NULL;
}
-static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
{
+ int i;
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
struct sta2x11_instance *instance;
@@ -83,13 +91,14 @@ static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
if (!mfd)
return -ENOMEM;
INIT_LIST_HEAD(&mfd->list);
- spin_lock_init(&mfd->lock);
+ for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
+ spin_lock_init(&mfd->lock[i]);
mfd->instance = instance;
list_add(&mfd->list, &sta2x11_mfd_list);
return 0;
}
-static int __devexit mfd_remove(struct pci_dev *pdev)
+static int mfd_remove(struct pci_dev *pdev)
{
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
@@ -100,161 +109,276 @@ static int __devexit mfd_remove(struct pci_dev *pdev)
return 0;
}
-/* These two functions are exported and are not expected to fail */
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+/* This function is exported and is not expected to fail */
+u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
+ enum sta2x11_mfd_plat_dev index)
{
struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
u32 r;
unsigned long flags;
+ void __iomem *regs;
if (!mfd) {
dev_warn(&pdev->dev, ": can't access sctl regs\n");
return 0;
}
- if (!mfd->sctl_regs) {
+
+ regs = mfd->regs[index];
+ if (!regs) {
dev_warn(&pdev->dev, ": system ctl not initialized\n");
return 0;
}
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->sctl_regs + reg);
+ spin_lock_irqsave(&mfd->lock[index], flags);
+ r = readl(regs + reg);
r &= ~mask;
r |= val;
if (mask)
- writel(r, mfd->sctl_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
+ writel(r, regs + reg);
+ spin_unlock_irqrestore(&mfd->lock[index], flags);
return r;
}
-EXPORT_SYMBOL(sta2x11_sctl_mask);
+EXPORT_SYMBOL(__sta2x11_mfd_mask);
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+int sta2x11_mfd_get_regs_data(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index,
+ void __iomem **regs,
+ spinlock_t **lock)
{
- struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
- u32 r;
- unsigned long flags;
+ struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ struct sta2x11_mfd *mfd;
- if (!mfd) {
- dev_warn(&pdev->dev, ": can't access apb regs\n");
- return 0;
- }
- if (!mfd->apbreg_regs) {
- dev_warn(&pdev->dev, ": apb bridge not initialized\n");
- return 0;
- }
- spin_lock_irqsave(&mfd->lock, flags);
- r = readl(mfd->apbreg_regs + reg);
- r &= ~mask;
- r |= val;
- if (mask)
- writel(r, mfd->apbreg_regs + reg);
- spin_unlock_irqrestore(&mfd->lock, flags);
- return r;
+ if (!pdev)
+ return -ENODEV;
+ mfd = sta2x11_mfd_find(pdev);
+ if (!mfd)
+ return -ENODEV;
+ if (index >= sta2x11_n_mfd_plat_devs)
+ return -ENODEV;
+ *regs = mfd->regs[index];
+ *lock = &mfd->lock[index];
+ pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
+ return *regs ? 0 : -ENODEV;
}
-EXPORT_SYMBOL(sta2x11_apbreg_mask);
-
-/* Two debugfs files, for our registers (FIXME: one instance only) */
-#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
-static struct debugfs_reg32 sta2x11_sctl_regs[] = {
- REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
- REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
- REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
- REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
- REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
- REG(SCCLKSTAT2), REG(SCRSTSTA),
-};
-#undef REG
+EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
-static struct debugfs_regset32 sctl_regset = {
- .regs = sta2x11_sctl_regs,
- .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
-};
+/*
+ * Special sta2x11-mfd regmap lock/unlock functions
+ */
+
+static void sta2x11_regmap_lock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_lock(lock);
+}
-#define REG(regname) {.name = #regname, .offset = regname}
-static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
- REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
- REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+static void sta2x11_regmap_unlock(void *__lock)
+{
+ spinlock_t *lock = __lock;
+ spin_unlock(lock);
+}
+
+/* OTP (one-time programmable) registers do not require locking */
+static void sta2x11_regmap_nolock(void *__lock)
+{
+}
+
+static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
+ [sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
+ [sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
+ [sta2x11_scr] = STA2X11_MFD_SCR_NAME,
};
-#undef REG
-static struct debugfs_regset32 apbreg_regset = {
- .regs = sta2x11_apbreg_regs,
- .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
+}
+
+static struct regmap_config sta2x11_sctl_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = SCTL_SCRSTSTA,
+ .writeable_reg = sta2x11_sctl_writeable_reg,
};
-static struct dentry *sta2x11_sctl_debugfs;
-static struct dentry *sta2x11_apbreg_debugfs;
+static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == STA2X11_SECR_CR) ||
+ __reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
+}
-/* Probe for the two platform devices */
-static int sta2x11_sctl_probe(struct platform_device *dev)
+static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
{
- struct pci_dev **pdev;
- struct sta2x11_mfd *mfd;
- struct resource *res;
+ return false;
+}
- pdev = dev->dev.platform_data;
- mfd = sta2x11_mfd_find(*pdev);
- if (!mfd)
- return -ENODEV;
+static struct regmap_config sta2x11_scr_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_nolock,
+ .unlock = sta2x11_regmap_nolock,
+ .max_register = STA2X11_SECR_FVR1,
+ .readable_reg = sta2x11_scr_readable_reg,
+ .writeable_reg = sta2x11_scr_writeable_reg,
+};
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* Two register blocks (CAN/MLB and SARAC), 0x100 bytes apart */
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ switch (reg) {
+ case APBREG_BSR:
+ case APBREG_PAER:
+ case APBREG_PWAC:
+ case APBREG_PRAC:
+ case APBREG_PCG:
+ case APBREG_PUR:
+ case APBREG_EMU_PCG:
+ return true;
+ default:
+ return false;
+ }
+}
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-sctl"))
- return -EBUSY;
+static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (reg >= APBREG_BSR_SARAC)
+ reg -= APBREG_BSR_SARAC;
+ if (!sta2x11_apbreg_readable_reg(dev, reg))
+ return false;
+ return reg != APBREG_PAER;
+}
- mfd->sctl_regs = ioremap(res->start, resource_size(res));
- if (!mfd->sctl_regs) {
- release_mem_region(res->start, resource_size(res));
- return -ENOMEM;
+static struct regmap_config sta2x11_apbreg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = APBREG_EMU_PCG_SARAC,
+ .readable_reg = sta2x11_apbreg_readable_reg,
+ .writeable_reg = sta2x11_apbreg_writeable_reg,
+};
+
+static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
+ unsigned int reg)
+{
+ return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
+ __reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
+ __reg_within_range(reg, MASTER_LOCK_REG,
+ SYSTEM_CONFIG_STATUS_REG) ||
+ reg == MSP_CLK_CTRL_REG ||
+ __reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
+}
+
+static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
+ unsigned int reg)
+{
+ if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
+ return false;
+ switch (reg) {
+ case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
+ case SYSTEM_CONFIG_STATUS_REG:
+ case COMPENSATION_REG1:
+ case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
+ case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
+ return false;
+ default:
+ return true;
}
- sctl_regset.base = mfd->sctl_regs;
- sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
- S_IFREG | S_IRUGO,
- NULL, &sctl_regset);
- return 0;
}
-static int sta2x11_apbreg_probe(struct platform_device *dev)
+static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .lock = sta2x11_regmap_lock,
+ .unlock = sta2x11_regmap_unlock,
+ .max_register = TEST_CTL_REG,
+ .readable_reg = sta2x11_apb_soc_regs_readable_reg,
+ .writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
+};
+
+static struct regmap_config *
+sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
+ [sta2x11_sctl] = &sta2x11_sctl_regmap_config,
+ [sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
+ [sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
+ [sta2x11_scr] = &sta2x11_scr_regmap_config,
+};
+
+/* Probe for the four platform devices */
+
+static int sta2x11_mfd_platform_probe(struct platform_device *dev,
+ enum sta2x11_mfd_plat_dev index)
{
struct pci_dev **pdev;
struct sta2x11_mfd *mfd;
struct resource *res;
+ const char *name = sta2x11_mfd_names[index];
+ struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
pdev = dev->dev.platform_data;
- dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
- dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
-
mfd = sta2x11_mfd_find(*pdev);
if (!mfd)
return -ENODEV;
+ if (!regmap_config)
+ return -ENODEV;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
- if (!request_mem_region(res->start, resource_size(res),
- "sta2x11-apbreg"))
+ if (!request_mem_region(res->start, resource_size(res), name))
return -EBUSY;
- mfd->apbreg_regs = ioremap(res->start, resource_size(res));
- if (!mfd->apbreg_regs) {
+ mfd->regs[index] = ioremap(res->start, resource_size(res));
+ if (!mfd->regs[index]) {
release_mem_region(res->start, resource_size(res));
return -ENOMEM;
}
- dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+ regmap_config->lock_arg = &mfd->lock[index];
+ /*
+  * No caching, registers could be reached both via regmap and via
+  * void __iomem *
+  */
+ regmap_config->cache_type = REGCACHE_NONE;
+ mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
+ regmap_config);
+ WARN_ON(!mfd->regmap[index]);
- apbreg_regset.base = mfd->apbreg_regs;
- sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
- S_IFREG | S_IRUGO,
- NULL, &apbreg_regset);
return 0;
}
-/* The two platform drivers */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
+}
+
+static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
+}
+
+static int sta2x11_scr_probe(struct platform_device *dev)
+{
+ return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
+}
+
+/* The four platform drivers */
static struct platform_driver sta2x11_sctl_platform_driver = {
.driver = {
- .name = "sta2x11-sctl",
+ .name = STA2X11_MFD_SCTL_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_sctl_probe,
@@ -268,7 +392,7 @@ static int __init sta2x11_sctl_init(void)
static struct platform_driver sta2x11_platform_driver = {
.driver = {
- .name = "sta2x11-apbreg",
+ .name = STA2X11_MFD_APBREG_NAME,
.owner = THIS_MODULE,
},
.probe = sta2x11_apbreg_probe,
@@ -280,13 +404,44 @@ static int __init sta2x11_apbreg_init(void)
return platform_driver_register(&sta2x11_platform_driver);
}
+static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_APB_SOC_REGS_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apb_soc_regs_probe,
+};
+
+static int __init sta2x11_apb_soc_regs_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_apb_soc_regs_platform_driver);
+}
+
+static struct platform_driver sta2x11_scr_platform_driver = {
+ .driver = {
+ .name = STA2X11_MFD_SCR_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_scr_probe,
+};
+
+static int __init sta2x11_scr_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_scr_platform_driver);
+}
+
+
/*
- * What follows is the PCI device that hosts the above two pdevs.
+ * What follows are the PCI devices that host the above pdevs.
* Each logic block is 4kB and they are all consecutive: we use this info.
*/
-/* Bar 0 */
-enum bar0_cells {
+/* Mfd 0 device */
+
+/* Mfd 0, Bar 0 */
+enum mfd0_bar0_cells {
STA2X11_GPIO_0 = 0,
STA2X11_GPIO_1,
STA2X11_GPIO_2,
@@ -295,8 +450,8 @@ enum bar0_cells {
STA2X11_SCR,
STA2X11_TIME,
};
-/* Bar 1 */
-enum bar1_cells {
+/* Mfd 0 , Bar 1 */
+enum mfd0_bar1_cells {
STA2X11_APBREG = 0,
};
#define CELL_4K(_name, _cell) { \
@@ -305,42 +460,73 @@ enum bar1_cells {
.flags = IORESOURCE_MEM, \
}
-static const __devinitconst struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
{
- .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ /* 4 consecutive cells, 1 driver */
+ .name = STA2X11_MFD_GPIO_NAME,
.start = 0,
.end = (4 * 4096) - 1,
.flags = IORESOURCE_MEM,
}
};
-static const __devinitconst struct resource sctl_resources[] = {
- CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+static const struct resource sctl_resources[] = {
+ CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
};
-static const __devinitconst struct resource scr_resources[] = {
- CELL_4K("sta2x11-scr", STA2X11_SCR),
+static const struct resource scr_resources[] = {
+ CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
};
-static const __devinitconst struct resource time_resources[] = {
- CELL_4K("sta2x11-time", STA2X11_TIME),
+static const struct resource time_resources[] = {
+ CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
};
-static const __devinitconst struct resource apbreg_resources[] = {
- CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+static const struct resource apbreg_resources[] = {
+ CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
};
#define DEV(_name, _r) \
{ .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
-static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
- DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
- DEV("sta2x11-sctl", sctl_resources),
- DEV("sta2x11-scr", scr_resources),
- DEV("sta2x11-time", time_resources),
+static struct mfd_cell sta2x11_mfd0_bar0[] = {
+ /* offset 0: we add pdata later */
+ DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
+ DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
+ DEV(STA2X11_MFD_SCR_NAME, scr_resources),
+ DEV(STA2X11_MFD_TIME_NAME, time_resources),
};
-static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
- DEV("sta2x11-apbreg", apbreg_resources),
+static struct mfd_cell sta2x11_mfd0_bar1[] = {
+ DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
};
+/* Mfd 1 devices */
+
+/* Mfd 1, Bar 0 */
+enum mfd1_bar0_cells {
+ STA2X11_VIC = 0,
+};
+
+/* Mfd 1, Bar 1 */
+enum mfd1_bar1_cells {
+ STA2X11_APB_SOC_REGS = 0,
+};
+
+static const struct resource vic_resources[] = {
+ CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
+};
+
+static const struct resource apb_soc_regs_resources[] = {
+ CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
+};
+
+static struct mfd_cell sta2x11_mfd1_bar0[] = {
+ DEV(STA2X11_MFD_VIC_NAME, vic_resources),
+};
+
+static struct mfd_cell sta2x11_mfd1_bar1[] = {
+ DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
+};
+
+
static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
@@ -363,11 +549,63 @@ static int sta2x11_mfd_resume(struct pci_dev *pdev)
return 0;
}
-static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
+struct sta2x11_mfd_bar_setup_data {
+ struct mfd_cell *cells;
+ int ncells;
+};
+
+struct sta2x11_mfd_setup_data {
+ struct sta2x11_mfd_bar_setup_data bars[2];
+};
+
+#define STA2X11_MFD0 0
+#define STA2X11_MFD1 1
+
+static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
+ /* Mfd 0: gpio, sctl, scr, timers / apbregs */
+ [STA2X11_MFD0] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd0_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd0_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
+ },
+ },
+ },
+ /* Mfd 1: vic / apb-soc-regs */
+ [STA2X11_MFD1] = {
+ .bars = {
+ [0] = {
+ .cells = sta2x11_mfd1_bar0,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
+ },
+ [1] = {
+ .cells = sta2x11_mfd1_bar1,
+ .ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
+ },
+ },
+ },
+};
+
+static void sta2x11_mfd_setup(struct pci_dev *pdev,
+ struct sta2x11_mfd_setup_data *sd)
+{
+ int i, j;
+ for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
+ for (j = 0; j < sd->bars[i].ncells; j++) {
+ sd->bars[i].cells[j].pdata_size = sizeof(pdev);
+ sd->bars[i].cells[j].platform_data = &pdev;
+ }
+}
+
+static int sta2x11_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
int err, i;
- struct sta2x11_gpio_pdata *gpio_data;
+ struct sta2x11_mfd_setup_data *setup_data;
dev_info(&pdev->dev, "%s\n", __func__);
@@ -381,46 +619,29 @@ static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
if (err)
dev_info(&pdev->dev, "Enable msi failed\n");
- /* Read gpio config data as pci device's platform data */
- gpio_data = dev_get_platdata(&pdev->dev);
- if (!gpio_data)
- dev_warn(&pdev->dev, "no gpio configuration\n");
-
- dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
- gpio_data, &gpio_data);
- dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
- pdev, &pdev);
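+ /* The GPIO PCI function hosts MFD0; the VIC function hosts MFD1 */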
+ setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
+ &mfd_setup_data[STA2X11_MFD0] :
+ &mfd_setup_data[STA2X11_MFD1];
/* platform data is the pci device for all of them */
- for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
- sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar0[i].platform_data = &pdev;
- }
- sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
- sta2x11_mfd_bar1[0].platform_data = &pdev;
+ sta2x11_mfd_setup(pdev, setup_data);
/* Record this pdev before mfd_add_devices: their probe looks for it */
- sta2x11_mfd_add(pdev, GFP_ATOMIC);
-
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar0,
- ARRAY_SIZE(sta2x11_mfd_bar0),
- &pdev->resource[0],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
- goto err_disable;
- }
-
- err = mfd_add_devices(&pdev->dev, -1,
- sta2x11_mfd_bar1,
- ARRAY_SIZE(sta2x11_mfd_bar1),
- &pdev->resource[1],
- 0, NULL);
- if (err) {
- dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
- goto err_disable;
+ if (!sta2x11_mfd_find(pdev))
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+ /* Just 2 bars for all mfd's at present */
+ for (i = 0; i < 2; i++) {
+ err = mfd_add_devices(&pdev->dev, -1,
+ setup_data->bars[i].cells,
+ setup_data->bars[i].ncells,
+ &pdev->resource[i],
+ 0, NULL);
+ if (err) {
+ dev_err(&pdev->dev,
+ "mfd_add_devices[%d] failed: %d\n", i, err);
+ goto err_disable;
+ }
}
return 0;
@@ -434,6 +655,7 @@ err_disable:
static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
{0,},
};
@@ -459,6 +681,8 @@ static int __init sta2x11_mfd_init(void)
*/
subsys_initcall(sta2x11_apbreg_init);
subsys_initcall(sta2x11_sctl_init);
+subsys_initcall(sta2x11_apb_soc_regs_init);
+subsys_initcall(sta2x11_scr_init);
rootfs_initcall(sta2x11_mfd_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 947a06a1845f..fd5fcb630685 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -52,7 +52,7 @@ static struct stmpe_client_info i2c_ci = {
.write_block = i2c_block_write,
};
-static int __devinit
+static int
stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
i2c_ci.data = (void *)id;
@@ -63,7 +63,7 @@ stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return stmpe_probe(&i2c_ci, id->driver_data);
}
-static int __devexit stmpe_i2c_remove(struct i2c_client *i2c)
+static int stmpe_i2c_remove(struct i2c_client *i2c)
{
struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
@@ -82,13 +82,15 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, stmpe_id);
static struct i2c_driver stmpe_i2c_driver = {
- .driver.name = "stmpe-i2c",
- .driver.owner = THIS_MODULE,
+ .driver = {
+ .name = "stmpe-i2c",
+ .owner = THIS_MODULE,
#ifdef CONFIG_PM
- .driver.pm = &stmpe_dev_pm_ops,
+ .pm = &stmpe_dev_pm_ops,
#endif
+ },
.probe = stmpe_i2c_probe,
- .remove = __devexit_p(stmpe_i2c_remove),
+ .remove = stmpe_i2c_remove,
.id_table = stmpe_i2c_id,
};
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 9edfe864cc05..973659f8abd9 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -82,7 +82,7 @@ static struct stmpe_client_info spi_ci = {
.init = spi_init,
};
-static int __devinit
+static int
stmpe_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -101,7 +101,7 @@ stmpe_spi_probe(struct spi_device *spi)
return stmpe_probe(&spi_ci, id->driver_data);
}
-static int __devexit stmpe_spi_remove(struct spi_device *spi)
+static int stmpe_spi_remove(struct spi_device *spi)
{
struct stmpe *stmpe = dev_get_drvdata(&spi->dev);
@@ -128,7 +128,7 @@ static struct spi_driver stmpe_spi_driver = {
#endif
},
.probe = stmpe_spi_probe,
- .remove = __devexit_p(stmpe_spi_remove),
+ .remove = stmpe_spi_remove,
.id_table = stmpe_spi_id,
};
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index c94f521f392c..4b11202061be 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -7,11 +7,15 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
@@ -294,12 +298,14 @@ static struct resource stmpe_gpio_resources[] = {
static struct mfd_cell stmpe_gpio_cell = {
.name = "stmpe-gpio",
+ .of_compatible = "st,stmpe-gpio",
.resources = stmpe_gpio_resources,
.num_resources = ARRAY_SIZE(stmpe_gpio_resources),
};
static struct mfd_cell stmpe_gpio_cell_noirq = {
.name = "stmpe-gpio",
+ .of_compatible = "st,stmpe-gpio",
/* gpio cell resources consist of an irq only so no resources here */
};
@@ -310,20 +316,17 @@ static struct mfd_cell stmpe_gpio_cell_noirq = {
static struct resource stmpe_keypad_resources[] = {
{
.name = "KEYPAD",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "KEYPAD_OVER",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_keypad_cell = {
.name = "stmpe-keypad",
+ .of_compatible = "st,stmpe-keypad",
.resources = stmpe_keypad_resources,
.num_resources = ARRAY_SIZE(stmpe_keypad_resources),
};
@@ -397,20 +400,17 @@ static struct stmpe_variant_info stmpe801_noirq = {
static struct resource stmpe_ts_resources[] = {
{
.name = "TOUCH_DET",
- .start = 0,
- .end = 0,
.flags = IORESOURCE_IRQ,
},
{
.name = "FIFO_TH",
- .start = 1,
- .end = 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell stmpe_ts_cell = {
.name = "stmpe-ts",
+ .of_compatible = "st,stmpe-ts",
.resources = stmpe_ts_resources,
.num_resources = ARRAY_SIZE(stmpe_ts_resources),
};
@@ -526,12 +526,12 @@ static const u8 stmpe1601_regs[] = {
static struct stmpe_variant_block stmpe1601_blocks[] = {
{
.cell = &stmpe_gpio_cell,
- .irq = STMPE24XX_IRQ_GPIOC,
+ .irq = STMPE1601_IRQ_GPIOC,
.block = STMPE_BLOCK_GPIO,
},
{
.cell = &stmpe_keypad_cell,
- .irq = STMPE24XX_IRQ_KEYPAD,
+ .irq = STMPE1601_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
};
@@ -765,7 +765,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int i;
if (variant->id_val == STMPE801_ID) {
- handle_nested_irq(stmpe->irq_base);
+ int base = irq_create_mapping(stmpe->domain, 0);
+
+ handle_nested_irq(base);
return IRQ_HANDLED;
}
@@ -786,8 +788,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
while (status) {
int bit = __ffs(status);
int line = bank * 8 + bit;
+ int nestedirq = irq_create_mapping(stmpe->domain, line);
- handle_nested_irq(stmpe->irq_base + line);
+ handle_nested_irq(nestedirq);
status &= ~(1 << bit);
}
@@ -828,7 +831,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
static void stmpe_irq_mask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -838,7 +841,7 @@ static void stmpe_irq_mask(struct irq_data *data)
static void stmpe_irq_unmask(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
- int offset = data->irq - stmpe->irq_base;
+ int offset = data->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -853,46 +856,61 @@ static struct irq_chip stmpe_irq_chip = {
.irq_unmask = stmpe_irq_unmask,
};
-static int __devinit stmpe_irq_init(struct stmpe *stmpe)
+static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
{
+ struct stmpe *stmpe = d->host_data;
struct irq_chip *chip = NULL;
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
if (stmpe->variant->id_val != STMPE801_ID)
chip = &stmpe_irq_chip;
- for (irq = base; irq < base + num_irqs; irq++) {
- irq_set_chip_data(irq, stmpe);
- irq_set_chip_and_handler(irq, chip, handle_edge_irq);
- irq_set_nested_thread(irq, 1);
+ irq_set_chip_data(virq, stmpe);
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
+ set_irq_flags(virq, IRQF_VALID);
#else
- irq_set_noprobe(irq);
+ irq_set_noprobe(virq);
#endif
- }
return 0;
}
-static void stmpe_irq_remove(struct stmpe *stmpe)
+static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
{
- int num_irqs = stmpe->variant->num_irqs;
- int base = stmpe->irq_base;
- int irq;
-
- for (irq = base; irq < base + num_irqs; irq++) {
#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
+ set_irq_flags(virq, 0);
#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops stmpe_irq_ops = {
+ .map = stmpe_irq_map,
+ .unmap = stmpe_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np)
+{
+ int base = 0;
+ int num_irqs = stmpe->variant->num_irqs;
+
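+ /* A static irq_base is only honoured in the legacy (non-DT) case */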
+ if (!np)
+ base = stmpe->irq_base;
+
+ stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
+ &stmpe_irq_ops, stmpe);
+ if (!stmpe->domain) {
+ dev_err(stmpe->dev, "Failed to create irqdomain\n");
+ return -ENOSYS;
}
+
+ return 0;
}
-static int __devinit stmpe_chip_init(struct stmpe *stmpe)
+static int stmpe_chip_init(struct stmpe *stmpe)
{
unsigned int irq_trigger = stmpe->pdata->irq_trigger;
int autosleep_timeout = stmpe->pdata->autosleep_timeout;
@@ -940,13 +958,6 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
else
icr |= STMPE_ICR_LSB_HIGH;
}
-
- if (stmpe->pdata->irq_invert_polarity) {
- if (id == STMPE801_ID)
- icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
- else
- icr ^= STMPE_ICR_LSB_HIGH;
- }
}
if (stmpe->pdata->autosleep) {
@@ -958,19 +969,18 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr);
}
-static int __devinit stmpe_add_device(struct stmpe *stmpe,
- struct mfd_cell *cell, int irq)
+static int stmpe_add_device(struct stmpe *stmpe, struct mfd_cell *cell)
{
return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
- NULL, stmpe->irq_base + irq, NULL);
+ NULL, stmpe->irq_base, stmpe->domain);
}
-static int __devinit stmpe_devices_init(struct stmpe *stmpe)
+static int stmpe_devices_init(struct stmpe *stmpe)
{
struct stmpe_variant_info *variant = stmpe->variant;
unsigned int platform_blocks = stmpe->pdata->blocks;
int ret = -EINVAL;
- int i;
+ int i, j;
for (i = 0; i < variant->num_blocks; i++) {
struct stmpe_variant_block *block = &variant->blocks[i];
@@ -978,8 +988,17 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
if (!(platform_blocks & block->block))
continue;
+ for (j = 0; j < block->cell->num_resources; j++) {
+ struct resource *res =
+ (struct resource *) &block->cell->resources[j];
+
+ /* Dynamically fill in a variant's IRQ. */
+ if (res->flags & IORESOURCE_IRQ)
+ res->start = res->end = block->irq + j;
+ }
+
platform_blocks &= ~block->block;
- ret = stmpe_add_device(stmpe, block->cell, block->irq);
+ ret = stmpe_add_device(stmpe, block->cell);
if (ret)
return ret;
}
@@ -992,17 +1011,55 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
+void stmpe_of_probe(struct stmpe_platform_data *pdata, struct device_node *np)
+{
+ struct device_node *child;
+
+ pdata->id = -1;
+ pdata->irq_trigger = IRQF_TRIGGER_NONE;
+
+ of_property_read_u32(np, "st,autosleep-timeout",
+ &pdata->autosleep_timeout);
+
+ pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
+
+ for_each_child_of_node(np, child) {
+ if (!strcmp(child->name, "stmpe_gpio")) {
+ pdata->blocks |= STMPE_BLOCK_GPIO;
+ } else if (!strcmp(child->name, "stmpe_keypad")) {
+ pdata->blocks |= STMPE_BLOCK_KEYPAD;
+ } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+ pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
+ } else if (!strcmp(child->name, "stmpe_adc")) {
+ pdata->blocks |= STMPE_BLOCK_ADC;
+ } else if (!strcmp(child->name, "stmpe_pwm")) {
+ pdata->blocks |= STMPE_BLOCK_PWM;
+ } else if (!strcmp(child->name, "stmpe_rotator")) {
+ pdata->blocks |= STMPE_BLOCK_ROTATOR;
+ }
+ }
+}
+
/* Called from client specific probe routines */
-int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
+int stmpe_probe(struct stmpe_client_info *ci, int partnum)
{
struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+ struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
- if (!pdata)
- return -EINVAL;
+ if (!pdata) {
+ if (!np)
+ return -EINVAL;
+
+ pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ stmpe_of_probe(pdata, np);
+ }
- stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL);
+ stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
return -ENOMEM;
@@ -1024,11 +1081,12 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
ci->init(stmpe);
if (pdata->irq_over_gpio) {
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+ ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
+ GPIOF_DIR_IN, "stmpe");
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
ret);
- goto out_free;
+ return ret;
}
stmpe->irq = gpio_to_irq(pdata->irq_gpio);
@@ -1045,51 +1103,40 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
dev_err(stmpe->dev,
"%s does not support no-irq mode!\n",
stmpe->variant->name);
- ret = -ENODEV;
- goto free_gpio;
+ return -ENODEV;
}
stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
+ } else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
+ pdata->irq_trigger =
+ irqd_get_trigger_type(irq_get_irq_data(stmpe->irq));
}
ret = stmpe_chip_init(stmpe);
if (ret)
- goto free_gpio;
+ return ret;
if (stmpe->irq >= 0) {
- ret = stmpe_irq_init(stmpe);
+ ret = stmpe_irq_init(stmpe, np);
if (ret)
- goto free_gpio;
+ return ret;
- ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
- pdata->irq_trigger | IRQF_ONESHOT,
+ ret = devm_request_threaded_irq(ci->dev, stmpe->irq, NULL,
+ stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT,
"stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n",
ret);
- goto out_removeirq;
+ return ret;
}
}
ret = stmpe_devices_init(stmpe);
- if (ret) {
- dev_err(stmpe->dev, "failed to add children\n");
- goto out_removedevs;
- }
-
- return 0;
+ if (!ret)
+ return 0;
-out_removedevs:
+ dev_err(stmpe->dev, "failed to add children\n");
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0)
- free_irq(stmpe->irq, stmpe);
-out_removeirq:
- if (stmpe->irq >= 0)
- stmpe_irq_remove(stmpe);
-free_gpio:
- if (pdata->irq_over_gpio)
- gpio_free(pdata->irq_gpio);
-out_free:
- kfree(stmpe);
+
return ret;
}
@@ -1097,16 +1144,6 @@ int stmpe_remove(struct stmpe *stmpe)
{
mfd_remove_devices(stmpe->dev);
- if (stmpe->irq >= 0) {
- free_irq(stmpe->irq, stmpe);
- stmpe_irq_remove(stmpe);
- }
-
- if (stmpe->pdata->irq_over_gpio)
- gpio_free(stmpe->pdata->irq_gpio);
-
- kfree(stmpe);
-
return 0;
}
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 65fe609026cc..3f10591ea94e 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -97,7 +97,7 @@ static struct regmap_config syscon_regmap_config = {
.reg_stride = 4,
};
-static int __devinit syscon_probe(struct platform_device *pdev)
+static int syscon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -138,7 +138,7 @@ static int __devinit syscon_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit syscon_remove(struct platform_device *pdev)
+static int syscon_remove(struct platform_device *pdev)
{
struct syscon *syscon;
@@ -156,7 +156,7 @@ static struct platform_driver syscon_driver = {
.of_match_table = of_syscon_match,
},
.probe = syscon_probe,
- .remove = __devexit_p(syscon_remove),
+ .remove = syscon_remove,
};
static int __init syscon_init(void)
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 8f4c853ca116..ecc092c7f745 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
}
static struct irq_domain_ops tc3589x_irq_ops = {
- .map = tc3589x_irq_map,
+ .map = tc3589x_irq_map,
.unmap = tc3589x_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twocell,
};
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
int base = tc3589x->irq_base;
- if (base) {
- tc3589x->domain = irq_domain_add_legacy(
- NULL, TC3589x_NR_INTERNAL_IRQS, base,
- 0, &tc3589x_irq_ops, tc3589x);
- }
- else {
- tc3589x->domain = irq_domain_add_linear(
- np, TC3589x_NR_INTERNAL_IRQS,
- &tc3589x_irq_ops, tc3589x);
- }
+ tc3589x->domain = irq_domain_add_simple(
+ np, TC3589x_NR_INTERNAL_IRQS, base,
+ &tc3589x_irq_ops, tc3589x);
if (!tc3589x->domain) {
dev_err(tc3589x->dev, "Failed to create irqdomain\n");
@@ -282,7 +275,7 @@ static int tc3589x_chip_init(struct tc3589x *tc3589x)
return tc3589x_reg_write(tc3589x, TC3589x_RSTINTCLR, 0x1);
}
-static int __devinit tc3589x_device_init(struct tc3589x *tc3589x)
+static int tc3589x_device_init(struct tc3589x *tc3589x)
{
int ret = 0;
unsigned int blocks = tc3589x->pdata->block;
@@ -329,7 +322,7 @@ static int tc3589x_of_probe(struct device_node *np,
return 0;
}
-static int __devinit tc3589x_probe(struct i2c_client *i2c,
+static int tc3589x_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct tc3589x_platform_data *pdata = i2c->dev.platform_data;
@@ -402,7 +395,7 @@ out_free:
return ret;
}
-static int __devexit tc3589x_remove(struct i2c_client *client)
+static int tc3589x_remove(struct i2c_client *client)
{
struct tc3589x *tc3589x = i2c_get_clientdata(client);
@@ -458,7 +451,7 @@ static struct i2c_driver tc3589x_driver = {
.driver.owner = THIS_MODULE,
.driver.pm = &tc3589x_dev_pm_ops,
.probe = tc3589x_probe,
- .remove = __devexit_p(tc3589x_remove),
+ .remove = tc3589x_remove,
.id_table = tc3589x_id,
};
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 413c891102f8..366f7b906278 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -138,7 +138,7 @@ static struct mfd_cell tc6387xb_cells[] = {
},
};
-static int __devinit tc6387xb_probe(struct platform_device *dev)
+static int tc6387xb_probe(struct platform_device *dev)
{
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
struct resource *iomem, *rscr;
@@ -208,7 +208,7 @@ err_no_irq:
return ret;
}
-static int __devexit tc6387xb_remove(struct platform_device *dev)
+static int tc6387xb_remove(struct platform_device *dev)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
@@ -229,7 +229,7 @@ static struct platform_driver tc6387xb_platform_driver = {
.name = "tc6387xb",
},
.probe = tc6387xb_probe,
- .remove = __devexit_p(tc6387xb_remove),
+ .remove = tc6387xb_remove,
.suspend = tc6387xb_suspend,
.resume = tc6387xb_resume,
};
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index dcab026fcbb2..15e1463e5e13 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -137,7 +137,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
-static struct resource __devinitdata tc6393xb_nand_resources[] = {
+static struct resource tc6393xb_nand_resources[] = {
{
.start = 0x1000,
.end = 0x1007,
@@ -196,7 +196,7 @@ static const struct resource tc6393xb_ohci_resources[] = {
},
};
-static struct resource __devinitdata tc6393xb_fb_resources[] = {
+static struct resource tc6393xb_fb_resources[] = {
{
.start = 0x5000,
.end = 0x51ff,
@@ -382,7 +382,7 @@ static struct tmio_mmc_data tc6393xb_mmc_data = {
.set_clk_div = tc6393xb_mmc_clk_div,
};
-static struct mfd_cell __devinitdata tc6393xb_cells[] = {
+static struct mfd_cell tc6393xb_cells[] = {
[TC6393XB_CELL_NAND] = {
.name = "tmio-nand",
.enable = tc6393xb_nand_enable,
@@ -602,7 +602,7 @@ static void tc6393xb_detach_irq(struct platform_device *dev)
/*--------------------------------------------------------------------------*/
-static int __devinit tc6393xb_probe(struct platform_device *dev)
+static int tc6393xb_probe(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb;
@@ -731,7 +731,7 @@ err_kzalloc:
return ret;
}
-static int __devexit tc6393xb_remove(struct platform_device *dev)
+static int tc6393xb_remove(struct platform_device *dev)
{
struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
@@ -831,7 +831,7 @@ static int tc6393xb_resume(struct platform_device *dev)
static struct platform_driver tc6393xb_driver = {
.probe = tc6393xb_probe,
- .remove = __devexit_p(tc6393xb_remove),
+ .remove = tc6393xb_remove,
.suspend = tc6393xb_suspend,
.resume = tc6393xb_resume,
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index 7c3675a74f93..09a14cec351b 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -315,7 +315,7 @@ static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
return IRQ_HANDLED;
}
-static int __devinit ti_ssp_probe(struct platform_device *pdev)
+static int ti_ssp_probe(struct platform_device *pdev)
{
static struct ti_ssp *ssp;
const struct ti_ssp_data *pdata = pdev->dev.platform_data;
@@ -433,7 +433,7 @@ error_res:
return error;
}
-static int __devexit ti_ssp_remove(struct platform_device *pdev)
+static int ti_ssp_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ti_ssp *ssp = dev_get_drvdata(dev);
@@ -451,7 +451,7 @@ static int __devexit ti_ssp_remove(struct platform_device *pdev)
static struct platform_driver ti_ssp_driver = {
.probe = ti_ssp_probe,
- .remove = __devexit_p(ti_ssp_remove),
+ .remove = ti_ssp_remove,
.driver = {
.name = "ti-ssp",
.owner = THIS_MODULE,
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
new file mode 100644
index 000000000000..e9f3fb510b44
--- /dev/null
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -0,0 +1,274 @@
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
+{
+ unsigned int val;
+
+ regmap_read(tsadc->regmap_tscadc, reg, &val);
+ return val;
+}
+
+static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
+ unsigned int val)
+{
+ regmap_write(tsadc->regmap_tscadc, reg, val);
+}
+
+static const struct regmap_config tscadc_regmap_config = {
+ .name = "ti_tscadc",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static void tscadc_idle_config(struct ti_tscadc_dev *config)
+{
+ unsigned int idleconfig;
+
+ idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
+
+ tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+}
+
+static int ti_tscadc_probe(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc;
+ struct resource *res;
+ struct clk *clk;
+ struct mfd_tscadc_board *pdata = pdev->dev.platform_data;
+ struct mfd_cell *cell;
+ int err, ctrl;
+ int clk_value, clock_rate;
+ int tsc_wires, adc_channels = 0, total_channels;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Could not find platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdata->adc_init)
+ adc_channels = pdata->adc_init->adc_channels;
+
+ tsc_wires = pdata->tsc_init->wires;
+ total_channels = tsc_wires + adc_channels;
+
+ if (total_channels > 8) {
+ dev_err(&pdev->dev, "Number of i/p channels more than 8\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined.\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for device */
+ tscadc = devm_kzalloc(&pdev->dev,
+ sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+ if (!tscadc) {
+ dev_err(&pdev->dev, "failed to allocate memory.\n");
+ return -ENOMEM;
+ }
+ tscadc->dev = &pdev->dev;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "no irq ID is specified.\n");
+ goto ret;
+ } else
+ tscadc->irq = err;
+
+ res = devm_request_mem_region(&pdev->dev,
+ res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to reserve registers.\n");
+ return -EBUSY;
+ }
+
+ tscadc->tscadc_base = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!tscadc->tscadc_base) {
+ dev_err(&pdev->dev, "failed to map registers.\n");
+ return -ENOMEM;
+ }
+
+ tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+ tscadc->tscadc_base, &tscadc_regmap_config);
+ if (IS_ERR(tscadc->regmap_tscadc)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ err = PTR_ERR(tscadc->regmap_tscadc);
+ goto ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /*
+ * The TSC_ADC_Subsystem has 2 clock domains
+ * OCP_CLK and ADC_CLK.
+ * The ADC clock is expected to run at target of 3MHz,
+ * and expected to capture 12-bit data at a rate of 200 KSPS.
+ * The TSC_ADC_SS controller design assumes the OCP clock is
+ * at least 6x faster than the ADC clock.
+ */
+ clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get TSC fck\n");
+ err = PTR_ERR(clk);
+ goto err_disable_clk;
+ }
+ clock_rate = clk_get_rate(clk);
+ clk_put(clk);
+ clk_value = clock_rate / ADC_CLK;
+ if (clk_value < MAX_CLK_DIV) {
+ dev_err(&pdev->dev, "clock input less than min clock requirement\n");
+ err = -EINVAL;
+ goto err_disable_clk;
+ }
+ /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
+ clk_value = clk_value - 1;
+ tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+
+ /* Set the control register bits */
+ ctrl = CNTRLREG_STEPCONFIGWRT |
+ CNTRLREG_TSCENB |
+ CNTRLREG_STEPID |
+ CNTRLREG_4WIRE;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* Set register bits for Idle Config Mode */
+ tscadc_idle_config(tscadc);
+
+ /* Enable the TSC module enable bit */
+ ctrl = tscadc_readl(tscadc, REG_CTRL);
+ ctrl |= CNTRLREG_TSCSSENB;
+ tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+ /* TSC Cell */
+ cell = &tscadc->cells[TSC_CELL];
+ cell->name = "tsc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ /* ADC Cell */
+ cell = &tscadc->cells[ADC_CELL];
+ cell->name = "tiadc";
+ cell->platform_data = tscadc;
+ cell->pdata_size = sizeof(*tscadc);
+
+ err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
+ TSCADC_CELLS, NULL, 0, NULL);
+ if (err < 0)
+ goto err_disable_clk;
+
+ device_init_wakeup(&pdev->dev, true);
+ platform_set_drvdata(pdev, tscadc);
+
+ return 0;
+
+err_disable_clk:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ret:
+ return err;
+}
+
+static int ti_tscadc_remove(struct platform_device *pdev)
+{
+ struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
+
+ tscadc_writel(tscadc, REG_SE, 0x00);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ mfd_remove_devices(tscadc->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tscadc_suspend(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+
+ tscadc_writel(tscadc_dev, REG_SE, 0x00);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tscadc_resume(struct device *dev)
+{
+ struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ unsigned int restore, ctrl;
+
+ pm_runtime_get_sync(dev);
+
+ /* context restore */
+ ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
+ CNTRLREG_STEPID | CNTRLREG_4WIRE;
+ tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ tscadc_idle_config(tscadc_dev);
+ tscadc_writel(tscadc_dev, REG_SE, STPENB_STEPENB);
+ restore = tscadc_readl(tscadc_dev, REG_CTRL);
+ tscadc_writel(tscadc_dev, REG_CTRL,
+ (restore | CNTRLREG_TSCSSENB));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tscadc_pm_ops = {
+ .suspend = tscadc_suspend,
+ .resume = tscadc_resume,
+};
+#define TSCADC_PM_OPS (&tscadc_pm_ops)
+#else
+#define TSCADC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tscadc_driver = {
+ .driver = {
+ .name = "ti_tscadc",
+ .owner = THIS_MODULE,
+ .pm = TSCADC_PM_OPS,
+ },
+ .probe = ti_tscadc_probe,
+ .remove = ti_tscadc_remove,
+
+};
+
+module_platform_driver(ti_tscadc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen / ADC MFD controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
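
As a worked example of the CLKDIV computation in ti_tscadc_probe() above (the 24 MHz figure is purely illustrative and not taken from the driver): assuming the "adc_tsc_fck" functional clock reports 24 MHz and ADC_CLK is defined as the 3 MHz target named in the clock-domain comment, clk_value = 24000000 / 3000000 = 8; since TSCADC_CLKDIV holds the divider minus one, REG_CLKDIV is programmed with 8 - 1 = 7, and the check against MAX_CLK_DIV rejects a functional clock too slow to allow the minimum divider.
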
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index cccc626c83c8..59e0ee247e86 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -75,13 +75,13 @@ static struct i2c_board_info timberdale_i2c_board_info[] = {
},
};
-static __devinitdata struct xiic_i2c_platform_data
+static struct xiic_i2c_platform_data
timberdale_xiic_platform_data = {
.devices = timberdale_i2c_board_info,
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
-static __devinitdata struct ocores_i2c_platform_data
+static struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
.reg_shift = 2,
.clock_khz = 62500,
@@ -89,7 +89,7 @@ timberdale_ocores_platform_data = {
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
-static const __devinitconst struct resource timberdale_xiic_resources[] = {
+static const struct resource timberdale_xiic_resources[] = {
{
.start = XIICOFFSET,
.end = XIICEND,
@@ -102,7 +102,7 @@ static const __devinitconst struct resource timberdale_xiic_resources[] = {
},
};
-static const __devinitconst struct resource timberdale_ocores_resources[] = {
+static const struct resource timberdale_ocores_resources[] = {
{
.start = OCORESOFFSET,
.end = OCORESEND,
@@ -143,7 +143,7 @@ static struct spi_board_info timberdale_spi_8bit_board_info[] = {
},
};
-static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
+static struct xspi_platform_data timberdale_xspi_platform_data = {
.num_chipselect = 3,
.little_endian = true,
/* bits per word and devices will be filled in runtime depending
@@ -151,7 +151,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
*/
};
-static const __devinitconst struct resource timberdale_spi_resources[] = {
+static const struct resource timberdale_spi_resources[] = {
{
.start = SPIOFFSET,
.end = SPIEND,
@@ -164,13 +164,13 @@ static const __devinitconst struct resource timberdale_spi_resources[] = {
},
};
-static __devinitdata struct ks8842_platform_data
+static struct ks8842_platform_data
timberdale_ks8842_platform_data = {
.rx_dma_channel = DMA_ETH_RX,
.tx_dma_channel = DMA_ETH_TX
};
-static const __devinitconst struct resource timberdale_eth_resources[] = {
+static const struct resource timberdale_eth_resources[] = {
{
.start = ETHOFFSET,
.end = ETHEND,
@@ -183,14 +183,14 @@ static const __devinitconst struct resource timberdale_eth_resources[] = {
},
};
-static __devinitdata struct timbgpio_platform_data
+static struct timbgpio_platform_data
timberdale_gpio_platform_data = {
.gpio_base = 0,
.nr_pins = GPIO_NR_PINS,
.irq_base = 200,
};
-static const __devinitconst struct resource timberdale_gpio_resources[] = {
+static const struct resource timberdale_gpio_resources[] = {
{
.start = GPIOOFFSET,
.end = GPIOEND,
@@ -203,7 +203,7 @@ static const __devinitconst struct resource timberdale_gpio_resources[] = {
},
};
-static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
+static const struct resource timberdale_mlogicore_resources[] = {
{
.start = MLCOREOFFSET,
.end = MLCOREEND,
@@ -221,7 +221,7 @@ static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
},
};
-static const __devinitconst struct resource timberdale_uart_resources[] = {
+static const struct resource timberdale_uart_resources[] = {
{
.start = UARTOFFSET,
.end = UARTEND,
@@ -234,7 +234,7 @@ static const __devinitconst struct resource timberdale_uart_resources[] = {
},
};
-static const __devinitconst struct resource timberdale_uartlite_resources[] = {
+static const struct resource timberdale_uartlite_resources[] = {
{
.start = UARTLITEOFFSET,
.end = UARTLITEEND,
@@ -247,13 +247,13 @@ static const __devinitconst struct resource timberdale_uartlite_resources[] = {
},
};
-static __devinitdata struct i2c_board_info timberdale_adv7180_i2c_board_info = {
+static struct i2c_board_info timberdale_adv7180_i2c_board_info = {
/* Requires jumper JP9 to be off */
I2C_BOARD_INFO("adv7180", 0x42 >> 1),
.irq = IRQ_TIMBERDALE_ADV7180
};
-static __devinitdata struct timb_video_platform_data
+static struct timb_video_platform_data
timberdale_video_platform_data = {
.dma_channel = DMA_VIDEO_RX,
.i2c_adapter = 0,
@@ -262,7 +262,7 @@ static __devinitdata struct timb_video_platform_data
}
};
-static const __devinitconst struct resource
+static const struct resource
timberdale_radio_resources[] = {
{
.start = RDSOFFSET,
@@ -276,22 +276,22 @@ timberdale_radio_resources[] = {
},
};
-static __devinitdata struct i2c_board_info timberdale_tef6868_i2c_board_info = {
+static struct i2c_board_info timberdale_tef6868_i2c_board_info = {
I2C_BOARD_INFO("tef6862", 0x60)
};
-static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
+static struct i2c_board_info timberdale_saa7706_i2c_board_info = {
I2C_BOARD_INFO("saa7706h", 0x1C)
};
-static __devinitdata struct timb_radio_platform_data
+static struct timb_radio_platform_data
timberdale_radio_platform_data = {
.i2c_adapter = 0,
.tuner = &timberdale_tef6868_i2c_board_info,
.dsp = &timberdale_saa7706_i2c_board_info
};
-static const __devinitconst struct resource timberdale_video_resources[] = {
+static const struct resource timberdale_video_resources[] = {
{
.start = LOGIWOFFSET,
.end = LOGIWEND,
@@ -303,7 +303,7 @@ static const __devinitconst struct resource timberdale_video_resources[] = {
*/
};
-static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
+static struct timb_dma_platform_data timb_dma_platform_data = {
.nr_channels = 10,
.channels = {
{
@@ -362,7 +362,7 @@ static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
}
};
-static const __devinitconst struct resource timberdale_dma_resources[] = {
+static const struct resource timberdale_dma_resources[] = {
{
.start = DMAOFFSET,
.end = DMAEND,
@@ -375,7 +375,7 @@ static const __devinitconst struct resource timberdale_dma_resources[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
+static struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -432,7 +432,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
+static struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -499,7 +499,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
+static struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -549,7 +549,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
+static struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
@@ -606,7 +606,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
},
};
-static const __devinitconst struct resource timberdale_sdhc_resources[] = {
+static const struct resource timberdale_sdhc_resources[] = {
/* located in bar 1 and bar 2 */
{
.start = SDHC0OFFSET,
@@ -620,7 +620,7 @@ static const __devinitconst struct resource timberdale_sdhc_resources[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
+static struct mfd_cell timberdale_cells_bar1[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
@@ -628,7 +628,7 @@ static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
},
};
-static __devinitdata struct mfd_cell timberdale_cells_bar2[] = {
+static struct mfd_cell timberdale_cells_bar2[] = {
{
.name = "sdhci",
.num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
@@ -650,7 +650,7 @@ static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
/*--------------------------------------------------------------------------*/
-static int __devinit timb_probe(struct pci_dev *dev,
+static int timb_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct timberdale_device *priv;
@@ -840,7 +840,7 @@ err_enable:
return -ENODEV;
}
-static void __devexit timb_remove(struct pci_dev *dev)
+static void timb_remove(struct pci_dev *dev)
{
struct timberdale_device *priv = pci_get_drvdata(dev);
@@ -867,7 +867,7 @@ static struct pci_driver timberdale_pci_driver = {
.name = DRIVER_NAME,
.id_table = timberdale_pci_tbl,
.probe = timb_probe,
- .remove = __devexit_p(timb_remove),
+ .remove = timb_remove,
};
static int __init timberdale_init(void)
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 14051bdc714b..1d302f583adf 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -86,7 +86,7 @@ fail:
}
EXPORT_SYMBOL(tps6105x_mask_and_set);
-static int __devinit tps6105x_startup(struct tps6105x *tps6105x)
+static int tps6105x_startup(struct tps6105x *tps6105x)
{
int ret;
u8 regval;
@@ -133,7 +133,7 @@ static struct mfd_cell tps6105x_cells[] = {
},
};
-static int __devinit tps6105x_probe(struct i2c_client *client,
+static int tps6105x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps6105x *tps6105x;
@@ -199,7 +199,7 @@ fail:
return ret;
}
-static int __devexit tps6105x_remove(struct i2c_client *client)
+static int tps6105x_remove(struct i2c_client *client)
{
struct tps6105x *tps6105x = i2c_get_clientdata(client);
@@ -226,7 +226,7 @@ static struct i2c_driver tps6105x_driver = {
.name = "tps6105x",
},
.probe = tps6105x_probe,
- .remove = __devexit_p(tps6105x_remove),
+ .remove = tps6105x_remove,
.id_table = tps6105x_id,
};
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 1b203499c744..409afa23d5dc 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -86,9 +86,9 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct tps6507x_dev *tps6507x;
- int ret = 0;
- tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ tps6507x = devm_kzalloc(&i2c->dev, sizeof(struct tps6507x_dev),
+ GFP_KERNEL);
if (tps6507x == NULL)
return -ENOMEM;
@@ -98,19 +98,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
tps6507x->read_dev = tps6507x_i2c_read_device;
tps6507x->write_dev = tps6507x_i2c_write_device;
- ret = mfd_add_devices(tps6507x->dev, -1,
- tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
- NULL, 0, NULL);
-
- if (ret < 0)
- goto err;
-
- return ret;
-
-err:
- mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
- return ret;
+ return mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
+ ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
}
static int tps6507x_i2c_remove(struct i2c_client *i2c)
@@ -118,8 +107,6 @@ static int tps6507x_i2c_remove(struct i2c_client *i2c)
struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
mfd_remove_devices(tps6507x->dev);
- kfree(tps6507x);
-
return 0;
}
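
The tps6507x change above is the standard devm_ conversion: memory obtained with devm_kzalloc() is bound to the device's lifetime, so neither the probe error path nor the remove path needs an explicit kfree(). A minimal sketch of the pattern, with hypothetical names (foo_chip and foo_init are illustrations, not part of the patch):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_chip {
        struct device *dev;
};

static int foo_init(struct device *dev)
{
        struct foo_chip *chip;

        /* freed automatically when the device is unbound; no kfree() anywhere */
        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->dev = dev;
        dev_set_drvdata(dev, chip);
        return 0;
}
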
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 074ae32b0d2a..8d12a8e00d9c 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -25,7 +25,6 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
-#include <linux/regmap.h>
#include <linux/err.h>
#define NUM_INT_REG 2
@@ -39,204 +38,102 @@
#define TPS65090_INT_MSK 0x2
#define TPS65090_INT_MSK2 0x3
-struct tps65090_irq_data {
- u8 mask_reg;
- u8 mask_pos;
-};
-
-#define TPS65090_IRQ(_reg, _mask_pos) \
- { \
- .mask_reg = (_reg), \
- .mask_pos = (_mask_pos), \
- }
-
-static const struct tps65090_irq_data tps65090_irqs[] = {
- [0] = TPS65090_IRQ(0, 0),
- [1] = TPS65090_IRQ(0, 1),
- [2] = TPS65090_IRQ(0, 2),
- [3] = TPS65090_IRQ(0, 3),
- [4] = TPS65090_IRQ(0, 4),
- [5] = TPS65090_IRQ(0, 5),
- [6] = TPS65090_IRQ(0, 6),
- [7] = TPS65090_IRQ(0, 7),
- [8] = TPS65090_IRQ(1, 0),
- [9] = TPS65090_IRQ(1, 1),
- [10] = TPS65090_IRQ(1, 2),
- [11] = TPS65090_IRQ(1, 3),
- [12] = TPS65090_IRQ(1, 4),
- [13] = TPS65090_IRQ(1, 5),
- [14] = TPS65090_IRQ(1, 6),
- [15] = TPS65090_IRQ(1, 7),
-};
+#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1
+#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2
+#define TPS65090_INT1_MASK_BAT_STATUS_CHANGE 3
+#define TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE 4
+#define TPS65090_INT1_MASK_CHARGING_COMPLETE 5
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC1 6
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC2 7
+#define TPS65090_INT2_MASK_OVERLOAD_DCDC3 0
+#define TPS65090_INT2_MASK_OVERLOAD_FET1 1
+#define TPS65090_INT2_MASK_OVERLOAD_FET2 2
+#define TPS65090_INT2_MASK_OVERLOAD_FET3 3
+#define TPS65090_INT2_MASK_OVERLOAD_FET4 4
+#define TPS65090_INT2_MASK_OVERLOAD_FET5 5
+#define TPS65090_INT2_MASK_OVERLOAD_FET6 6
+#define TPS65090_INT2_MASK_OVERLOAD_FET7 7
static struct mfd_cell tps65090s[] = {
{
.name = "tps65090-pmic",
},
{
- .name = "tps65090-regulator",
+ .name = "tps65090-charger",
},
};
-int tps65090_write(struct device *dev, int reg, uint8_t val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_write(tps->rmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65090_write);
-
-int tps65090_read(struct device *dev, int reg, uint8_t *val)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- unsigned int temp_val;
- int ret;
- ret = regmap_read(tps->rmap, reg, &temp_val);
- if (!ret)
- *val = temp_val;
- return ret;
-}
-EXPORT_SYMBOL_GPL(tps65090_read);
-
-int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_set_bits);
-
-int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
-{
- struct tps65090 *tps = dev_get_drvdata(dev);
- return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_clr_bits);
-
-static void tps65090_irq_lock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65090->irq_lock);
-}
-
-static void tps65090_irq_mask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->hwirq;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_unmask(struct irq_data *irq_data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps65090->irq_base;
- const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
- tps65090_clr_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
- data->mask_pos);
-}
-
-static void tps65090_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
- mutex_unlock(&tps65090->irq_lock);
-}
-
-static irqreturn_t tps65090_irq(int irq, void *data)
-{
- struct tps65090 *tps65090 = data;
- int ret = 0;
- u8 status, mask;
- unsigned long int acks = 0;
- int i;
-
- for (i = 0; i < NUM_INT_REG; i++) {
- ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read mask reg [addr:%d]\n",
- TPS65090_INT_MSK + i);
- return IRQ_NONE;
- }
- ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
- &status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to read status reg [addr:%d]\n",
- TPS65090_INT_STS + i);
- return IRQ_NONE;
- }
- if (status) {
- /* Ack only those interrupts which are not masked */
- status &= (~mask);
- ret = tps65090_write(tps65090->dev,
- TPS65090_INT_STS + i, status);
- if (ret < 0) {
- dev_err(tps65090->dev,
- "failed to write interrupt status\n");
- return IRQ_NONE;
- }
- acks |= (status << (i * 8));
- }
- }
-
- for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
- handle_nested_irq(tps65090->irq_base + i);
- return acks ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int __devinit tps65090_irq_init(struct tps65090 *tps65090, int irq,
- int irq_base)
-{
- int i, ret;
-
- if (!irq_base) {
- dev_err(tps65090->dev, "IRQ base not set\n");
- return -EINVAL;
- }
-
- mutex_init(&tps65090->irq_lock);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);
-
- for (i = 0; i < NUM_INT_REG; i++)
- tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);
-
- tps65090->irq_base = irq_base;
- tps65090->irq_chip.name = "tps65090";
- tps65090->irq_chip.irq_mask = tps65090_irq_mask;
- tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
- tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
- tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
- int __irq = i + tps65090->irq_base;
- irq_set_chip_data(__irq, tps65090);
- irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
- }
-
- ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
- "tps65090", tps65090);
- if (!ret) {
- device_init_wakeup(tps65090->dev, 1);
- enable_irq_wake(irq);
- }
+static const struct regmap_irq tps65090_irqs[] = {
+ /* INT1 IRQs*/
+ [TPS65090_IRQ_VAC_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_VSYS_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_BAT_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE,
+ },
+ [TPS65090_IRQ_CHARGING_COMPLETE] = {
+ .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC1] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1,
+ },
+ [TPS65090_IRQ_OVERLOAD_DCDC2] = {
+ .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2,
+ },
+ /* INT2 IRQs*/
+ [TPS65090_IRQ_OVERLOAD_DCDC3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET1] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET1,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET2] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET2,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET3] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET3,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET4] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET4,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET5] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET5,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET6] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET6,
+ },
+ [TPS65090_IRQ_OVERLOAD_FET7] = {
+ .reg_offset = 1,
+ .mask = TPS65090_INT2_MASK_OVERLOAD_FET7,
+ },
+};
- return ret;
-}
+static struct regmap_irq_chip tps65090_irq_chip = {
+ .name = "tps65090",
+ .irqs = tps65090_irqs,
+ .num_irqs = ARRAY_SIZE(tps65090_irqs),
+ .num_regs = NUM_INT_REG,
+ .status_base = TPS65090_INT_STS,
+ .mask_base = TPS65090_INT_MSK,
+ .mask_invert = true,
+};
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
- if (reg == TPS65090_INT_STS)
+ if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS2))
return true;
else
return false;
@@ -251,7 +148,7 @@ static const struct regmap_config tps65090_regmap_config = {
.volatile_reg = is_volatile_reg,
};
-static int __devinit tps65090_i2c_probe(struct i2c_client *client,
+static int tps65090_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps65090_platform_data *pdata = client->dev.platform_data;
@@ -263,36 +160,36 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
return -EINVAL;
}
- tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
- GFP_KERNEL);
- if (tps65090 == NULL)
+ tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
+ if (!tps65090) {
+ dev_err(&client->dev, "mem alloc for tps65090 failed\n");
return -ENOMEM;
+ }
- tps65090->client = client;
tps65090->dev = &client->dev;
i2c_set_clientdata(client, tps65090);
- mutex_init(&tps65090->lock);
-
- if (client->irq) {
- ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
- if (ret) {
- dev_err(&client->dev, "IRQ init failed with err: %d\n",
- ret);
- goto err_exit;
- }
- }
-
- tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(client, &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
ret = PTR_ERR(tps65090->rmap);
dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
- goto err_irq_exit;
+ return ret;
+ }
+
+ if (client->irq) {
+ ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+ &tps65090_irq_chip, &tps65090->irq_data);
+ if (ret) {
+ dev_err(&client->dev,
+ "IRQ init failed with err: %d\n", ret);
+ return ret;
+ }
}
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
- ARRAY_SIZE(tps65090s), NULL, 0, NULL);
+ ARRAY_SIZE(tps65090s), NULL,
+ regmap_irq_chip_get_base(tps65090->irq_data), NULL);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
@@ -303,18 +200,17 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
err_irq_exit:
if (client->irq)
- free_irq(client->irq, tps65090);
-err_exit:
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return ret;
}
-static int __devexit tps65090_i2c_remove(struct i2c_client *client)
+static int tps65090_i2c_remove(struct i2c_client *client)
{
struct tps65090 *tps65090 = i2c_get_clientdata(client);
mfd_remove_devices(tps65090->dev);
if (client->irq)
- free_irq(client->irq, tps65090);
+ regmap_del_irq_chip(client->irq, tps65090->irq_data);
return 0;
}
@@ -354,7 +250,7 @@ static struct i2c_driver tps65090_driver = {
.pm = &tps65090_pm_ops,
},
.probe = tps65090_i2c_probe,
- .remove = __devexit_p(tps65090_i2c_remove),
+ .remove = tps65090_i2c_remove,
.id_table = tps65090_id_table,
};
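
The tps65090 rework above replaces the driver's hand-rolled mask, ack and nested-IRQ bookkeeping with the generic regmap-irq helper. The essential pieces, sketched with hypothetical names and placeholder register addresses and bit positions (none of these values describe the TPS65090 itself):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
        [0] = { .reg_offset = 0, .mask = BIT(0) },
        [1] = { .reg_offset = 1, .mask = BIT(3) },
};

static struct regmap_irq_chip foo_irq_chip = {
        .name        = "foo",
        .irqs        = foo_irqs,
        .num_irqs    = ARRAY_SIZE(foo_irqs),
        .num_regs    = 2,
        .status_base = 0x00,    /* placeholder status/mask register bases */
        .mask_base   = 0x02,
        .mask_invert = true,    /* hardware exposes enable bits rather than mask bits */
};

/*
 * In probe:
 *   ret = regmap_add_irq_chip(map, client->irq, IRQF_ONESHOT, irq_base,
 *                             &foo_irq_chip, &irq_data);
 * Children are then based at regmap_irq_chip_get_base(irq_data), and
 * remove undoes the setup with regmap_del_irq_chip(client->irq, irq_data).
 */
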
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 3fb32e655254..b8f48647661e 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -153,13 +153,14 @@ static const struct of_device_id tps65217_of_match[] = {
{ /* sentinel */ },
};
-static int __devinit tps65217_probe(struct i2c_client *client,
+static int tps65217_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65217 *tps;
unsigned int version;
unsigned int chip_id = ids->driver_data;
const struct of_device_id *match;
+ bool status_off = false;
int ret;
if (client->dev.of_node) {
@@ -170,6 +171,8 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return -EINVAL;
}
chip_id = (unsigned int)match->data;
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
}
if (!chip_id) {
@@ -207,6 +210,15 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return ret;
}
+ /* Set the PMIC to shutdown on PWR_EN toggle */
+ if (status_off) {
+ ret = tps65217_set_bits(tps, TPS65217_REG_STATUS,
+ TPS65217_STATUS_OFF, TPS65217_STATUS_OFF,
+ TPS65217_PROTECT_NONE);
+ if (ret)
+ dev_warn(tps->dev, "unable to set the status OFF\n");
+ }
+
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
(version & TPS65217_CHIPID_CHIP_MASK) >> 4,
version & TPS65217_CHIPID_REV_MASK);
@@ -214,7 +226,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return 0;
}
-static int __devexit tps65217_remove(struct i2c_client *client)
+static int tps65217_remove(struct i2c_client *client)
{
struct tps65217 *tps = i2c_get_clientdata(client);
@@ -237,7 +249,7 @@ static struct i2c_driver tps65217_driver = {
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
- .remove = __devexit_p(tps65217_remove),
+ .remove = tps65217_remove,
};
static int __init tps65217_init(void)
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 467464368773..721b9186a5d1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -17,15 +17,15 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/machine.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
@@ -94,12 +94,25 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
};
+static struct resource tps6586x_rtc_resources[] = {
+ {
+ .start = TPS6586X_INT_RTC_ALM1,
+ .end = TPS6586X_INT_RTC_ALM1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell tps6586x_cell[] = {
{
.name = "tps6586x-gpio",
},
{
+ .name = "tps6586x-pmic",
+ },
+ {
.name = "tps6586x-rtc",
+ .num_resources = ARRAY_SIZE(tps6586x_rtc_resources),
+ .resources = &tps6586x_rtc_resources[0],
},
{
.name = "tps6586x-onkey",
@@ -116,6 +129,7 @@ struct tps6586x {
int irq_base;
u32 irq_en;
u8 mask_reg[5];
+ struct irq_domain *irq_domain;
};
static inline struct tps6586x *dev_to_tps6586x(struct device *dev)
@@ -184,6 +198,14 @@ int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
}
EXPORT_SYMBOL_GPL(tps6586x_update);
+int tps6586x_irq_get_virq(struct device *dev, int irq)
+{
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return irq_create_mapping(tps6586x->irq_domain, irq);
+}
+EXPORT_SYMBOL_GPL(tps6586x_irq_get_virq);
+
static int __remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
@@ -205,7 +227,7 @@ static void tps6586x_irq_lock(struct irq_data *data)
static void tps6586x_irq_enable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
@@ -216,7 +238,7 @@ static void tps6586x_irq_disable(struct irq_data *irq_data)
{
struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
- unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+ unsigned int __irq = irq_data->hwirq;
const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
@@ -239,6 +261,39 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
mutex_unlock(&tps6586x->irq_lock);
}
+static struct irq_chip tps6586x_irq_chip = {
+ .name = "tps6586x",
+ .irq_bus_lock = tps6586x_irq_lock,
+ .irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
+ .irq_disable = tps6586x_irq_disable,
+ .irq_enable = tps6586x_irq_enable,
+};
+
+static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps6586x *tps6586x = h->host_data;
+
+ irq_set_chip_data(virq, tps6586x);
+ irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps6586x_domain_ops = {
+ .map = tps6586x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static irqreturn_t tps6586x_irq(int irq, void *data)
{
struct tps6586x *tps6586x = data;
@@ -259,7 +314,8 @@ static irqreturn_t tps6586x_irq(int irq, void *data)
int i = __ffs(acks);
if (tps6586x->irq_en & (1 << i))
- handle_nested_irq(tps6586x->irq_base + i);
+ handle_nested_irq(
+ irq_find_mapping(tps6586x->irq_domain, i));
acks &= ~(1 << i);
}
@@ -267,16 +323,13 @@ static irqreturn_t tps6586x_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
+static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
int irq_base)
{
int i, ret;
u8 tmp[4];
-
- if (!irq_base) {
- dev_warn(tps6586x->dev, "No interrupt support on IRQ base\n");
- return -EINVAL;
- }
+ int new_irq_base;
+ int irq_num = ARRAY_SIZE(tps6586x_irqs);
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
@@ -286,25 +339,24 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
- tps6586x->irq_base = irq_base;
-
- tps6586x->irq_chip.name = "tps6586x";
- tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
- tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
- tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
- tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
-
- for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
- int __irq = i + tps6586x->irq_base;
- irq_set_chip_data(__irq, tps6586x);
- irq_set_chip_and_handler(__irq, &tps6586x->irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(__irq, IRQF_VALID);
-#endif
+ if (irq_base > 0) {
+ new_irq_base = irq_alloc_descs(irq_base, 0, irq_num, -1);
+ if (new_irq_base < 0) {
+ dev_err(tps6586x->dev,
+ "Failed to alloc IRQs: %d\n", new_irq_base);
+ return new_irq_base;
+ }
+ } else {
+ new_irq_base = 0;
}
+ tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node,
+ irq_num, new_irq_base, &tps6586x_domain_ops,
+ tps6586x);
+ if (!tps6586x->irq_domain) {
+ dev_err(tps6586x->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
"tps6586x", tps6586x);
@@ -316,7 +368,7 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
return ret;
}
-static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x,
+static int tps6586x_add_subdevs(struct tps6586x *tps6586x,
struct tps6586x_platform_data *pdata)
{
struct tps6586x_subdev_info *subdev;
@@ -350,80 +402,19 @@ failed:
}
#ifdef CONFIG_OF
-static struct of_regulator_match tps6586x_matches[] = {
- { .name = "sys", .driver_data = (void *)TPS6586X_ID_SYS },
- { .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 },
- { .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 },
- { .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 },
- { .name = "ldo0", .driver_data = (void *)TPS6586X_ID_LDO_0 },
- { .name = "ldo1", .driver_data = (void *)TPS6586X_ID_LDO_1 },
- { .name = "ldo2", .driver_data = (void *)TPS6586X_ID_LDO_2 },
- { .name = "ldo3", .driver_data = (void *)TPS6586X_ID_LDO_3 },
- { .name = "ldo4", .driver_data = (void *)TPS6586X_ID_LDO_4 },
- { .name = "ldo5", .driver_data = (void *)TPS6586X_ID_LDO_5 },
- { .name = "ldo6", .driver_data = (void *)TPS6586X_ID_LDO_6 },
- { .name = "ldo7", .driver_data = (void *)TPS6586X_ID_LDO_7 },
- { .name = "ldo8", .driver_data = (void *)TPS6586X_ID_LDO_8 },
- { .name = "ldo9", .driver_data = (void *)TPS6586X_ID_LDO_9 },
- { .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC },
-};
-
static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *client)
{
- const unsigned int num = ARRAY_SIZE(tps6586x_matches);
struct device_node *np = client->dev.of_node;
struct tps6586x_platform_data *pdata;
- struct tps6586x_subdev_info *devs;
- struct device_node *regs;
- const char *sys_rail_name = NULL;
- unsigned int count;
- unsigned int i, j;
- int err;
-
- regs = of_find_node_by_name(np, "regulators");
- if (!regs)
- return NULL;
-
- err = of_regulator_match(&client->dev, regs, tps6586x_matches, num);
- if (err < 0) {
- of_node_put(regs);
- return NULL;
- }
-
- of_node_put(regs);
- count = err;
-
- devs = devm_kzalloc(&client->dev, count * sizeof(*devs), GFP_KERNEL);
- if (!devs)
- return NULL;
-
- for (i = 0, j = 0; i < num && j < count; i++) {
- struct regulator_init_data *reg_idata;
-
- if (!tps6586x_matches[i].init_data)
- continue;
-
- reg_idata = tps6586x_matches[i].init_data;
- devs[j].name = "tps6586x-regulator";
- devs[j].platform_data = tps6586x_matches[i].init_data;
- devs[j].id = (int)tps6586x_matches[i].driver_data;
- if (devs[j].id == TPS6586X_ID_SYS)
- sys_rail_name = reg_idata->constraints.name;
-
- if ((devs[j].id == TPS6586X_ID_LDO_5) ||
- (devs[j].id == TPS6586X_ID_LDO_RTC))
- reg_idata->supply_regulator = sys_rail_name;
-
- devs[j].of_node = tps6586x_matches[i].of_node;
- j++;
- }
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ if (!pdata) {
+ dev_err(&client->dev, "Memory allocation failed\n");
return NULL;
+ }
- pdata->num_subdevs = count;
- pdata->subdevs = devs;
+ pdata->num_subdevs = 0;
+ pdata->subdevs = NULL;
pdata->gpio_base = -1;
pdata->irq_base = -1;
pdata->pm_off = of_property_read_bool(np, "ti,system-power-controller");
@@ -468,7 +459,7 @@ static void tps6586x_power_off(void)
tps6586x_set_bits(tps6586x_dev, TPS6586X_SUPPLYENE, SLEEP_MODE_BIT);
}
-static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
+static int tps6586x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tps6586x_platform_data *pdata = client->dev.platform_data;
@@ -521,7 +512,7 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
ret = mfd_add_devices(tps6586x->dev, -1,
tps6586x_cell, ARRAY_SIZE(tps6586x_cell),
- NULL, 0, NULL);
+ NULL, 0, tps6586x->irq_domain);
if (ret < 0) {
dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
goto err_mfd_add;
@@ -548,7 +539,7 @@ err_mfd_add:
return ret;
}
-static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
+static int tps6586x_i2c_remove(struct i2c_client *client)
{
struct tps6586x *tps6586x = i2c_get_clientdata(client);
@@ -572,7 +563,7 @@ static struct i2c_driver tps6586x_driver = {
.of_match_table = of_match_ptr(tps6586x_of_match),
},
.probe = tps6586x_i2c_probe,
- .remove = __devexit_p(tps6586x_i2c_remove),
+ .remove = tps6586x_i2c_remove,
.id_table = tps6586x_id_table,
};
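
The tps6586x conversion above drops the fixed irq_base arithmetic in favour of an irq_domain, so Linux IRQ numbers are obtained by mapping rather than by offset: the chained handler translates a hardware bit with irq_find_mapping(), and the new exported helper lets users resolve a virtual IRQ up front. A short usage sketch for a hypothetical consumer (the error handling is illustrative only):

/* e.g. in a sub-device probe, resolving the RTC alarm interrupt */
int virq = tps6586x_irq_get_virq(pdev->dev.parent, TPS6586X_INT_RTC_ALM1);
if (virq <= 0)
        return -ENXIO;
/* request_threaded_irq(virq, ...) would follow here */
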
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
deleted file mode 100644
index 09aab3e4776d..000000000000
--- a/drivers/mfd/tps65910-irq.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * tps65910-irq.c -- TI TPS6591x
- *
- * Copyright 2010 Texas Instruments Inc.
- *
- * Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65910.h>
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads. We're also assuming that
- * it's rare to get lots of interrupts firing simultaneously so try to
- * minimise I/O.
- */
-static irqreturn_t tps65910_irq(int irq, void *irq_data)
-{
- struct tps65910 *tps65910 = irq_data;
- unsigned int reg;
- u32 irq_sts;
- u32 irq_mask;
- int i;
-
- tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
- irq_sts = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
- irq_sts |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
- irq_sts |= reg << 16;
- }
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- irq_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- irq_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- irq_mask |= reg << 16;
- }
-
- irq_sts &= ~irq_mask;
-
- if (!irq_sts)
- return IRQ_NONE;
-
- for (i = 0; i < tps65910->irq_num; i++) {
-
- if (!(irq_sts & (1 << i)))
- continue;
-
- handle_nested_irq(irq_find_mapping(tps65910->domain, i));
- }
-
- /* Write the STS register back to clear IRQs we handled */
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
- reg = irq_sts & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = irq_sts >> 8;
- tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
- }
-
- return IRQ_HANDLED;
-}
-
-static void tps65910_irq_lock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- u32 reg_mask;
- unsigned int reg;
-
- tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
- reg_mask = reg;
- tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
- reg_mask |= reg << 8;
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
- reg_mask |= reg << 16;
- }
-
- if (tps65910->irq_mask != reg_mask) {
- reg = tps65910->irq_mask & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
- reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
- switch (tps65910_chip_id(tps65910)) {
- case TPS65911:
- reg = tps65910->irq_mask >> 16;
- tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
- }
- }
- mutex_unlock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_enable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask &= ~(1 << data->hwirq);
-}
-
-static void tps65910_irq_disable(struct irq_data *data)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
- tps65910->irq_mask |= (1 << data->hwirq);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tps65910_irq_set_wake(struct irq_data *data, unsigned int enable)
-{
- struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- return irq_set_irq_wake(tps65910->chip_irq, enable);
-}
-#else
-#define tps65910_irq_set_wake NULL
-#endif
-
-static struct irq_chip tps65910_irq_chip = {
- .name = "tps65910",
- .irq_bus_lock = tps65910_irq_lock,
- .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
- .irq_disable = tps65910_irq_disable,
- .irq_enable = tps65910_irq_enable,
- .irq_set_wake = tps65910_irq_set_wake,
-};
-
-static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct tps65910 *tps65910 = h->host_data;
-
- irq_set_chip_data(virq, tps65910);
- irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
- irq_set_nested_thread(virq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(virq, IRQF_VALID);
-#else
- irq_set_noprobe(virq);
-#endif
-
- return 0;
-}
-
-static struct irq_domain_ops tps65910_domain_ops = {
- .map = tps65910_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
- struct tps65910_platform_data *pdata)
-{
- int ret;
- int flags = IRQF_ONESHOT;
-
- if (!irq) {
- dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
- return -EINVAL;
- }
-
- if (!pdata) {
- dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
- return -EINVAL;
- }
-
- switch (tps65910_chip_id(tps65910)) {
- case TPS65910:
- tps65910->irq_num = TPS65910_NUM_IRQ;
- break;
- case TPS65911:
- tps65910->irq_num = TPS65911_NUM_IRQ;
- break;
- }
-
- if (pdata->irq_base > 0) {
- pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
- tps65910->irq_num, -1);
- if (pdata->irq_base < 0) {
- dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
- pdata->irq_base);
- return pdata->irq_base;
- }
- }
-
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
- if (pdata->irq_base > 0)
- tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
- tps65910->irq_num,
- pdata->irq_base,
- 0,
- &tps65910_domain_ops, tps65910);
- else
- tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
- tps65910->irq_num,
- &tps65910_domain_ops, tps65910);
-
- if (!tps65910->domain) {
- dev_err(tps65910->dev, "Failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
- "tps65910", tps65910);
-
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
-
- if (ret != 0)
- dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
-
- return ret;
-}
-
-int tps65910_irq_exit(struct tps65910 *tps65910)
-{
- if (tps65910->chip_irq)
- free_irq(tps65910->chip_irq, tps65910);
- return 0;
-}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 0d79ce2b5014..d79277204835 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,6 +19,9 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
@@ -50,6 +53,219 @@ static struct mfd_cell tps65910s[] = {
};
+static const struct regmap_irq tps65911_irqs[] = {
+ /* INT_STS */
+ [TPS65911_IRQ_PWRHOLD_F] = {
+ .mask = INT_MSK_PWRHOLD_F_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_VBAT_VMHI] = {
+ .mask = INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON] = {
+ .mask = INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRON_LP] = {
+ .mask = INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_PWRHOLD_R] = {
+ .mask = INT_MSK_PWRHOLD_R_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_HOTDIE] = {
+ .mask = INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_ALARM] = {
+ .mask = INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65911_IRQ_RTC_PERIOD] = {
+ .mask = INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65911_IRQ_GPIO0_R] = {
+ .mask = INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO0_F] = {
+ .mask = INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_R] = {
+ .mask = INT_MSK2_GPIO1_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO1_F] = {
+ .mask = INT_MSK2_GPIO1_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_R] = {
+ .mask = INT_MSK2_GPIO2_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO2_F] = {
+ .mask = INT_MSK2_GPIO2_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_R] = {
+ .mask = INT_MSK2_GPIO3_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65911_IRQ_GPIO3_F] = {
+ .mask = INT_MSK2_GPIO3_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+
+ /* INT_STS3 */
+ [TPS65911_IRQ_GPIO4_R] = {
+ .mask = INT_MSK3_GPIO4_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO4_F] = {
+ .mask = INT_MSK3_GPIO4_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_R] = {
+ .mask = INT_MSK3_GPIO5_R_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_GPIO5_F] = {
+ .mask = INT_MSK3_GPIO5_F_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_WTCHDG] = {
+ .mask = INT_MSK3_WTCHDG_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_H] = {
+ .mask = INT_MSK3_VMBCH2_H_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_VMBCH2_L] = {
+ .mask = INT_MSK3_VMBCH2_L_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+ [TPS65911_IRQ_PWRDN] = {
+ .mask = INT_MSK3_PWRDN_IT_MSK_MASK,
+ .reg_offset = 2,
+ },
+};
+
+static const struct regmap_irq tps65910_irqs[] = {
+ /* INT_STS */
+ [TPS65910_IRQ_VBAT_VMBDCH] = {
+ .mask = TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_VBAT_VMHI] = {
+ .mask = TPS65910_INT_MSK_VMBHI_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON] = {
+ .mask = TPS65910_INT_MSK_PWRON_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRON_LP] = {
+ .mask = TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_PWRHOLD] = {
+ .mask = TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_HOTDIE] = {
+ .mask = TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_ALARM] = {
+ .mask = TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+ [TPS65910_IRQ_RTC_PERIOD] = {
+ .mask = TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+ .reg_offset = 0,
+ },
+
+ /* INT_STS2 */
+ [TPS65910_IRQ_GPIO_R] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+ [TPS65910_IRQ_GPIO_F] = {
+ .mask = TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK,
+ .reg_offset = 1,
+ },
+};
+
+static struct regmap_irq_chip tps65911_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65911_irqs,
+ .num_irqs = ARRAY_SIZE(tps65911_irqs),
+ .num_regs = 3,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
+
+static struct regmap_irq_chip tps65910_irq_chip = {
+ .name = "tps65910",
+ .irqs = tps65910_irqs,
+ .num_irqs = ARRAY_SIZE(tps65910_irqs),
+ .num_regs = 2,
+ .irq_reg_stride = 2,
+ .status_base = TPS65910_INT_STS,
+ .mask_base = TPS65910_INT_MSK,
+ .ack_base = TPS65910_INT_STS,
+};
+
+static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+ struct tps65910_platform_data *pdata)
+{
+ int ret = 0;
+ static struct regmap_irq_chip *tps6591x_irqs_chip;
+
+ if (!irq) {
+ dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+ return -EINVAL;
+ }
+
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
+ return -EINVAL;
+ }
+
+ switch (tps65910_chip_id(tps65910)) {
+ case TPS65910:
+ tps6591x_irqs_chip = &tps65910_irq_chip;
+ break;
+ case TPS65911:
+ tps6591x_irqs_chip = &tps65911_irq_chip;
+ break;
+ }
+
+ tps65910->chip_irq = irq;
+ ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
+ IRQF_ONESHOT, pdata->irq_base,
+ tps6591x_irqs_chip, &tps65910->irq_data);
+ if (ret < 0)
+ dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
+ return ret;
+}
+
+static int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+ if (tps65910->chip_irq > 0)
+ regmap_del_irq_chip(tps65910->chip_irq, tps65910->irq_data);
+ return 0;
+}
+
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -78,7 +294,7 @@ static const struct regmap_config tps65910_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
-static int __devinit tps65910_ck32k_init(struct tps65910 *tps65910,
+static int tps65910_ck32k_init(struct tps65910 *tps65910,
struct tps65910_board *pmic_pdata)
{
int ret;
@@ -96,7 +312,7 @@ static int __devinit tps65910_ck32k_init(struct tps65910 *tps65910,
return 0;
}
-static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+static int tps65910_sleepinit(struct tps65910 *tps65910,
struct tps65910_board *pmic_pdata)
{
struct device *dev = NULL;
@@ -237,7 +453,7 @@ static void tps65910_power_off(void)
DEVCTRL_DEV_ON_MASK);
}
-static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+static int tps65910_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct tps65910 *tps65910;
@@ -270,7 +486,6 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
tps65910->id = chip_id;
- mutex_init(&tps65910->io_mutex);
tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
@@ -279,14 +494,6 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(tps65910->dev, -1,
- tps65910s, ARRAY_SIZE(tps65910s),
- NULL, 0, NULL);
- if (ret < 0) {
- dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
- return ret;
- }
-
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
@@ -299,10 +506,19 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
pm_power_off = tps65910_power_off;
}
+ ret = mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0,
+ regmap_irq_get_domain(tps65910->irq_data));
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
+
return ret;
}
-static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
+static int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
@@ -327,7 +543,7 @@ static struct i2c_driver tps65910_i2c_driver = {
.of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
- .remove = __devexit_p(tps65910_i2c_remove),
+ .remove = tps65910_i2c_remove,
.id_table = tps65910_i2c_id,
};
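
In the tps65910 probe above, the domain created by regmap_add_irq_chip() is retrieved with regmap_irq_get_domain() and handed to mfd_add_devices(); for cells that declare IORESOURCE_IRQ resources (as the tps6586x RTC cell does earlier in this series), the MFD core is expected to translate those hardware interrupt numbers through the supplied domain, so a child can simply ask for its interrupt. A hedged sketch of the child side, with a hypothetical cell driver:

#include <linux/platform_device.h>

/* hypothetical sub-device probe; the IRQ returned is already a mapped virq */
static int foo_cell_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;
        /* request_threaded_irq(irq, ...) would follow */
        return 0;
}
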
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 0b6e361432c4..c0816ebd9d7e 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -122,7 +122,7 @@ static ssize_t comp_threshold_show(struct device *dev,
static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
-static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
+static int tps65911_comparator_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
@@ -152,7 +152,7 @@ static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
return ret;
}
-static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
+static int tps65911_comparator_remove(struct platform_device *pdev)
{
struct tps65910 *tps65910;
@@ -169,7 +169,7 @@ static struct platform_driver tps65911_comparator_driver = {
.owner = THIS_MODULE,
},
.probe = tps65911_comparator_probe,
- .remove = __devexit_p(tps65911_comparator_remove),
+ .remove = tps65911_comparator_remove,
};
static int __init tps65911_comparator_init(void)
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 27d3302d56b8..b45f460d299f 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -81,7 +81,7 @@ static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
return ret;
}
-static int __devinit tps65912_spi_probe(struct spi_device *spi)
+static int tps65912_spi_probe(struct spi_device *spi)
{
struct tps65912 *tps65912;
@@ -99,7 +99,7 @@ static int __devinit tps65912_spi_probe(struct spi_device *spi)
return tps65912_device_init(tps65912);
}
-static int __devexit tps65912_spi_remove(struct spi_device *spi)
+static int tps65912_spi_remove(struct spi_device *spi)
{
struct tps65912 *tps65912 = spi_get_drvdata(spi);
@@ -114,7 +114,7 @@ static struct spi_driver tps65912_spi_driver = {
.owner = THIS_MODULE,
},
.probe = tps65912_spi_probe,
- .remove = __devexit_p(tps65912_spi_remove),
+ .remove = tps65912_spi_remove,
};
static int __init tps65912_spi_init(void)
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
new file mode 100644
index 000000000000..c90a2c450f51
--- /dev/null
+++ b/drivers/mfd/tps80031.c
@@ -0,0 +1,573 @@
+/*
+ * tps80031.c -- TI TPS80031/TPS80032 mfd core driver.
+ *
+ * MFD core driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static struct resource tps80031_rtc_resources[] = {
+ {
+ .start = TPS80031_INT_RTC_ALARM,
+ .end = TPS80031_INT_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* TPS80031 sub mfd devices */
+static struct mfd_cell tps80031_cell[] = {
+ {
+ .name = "tps80031-pmic",
+ },
+ {
+ .name = "tps80031-clock",
+ },
+ {
+ .name = "tps80031-rtc",
+ .num_resources = ARRAY_SIZE(tps80031_rtc_resources),
+ .resources = tps80031_rtc_resources,
+ },
+ {
+ .name = "tps80031-gpadc",
+ },
+ {
+ .name = "tps80031-fuel-gauge",
+ },
+ {
+ .name = "tps80031-charger",
+ },
+};
+
+static int tps80031_slave_address[TPS80031_NUM_SLAVES] = {
+ TPS80031_I2C_ID0_ADDR,
+ TPS80031_I2C_ID1_ADDR,
+ TPS80031_I2C_ID2_ADDR,
+ TPS80031_I2C_ID3_ADDR,
+};
+
+struct tps80031_pupd_data {
+ u8 reg;
+ u8 pullup_bit;
+ u8 pulldown_bit;
+};
+
+#define TPS80031_IRQ(_reg, _mask) \
+ { \
+ .reg_offset = (TPS80031_INT_MSK_LINE_##_reg) - \
+ TPS80031_INT_MSK_LINE_A, \
+ .mask = BIT(_mask), \
+ }
+
+static const struct regmap_irq tps80031_main_irqs[] = {
+ [TPS80031_INT_PWRON] = TPS80031_IRQ(A, 0),
+ [TPS80031_INT_RPWRON] = TPS80031_IRQ(A, 1),
+ [TPS80031_INT_SYS_VLOW] = TPS80031_IRQ(A, 2),
+ [TPS80031_INT_RTC_ALARM] = TPS80031_IRQ(A, 3),
+ [TPS80031_INT_RTC_PERIOD] = TPS80031_IRQ(A, 4),
+ [TPS80031_INT_HOT_DIE] = TPS80031_IRQ(A, 5),
+ [TPS80031_INT_VXX_SHORT] = TPS80031_IRQ(A, 6),
+ [TPS80031_INT_SPDURATION] = TPS80031_IRQ(A, 7),
+ [TPS80031_INT_WATCHDOG] = TPS80031_IRQ(B, 0),
+ [TPS80031_INT_BAT] = TPS80031_IRQ(B, 1),
+ [TPS80031_INT_SIM] = TPS80031_IRQ(B, 2),
+ [TPS80031_INT_MMC] = TPS80031_IRQ(B, 3),
+ [TPS80031_INT_RES] = TPS80031_IRQ(B, 4),
+ [TPS80031_INT_GPADC_RT] = TPS80031_IRQ(B, 5),
+ [TPS80031_INT_GPADC_SW2_EOC] = TPS80031_IRQ(B, 6),
+ [TPS80031_INT_CC_AUTOCAL] = TPS80031_IRQ(B, 7),
+ [TPS80031_INT_ID_WKUP] = TPS80031_IRQ(C, 0),
+ [TPS80031_INT_VBUSS_WKUP] = TPS80031_IRQ(C, 1),
+ [TPS80031_INT_ID] = TPS80031_IRQ(C, 2),
+ [TPS80031_INT_VBUS] = TPS80031_IRQ(C, 3),
+ [TPS80031_INT_CHRG_CTRL] = TPS80031_IRQ(C, 4),
+ [TPS80031_INT_EXT_CHRG] = TPS80031_IRQ(C, 5),
+ [TPS80031_INT_INT_CHRG] = TPS80031_IRQ(C, 6),
+ [TPS80031_INT_RES2] = TPS80031_IRQ(C, 7),
+};
+
+static struct regmap_irq_chip tps80031_irq_chip = {
+ .name = "tps80031",
+ .irqs = tps80031_main_irqs,
+ .num_irqs = ARRAY_SIZE(tps80031_main_irqs),
+ .num_regs = 3,
+ .status_base = TPS80031_INT_STS_A,
+ .mask_base = TPS80031_INT_MSK_LINE_A,
+};
+
+#define PUPD_DATA(_reg, _pulldown_bit, _pullup_bit) \
+ { \
+ .reg = TPS80031_CFG_INPUT_PUPD##_reg, \
+ .pulldown_bit = _pulldown_bit, \
+ .pullup_bit = _pullup_bit, \
+ }
+
+static const struct tps80031_pupd_data tps80031_pupds[] = {
+ [TPS80031_PREQ1] = PUPD_DATA(1, BIT(0), BIT(1)),
+ [TPS80031_PREQ2A] = PUPD_DATA(1, BIT(2), BIT(3)),
+ [TPS80031_PREQ2B] = PUPD_DATA(1, BIT(4), BIT(5)),
+ [TPS80031_PREQ2C] = PUPD_DATA(1, BIT(6), BIT(7)),
+ [TPS80031_PREQ3] = PUPD_DATA(2, BIT(0), BIT(1)),
+ [TPS80031_NRES_WARM] = PUPD_DATA(2, 0, BIT(2)),
+ [TPS80031_PWM_FORCE] = PUPD_DATA(2, BIT(5), 0),
+ [TPS80031_CHRG_EXT_CHRG_STATZ] = PUPD_DATA(2, 0, BIT(6)),
+ [TPS80031_SIM] = PUPD_DATA(3, BIT(0), BIT(1)),
+ [TPS80031_MMC] = PUPD_DATA(3, BIT(2), BIT(3)),
+ [TPS80031_GPADC_START] = PUPD_DATA(3, BIT(4), 0),
+ [TPS80031_DVSI2C_SCL] = PUPD_DATA(4, 0, BIT(0)),
+ [TPS80031_DVSI2C_SDA] = PUPD_DATA(4, 0, BIT(1)),
+ [TPS80031_CTLI2C_SCL] = PUPD_DATA(4, 0, BIT(2)),
+ [TPS80031_CTLI2C_SDA] = PUPD_DATA(4, 0, BIT(3)),
+};
+static struct tps80031 *tps80031_power_off_dev;
+
+int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add)
+{
+ u8 res_ass_reg = 0;
+ int preq_mask_bit = 0;
+ int ret;
+
+ if (!(ext_ctrl_flag & TPS80031_EXT_PWR_REQ))
+ return 0;
+
+ if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ1) {
+ res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 5;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ2) {
+ res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 6;
+ } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ3) {
+ res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3);
+ preq_mask_bit = 7;
+ }
+
+ /* Configure the PREQx_RES_ASS (resource assignment) register */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, res_ass_reg,
+ BIT(preq_bit & 0x7));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x setbit failed, err = %d\n",
+ res_ass_reg, ret);
+ return ret;
+ }
+
+ /* Unmask the PREQ */
+ ret = tps80031_clr_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit));
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x clrbit failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+
+ /* Switch regulator control to resource now */
+ if (ext_ctrl_flag & (TPS80031_PWR_REQ_INPUT_PREQ2 |
+ TPS80031_PWR_REQ_INPUT_PREQ3)) {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, state_reg_add,
+ 0x0, TPS80031_STATE_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ state_reg_add, ret);
+ } else {
+ ret = tps80031_update(dev, TPS80031_SLAVE_ID1, trans_reg_add,
+ TPS80031_TRANS_SLEEP_OFF,
+ TPS80031_TRANS_SLEEP_MASK);
+ if (ret < 0)
+ dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+ trans_reg_add, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config);
+
+static void tps80031_power_off(void)
+{
+ dev_info(tps80031_power_off_dev->dev, "switching off PMU\n");
+ tps80031_write(tps80031_power_off_dev->dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_DEV_ON, TPS80031_DEVOFF);
+}
+
+static void tps80031_pupd_init(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct tps80031_pupd_init_data *pupd_init_data = pdata->pupd_init_data;
+ int data_size = pdata->pupd_init_data_size;
+ int i;
+
+ for (i = 0; i < data_size; ++i) {
+ struct tps80031_pupd_init_data *pupd_init = &pupd_init_data[i];
+ const struct tps80031_pupd_data *pupd =
+ &tps80031_pupds[pupd_init->input_pin];
+ u8 update_value = 0;
+ u8 update_mask = pupd->pulldown_bit | pupd->pullup_bit;
+
+ if (pupd_init->setting == TPS80031_PUPD_PULLDOWN)
+ update_value = pupd->pulldown_bit;
+ else if (pupd_init->setting == TPS80031_PUPD_PULLUP)
+ update_value = pupd->pullup_bit;
+
+ tps80031_update(tps80031->dev, TPS80031_SLAVE_ID1, pupd->reg,
+ update_value, update_mask);
+ }
+}
+
+static int tps80031_init_ext_control(struct tps80031 *tps80031,
+ struct tps80031_platform_data *pdata)
+{
+ struct device *dev = tps80031->dev;
+ int ret;
+ int i;
+
+ /* Clear all external control assignments for the rails */
+ for (i = 0; i < 9; ++i) {
+ ret = tps80031_write(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PREQ1_RES_ASS_A + i, 0);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x write failed, err = %d\n",
+ TPS80031_PREQ1_RES_ASS_A + i, ret);
+ return ret;
+ }
+ }
+
+ /* Mask the PREQ */
+ ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1,
+ TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5);
+ if (ret < 0) {
+ dev_err(dev, "reg 0x%02x set_bits failed, err = %d\n",
+ TPS80031_PHOENIX_MSK_TRANSITION, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int tps80031_irq_init(struct tps80031 *tps80031, int irq, int irq_base)
+{
+ struct device *dev = tps80031->dev;
+ int i, ret;
+
+ /*
+ * The INT_MSK_STS registers control whether the status registers are
+ * updated when an interrupt occurs, and the INT_MSK_LINE registers
+ * control whether that status is routed to the actual INT line. As per
+ * the datasheet:
+ * When INT_MSK_LINE [i] is set to 1, the associated interrupt
+ * number i is INT line masked, which means that no interrupt is
+ * generated on the INT line.
+ * When INT_MSK_LINE [i] is set to 0, the associated interrupt
+ * number i is line enabled: An interrupt is generated on the
+ * INT line.
+ * In any case, the INT_STS [i] status bit may or may not be updated,
+ * only linked to the INT_MSK_STS [i] configuration register bit.
+ *
+ * When INT_MSK_STS [i] is set to 1, the associated interrupt number
+ * i is status masked, which means that no interrupt is stored in
+ * the INT_STS[i] status bit. Note that no interrupt number i is
+ * generated on the INT line, even if the INT_MSK_LINE [i] register
+ * bit is set to 0.
+ * When INT_MSK_STS [i] is set to 0, the associated interrupt number i
+ * is status enabled: An interrupt status is updated in the INT_STS [i]
+ * register. The interrupt may or may not be generated on the INT line,
+ * depending on the INT_MSK_LINE [i] configuration register bit.
+ */
+ for (i = 0; i < 3; i++)
+ tps80031_write(dev, TPS80031_SLAVE_ID2,
+ TPS80031_INT_MSK_STS_A + i, 0x00);
+
+ ret = regmap_add_irq_chip(tps80031->regmap[TPS80031_SLAVE_ID2], irq,
+ IRQF_ONESHOT, irq_base,
+ &tps80031_irq_chip, &tps80031->irq_data);
+ if (ret < 0) {
+ dev_err(dev, "add irq failed, err = %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static bool rd_wr_reg_id0(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS1_CFG_FORCE ... TPS80031_SMPS2_CFG_VOLTAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SECONDS_REG ... TPS80031_RTC_RESET_STATUS_REG:
+ case TPS80031_VALIDITY0 ... TPS80031_VALIDITY7:
+ case TPS80031_PHOENIX_START_CONDITION ... TPS80031_KEY_PRESS_DUR_CFG:
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_BROADCAST_ADDR_ALL ... TPS80031_BROADCAST_ADDR_CLK_RST:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ case TPS80031_BACKUP_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_volatile_reg_id1(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+ case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+ case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+ case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+ case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+ case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id2(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_USB_VENDOR_ID_LSB ... TPS80031_USB_OTG_REVISION:
+ case TPS80031_GPADC_CTRL ... TPS80031_CTRL_P1:
+ case TPS80031_RTCH0_LSB ... TPS80031_GPCH0_MSB:
+ case TPS80031_TOGGLE1 ... TPS80031_VIBMODE:
+ case TPS80031_PWM1ON ... TPS80031_PWM2OFF:
+ case TPS80031_FG_REG_00 ... TPS80031_FG_REG_11:
+ case TPS80031_INT_STS_A ... TPS80031_INT_MSK_STS_C:
+ case TPS80031_CONTROLLER_CTRL2 ... TPS80031_LED_PWM_CTRL2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rd_wr_reg_id3(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS80031_GPADC_TRIM0 ... TPS80031_GPADC_TRIM18:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tps80031_regmap_configs[] = {
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id0,
+ .readable_reg = rd_wr_reg_id0,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id1,
+ .readable_reg = rd_wr_reg_id1,
+ .volatile_reg = is_volatile_reg_id1,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id2,
+ .readable_reg = rd_wr_reg_id2,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+ {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = rd_wr_reg_id3,
+ .readable_reg = rd_wr_reg_id3,
+ .max_register = TPS80031_MAX_REGISTER,
+ },
+};
+
+static int tps80031_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tps80031_platform_data *pdata = client->dev.platform_data;
+ struct tps80031 *tps80031;
+ int ret;
+ uint8_t es_version;
+ uint8_t ep_ver;
+ int i;
+
+ if (!pdata) {
+ dev_err(&client->dev, "tps80031 requires platform data\n");
+ return -EINVAL;
+ }
+
+ tps80031 = devm_kzalloc(&client->dev, sizeof(*tps80031), GFP_KERNEL);
+ if (!tps80031) {
+ dev_err(&client->dev, "Malloc failed for tps80031\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031_slave_address[i] == client->addr)
+ tps80031->clients[i] = client;
+ else
+ tps80031->clients[i] = i2c_new_dummy(client->adapter,
+ tps80031_slave_address[i]);
+ if (!tps80031->clients[i]) {
+ dev_err(&client->dev, "can't attach client %d\n", i);
+ ret = -ENOMEM;
+ goto fail_client_reg;
+ }
+
+ i2c_set_clientdata(tps80031->clients[i], tps80031);
+ tps80031->regmap[i] = devm_regmap_init_i2c(tps80031->clients[i],
+ &tps80031_regmap_configs[i]);
+ if (IS_ERR(tps80031->regmap[i])) {
+ ret = PTR_ERR(tps80031->regmap[i]);
+ dev_err(&client->dev,
+ "regmap %d init failed, err %d\n", i, ret);
+ goto fail_client_reg;
+ }
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_JTAGVERNUM, &es_version);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Silicon version number read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+ TPS80031_EPROM_REV, &ep_ver);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Silicon eeprom version read failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n",
+ es_version, ep_ver);
+ tps80031->es_version = es_version;
+ tps80031->dev = &client->dev;
+ i2c_set_clientdata(client, tps80031);
+ tps80031->chip_info = id->driver_data;
+
+ ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base);
+ if (ret) {
+ dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+ goto fail_client_reg;
+ }
+
+ tps80031_pupd_init(tps80031, pdata);
+
+ tps80031_init_ext_control(tps80031, pdata);
+
+ ret = mfd_add_devices(tps80031->dev, -1,
+ tps80031_cell, ARRAY_SIZE(tps80031_cell),
+ NULL, 0,
+ regmap_irq_get_domain(tps80031->irq_data));
+ if (ret < 0) {
+ dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
+ goto fail_mfd_add;
+ }
+
+ if (pdata->use_power_off && !pm_power_off) {
+ tps80031_power_off_dev = tps80031;
+ pm_power_off = tps80031_power_off;
+ }
+ return 0;
+
+fail_mfd_add:
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+fail_client_reg:
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] && (tps80031->clients[i] != client))
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return ret;
+}
+
+static int tps80031_remove(struct i2c_client *client)
+{
+ struct tps80031 *tps80031 = i2c_get_clientdata(client);
+ int i;
+
+ if (tps80031_power_off_dev == tps80031) {
+ tps80031_power_off_dev = NULL;
+ pm_power_off = NULL;
+ }
+
+ mfd_remove_devices(tps80031->dev);
+
+ regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+ for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+ if (tps80031->clients[i] != client)
+ i2c_unregister_device(tps80031->clients[i]);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id tps80031_id_table[] = {
+ { "tps80031", TPS80031 },
+ { "tps80032", TPS80032 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
+
+static struct i2c_driver tps80031_driver = {
+ .driver = {
+ .name = "tps80031",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps80031_probe,
+ .remove = tps80031_remove,
+ .id_table = tps80031_id_table,
+};
+
+static int __init tps80031_init(void)
+{
+ return i2c_add_driver(&tps80031_driver);
+}
+subsys_initcall(tps80031_init);
+
+static void __exit tps80031_exit(void)
+{
+ i2c_del_driver(&tps80031_driver);
+}
+module_exit(tps80031_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("TPS80031 core driver");
+MODULE_LICENSE("GPL v2");
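Because mfd_add_devices() above is handed regmap_irq_get_domain(tps80031->irq_data), the IRQ numbers listed in tps80031_rtc_resources are hardware IRQs in that domain and are translated to Linux virtual IRQs when the cells are registered; the reordered mfd_add_devices() call in the tps65910 probe earlier in this series relies on the same mechanism. A minimal sketch of how a child cell could pick up its interrupt follows; the child driver and names are illustrative, not part of this patch:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_rtc_alarm_irq(int irq, void *data)
{
        /* alarm handling would go here */
        return IRQ_HANDLED;
}

static int example_rtc_probe(struct platform_device *pdev)
{
        /* resource 0 is TPS80031_INT_RTC_ALARM, already mapped via the IRQ domain */
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        return devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                         example_rtc_alarm_irq, IRQF_ONESHOT,
                                         "example-rtc-alarm", pdev);
}
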
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a071a8643a47..4f3baadd0038 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>
@@ -65,9 +66,6 @@
/* Triton Core internal information (BEGIN) */
-/* Last - for index max*/
-#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
-
#define TWL_NUM_SLAVES 4
#define SUB_CHIP_ID0 0
@@ -171,13 +169,7 @@ EXPORT_SYMBOL(twl_rev);
/* Structure for each TWL4030/TWL6030 Slave */
struct twl_client {
struct i2c_client *client;
- u8 address;
-
- /* max numb of i2c_msg required is for read =2 */
- struct i2c_msg xfer_msg[2];
-
- /* To lock access to xfer_msg */
- struct mutex xfer_lock;
+ struct regmap *regmap;
};
static struct twl_client twl_modules[TWL_NUM_SLAVES];
@@ -189,7 +181,7 @@ struct twl_mapping {
};
static struct twl_mapping *twl_map;
-static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[] = {
/*
* NOTE: don't change this table without updating the
* <linux/i2c/twl.h> defines for TWL4030_MODULE_*
@@ -197,34 +189,62 @@ static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
*/
{ 0, TWL4030_BASEADD_USB },
-
{ 1, TWL4030_BASEADD_AUDIO_VOICE },
{ 1, TWL4030_BASEADD_GPIO },
{ 1, TWL4030_BASEADD_INTBR },
{ 1, TWL4030_BASEADD_PIH },
- { 1, TWL4030_BASEADD_TEST },
+ { 1, TWL4030_BASEADD_TEST },
{ 2, TWL4030_BASEADD_KEYPAD },
{ 2, TWL4030_BASEADD_MADC },
{ 2, TWL4030_BASEADD_INTERRUPTS },
{ 2, TWL4030_BASEADD_LED },
+
{ 2, TWL4030_BASEADD_MAIN_CHARGE },
{ 2, TWL4030_BASEADD_PRECHARGE },
{ 2, TWL4030_BASEADD_PWM0 },
{ 2, TWL4030_BASEADD_PWM1 },
{ 2, TWL4030_BASEADD_PWMA },
+
{ 2, TWL4030_BASEADD_PWMB },
{ 2, TWL5031_BASEADD_ACCESSORY },
{ 2, TWL5031_BASEADD_INTERRUPTS },
-
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
+
{ 3, TWL4030_BASEADD_PM_MASTER },
{ 3, TWL4030_BASEADD_PM_RECEIVER },
{ 3, TWL4030_BASEADD_RTC },
{ 3, TWL4030_BASEADD_SECURED_REG },
};
+static struct regmap_config twl4030_regmap_config[4] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4b */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
static struct twl_mapping twl6030_map[] = {
/*
* NOTE: don't change this table without updating the
@@ -254,14 +274,35 @@ static struct twl_mapping twl6030_map[] = {
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
{ SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
{ SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
-
{ SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
{ SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
{ SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
};
+static struct regmap_config twl6030_regmap_config[3] = {
+ {
+ /* Address 0x48 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x49 */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+ {
+ /* Address 0x4a */
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ },
+};
+
/*----------------------------------------------------------------------*/
/* Exported Functions */
@@ -283,9 +324,8 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
int ret;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -301,32 +341,14 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /*
- * [MSG1]: fill the register address data
- * fill the data Tx buffer
- */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = num_bytes + 1;
- msg->flags = 0;
- msg->buf = value;
- /* over write the first byte of buffer with the register address */
- *value = twl_map[mod_no].base + reg;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 1) {
- pr_err("%s: i2c_write failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_write);
@@ -342,12 +364,10 @@ EXPORT_SYMBOL(twl_i2c_write);
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
- u8 val;
int sid;
struct twl_client *twl;
- struct i2c_msg *msg;
- if (unlikely(mod_no > TWL_MODULE_LAST)) {
+ if (unlikely(mod_no >= TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
@@ -363,34 +383,14 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
}
twl = &twl_modules[sid];
- mutex_lock(&twl->xfer_lock);
- /* [MSG1] fill the register address data */
- msg = &twl->xfer_msg[0];
- msg->addr = twl->address;
- msg->len = 1;
- msg->flags = 0; /* Read the register value */
- val = twl_map[mod_no].base + reg;
- msg->buf = &val;
- /* [MSG2] fill the data rx buffer */
- msg = &twl->xfer_msg[1];
- msg->addr = twl->address;
- msg->flags = I2C_M_RD; /* Read the register value */
- msg->len = num_bytes; /* only n bytes */
- msg->buf = value;
- ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
- mutex_unlock(&twl->xfer_lock);
-
- /* i2c_transfer returns number of messages transferred */
- if (ret != 2) {
- pr_err("%s: i2c_read failed to transfer all messages\n",
- DRIVER_NAME);
- if (ret < 0)
- return ret;
- else
- return -EIO;
- } else {
- return 0;
- }
+ ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
+ value, num_bytes);
+
+ if (ret)
+ pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
+ DRIVER_NAME, mod_no, reg, num_bytes);
+
+ return ret;
}
EXPORT_SYMBOL(twl_i2c_read);
@@ -404,12 +404,7 @@ EXPORT_SYMBOL(twl_i2c_read);
*/
int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
-
- /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
- u8 temp_buffer[2] = { 0 };
- /* offset 1 contains the data */
- temp_buffer[1] = value;
- return twl_i2c_write(mod_no, temp_buffer, reg, 1);
+ return twl_i2c_write(mod_no, &value, reg, 1);
}
EXPORT_SYMBOL(twl_i2c_write_u8);
@@ -646,8 +641,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc) {
- child = add_child(2, "twl4030_madc",
+ if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
+ twl_class_is_4030()) {
+ child = add_child(SUB_CHIP_ID2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
true, irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
@@ -663,15 +659,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* HW security concerns, and "least privilege".
*/
sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
- child = add_child(sub_chip_id, "twl_rtc",
- NULL, 0,
+ child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
true, irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (IS_ENABLED(CONFIG_PWM_TWL6030) && twl_class_is_6030()) {
- child = add_child(SUB_CHIP_ID1, "twl6030-pwm", NULL, 0,
+ if (IS_ENABLED(CONFIG_PWM_TWL)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+ false, 0, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
+ if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
+ child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -723,9 +725,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
- child = add_child(0, "twl4030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq0 = USB_PRES, irq1 = USB */
irq_base + USB_PRES_INTR_OFFSET,
irq_base + USB_INTR_OFFSET);
@@ -773,9 +774,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
pdata->usb->features = features;
- child = add_child(0, "twl6030_usb",
- pdata->usb, sizeof(*pdata->usb),
- true,
+ child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+ pdata->usb, sizeof(*pdata->usb), true,
/* irq1 = VBUS_PRES, irq0 = USB ID */
irq_base + USBOTG_INTR_OFFSET,
irq_base + USB_PRES_INTR_OFFSET);
@@ -799,22 +799,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
}
if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
- child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
+ false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
- child = add_child(1, "twl4030_pwrbutton",
- NULL, 0, true, irq_base + 8 + 0, 0);
+ child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
+ true, irq_base + 8 + 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
twl_class_is_4030()) {
- sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl4030-audio",
+ child = add_child(SUB_CHIP_ID1, "twl4030-audio",
pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
@@ -1054,7 +1054,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
!(features & (TPS_SUBSET | TWL5031))) {
- child = add_child(3, "twl4030_bci",
+ child = add_child(SUB_CHIP_ID3, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci), false,
/* irq0 = CHG_PRES, irq1 = BCI */
irq_base + BCI_PRES_INTR_OFFSET,
@@ -1077,8 +1077,8 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1086,12 +1086,10 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
- e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
return e;
}
@@ -1170,12 +1168,13 @@ static int twl_remove(struct i2c_client *client)
}
/* NOTE: This driver only handles a single twl4030/tps659x0 chip */
-static int __devinit
+static int
twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct twl4030_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
struct platform_device *pdev;
+ struct regmap_config *twl_regmap_config;
int irq_base = 0;
int status;
unsigned i, num_slaves;
@@ -1229,22 +1228,23 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
if ((id->driver_data) & TWL6030_CLASS) {
twl_id = TWL6030_CLASS_ID;
twl_map = &twl6030_map[0];
+ twl_regmap_config = twl6030_regmap_config;
num_slaves = TWL_NUM_SLAVES - 1;
} else {
twl_id = TWL4030_CLASS_ID;
twl_map = &twl4030_map[0];
+ twl_regmap_config = twl4030_regmap_config;
num_slaves = TWL_NUM_SLAVES;
}
for (i = 0; i < num_slaves; i++) {
struct twl_client *twl = &twl_modules[i];
- twl->address = client->addr + i;
if (i == 0) {
twl->client = client;
} else {
twl->client = i2c_new_dummy(client->adapter,
- twl->address);
+ client->addr + i);
if (!twl->client) {
dev_err(&client->dev,
"can't attach client %d\n", i);
@@ -1252,7 +1252,16 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto fail;
}
}
- mutex_init(&twl->xfer_lock);
+
+ twl->regmap = devm_regmap_init_i2c(twl->client,
+ &twl_regmap_config[i]);
+ if (IS_ERR(twl->regmap)) {
+ status = PTR_ERR(twl->regmap);
+ dev_err(&client->dev,
+ "Failed to allocate regmap %d, err: %d\n", i,
+ status);
+ goto fail;
+ }
}
inuse = true;
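
With the conversion above, twl_i2c_read() and twl_i2c_write() keep their prototypes but now go through regmap_bulk_read()/regmap_bulk_write(), so the caller's buffer carries only payload bytes; the old code reserved value[0] as scratch for the register address. That is also why twl_i2c_write_u8() can pass &value directly, and why the twl6030-irq.c hunk later in this series shrinks its mask array from four bytes to three. A minimal sketch of a caller under the new buffer contract (the register name is taken from twl6030-irq.c and assumed in scope; the helper itself is illustrative):

#include <linux/i2c/twl.h>

/* Mask all three interrupt line registers using plain payload bytes. */
static int example_mask_all_int_lines(void)
{
        u8 mask[3] = { 0xff, 0xff, 0xff };

        return twl_i2c_write(TWL_MODULE_PIH, mask, REG_INT_MSK_LINE_A, 3);
}
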
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index 5c11acf9e0fd..e16edca92670 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -184,7 +184,7 @@ static bool twl4030_audio_has_vibra(struct twl4030_audio_data *pdata,
return false;
}
-static int __devinit twl4030_audio_probe(struct platform_device *pdev)
+static int twl4030_audio_probe(struct platform_device *pdev)
{
struct twl4030_audio *audio;
struct twl4030_audio_data *pdata = pdev->dev.platform_data;
@@ -269,7 +269,7 @@ static int __devinit twl4030_audio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit twl4030_audio_remove(struct platform_device *pdev)
+static int twl4030_audio_remove(struct platform_device *pdev)
{
mfd_remove_devices(&pdev->dev);
platform_set_drvdata(pdev, NULL);
@@ -291,7 +291,7 @@ static struct platform_driver twl4030_audio_driver = {
.of_match_table = twl4030_audio_of_match,
},
.probe = twl4030_audio_probe,
- .remove = __devexit_p(twl4030_audio_remove),
+ .remove = twl4030_audio_remove,
};
module_platform_driver(twl4030_audio_driver);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index cdd1173ed4e9..a5f9888aa19c 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -295,8 +295,8 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid)
irqreturn_t ret;
u8 pih_isr;
- ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
- REG_PIH_ISR_P1);
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
if (ret) {
pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
return IRQ_NONE;
@@ -501,7 +501,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
} imr;
/* byte[0] gets overwritten as we write ... */
- imr.word = cpu_to_le32(agent->imr << 8);
+ imr.word = cpu_to_le32(agent->imr);
agent->imr_change_pending = false;
/* write the whole mask ... simpler than subsetting it */
@@ -526,7 +526,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
* any processor on the other IRQ line, EDR registers are
* shared.
*/
- status = twl_i2c_read(sih->module, bytes + 1,
+ status = twl_i2c_read(sih->module, bytes,
sih->edr_offset, sih->bytes_edr);
if (status) {
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -538,7 +538,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
while (edge_change) {
int i = fls(edge_change) - 1;
struct irq_data *idata;
- int byte = 1 + (i >> 2);
+ int byte = i >> 2;
int off = (i & 0x3) * 2;
unsigned int type;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 456ecb5ac4fe..88ff9dc83305 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -173,7 +173,7 @@ static int twl4030battery_temperature(int raw_volt)
volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
/* Getting and calculating the supply current in microamperes */
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
REG_BCICTL2);
if (ret < 0)
return ret;
@@ -196,7 +196,7 @@ static int twl4030battery_current(int raw_volt)
int ret;
u8 val;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
TWL4030_BCI_BCICTL1);
if (ret)
return ret;
@@ -635,7 +635,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
int ret;
u8 regval;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
@@ -646,7 +646,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
else
regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
@@ -668,7 +668,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
u8 regval;
int ret;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_MADC_CTRL1);
if (ret) {
dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
@@ -692,7 +692,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
/*
* Initialize MADC and request for threaded irq
*/
-static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+static int twl4030_madc_probe(struct platform_device *pdev)
{
struct twl4030_madc_data *madc;
struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
@@ -725,7 +725,7 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_current_generator;
- ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
&regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
@@ -733,7 +733,7 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
goto err_i2c;
}
regval |= TWL4030_BCI_MESBAT;
- ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+ ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
regval, TWL4030_BCI_BCICTL1);
if (ret) {
dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
@@ -785,7 +785,7 @@ err_power:
return ret;
}
-static int __devexit twl4030_madc_remove(struct platform_device *pdev)
+static int twl4030_madc_remove(struct platform_device *pdev)
{
struct twl4030_madc_data *madc = platform_get_drvdata(pdev);
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 79ca33dfacca..dd362c1078e1 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -124,21 +124,19 @@ static u8 res_config_addrs[] = {
[RES_MAIN_REF] = 0x94,
};
-static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
+static int twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_MEMORY_ADDRESS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_MEMORY_ADDRESS);
if (err)
goto out;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
- R_MEMORY_DATA);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, byte, R_MEMORY_DATA);
out:
return err;
}
-static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
+static int twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err;
@@ -158,10 +156,10 @@ out:
return err;
}
-static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
+static int twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
- int err;
+ int err = -EINVAL;
for (; len; len--, address++, script++) {
if (len == 1) {
@@ -183,74 +181,66 @@ static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script
return err;
}
-static int __devinit twl4030_config_wakeup3_sequence(u8 address)
+static int twl4030_config_wakeup3_sequence(u8 address)
{
int err;
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 wakeup sequence for P3 config error\n");
return err;
}
-static int __devinit twl4030_config_wakeup12_sequence(u8 address)
+static int twl4030_config_wakeup12_sequence(u8 address)
{
int err = 0;
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
- R_CFG_P1_TRANSITION);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data,
+ R_CFG_P1_TRANSITION);
if (err)
goto out;
}
@@ -262,13 +252,12 @@ out:
return err;
}
-static int __devinit twl4030_config_sleep_sequence(u8 address)
+static int twl4030_config_sleep_sequence(u8 address)
{
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_A2S);
if (err)
pr_err("TWL4030 sleep sequence config error\n");
@@ -276,55 +265,48 @@ static int __devinit twl4030_config_sleep_sequence(u8 address)
return err;
}
-static int __devinit twl4030_config_warmreset_sequence(u8 address)
+static int twl4030_config_warmreset_sequence(u8 address)
{
int err;
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P1_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P2_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS);
if (err)
goto out;
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
- R_P3_SW_EVENTS);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 warmreset seq config error\n");
return err;
}
-static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
+static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
int err;
@@ -341,7 +323,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &grp,
rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
@@ -352,7 +334,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
grp &= ~DEV_GRP_MASK;
grp |= rconfig->devgroup << DEV_GRP_SHIFT;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
@@ -361,7 +343,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
}
/* Set resource types */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
@@ -379,7 +361,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
type |= rconfig->type2 << TYPE2_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
@@ -387,7 +369,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
}
/* Set remap states */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d remap could not be read\n",
@@ -405,7 +387,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
@@ -416,7 +398,7 @@ static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfi
return 0;
}
-static int __devinit load_twl4030_script(struct twl4030_script *tscript,
+static int load_twl4030_script(struct twl4030_script *tscript,
u8 address)
{
int err;
@@ -463,49 +445,47 @@ int twl4030_remove_script(u8 flags)
{
int err = 0;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
if (flags & TWL4030_WRST_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_WARM);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_WARM);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP12_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A12);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A12);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP3_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_S2A3);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_S2A3);
if (err)
return err;
}
if (flags & TWL4030_SLEEP_SCRIPT) {
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
- R_SEQ_ADD_A2S);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+ R_SEQ_ADD_A2S);
if (err)
return err;
}
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
@@ -521,28 +501,26 @@ void twl4030_power_off(void)
{
int err;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
pr_err("TWL4030 Unable to power off\n");
}
-void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
+void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
struct twl4030_resconfig *resconfig;
u8 val, address = twl4030_start_script_address;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG1,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
- TWL4030_PM_MASTER_KEY_CFG2,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
@@ -567,14 +545,14 @@ void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
/* Board has to be wired properly to use this feature */
if (twl4030_scripts->use_poweroff && !pm_power_off) {
/* Default for SEQ_OFFSYNC is set, lets ensure this */
- err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_warning("TWL4030 Unable to read registers\n");
} else if (!(val & SEQ_OFFSYNC)) {
val |= SEQ_OFFSYNC;
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
@@ -586,8 +564,8 @@ void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
relock:
- err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
- TWL4030_PM_MASTER_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index b76902f1e44a..277a8dba42d5 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -355,7 +355,7 @@ int twl6030_init_irq(struct device *dev, int irq_num)
static struct irq_chip twl6030_irq_chip;
int status = 0;
int i;
- u8 mask[4];
+ u8 mask[3];
nr_irqs = TWL6030_NR_IRQS;
@@ -370,9 +370,9 @@ int twl6030_init_irq(struct device *dev, int irq_num)
irq_end = irq_base + nr_irqs;
+ mask[0] = 0xFF;
mask[1] = 0xFF;
mask[2] = 0xFF;
- mask[3] = 0xFF;
/* mask all int lines */
twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
deleted file mode 100644
index 4b42543da228..000000000000
--- a/drivers/mfd/twl6040-irq.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Interrupt controller support for TWL6040
- *
- * Author: Misael Lopez Cruz <misael.lopez@ti.com>
- *
- * Copyright: (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl6040.h>
-
-struct twl6040_irq_data {
- int mask;
- int status;
-};
-
-static struct twl6040_irq_data twl6040_irqs[] = {
- {
- .mask = TWL6040_THMSK,
- .status = TWL6040_THINT,
- },
- {
- .mask = TWL6040_PLUGMSK,
- .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
- },
- {
- .mask = TWL6040_HOOKMSK,
- .status = TWL6040_HOOKINT,
- },
- {
- .mask = TWL6040_HFMSK,
- .status = TWL6040_HFINT,
- },
- {
- .mask = TWL6040_VIBMSK,
- .status = TWL6040_VIBINT,
- },
- {
- .mask = TWL6040_READYMSK,
- .status = TWL6040_READYINT,
- },
-};
-
-static inline
-struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
- int irq)
-{
- return &twl6040_irqs[irq - twl6040->irq_base];
-}
-
-static void twl6040_irq_lock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_sync_unlock(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
- /* write back to hardware any change in irq mask */
- if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
- twl6040->irq_masks_cache = twl6040->irq_masks_cur;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
- twl6040->irq_masks_cur);
- }
-
- mutex_unlock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_enable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur &= ~irq_data->mask;
-}
-
-static void twl6040_irq_disable(struct irq_data *data)
-{
- struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
- struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
- data->irq);
-
- twl6040->irq_masks_cur |= irq_data->mask;
-}
-
-static struct irq_chip twl6040_irq_chip = {
- .name = "twl6040",
- .irq_bus_lock = twl6040_irq_lock,
- .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
- .irq_enable = twl6040_irq_enable,
- .irq_disable = twl6040_irq_disable,
-};
-
-static irqreturn_t twl6040_irq_thread(int irq, void *data)
-{
- struct twl6040 *twl6040 = data;
- u8 intid;
- int i;
-
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* apply masking and report (backwards to handle READYINT first) */
- for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
- if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
- intid &= ~twl6040_irqs[i].status;
- if (intid & twl6040_irqs[i].status)
- handle_nested_irq(twl6040->irq_base + i);
- }
-
- /* ack unmasked irqs */
- twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
-
- return IRQ_HANDLED;
-}
-
-int twl6040_irq_init(struct twl6040 *twl6040)
-{
- struct device_node *node = twl6040->dev->of_node;
- int i, nr_irqs, irq_base, ret;
- u8 val;
-
- mutex_init(&twl6040->irq_mutex);
-
- /* mask the individual interrupt sources */
- twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
- twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
- twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
-
- nr_irqs = ARRAY_SIZE(twl6040_irqs);
-
- irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
- dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
- return irq_base;
- }
- twl6040->irq_base = irq_base;
-
- irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
- &irq_domain_simple_ops, NULL);
-
- /* Register them with genirq */
- for (i = irq_base; i < irq_base + nr_irqs; i++) {
- irq_set_chip_data(i, twl6040);
- irq_set_chip_and_handler(i, &twl6040_irq_chip,
- handle_level_irq);
- irq_set_nested_thread(i, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(i, IRQF_VALID);
-#else
- irq_set_noprobe(i);
-#endif
- }
-
- ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
- IRQF_ONESHOT, "twl6040", twl6040);
- if (ret) {
- dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
- twl6040->irq, ret);
- return ret;
- }
-
- /* reset interrupts */
- val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
- /* interrupts cleared on write */
- twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
-
- return 0;
-}
-EXPORT_SYMBOL(twl6040_irq_init);
-
-void twl6040_irq_exit(struct twl6040 *twl6040)
-{
- free_irq(twl6040->irq, twl6040);
-}
-EXPORT_SYMBOL(twl6040_irq_exit);
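
The dedicated interrupt controller removed above is superseded by a regmap-irq chip, defined in the twl6040.c diff that follows. Instead of computing twl6040->irq_base + offset, consumers now look up a Linux virtual IRQ through the regmap IRQ data, as the core itself does for READYINT further down. A minimal sketch of that lookup for a hypothetical child, assuming the TWL6040_IRQ_VIB index from include/linux/mfd/twl6040.h and the irq_data member this series adds to struct twl6040:

#include <linux/interrupt.h>
#include <linux/mfd/twl6040.h>
#include <linux/regmap.h>

static irqreturn_t example_vib_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_request_vib_irq(struct twl6040 *twl6040, void *ctx)
{
        /* translate the hardware IRQ index into a Linux virtual IRQ */
        int virq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);

        if (virq < 0)
                return virq;

        return request_threaded_irq(virq, NULL, example_vib_irq,
                                    IRQF_ONESHOT, "twl6040_vib", ctx);
}
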
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040.c
index 3f2a1cf02fc0..f361bf38a0aa 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040.c
@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
#include <linux/regulator/consumer.h>
@@ -104,7 +103,7 @@ int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
EXPORT_SYMBOL(twl6040_clear_bits);
/* twl6040 codec manual power-up sequence */
-static int twl6040_power_up(struct twl6040 *twl6040)
+static int twl6040_power_up_manual(struct twl6040 *twl6040)
{
u8 ldoctl, ncpctl, lppllctl;
int ret;
@@ -158,11 +157,12 @@ ncp_err:
ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ dev_err(twl6040->dev, "manual power-up failed\n");
return ret;
}
/* twl6040 manual power-down sequence */
-static void twl6040_power_down(struct twl6040 *twl6040)
+static void twl6040_power_down_manual(struct twl6040 *twl6040)
{
u8 ncpctl, ldoctl, lppllctl;
@@ -192,45 +192,48 @@ static void twl6040_power_down(struct twl6040 *twl6040)
twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
}
-static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+static irqreturn_t twl6040_readyint_handler(int irq, void *data)
{
struct twl6040 *twl6040 = data;
- u8 intid, status;
- intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ complete(&twl6040->ready);
- if (intid & TWL6040_READYINT)
- complete(&twl6040->ready);
+ return IRQ_HANDLED;
+}
- if (intid & TWL6040_THINT) {
- status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
- if (status & TWL6040_TSHUTDET) {
- dev_warn(twl6040->dev,
- "Thermal shutdown, powering-off");
- twl6040_power(twl6040, 0);
- } else {
- dev_warn(twl6040->dev,
- "Leaving thermal shutdown, powering-on");
- twl6040_power(twl6040, 1);
- }
+static irqreturn_t twl6040_thint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 status;
+
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_TSHUTDET) {
+ dev_warn(twl6040->dev, "Thermal shutdown, powering-off");
+ twl6040_power(twl6040, 0);
+ } else {
+ dev_warn(twl6040->dev, "Leaving thermal shutdown, powering-on");
+ twl6040_power(twl6040, 1);
}
return IRQ_HANDLED;
}
-static int twl6040_power_up_completion(struct twl6040 *twl6040,
- int naudint)
+static int twl6040_power_up_automatic(struct twl6040 *twl6040)
{
int time_left;
- u8 intid;
+
+ gpio_set_value(twl6040->audpwron, 1);
time_left = wait_for_completion_timeout(&twl6040->ready,
msecs_to_jiffies(144));
if (!time_left) {
+ u8 intid;
+
+ dev_warn(twl6040->dev, "timeout waiting for READYINT\n");
intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
if (!(intid & TWL6040_READYINT)) {
- dev_err(twl6040->dev,
- "timeout waiting for READYINT\n");
+ dev_err(twl6040->dev, "automatic power-up failed\n");
+ gpio_set_value(twl6040->audpwron, 0);
return -ETIMEDOUT;
}
}
@@ -240,8 +243,6 @@ static int twl6040_power_up_completion(struct twl6040 *twl6040,
int twl6040_power(struct twl6040 *twl6040, int on)
{
- int audpwron = twl6040->audpwron;
- int naudint = twl6040->irq;
int ret = 0;
mutex_lock(&twl6040->mutex);
@@ -251,23 +252,17 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (twl6040->power_count++)
goto out;
- if (gpio_is_valid(audpwron)) {
- /* use AUDPWRON line */
- gpio_set_value(audpwron, 1);
- /* wait for power-up completion */
- ret = twl6040_power_up_completion(twl6040, naudint);
+ if (gpio_is_valid(twl6040->audpwron)) {
+ /* use automatic power-up sequence */
+ ret = twl6040_power_up_automatic(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "automatic power-down failed\n");
twl6040->power_count = 0;
goto out;
}
} else {
/* use manual power-up sequence */
- ret = twl6040_power_up(twl6040);
+ ret = twl6040_power_up_manual(twl6040);
if (ret) {
- dev_err(twl6040->dev,
- "manual power-up failed\n");
twl6040->power_count = 0;
goto out;
}
@@ -288,15 +283,15 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (--twl6040->power_count)
goto out;
- if (gpio_is_valid(audpwron)) {
+ if (gpio_is_valid(twl6040->audpwron)) {
/* use AUDPWRON line */
- gpio_set_value(audpwron, 0);
+ gpio_set_value(twl6040->audpwron, 0);
/* power-down sequence latency */
usleep_range(500, 700);
} else {
/* use manual power-down sequence */
- twl6040_power_down(twl6040);
+ twl6040_power_down_manual(twl6040);
}
twl6040->sysclk = 0;
twl6040->mclk = 0;
@@ -503,8 +498,27 @@ static struct regmap_config twl6040_regmap_config = {
.readable_reg = twl6040_readable_reg,
};
-static int __devinit twl6040_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct regmap_irq twl6040_irqs[] = {
+ { .reg_offset = 0, .mask = TWL6040_THINT, },
+ { .reg_offset = 0, .mask = TWL6040_PLUGINT | TWL6040_UNPLUGINT, },
+ { .reg_offset = 0, .mask = TWL6040_HOOKINT, },
+ { .reg_offset = 0, .mask = TWL6040_HFINT, },
+ { .reg_offset = 0, .mask = TWL6040_VIBINT, },
+ { .reg_offset = 0, .mask = TWL6040_READYINT, },
+};
+
+static struct regmap_irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irqs = twl6040_irqs,
+ .num_irqs = ARRAY_SIZE(twl6040_irqs),
+
+ .num_regs = 1,
+ .status_base = TWL6040_REG_INTID,
+ .mask_base = TWL6040_REG_INTMR,
+};
+
+static int twl6040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct twl6040_platform_data *pdata = client->dev.platform_data;
struct device_node *node = client->dev.of_node;
@@ -578,18 +592,31 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto gpio_err;
}
- /* codec interrupt */
- ret = twl6040_irq_init(twl6040);
- if (ret)
+ ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
+ IRQF_ONESHOT, 0, &twl6040_irq_chip,
+ &twl6040->irq_data);
+ if (ret < 0)
goto irq_init_err;
- ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
- NULL, twl6040_naudint_handler, IRQF_ONESHOT,
+ twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_READY);
+ twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
+ TWL6040_IRQ_TH);
+
+ ret = request_threaded_irq(twl6040->irq_ready, NULL,
+ twl6040_readyint_handler, IRQF_ONESHOT,
"twl6040_irq_ready", twl6040);
if (ret) {
- dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
- ret);
- goto irq_err;
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
+ goto readyirq_err;
+ }
+
+ ret = request_threaded_irq(twl6040->irq_th, NULL,
+ twl6040_thint_handler, IRQF_ONESHOT,
+ "twl6040_irq_th", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
+ goto thirq_err;
}
/* dual-access registers controlled by I2C only */
@@ -601,7 +628,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
* The ASoC codec can work without pdata, pass the platform_data only if
* it has been provided.
*/
- irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
cell = &twl6040->cells[children];
cell->name = "twl6040-codec";
twl6040_codec_rsrc[0].start = irq;
@@ -615,7 +642,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
children++;
if (twl6040_has_vibra(pdata, node)) {
- irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -654,9 +681,11 @@ static int __devinit twl6040_probe(struct i2c_client *client,
return 0;
mfd_err:
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
-irq_err:
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+thirq_err:
+ free_irq(twl6040->irq_ready, twl6040);
+readyirq_err:
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
@@ -670,7 +699,7 @@ err:
return ret;
}
-static int __devexit twl6040_remove(struct i2c_client *client)
+static int twl6040_remove(struct i2c_client *client)
{
struct twl6040 *twl6040 = i2c_get_clientdata(client);
@@ -680,8 +709,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
- free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
- twl6040_irq_exit(twl6040);
+ free_irq(twl6040->irq_ready, twl6040);
+ free_irq(twl6040->irq_th, twl6040);
+ regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
@@ -705,7 +735,7 @@ static struct i2c_driver twl6040_driver = {
.owner = THIS_MODULE,
},
.probe = twl6040_probe,
- .remove = __devexit_p(twl6040_remove),
+ .remove = twl6040_remove,
.id_table = twl6040_i2c_id,
};
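
For reference, a minimal sketch (not part of the patch) of how a child cell such as twl6040-vibra could consume the virtual IRQ handed over in the IORESOURCE_IRQ resource built above; the example_* names are placeholders and the handler body is only a stub.

/* Illustrative only: child side of the regmap-irq conversion above. */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_vib_irq_thread(int irq, void *data)
{
	/* handle the vibrator event reported by the twl6040 core */
	return IRQ_HANDLED;
}

static int example_child_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* regmap-irq dispatches from a threaded handler, so the child
	 * only needs a thread function plus IRQF_ONESHOT. */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 example_vib_irq_thread, IRQF_ONESHOT,
					 "example-twl6040-child", pdev);
}
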
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
new file mode 100644
index 000000000000..3c1723aa6225
--- /dev/null
+++ b/drivers/mfd/vexpress-config.c
@@ -0,0 +1,281 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#define pr_fmt(fmt) "vexpress-config: " fmt
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vexpress.h>
+
+
+#define VEXPRESS_CONFIG_MAX_BRIDGES 2
+
+struct vexpress_config_bridge {
+ struct device_node *node;
+ struct vexpress_config_bridge_info *info;
+ struct list_head transactions;
+ spinlock_t transactions_lock;
+} vexpress_config_bridges[VEXPRESS_CONFIG_MAX_BRIDGES];
+
+static DECLARE_BITMAP(vexpress_config_bridges_map,
+ ARRAY_SIZE(vexpress_config_bridges));
+static DEFINE_MUTEX(vexpress_config_bridges_mutex);
+
+struct vexpress_config_bridge *vexpress_config_bridge_register(
+ struct device_node *node,
+ struct vexpress_config_bridge_info *info)
+{
+ struct vexpress_config_bridge *bridge;
+ int i;
+
+ pr_debug("Registering bridge '%s'\n", info->name);
+
+ mutex_lock(&vexpress_config_bridges_mutex);
+ i = find_first_zero_bit(vexpress_config_bridges_map,
+ ARRAY_SIZE(vexpress_config_bridges));
+ if (i >= ARRAY_SIZE(vexpress_config_bridges)) {
+ pr_err("Can't register more bridges!\n");
+ mutex_unlock(&vexpress_config_bridges_mutex);
+ return NULL;
+ }
+ __set_bit(i, vexpress_config_bridges_map);
+ bridge = &vexpress_config_bridges[i];
+
+ bridge->node = node;
+ bridge->info = info;
+ INIT_LIST_HEAD(&bridge->transactions);
+ spin_lock_init(&bridge->transactions_lock);
+
+ mutex_unlock(&vexpress_config_bridges_mutex);
+
+ return bridge;
+}
+EXPORT_SYMBOL(vexpress_config_bridge_register);
+
+void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
+{
+ struct vexpress_config_bridge __bridge = *bridge;
+ int i;
+
+ mutex_lock(&vexpress_config_bridges_mutex);
+ for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++)
+ if (&vexpress_config_bridges[i] == bridge)
+ __clear_bit(i, vexpress_config_bridges_map);
+ mutex_unlock(&vexpress_config_bridges_mutex);
+
+ WARN_ON(!list_empty(&__bridge.transactions));
+ while (!list_empty(&__bridge.transactions))
+ cpu_relax();
+}
+EXPORT_SYMBOL(vexpress_config_bridge_unregister);
+
+
+struct vexpress_config_func {
+ struct vexpress_config_bridge *bridge;
+ void *func;
+};
+
+struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
+ struct device_node *node)
+{
+ struct device_node *bridge_node;
+ struct vexpress_config_func *func;
+ int i;
+
+ if (WARN_ON(dev && node && dev->of_node != node))
+ return NULL;
+ if (dev && !node)
+ node = dev->of_node;
+
+ func = kzalloc(sizeof(*func), GFP_KERNEL);
+ if (!func)
+ return NULL;
+
+ bridge_node = of_node_get(node);
+ while (bridge_node) {
+ const __be32 *prop = of_get_property(bridge_node,
+ "arm,vexpress,config-bridge", NULL);
+
+ if (prop) {
+ bridge_node = of_find_node_by_phandle(
+ be32_to_cpup(prop));
+ break;
+ }
+
+ bridge_node = of_get_next_parent(bridge_node);
+ }
+
+ mutex_lock(&vexpress_config_bridges_mutex);
+ for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) {
+ struct vexpress_config_bridge *bridge =
+ &vexpress_config_bridges[i];
+
+ if (test_bit(i, vexpress_config_bridges_map) &&
+ bridge->node == bridge_node) {
+ func->bridge = bridge;
+ func->func = bridge->info->func_get(dev, node);
+ break;
+ }
+ }
+ mutex_unlock(&vexpress_config_bridges_mutex);
+
+ if (!func->func) {
+ of_node_put(node);
+ kfree(func);
+ return NULL;
+ }
+
+ return func;
+}
+EXPORT_SYMBOL(__vexpress_config_func_get);
+
+void vexpress_config_func_put(struct vexpress_config_func *func)
+{
+ func->bridge->info->func_put(func->func);
+ of_node_put(func->bridge->node);
+ kfree(func);
+}
+EXPORT_SYMBOL(vexpress_config_func_put);
+
+struct vexpress_config_trans {
+ struct vexpress_config_func *func;
+ int offset;
+ bool write;
+ u32 *data;
+ int status;
+ struct completion completion;
+ struct list_head list;
+};
+
+static void vexpress_config_dump_trans(const char *what,
+ struct vexpress_config_trans *trans)
+{
+ pr_debug("%s %s trans %p func 0x%p offset %d data 0x%x status %d\n",
+ what, trans->write ? "write" : "read", trans,
+ trans->func->func, trans->offset,
+ trans->data ? *trans->data : 0, trans->status);
+}
+
+static int vexpress_config_schedule(struct vexpress_config_trans *trans)
+{
+ int status;
+ struct vexpress_config_bridge *bridge = trans->func->bridge;
+ unsigned long flags;
+
+ init_completion(&trans->completion);
+ trans->status = -EFAULT;
+
+ spin_lock_irqsave(&bridge->transactions_lock, flags);
+
+ vexpress_config_dump_trans("Executing", trans);
+
+ if (list_empty(&bridge->transactions))
+ status = bridge->info->func_exec(trans->func->func,
+ trans->offset, trans->write, trans->data);
+ else
+ status = VEXPRESS_CONFIG_STATUS_WAIT;
+
+ switch (status) {
+ case VEXPRESS_CONFIG_STATUS_DONE:
+ vexpress_config_dump_trans("Finished", trans);
+ trans->status = status;
+ break;
+ case VEXPRESS_CONFIG_STATUS_WAIT:
+ list_add_tail(&trans->list, &bridge->transactions);
+ break;
+ }
+
+ spin_unlock_irqrestore(&bridge->transactions_lock, flags);
+
+ return status;
+}
+
+void vexpress_config_complete(struct vexpress_config_bridge *bridge,
+ int status)
+{
+ struct vexpress_config_trans *trans;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bridge->transactions_lock, flags);
+
+ trans = list_first_entry(&bridge->transactions,
+ struct vexpress_config_trans, list);
+ vexpress_config_dump_trans("Completed", trans);
+
+ trans->status = status;
+ list_del(&trans->list);
+
+ if (!list_empty(&bridge->transactions)) {
+ vexpress_config_dump_trans("Pending", trans);
+
+ bridge->info->func_exec(trans->func->func, trans->offset,
+ trans->write, trans->data);
+ }
+ spin_unlock_irqrestore(&bridge->transactions_lock, flags);
+
+ complete(&trans->completion);
+}
+EXPORT_SYMBOL(vexpress_config_complete);
+
+int vexpress_config_wait(struct vexpress_config_trans *trans)
+{
+ wait_for_completion(&trans->completion);
+
+ return trans->status;
+}
+EXPORT_SYMBOL(vexpress_config_wait);
+
+int vexpress_config_read(struct vexpress_config_func *func, int offset,
+ u32 *data)
+{
+ struct vexpress_config_trans trans = {
+ .func = func,
+ .offset = offset,
+ .write = false,
+ .data = data,
+ .status = 0,
+ };
+ int status = vexpress_config_schedule(&trans);
+
+ if (status == VEXPRESS_CONFIG_STATUS_WAIT)
+ status = vexpress_config_wait(&trans);
+
+ return status;
+}
+EXPORT_SYMBOL(vexpress_config_read);
+
+int vexpress_config_write(struct vexpress_config_func *func, int offset,
+ u32 data)
+{
+ struct vexpress_config_trans trans = {
+ .func = func,
+ .offset = offset,
+ .write = true,
+ .data = &data,
+ .status = 0,
+ };
+ int status = vexpress_config_schedule(&trans);
+
+ if (status == VEXPRESS_CONFIG_STATUS_WAIT)
+ status = vexpress_config_wait(&trans);
+
+ return status;
+}
+EXPORT_SYMBOL(vexpress_config_write);
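
For reference, a minimal sketch (not part of the patch) of a consumer driving the config function API exported above; the example_* names and the offset value are assumptions, and errors are assumed to be reported as negative codes, matching the scheduling code above.

/* Illustrative only: resolve a config function and read one register. */
#include <linux/device.h>
#include <linux/vexpress.h>

static int example_config_read_one(struct device *dev)
{
	struct vexpress_config_func *func;
	u32 value;
	int ret;

	/* walks the OF parents for the arm,vexpress,config-bridge phandle */
	func = __vexpress_config_func_get(dev, NULL);
	if (!func)
		return -ENODEV;

	ret = vexpress_config_read(func, 0, &value);
	if (ret < 0)
		dev_err(dev, "config read failed: %d\n", ret);
	else
		dev_info(dev, "config offset 0: 0x%08x\n", value);

	vexpress_config_func_put(func);

	return ret < 0 ? ret : 0;
}
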
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
new file mode 100644
index 000000000000..77048b18439e
--- /dev/null
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -0,0 +1,483 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/timer.h>
+#include <linux/vexpress.h>
+
+#define SYS_ID 0x000
+#define SYS_SW 0x004
+#define SYS_LED 0x008
+#define SYS_100HZ 0x024
+#define SYS_FLAGS 0x030
+#define SYS_FLAGSSET 0x030
+#define SYS_FLAGSCLR 0x034
+#define SYS_NVFLAGS 0x038
+#define SYS_NVFLAGSSET 0x038
+#define SYS_NVFLAGSCLR 0x03c
+#define SYS_MCI 0x048
+#define SYS_FLASH 0x04c
+#define SYS_CFGSW 0x058
+#define SYS_24MHZ 0x05c
+#define SYS_MISC 0x060
+#define SYS_DMA 0x064
+#define SYS_PROCID0 0x084
+#define SYS_PROCID1 0x088
+#define SYS_CFGDATA 0x0a0
+#define SYS_CFGCTRL 0x0a4
+#define SYS_CFGSTAT 0x0a8
+
+#define SYS_HBI_MASK 0xfff
+#define SYS_ID_HBI_SHIFT 16
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_MCI_CARDIN (1 << 0)
+#define SYS_MCI_WPROT (1 << 1)
+
+#define SYS_FLASH_WPn (1 << 0)
+
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+
+static void __iomem *vexpress_sysreg_base;
+static struct device *vexpress_sysreg_dev;
+static int vexpress_master_site;
+
+
+void vexpress_flags_set(u32 data)
+{
+ writel(~0, vexpress_sysreg_base + SYS_FLAGSCLR);
+ writel(data, vexpress_sysreg_base + SYS_FLAGSSET);
+}
+
+u32 vexpress_get_procid(int site)
+{
+ if (site == VEXPRESS_SITE_MASTER)
+ site = vexpress_master_site;
+
+ return readl(vexpress_sysreg_base + (site == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+}
+
+u32 vexpress_get_hbi(int site)
+{
+ u32 id;
+
+ switch (site) {
+ case VEXPRESS_SITE_MB:
+ id = readl(vexpress_sysreg_base + SYS_ID);
+ return (id >> SYS_ID_HBI_SHIFT) & SYS_HBI_MASK;
+ case VEXPRESS_SITE_MASTER:
+ case VEXPRESS_SITE_DB1:
+ case VEXPRESS_SITE_DB2:
+ id = vexpress_get_procid(site);
+ return (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
+ }
+
+ return ~0;
+}
+
+void __iomem *vexpress_get_24mhz_clock_base(void)
+{
+ return vexpress_sysreg_base + SYS_24MHZ;
+}
+
+
+static void vexpress_sysreg_find_prop(struct device_node *node,
+ const char *name, u32 *val)
+{
+ of_node_get(node);
+ while (node) {
+ if (of_property_read_u32(node, name, val) == 0) {
+ of_node_put(node);
+ return;
+ }
+ node = of_get_next_parent(node);
+ }
+}
+
+unsigned __vexpress_get_site(struct device *dev, struct device_node *node)
+{
+ u32 site = 0;
+
+ WARN_ON(dev && node && dev->of_node != node);
+ if (dev && !node)
+ node = dev->of_node;
+
+ if (node) {
+ vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site);
+ } else if (dev && dev->bus == &platform_bus_type) {
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (pdev->num_resources == 1 &&
+ pdev->resource[0].flags == IORESOURCE_BUS)
+ site = pdev->resource[0].start;
+ } else if (dev && strncmp(dev_name(dev), "ct:", 3) == 0) {
+ site = VEXPRESS_SITE_MASTER;
+ }
+
+ if (site == VEXPRESS_SITE_MASTER)
+ site = vexpress_master_site;
+
+ return site;
+}
+
+
+struct vexpress_sysreg_config_func {
+ u32 template;
+ u32 device;
+};
+
+static struct vexpress_config_bridge *vexpress_sysreg_config_bridge;
+static struct timer_list vexpress_sysreg_config_timer;
+static u32 *vexpress_sysreg_config_data;
+static int vexpress_sysreg_config_tries;
+
+static void *vexpress_sysreg_config_func_get(struct device *dev,
+ struct device_node *node)
+{
+ struct vexpress_sysreg_config_func *config_func;
+ u32 site;
+ u32 position = 0;
+ u32 dcc = 0;
+ u32 func_device[2];
+ int err = -EFAULT;
+
+ if (node) {
+ of_node_get(node);
+ vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site);
+ vexpress_sysreg_find_prop(node, "arm,vexpress,position",
+ &position);
+ vexpress_sysreg_find_prop(node, "arm,vexpress,dcc", &dcc);
+ err = of_property_read_u32_array(node,
+ "arm,vexpress-sysreg,func", func_device,
+ ARRAY_SIZE(func_device));
+ of_node_put(node);
+ } else if (dev && dev->bus == &platform_bus_type) {
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (pdev->num_resources == 1 &&
+ pdev->resource[0].flags == IORESOURCE_BUS) {
+ site = pdev->resource[0].start;
+ func_device[0] = pdev->resource[0].end;
+ func_device[1] = pdev->id;
+ err = 0;
+ }
+ }
+ if (err)
+ return NULL;
+
+ config_func = kzalloc(sizeof(*config_func), GFP_KERNEL);
+ if (!config_func)
+ return NULL;
+
+ config_func->template = SYS_CFGCTRL_DCC(dcc);
+ config_func->template |= SYS_CFGCTRL_FUNC(func_device[0]);
+ config_func->template |= SYS_CFGCTRL_SITE(site == VEXPRESS_SITE_MASTER ?
+ vexpress_master_site : site);
+ config_func->template |= SYS_CFGCTRL_POSITION(position);
+ config_func->device |= func_device[1];
+
+ dev_dbg(vexpress_sysreg_dev, "func 0x%p = 0x%x, %d\n", config_func,
+ config_func->template, config_func->device);
+
+ return config_func;
+}
+
+static void vexpress_sysreg_config_func_put(void *func)
+{
+ kfree(func);
+}
+
+static int vexpress_sysreg_config_func_exec(void *func, int offset,
+ bool write, u32 *data)
+{
+ int status;
+ struct vexpress_sysreg_config_func *config_func = func;
+ u32 command;
+
+ if (WARN_ON(!vexpress_sysreg_base))
+ return -ENOENT;
+
+ command = readl(vexpress_sysreg_base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+ command |= config_func->template;
+ command |= SYS_CFGCTRL_DEVICE(config_func->device + offset);
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(vexpress_sysreg_dev, "command %x, data %x\n",
+ command, *data);
+ writel(*data, vexpress_sysreg_base + SYS_CFGDATA);
+ writel(0, vexpress_sysreg_base + SYS_CFGSTAT);
+ writel(command, vexpress_sysreg_base + SYS_CFGCTRL);
+ mb();
+
+ if (vexpress_sysreg_dev) {
+ /* Schedule completion check */
+ if (!write)
+ vexpress_sysreg_config_data = data;
+ vexpress_sysreg_config_tries = 100;
+ mod_timer(&vexpress_sysreg_config_timer,
+ jiffies + usecs_to_jiffies(100));
+ status = VEXPRESS_CONFIG_STATUS_WAIT;
+ } else {
+ /* Early execution, no timer available, have to spin */
+ u32 cfgstat;
+
+ do {
+ cpu_relax();
+ cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT);
+ } while (!cfgstat);
+
+ if (!write && (cfgstat & SYS_CFGSTAT_COMPLETE))
+ *data = readl(vexpress_sysreg_base + SYS_CFGDATA);
+ status = VEXPRESS_CONFIG_STATUS_DONE;
+
+ if (cfgstat & SYS_CFGSTAT_ERR)
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+struct vexpress_config_bridge_info vexpress_sysreg_config_bridge_info = {
+ .name = "vexpress-sysreg",
+ .func_get = vexpress_sysreg_config_func_get,
+ .func_put = vexpress_sysreg_config_func_put,
+ .func_exec = vexpress_sysreg_config_func_exec,
+};
+
+static void vexpress_sysreg_config_complete(unsigned long data)
+{
+ int status = VEXPRESS_CONFIG_STATUS_DONE;
+ u32 cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT);
+
+ if (cfgstat & SYS_CFGSTAT_ERR)
+ status = -EINVAL;
+ if (!vexpress_sysreg_config_tries--)
+ status = -ETIMEDOUT;
+
+ if (status < 0) {
+ dev_err(vexpress_sysreg_dev, "error %d\n", status);
+ } else if (!(cfgstat & SYS_CFGSTAT_COMPLETE)) {
+ mod_timer(&vexpress_sysreg_config_timer,
+ jiffies + usecs_to_jiffies(50));
+ return;
+ }
+
+ if (vexpress_sysreg_config_data) {
+ *vexpress_sysreg_config_data = readl(vexpress_sysreg_base +
+ SYS_CFGDATA);
+ dev_dbg(vexpress_sysreg_dev, "read data %x\n",
+ *vexpress_sysreg_config_data);
+ vexpress_sysreg_config_data = NULL;
+ }
+
+ vexpress_config_complete(vexpress_sysreg_config_bridge, status);
+}
+
+
+void __init vexpress_sysreg_setup(struct device_node *node)
+{
+ if (WARN_ON(!vexpress_sysreg_base))
+ return;
+
+ if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
+ vexpress_master_site = VEXPRESS_SITE_DB2;
+ else
+ vexpress_master_site = VEXPRESS_SITE_DB1;
+
+ vexpress_sysreg_config_bridge = vexpress_config_bridge_register(
+ node, &vexpress_sysreg_config_bridge_info);
+ WARN_ON(!vexpress_sysreg_config_bridge);
+}
+
+void __init vexpress_sysreg_early_init(void __iomem *base)
+{
+ vexpress_sysreg_base = base;
+ vexpress_sysreg_setup(NULL);
+}
+
+void __init vexpress_sysreg_of_early_init(void)
+{
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-sysreg");
+
+ if (node) {
+ vexpress_sysreg_base = of_iomap(node, 0);
+ vexpress_sysreg_setup(node);
+ } else {
+ pr_info("vexpress-sysreg: No Device Tree node found.\n");
+ }
+}
+
+
+static struct vexpress_sysreg_gpio {
+ unsigned long reg;
+ u32 value;
+} vexpress_sysreg_gpios[] = {
+ [VEXPRESS_GPIO_MMC_CARDIN] = {
+ .reg = SYS_MCI,
+ .value = SYS_MCI_CARDIN,
+ },
+ [VEXPRESS_GPIO_MMC_WPROT] = {
+ .reg = SYS_MCI,
+ .value = SYS_MCI_WPROT,
+ },
+ [VEXPRESS_GPIO_FLASH_WPn] = {
+ .reg = SYS_FLASH,
+ .value = SYS_FLASH_WPn,
+ },
+};
+
+static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return 0;
+}
+
+static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ return 0;
+}
+
+static int vexpress_sysreg_gpio_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset];
+ u32 reg_value = readl(vexpress_sysreg_base + gpio->reg);
+
+ return !!(reg_value & gpio->value);
+}
+
+static void vexpress_sysreg_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset];
+ u32 reg_value = readl(vexpress_sysreg_base + gpio->reg);
+
+ if (value)
+ reg_value |= gpio->value;
+ else
+ reg_value &= ~gpio->value;
+
+ writel(reg_value, vexpress_sysreg_base + gpio->reg);
+}
+
+static struct gpio_chip vexpress_sysreg_gpio_chip = {
+ .label = "vexpress-sysreg",
+ .direction_input = vexpress_sysreg_gpio_direction_input,
+ .direction_output = vexpress_sysreg_gpio_direction_output,
+ .get = vexpress_sysreg_gpio_get,
+ .set = vexpress_sysreg_gpio_set,
+ .ngpio = ARRAY_SIZE(vexpress_sysreg_gpios),
+ .base = 0,
+};
+
+
+static ssize_t vexpress_sysreg_sys_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%08x\n", readl(vexpress_sysreg_base + SYS_ID));
+}
+
+DEVICE_ATTR(sys_id, S_IRUGO, vexpress_sysreg_sys_id_show, NULL);
+
+static int vexpress_sysreg_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 0);
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "Failed to request memory region!\n");
+ return -EBUSY;
+ }
+
+ if (!vexpress_sysreg_base) {
+ vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ vexpress_sysreg_setup(pdev->dev.of_node);
+ }
+
+ if (!vexpress_sysreg_base) {
+ dev_err(&pdev->dev, "Failed to obtain base address!\n");
+ return -EFAULT;
+ }
+
+ setup_timer(&vexpress_sysreg_config_timer,
+ vexpress_sysreg_config_complete, 0);
+
+ vexpress_sysreg_gpio_chip.dev = &pdev->dev;
+ err = gpiochip_add(&vexpress_sysreg_gpio_chip);
+ if (err) {
+ vexpress_config_bridge_unregister(
+ vexpress_sysreg_config_bridge);
+ dev_err(&pdev->dev, "Failed to register GPIO chip! (%d)\n",
+ err);
+ return err;
+ }
+
+ vexpress_sysreg_dev = &pdev->dev;
+
+ device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id);
+
+ return 0;
+}
+
+static const struct of_device_id vexpress_sysreg_match[] = {
+ { .compatible = "arm,vexpress-sysreg", },
+ {},
+};
+
+static struct platform_driver vexpress_sysreg_driver = {
+ .driver = {
+ .name = "vexpress-sysreg",
+ .of_match_table = vexpress_sysreg_match,
+ },
+ .probe = vexpress_sysreg_probe,
+};
+
+static int __init vexpress_sysreg_init(void)
+{
+ return platform_driver_register(&vexpress_sysreg_driver);
+}
+core_initcall(vexpress_sysreg_init);
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
new file mode 100644
index 000000000000..af2a6703f34f
--- /dev/null
+++ b/drivers/mfd/viperboard.c
@@ -0,0 +1,137 @@
+/*
+ * Nano River Technologies viperboard driver
+ *
+ * This is the core driver for the viperboard. Cell drivers are
+ * available for I2C, ADC and both GPIO blocks. SPI is not yet
+ * supported. The drivers do not cover every feature the board
+ * exposes; see the viperboard user manual for details.
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/viperboard.h>
+
+#include <linux/usb.h>
+
+
+static const struct usb_device_id vprbrd_table[] = {
+ { USB_DEVICE(0x2058, 0x1005) }, /* Nano River Technologies */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, vprbrd_table);
+
+static struct mfd_cell vprbrd_devs[] = {
+ {
+ .name = "viperboard-gpio",
+ },
+ {
+ .name = "viperboard-i2c",
+ },
+ {
+ .name = "viperboard-adc",
+ },
+};
+
+static int vprbrd_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct vprbrd *vb;
+
+ u16 version = 0;
+ int pipe, ret;
+
+ /* allocate memory for our device state and initialize it */
+ vb = kzalloc(sizeof(*vb), GFP_KERNEL);
+ if (vb == NULL) {
+ dev_err(&interface->dev, "Out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&vb->lock);
+
+ vb->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, vb);
+ dev_set_drvdata(&vb->pdev.dev, vb);
+
+ /* get version information, major first, minor then */
+ pipe = usb_rcvctrlpipe(vb->usb_dev, 0);
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MAJOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1)
+ version = vb->buf[0];
+
+ ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MINOR,
+ VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret == 1) {
+ version <<= 8;
+ version = version | vb->buf[0];
+ }
+
+ dev_info(&interface->dev,
+ "version %x.%02x found at bus %03d address %03d\n",
+ version >> 8, version & 0xff,
+ vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+ ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
+ ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+ if (ret != 0) {
+ dev_err(&interface->dev, "Failed to add mfd devices to core.\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (vb) {
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+ }
+
+ return ret;
+}
+
+static void vprbrd_disconnect(struct usb_interface *interface)
+{
+ struct vprbrd *vb = usb_get_intfdata(interface);
+
+ mfd_remove_devices(&interface->dev);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(vb->usb_dev);
+ kfree(vb);
+
+ dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver vprbrd_driver = {
+ .name = "viperboard",
+ .probe = vprbrd_probe,
+ .disconnect = vprbrd_disconnect,
+ .id_table = vprbrd_table,
+};
+
+module_usb_driver(vprbrd_driver);
+
+MODULE_DESCRIPTION("Nano River Technologies viperboard mfd core driver");
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_LICENSE("GPL");
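
For reference, a minimal sketch (not part of the patch) of how one of the cells registered above can reach the shared vprbrd state: mfd_add_devices() parents the cells to &interface->dev, where vprbrd_probe() stored the vprbrd pointer with usb_set_intfdata(), so a cell can fetch it through its parent device; the example_* name is a placeholder.

/* Illustrative only: cell-side access to the shared viperboard state. */
#include <linux/mfd/viperboard.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

static int example_cell_probe(struct platform_device *pdev)
{
	struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);

	mutex_lock(&vb->lock);
	/* issue usb_control_msg()/usb_bulk_msg() transfers on vb->usb_dev */
	mutex_unlock(&vb->lock);

	return 0;
}
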
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b9a636d44c7f..757ecc63338c 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -72,7 +72,7 @@ static struct mfd_cell vx855_cells[] = {
},
};
-static __devinit int vx855_probe(struct pci_dev *pdev,
+static int vx855_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
@@ -112,7 +112,7 @@ out:
return ret;
}
-static void __devexit vx855_remove(struct pci_dev *pdev)
+static void vx855_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
pci_disable_device(pdev);
@@ -128,7 +128,7 @@ static struct pci_driver vx855_pci_driver = {
.name = "vx855",
.id_table = vx855_pci_tbl,
.probe = vx855_probe,
- .remove = __devexit_p(vx855_remove),
+ .remove = vx855_remove,
};
module_pci_driver(vx855_pci_driver);
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index 86e0e4309fc2..edbe6c1b755a 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -182,7 +182,7 @@ static int wl1273_core_remove(struct i2c_client *client)
return 0;
}
-static int __devinit wl1273_core_probe(struct i2c_client *client,
+static int wl1273_core_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wl1273_fm_platform_data *pdata = client->dev.platform_data;
@@ -262,7 +262,7 @@ static struct i2c_driver wl1273_core_driver = {
},
.probe = wl1273_core_probe,
.id_table = wl1273_driver_id_table,
- .remove = __devexit_p(wl1273_core_remove),
+ .remove = wl1273_core_remove,
};
static int __init wl1273_core_init(void)
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 14490cc785d2..1133a64c2dc9 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -56,6 +56,18 @@ static const struct reg_default wm5102_reva_patch[] = {
{ 0x80, 0x0000 },
};
+static const struct reg_default wm5102_revb_patch[] = {
+ { 0x80, 0x0003 },
+ { 0x081, 0xE022 },
+ { 0x410, 0x6080 },
+ { 0x418, 0x6080 },
+ { 0x420, 0x6080 },
+ { 0x428, 0xC000 },
+ { 0x441, 0x8014 },
+ { 0x458, 0x000b },
+ { 0x80, 0x0000 },
+};
+
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
@@ -65,7 +77,9 @@ int wm5102_patch(struct arizona *arizona)
wm5102_reva_patch,
ARRAY_SIZE(wm5102_reva_patch));
default:
- return 0;
+ return regmap_register_patch(arizona->regmap,
+ wm5102_revb_patch,
+ ARRAY_SIZE(wm5102_revb_patch));
}
}
@@ -258,6 +272,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
{ 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
@@ -290,6 +305,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x00000212, 0x0001 }, /* R530 - LDO1 Control 2 */
{ 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
@@ -1047,6 +1063,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_RATE_ESTIMATOR_3:
case ARIZONA_RATE_ESTIMATOR_4:
case ARIZONA_RATE_ESTIMATOR_5:
+ case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
case ARIZONA_FLL1_CONTROL_1:
case ARIZONA_FLL1_CONTROL_2:
case ARIZONA_FLL1_CONTROL_3:
@@ -1054,6 +1071,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1069,6 +1087,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
@@ -1079,6 +1098,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_GPIO_CLOCK:
case ARIZONA_MIC_CHARGE_PUMP_1:
case ARIZONA_LDO1_CONTROL_1:
+ case ARIZONA_LDO1_CONTROL_2:
case ARIZONA_LDO2_CONTROL_1:
case ARIZONA_MIC_BIAS_CTRL_1:
case ARIZONA_MIC_BIAS_CTRL_2:
@@ -1802,6 +1822,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
return true;
default:
return false;
@@ -1810,15 +1831,23 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
+ if (reg > 0xffff)
+ return true;
+
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FX_CTRL2:
case ARIZONA_INTERRUPT_STATUS_1:
case ARIZONA_INTERRUPT_STATUS_2:
@@ -1844,6 +1873,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP1_STATUS_3:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_MIC_DETECT_3:
return true;
@@ -1852,12 +1882,14 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
}
}
+#define WM5102_MAX_REGISTER 0x1a9800
+
const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
@@ -1871,7 +1903,7 @@ const struct regmap_config wm5102_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
- .max_register = ARIZONA_DSP1_STATUS_2,
+ .max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 4bceee98f0a4..4e70e157a909 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -21,7 +21,7 @@
#include <linux/mfd/wm831x/core.h>
-static int __devinit wm831x_spi_probe(struct spi_device *spi)
+static int wm831x_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct wm831x *wm831x;
@@ -51,7 +51,7 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
return wm831x_device_init(wm831x, type, spi->irq);
}
-static int __devexit wm831x_spi_remove(struct spi_device *spi)
+static int wm831x_spi_remove(struct spi_device *spi)
{
struct wm831x *wm831x = dev_get_drvdata(&spi->dev);
@@ -99,7 +99,7 @@ static struct spi_driver wm831x_spi_driver = {
},
.id_table = wm831x_spi_ids,
.probe = wm831x_spi_probe,
- .remove = __devexit_p(wm831x_spi_remove),
+ .remove = wm831x_spi_remove,
.shutdown = wm831x_spi_shutdown,
};
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 8fefc961ec06..57c488d42d3e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -374,21 +374,21 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
}
#endif
-static const __devinitconst struct reg_default wm8994_revc_patch[] = {
+static const struct reg_default wm8994_revc_patch[] = {
{ 0x102, 0x3 },
{ 0x56, 0x3 },
{ 0x817, 0x0 },
{ 0x102, 0x0 },
};
-static const __devinitconst struct reg_default wm8958_reva_patch[] = {
+static const struct reg_default wm8958_reva_patch[] = {
{ 0x102, 0x3 },
{ 0xcb, 0x81 },
{ 0x817, 0x0 },
{ 0x102, 0x0 },
};
-static const __devinitconst struct reg_default wm1811_reva_patch[] = {
+static const struct reg_default wm1811_reva_patch[] = {
{ 0x102, 0x3 },
{ 0x56, 0xc07 },
{ 0x5d, 0x7e },
@@ -399,15 +399,21 @@ static const __devinitconst struct reg_default wm1811_reva_patch[] = {
/*
* Instantiate the generic non-control parts of the device.
*/
-static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
+static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
- struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct wm8994_pdata *pdata;
struct regmap_config *regmap_config;
const struct reg_default *regmap_patch = NULL;
const char *devname;
int ret, i, patch_regs;
int pulls = 0;
+ if (dev_get_platdata(wm8994->dev)) {
+ pdata = dev_get_platdata(wm8994->dev);
+ wm8994->pdata = *pdata;
+ }
+ pdata = &wm8994->pdata;
+
dev_set_drvdata(wm8994->dev, wm8994);
/* Add the on-chip regulators first for bootstrapping */
@@ -529,11 +535,10 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
case 2:
case 3:
+ default:
regmap_patch = wm8994_revc_patch;
patch_regs = ARRAY_SIZE(wm8994_revc_patch);
break;
- default:
- break;
}
break;
@@ -552,17 +557,9 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
/* Revision C did not change the relevant layer */
if (wm8994->revision > 1)
wm8994->revision++;
- switch (wm8994->revision) {
- case 0:
- case 1:
- case 2:
- case 3:
- regmap_patch = wm1811_reva_patch;
- patch_regs = ARRAY_SIZE(wm1811_reva_patch);
- break;
- default:
- break;
- }
+
+ regmap_patch = wm1811_reva_patch;
+ patch_regs = ARRAY_SIZE(wm1811_reva_patch);
break;
default:
@@ -604,24 +601,21 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
}
- if (pdata) {
- wm8994->irq_base = pdata->irq_base;
- wm8994->gpio_base = pdata->gpio_base;
-
- /* GPIO configuration is only applied if it's non-zero */
- for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
- if (pdata->gpio_defaults[i]) {
- wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
- 0xffff,
- pdata->gpio_defaults[i]);
- }
+ wm8994->irq_base = pdata->irq_base;
+ wm8994->gpio_base = pdata->gpio_base;
+
+ /* GPIO configuration is only applied if it's non-zero */
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+ if (pdata->gpio_defaults[i]) {
+ wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
+ 0xffff, pdata->gpio_defaults[i]);
}
+ }
- wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
+ wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
- if (pdata->spkmode_pu)
- pulls |= WM8994_SPKMODE_PU;
- }
+ if (pdata->spkmode_pu)
+ pulls |= WM8994_SPKMODE_PU;
/* Disable unneeded pulls */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
@@ -671,7 +665,7 @@ err:
return ret;
}
-static __devexit void wm8994_device_exit(struct wm8994 *wm8994)
+static void wm8994_device_exit(struct wm8994 *wm8994)
{
pm_runtime_disable(wm8994->dev);
mfd_remove_devices(wm8994->dev);
@@ -689,7 +683,7 @@ static const struct of_device_id wm8994_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wm8994_of_match);
-static __devinit int wm8994_i2c_probe(struct i2c_client *i2c,
+static int wm8994_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8994 *wm8994;
@@ -715,7 +709,7 @@ static __devinit int wm8994_i2c_probe(struct i2c_client *i2c,
return wm8994_device_init(wm8994, i2c->irq);
}
-static __devexit int wm8994_i2c_remove(struct i2c_client *i2c)
+static int wm8994_i2c_remove(struct i2c_client *i2c)
{
struct wm8994 *wm8994 = i2c_get_clientdata(i2c);
@@ -744,7 +738,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.of_match_table = wm8994_of_match,
},
.probe = wm8994_i2c_probe,
- .remove = __devexit_p(wm8994_i2c_remove),
+ .remove = wm8994_i2c_remove,
.id_table = wm8994_i2c_id,
};
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 820826270b62..705b881e186d 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -51,7 +51,7 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d16 = write_r8d16,
};
-static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
+static int ad_dpot_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad_dpot_bus_data bdata = {
@@ -68,7 +68,7 @@ static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
-static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
+static int ad_dpot_i2c_remove(struct i2c_client *client)
{
return ad_dpot_remove(&client->dev);
}
@@ -109,7 +109,7 @@ static struct i2c_driver ad_dpot_i2c_driver = {
.owner = THIS_MODULE,
},
.probe = ad_dpot_i2c_probe,
- .remove = __devexit_p(ad_dpot_i2c_remove),
+ .remove = ad_dpot_i2c_remove,
.id_table = ad_dpot_id,
};
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index f62317540d00..9da04ede04f3 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -75,7 +75,7 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d8 = write16,
.write_r8d16 = write24,
};
-static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
+static int ad_dpot_spi_probe(struct spi_device *spi)
{
struct ad_dpot_bus_data bdata = {
.client = spi,
@@ -87,7 +87,7 @@ static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->name);
}
-static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
+static int ad_dpot_spi_remove(struct spi_device *spi)
{
return ad_dpot_remove(&spi->dev);
}
@@ -131,7 +131,7 @@ static struct spi_driver ad_dpot_spi_driver = {
.owner = THIS_MODULE,
},
.probe = ad_dpot_spi_probe,
- .remove = __devexit_p(ad_dpot_spi_remove),
+ .remove = ad_dpot_spi_remove,
.id_table = ad_dpot_spi_id,
};
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 6938f1be664d..8f99e8e3f0ac 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
-__devinit int ad_dpot_add_files(struct device *dev,
+int ad_dpot_add_files(struct device *dev,
unsigned features, unsigned rdac)
{
int err = sysfs_create_file(&dev->kobj,
@@ -685,7 +685,7 @@ inline void ad_dpot_remove_files(struct device *dev,
}
}
-int __devinit ad_dpot_probe(struct device *dev,
+int ad_dpot_probe(struct device *dev,
struct ad_dpot_bus_data *bdata, unsigned long devid,
const char *name)
{
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 0314773f6db3..d648b0893027 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -68,7 +68,7 @@ static int als_wait_for_data_ready(struct device *dev)
ret = i2c_smbus_read_byte_data(client, 0x86);
} while (!(ret & 0x80) && retry--);
- if (!retry) {
+ if (retry < 0) {
dev_warn(dev, "timeout waiting for data ready\n");
return -ETIMEDOUT;
}
@@ -254,7 +254,7 @@ als_error1:
return res;
}
-static int __devexit apds9802als_remove(struct i2c_client *client)
+static int apds9802als_remove(struct i2c_client *client)
{
struct als_data *data = i2c_get_clientdata(client);
@@ -326,7 +326,7 @@ static struct i2c_driver apds9802als_driver = {
.pm = APDS9802ALS_PM_OPS,
},
.probe = apds9802als_probe,
- .remove = __devexit_p(apds9802als_remove),
+ .remove = apds9802als_remove,
.suspend = apds9802als_suspend,
.resume = apds9802als_resume,
.id_table = apds9802als_id,
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index ee74244aa03b..0e67f8263cd8 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1047,7 +1047,7 @@ static struct attribute_group apds990x_attribute_group[] = {
{.attrs = sysfs_attrs_ctrl },
};
-static int __devinit apds990x_probe(struct i2c_client *client,
+static int apds990x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct apds990x_chip *chip;
@@ -1181,7 +1181,7 @@ fail1:
return err;
}
-static int __devexit apds990x_remove(struct i2c_client *client)
+static int apds990x_remove(struct i2c_client *client)
{
struct apds990x_chip *chip = i2c_get_clientdata(client);
@@ -1275,7 +1275,7 @@ static struct i2c_driver apds990x_driver = {
.pm = &apds990x_pm_ops,
},
.probe = apds990x_probe,
- .remove = __devexit_p(apds990x_remove),
+ .remove = apds990x_remove,
.id_table = apds990x_id,
};
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 5bb187781074..3c09cbb70b1d 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -18,6 +18,9 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
static LIST_HEAD(ssc_list);
@@ -29,7 +32,13 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
spin_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
- if (ssc->pdev->id == ssc_num) {
+ if (ssc->pdev->dev.of_node) {
+ if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
+ == ssc_num) {
+ ssc_valid = 1;
+ break;
+ }
+ } else if (ssc->pdev->id == ssc_num) {
ssc_valid = 1;
break;
}
@@ -68,39 +77,100 @@ void ssc_free(struct ssc_device *ssc)
}
EXPORT_SYMBOL(ssc_free);
-static int __init ssc_probe(struct platform_device *pdev)
+static struct atmel_ssc_platform_data at91rm9200_config = {
+ .use_dma = 0,
+};
+
+static struct atmel_ssc_platform_data at91sam9g45_config = {
+ .use_dma = 1,
+};
+
+static const struct platform_device_id atmel_ssc_devtypes[] = {
+ {
+ .name = "at91rm9200_ssc",
+ .driver_data = (unsigned long) &at91rm9200_config,
+ }, {
+ .name = "at91sam9g45_ssc",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ssc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-ssc",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-ssc",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
+#endif
+
+static inline const struct atmel_ssc_platform_data * __init
+ atmel_ssc_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_ssc_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+
+ return (struct atmel_ssc_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int ssc_probe(struct platform_device *pdev)
{
- int retval = 0;
struct resource *regs;
struct ssc_device *ssc;
+ const struct atmel_ssc_platform_data *plat_dat;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "Failed to request pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
- ssc = kzalloc(sizeof(struct ssc_device), GFP_KERNEL);
+ ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
dev_dbg(&pdev->dev, "out of memory\n");
- retval = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
+ ssc->pdev = pdev;
+
+ plat_dat = atmel_ssc_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
+ ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_dbg(&pdev->dev, "no mmio resource defined\n");
- retval = -ENXIO;
- goto out_free;
- }
-
- ssc->clk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(ssc->clk)) {
- dev_dbg(&pdev->dev, "no pclk clock defined\n");
- retval = -ENXIO;
- goto out_free;
+ return -ENXIO;
}
- ssc->pdev = pdev;
- ssc->regs = ioremap(regs->start, resource_size(regs));
+ ssc->regs = devm_request_and_ioremap(&pdev->dev, regs);
if (!ssc->regs) {
dev_dbg(&pdev->dev, "ioremap failed\n");
- retval = -EINVAL;
- goto out_clk;
+ return -EINVAL;
+ }
+
+ ssc->phybase = regs->start;
+
+ ssc->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(ssc->clk)) {
+ dev_dbg(&pdev->dev, "no pclk clock defined\n");
+ return -ENXIO;
}
/* disable all interrupts */
@@ -112,8 +182,7 @@ static int __init ssc_probe(struct platform_device *pdev)
ssc->irq = platform_get_irq(pdev, 0);
if (!ssc->irq) {
dev_dbg(&pdev->dev, "could not get irq\n");
- retval = -ENXIO;
- goto out_unmap;
+ return -ENXIO;
}
spin_lock(&user_lock);
@@ -125,51 +194,31 @@ static int __init ssc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
ssc->regs, ssc->irq);
- goto out;
-
-out_unmap:
- iounmap(ssc->regs);
-out_clk:
- clk_put(ssc->clk);
-out_free:
- kfree(ssc);
-out:
- return retval;
+ return 0;
}
-static int __devexit ssc_remove(struct platform_device *pdev)
+static int ssc_remove(struct platform_device *pdev)
{
struct ssc_device *ssc = platform_get_drvdata(pdev);
spin_lock(&user_lock);
- iounmap(ssc->regs);
- clk_put(ssc->clk);
list_del(&ssc->list);
- kfree(ssc);
spin_unlock(&user_lock);
return 0;
}
static struct platform_driver ssc_driver = {
- .remove = __devexit_p(ssc_remove),
.driver = {
.name = "ssc",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_ssc_dt_ids),
},
+ .id_table = atmel_ssc_devtypes,
+ .probe = ssc_probe,
+ .remove = ssc_remove,
};
-
-static int __init ssc_init(void)
-{
- return platform_driver_probe(&ssc_driver, ssc_probe);
-}
-module_init(ssc_init);
-
-static void __exit ssc_exit(void)
-{
- platform_driver_unregister(&ssc_driver);
-}
-module_exit(ssc_exit);
+module_platform_driver(ssc_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
MODULE_DESCRIPTION("SSC driver for Atmel AVR32 and AT91");
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 3d56ae7ef8de..2ed8fc3be7e6 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1162,7 +1162,7 @@ static struct attribute_group bh1770_attribute_group = {
.attrs = sysfs_attrs
};
-static int __devinit bh1770_probe(struct i2c_client *client,
+static int bh1770_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bh1770_chip *chip;
@@ -1285,7 +1285,7 @@ fail1:
return err;
}
-static int __devexit bh1770_remove(struct i2c_client *client)
+static int bh1770_remove(struct i2c_client *client)
{
struct bh1770_chip *chip = i2c_get_clientdata(client);
@@ -1395,7 +1395,7 @@ static struct i2c_driver bh1770_driver = {
.pm = &bh1770_pm_ops,
},
.probe = bh1770_probe,
- .remove = __devexit_p(bh1770_remove),
+ .remove = bh1770_remove,
.id_table = bh1770_id,
};
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index f1f9877f3fdf..cf03d0abf33e 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -144,7 +144,7 @@ static const struct attribute_group bh1780_attr_group = {
.attrs = bh1780_attributes,
};
-static int __devinit bh1780_probe(struct i2c_client *client,
+static int bh1780_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
@@ -185,7 +185,7 @@ err_op_failed:
return ret;
}
-static int __devexit bh1780_remove(struct i2c_client *client)
+static int bh1780_remove(struct i2c_client *client)
{
struct bh1780_data *ddata;
@@ -248,7 +248,7 @@ static const struct i2c_device_id bh1780_id[] = {
static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
- .remove = __devexit_p(bh1780_remove),
+ .remove = bh1780_remove,
.id_table = bh1780_id,
.driver = {
.name = "bh1780",
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index a4f33c995ea1..3abfcecf8424 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -36,7 +36,7 @@ static int bmp085_i2c_detect(struct i2c_client *client,
return bmp085_detect(&client->dev);
}
-static int __devinit bmp085_i2c_probe(struct i2c_client *client,
+static int bmp085_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err;
@@ -71,7 +71,7 @@ static struct i2c_driver bmp085_i2c_driver = {
},
.id_table = bmp085_id,
.probe = bmp085_i2c_probe,
- .remove = __devexit_p(bmp085_i2c_remove),
+ .remove = bmp085_i2c_remove,
.detect = bmp085_i2c_detect,
.address_list = normal_i2c
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
index 5e982af99730..d6a52659cf24 100644
--- a/drivers/misc/bmp085-spi.c
+++ b/drivers/misc/bmp085-spi.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include "bmp085.h"
-static int __devinit bmp085_spi_probe(struct spi_device *client)
+static int bmp085_spi_probe(struct spi_device *client)
{
int err;
struct regmap *regmap;
@@ -70,7 +70,7 @@ static struct spi_driver bmp085_spi_driver = {
},
.id_table = bmp085_id,
.probe = bmp085_spi_probe,
- .remove = __devexit_p(bmp085_spi_remove)
+ .remove = bmp085_spi_remove
};
module_spi_driver(bmp085_spi_driver);
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 62e418293b7e..849e2fed4da2 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -420,7 +420,7 @@ struct regmap_config bmp085_regmap_config = {
};
EXPORT_SYMBOL_GPL(bmp085_regmap_config);
-__devinit int bmp085_probe(struct device *dev, struct regmap *regmap)
+int bmp085_probe(struct device *dev, struct regmap *regmap)
{
struct bmp085_data *data;
int err = 0;
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index 9d5eed754666..2e50f811ff59 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -30,7 +30,7 @@ void cb710_pci_update_config_reg(struct pci_dev *pdev,
EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
/* Some magic writes based on Windows driver init code */
-static int __devinit cb710_pci_configure(struct pci_dev *pdev)
+static int cb710_pci_configure(struct pci_dev *pdev)
{
unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
struct pci_dev *pdev0;
@@ -96,7 +96,7 @@ static void cb710_release_slot(struct device *dev)
#endif
}
-static int __devinit cb710_register_slot(struct cb710_chip *chip,
+static int cb710_register_slot(struct cb710_chip *chip,
unsigned slot_mask, unsigned io_offset, const char *name)
{
int nr = chip->slots;
@@ -201,7 +201,7 @@ static int cb710_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static int __devinit cb710_probe(struct pci_dev *pdev,
+static int cb710_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct cb710_chip *chip;
@@ -305,7 +305,7 @@ unreg_mmc:
return err;
}
-static void __devexit cb710_remove_one(struct pci_dev *pdev)
+static void cb710_remove_one(struct pci_dev *pdev)
{
struct cb710_chip *chip = pci_get_drvdata(pdev);
unsigned long flags;
@@ -332,7 +332,7 @@ static struct pci_driver cb710_driver = {
.name = KBUILD_MODNAME,
.id_table = cb710_pci_tbl,
.probe = cb710_probe,
- .remove = __devexit_p(cb710_remove_one),
+ .remove = cb710_remove_one,
#ifdef CONFIG_PM
.suspend = cb710_suspend,
.resume = cb710_resume,
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index f505a40a8f49..9858f36dad8b 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
* Jordan tells me that he and Mitch once played w/ it, but it's unclear
* what the results of that were (and they experienced some instability).
*/
-static void __devinit reset_all_timers(void)
+static void reset_all_timers(void)
{
uint32_t val, dummy;
@@ -262,7 +262,7 @@ static void __devinit reset_all_timers(void)
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
-static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+static int scan_timers(struct cs5535_mfgpt_chip *mfgpt)
{
struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
unsigned long flags;
@@ -289,7 +289,7 @@ static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
return timers;
}
-static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
+static int cs5535_mfgpt_probe(struct platform_device *pdev)
{
struct resource *res;
int err = -EIO, t;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index ab1ad41786d1..2baeec56edfe 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -656,7 +656,7 @@ err_out:
return err;
}
-static int __devexit at24_remove(struct i2c_client *client)
+static int at24_remove(struct i2c_client *client)
{
struct at24_data *at24;
int i;
@@ -680,7 +680,7 @@ static struct i2c_driver at24_driver = {
.owner = THIS_MODULE,
},
.probe = at24_probe,
- .remove = __devexit_p(at24_remove),
+ .remove = at24_remove,
.id_table = at24_ids,
};
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4ed93dd54116..b08cf8a08789 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -459,7 +459,7 @@ fail:
return err;
}
-static int __devexit at25_remove(struct spi_device *spi)
+static int at25_remove(struct spi_device *spi)
{
struct at25_data *at25;
@@ -477,7 +477,7 @@ static struct spi_driver at25_driver = {
.owner = THIS_MODULE,
},
.probe = at25_probe,
- .remove = __devexit_p(at25_remove),
+ .remove = at25_remove,
};
module_spi_driver(at25_driver);
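module_spi_driver() above (and in the other converted SPI drivers in this patch) replaces the hand-written module_init()/module_exit() pair; a minimal sketch of the helper it is assumed to expand to, following the generic module_driver() pattern (not part of this patch):

#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, spi_unregister_driver)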
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index ce3fe3633dd7..a6b5d5e73485 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -309,7 +309,7 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
}
static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
-static int __devinit eeprom_93xx46_probe(struct spi_device *spi)
+static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
@@ -370,7 +370,7 @@ fail:
return err;
}
-static int __devexit eeprom_93xx46_remove(struct spi_device *spi)
+static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = dev_get_drvdata(&spi->dev);
@@ -389,7 +389,7 @@ static struct spi_driver eeprom_93xx46_driver = {
.owner = THIS_MODULE,
},
.probe = eeprom_93xx46_probe,
- .remove = __devexit_p(eeprom_93xx46_remove),
+ .remove = eeprom_93xx46_remove,
};
module_spi_driver(eeprom_93xx46_driver);
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index ac96c3a4034a..e8cbb1c59f4c 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -407,7 +407,7 @@ static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
return 0;
}
-static int __devinit fsa9480_probe(struct i2c_client *client,
+static int fsa9480_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -462,7 +462,7 @@ fail1:
return ret;
}
-static int __devexit fsa9480_remove(struct i2c_client *client)
+static int fsa9480_remove(struct i2c_client *client)
{
struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
if (client->irq)
@@ -533,7 +533,7 @@ static struct i2c_driver fsa9480_i2c_driver = {
.name = "fsa9480",
},
.probe = fsa9480_probe,
- .remove = __devexit_p(fsa9480_remove),
+ .remove = fsa9480_remove,
.resume = fsa9480_resume,
.suspend = fsa9480_suspend,
.id_table = fsa9480_id,
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 12ccdf94e4fa..621c7a373390 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -30,7 +30,7 @@
static struct class *ilo_class;
static unsigned int ilo_major;
-static unsigned int max_ccb = MIN_CCB;
+static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
static inline int get_entry_id(int entry)
@@ -686,7 +686,7 @@ static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
pci_iounmap(pdev, hw->mmio_vaddr);
}
-static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
+static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
int error = -ENOMEM;
@@ -725,6 +725,9 @@ static void ilo_remove(struct pci_dev *pdev)
int i, minor;
struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);
+ if (!ilo_hw)
+ return;
+
clear_device(ilo_hw);
minor = MINOR(ilo_hw->cdev.dev);
@@ -748,12 +751,16 @@ static void ilo_remove(struct pci_dev *pdev)
ilo_hwdev[(minor / max_ccb)] = 0;
}
-static int __devinit ilo_probe(struct pci_dev *pdev,
+static int ilo_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int devnum, minor, start, error;
+ int devnum, minor, start, error = 0;
struct ilo_hwinfo *ilo_hw;
+ /* Ignore subsystem_device = 0x1979 (set by BIOS) */
+ if (pdev->subsystem_device == 0x1979)
+ goto out;
+
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
else if (max_ccb < MIN_CCB)
@@ -852,7 +859,7 @@ static struct pci_driver ilo_driver = {
.name = ILO_NAME,
.id_table = ilo_devices,
.probe = ilo_probe,
- .remove = __devexit_p(ilo_remove),
+ .remove = ilo_remove,
};
static int __init ilo_init(void)
@@ -892,14 +899,14 @@ static void __exit ilo_exit(void)
class_destroy(ilo_class);
}
-MODULE_VERSION("1.3");
+MODULE_VERSION("1.4");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
MODULE_LICENSE("GPL v2");
module_param(max_ccb, uint, 0444);
-MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8)");
+MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (16)");
module_init(ilo_init);
module_exit(ilo_exit);
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 168d8008f460..0346d87c5fed 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -62,7 +62,7 @@ module_param(ibmasm_debug, int , S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ibmasm_debug, " Set debug mode on or off");
-static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result;
struct service_processor *sp;
@@ -163,7 +163,7 @@ error_resources:
return result;
}
-static void __devexit ibmasm_remove_one(struct pci_dev *pdev)
+static void ibmasm_remove_one(struct pci_dev *pdev)
{
struct service_processor *sp = (struct service_processor *)pci_get_drvdata(pdev);
@@ -198,7 +198,7 @@ static struct pci_driver ibmasm_driver = {
.name = DRIVER_NAME,
.id_table = ibmasm_pci_table,
.probe = ibmasm_init_one,
- .remove = __devexit_p(ibmasm_remove_one),
+ .remove = ibmasm_remove_one,
};
static void __exit ibmasm_exit (void)
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 6a7710603a90..06f6ad29ceff 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -139,7 +139,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
* even though the following code utilizes external interrupt registers
* to perform the speed calculation.
*/
-static void __devinit
+static void
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
@@ -231,7 +231,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
* on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
* If neither is present, it's a PCI-RT.
*/
-static unsigned int __devinit
+static unsigned int
ioc4_variant(struct ioc4_driver_data *idd)
{
struct pci_dev *pdev = NULL;
@@ -279,7 +279,7 @@ ioc4_load_modules(struct work_struct *work)
static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
/* Adds a new instance of an IOC4 card */
-static int __devinit
+static int
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc4_driver_data *idd;
@@ -415,7 +415,7 @@ out:
}
/* Removes a particular instance of an IOC4 card. */
-static void __devexit
+static void
ioc4_remove(struct pci_dev *pdev)
{
struct ioc4_submodule *is;
@@ -466,7 +466,7 @@ static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
- .remove = __devexit_p(ioc4_remove),
+ .remove = ioc4_remove,
};
MODULE_DEVICE_TABLE(pci, ioc4_id_table);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index eb5de2e210d7..29b306c6bdb3 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -365,7 +365,7 @@ static int isl29003_init_client(struct i2c_client *client)
* I2C layer
*/
-static int __devinit isl29003_probe(struct i2c_client *client,
+static int isl29003_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -401,7 +401,7 @@ exit_kfree:
return err;
}
-static int __devexit isl29003_remove(struct i2c_client *client)
+static int isl29003_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group);
isl29003_set_power_state(client, 0);
@@ -451,7 +451,7 @@ static struct i2c_driver isl29003_driver = {
.suspend = isl29003_suspend,
.resume = isl29003_resume,
.probe = isl29003_probe,
- .remove = __devexit_p(isl29003_remove),
+ .remove = isl29003_remove,
.id_table = isl29003_id,
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 60ec8689d6e3..7c97550240f1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -114,7 +114,7 @@ static struct of_device_id lis3lv02d_i2c_dt_ids[] = {
MODULE_DEVICE_TABLE(of, lis3lv02d_i2c_dt_ids);
#endif
-static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
+static int lis3lv02d_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0;
@@ -191,7 +191,7 @@ fail:
return ret;
}
-static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
+static int lis3lv02d_i2c_remove(struct i2c_client *client)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
@@ -280,7 +280,7 @@ static struct i2c_driver lis3lv02d_i2c_driver = {
.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
},
.probe = lis3lv02d_i2c_probe,
- .remove = __devexit_p(lis3lv02d_i2c_remove),
+ .remove = lis3lv02d_i2c_remove,
.id_table = lis3lv02d_id,
};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index ccb6475fa059..9aa2bd2a71ae 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -69,7 +69,7 @@ static struct of_device_id lis302dl_spi_dt_ids[] = {
MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids);
#endif
-static int __devinit lis302dl_spi_probe(struct spi_device *spi)
+static int lis302dl_spi_probe(struct spi_device *spi)
{
int ret;
@@ -100,7 +100,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
return lis3lv02d_init_device(&lis3_dev);
}
-static int __devexit lis302dl_spi_remove(struct spi_device *spi)
+static int lis302dl_spi_remove(struct spi_device *spi)
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
lis3lv02d_joystick_disable(lis3);
@@ -144,7 +144,7 @@ static struct spi_driver lis302dl_spi_driver = {
.of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
},
.probe = lis302dl_spi_probe,
- .remove = __devexit_p(lis302dl_spi_remove),
+ .remove = lis302dl_spi_remove,
};
module_spi_driver(lis302dl_spi_driver);
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 57168db6c7e5..0017842e166c 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -8,4 +8,5 @@ mei-objs += interrupt.o
mei-objs += interface.o
mei-objs += iorw.o
mei-objs += main.o
+mei-objs += amthif.o
mei-objs += wd.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
new file mode 100644
index 000000000000..e40ffd9502d1
--- /dev/null
+++ b/drivers/misc/mei/amthif.c
@@ -0,0 +1,722 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+
+#include "mei_dev.h"
+#include "hw.h"
+#include <linux/mei.h>
+#include "interface.h"
+
+const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
+ 0xa8, 0x46, 0xe0, 0xff, 0x65,
+ 0x81, 0x4c);
+
+/**
+ * mei_amthif_reset_params - resets the mei device iamthif parameters
+ *
+ * @dev: the device structure
+ */
+void mei_amthif_reset_params(struct mei_device *dev)
+{
+ /* reset iamthif parameters. */
+ dev->iamthif_current_cb = NULL;
+ dev->iamthif_msg_buf_size = 0;
+ dev->iamthif_msg_buf_index = 0;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = false;
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ dev->iamthif_timer = 0;
+}
+
+/**
+ * mei_amthif_host_init - initializes the amthif client.
+ *
+ * @dev: the device structure
+ *
+ */
+void mei_amthif_host_init(struct mei_device *dev)
+{
+ int i;
+ unsigned char *msg_buf;
+
+ mei_cl_init(&dev->iamthif_cl, dev);
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+
+ /* find ME amthi client */
+ i = mei_me_cl_link(dev, &dev->iamthif_cl,
+ &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+ if (i < 0) {
+ dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
+ return;
+ }
+
+ /* Assign iamthif_mtu to the value received from ME */
+
+ dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
+ dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n",
+ dev->me_clients[i].props.max_msg_length);
+
+ kfree(dev->iamthif_msg_buf);
+ dev->iamthif_msg_buf = NULL;
+
+ /* allocate storage for ME message buffer */
+ msg_buf = kcalloc(dev->iamthif_mtu,
+ sizeof(unsigned char), GFP_KERNEL);
+ if (!msg_buf) {
+ dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
+ return;
+ }
+
+ dev->iamthif_msg_buf = msg_buf;
+
+ if (mei_connect(dev, &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+ dev->iamthif_cl.host_client_id = 0;
+ } else {
+ dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ }
+}
+
+/**
+ * mei_amthif_find_read_list_entry - finds an amthif list entry for the current file
+ *
+ * @dev: the device structure
+ * @file: pointer to file object
+ *
+ * returns a list entry on success, NULL on failure.
+ */
+struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
+ struct file *file)
+{
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthif_rd_complete_list.list, list) {
+ if (pos->cl && pos->cl == &dev->iamthif_cl &&
+ pos->file_object == file)
+ return pos;
+ }
+ return NULL;
+}
+
+
+/**
+ * mei_amthif_read - read data from AMTHIF client
+ *
+ * @dev: the device structure
+ * @file: pointer to file object
+ * @ubuf: pointer to user data in user space
+ * @length: data length to read
+ * @offset: data read offset
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns
+ * the read data length on success,
+ * zero if no data to read,
+ * negative on failure.
+ */
+int mei_amthif_read(struct mei_device *dev, struct file *file,
+ char __user *ubuf, size_t length, loff_t *offset)
+{
+ int rets;
+ int wait_ret;
+ struct mei_cl_cb *cb = NULL;
+ struct mei_cl *cl = file->private_data;
+ unsigned long timeout;
+ int i;
+
+ /* Only possible if we are in a timeout */
+ if (!cl || cl != &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "bad file ext.\n");
+ return -ETIMEDOUT;
+ }
+
+ i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
+
+ if (i < 0) {
+ dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+ return -ENODEV;
+ }
+ dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+ cb = mei_amthif_find_read_list_entry(dev, file);
+
+ /* Check whether we can block or not */
+ if (cb == NULL && file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+
+ dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+ while (cb == NULL) {
+ /* unlock the Mutex */
+ mutex_unlock(&dev->device_lock);
+
+ wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
+ (cb = mei_amthif_find_read_list_entry(dev, file)));
+
+ /* Lock the mutex again */
+ mutex_lock(&dev->device_lock);
+
+ if (wait_ret)
+ return -ERESTARTSYS;
+
+ dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
+ }
+
+
+ dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+ dev->iamthif_timer = 0;
+
+ if (cb) {
+ timeout = cb->read_time +
+ mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
+ dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+ timeout);
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+ /* the 15 sec timeout for the message has expired */
+ list_del(&cb->list);
+ rets = -ETIMEDOUT;
+ goto free;
+ }
+ }
+ /* if the whole message will fit remove it from the list */
+ if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
+ list_del(&cb->list);
+ else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+ /* end of the message has been reached */
+ list_del(&cb->list);
+ rets = 0;
+ goto free;
+ }
+ /* otherwise not the whole buffer will be read, so do not
+ * remove the message from the list
+ */
+
+ dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+ cb->response_buffer.size);
+ dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+
+ /* length is being truncated to PAGE_SIZE, however,
+ * the buf_idx may point beyond it */
+ length = min_t(size_t, length, (cb->buf_idx - *offset));
+
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
+ rets = -EFAULT;
+ else {
+ rets = length;
+ if ((*offset + length) < cb->buf_idx) {
+ *offset += length;
+ goto out;
+ }
+ }
+free:
+ dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+ *offset = 0;
+ mei_io_cb_free(cb);
+out:
+ return rets;
+}
+
+/**
+ * mei_amthif_send_cmd - send amthif command to the ME
+ *
+ * @dev: the device structure
+ * @cb: mei call back struct
+ *
+ * returns 0 on success, <0 on failure.
+ *
+ */
+static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
+{
+ struct mei_msg_hdr mei_hdr;
+ int ret;
+
+ if (!dev || !cb)
+ return -ENODEV;
+
+ dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+
+ dev->iamthif_state = MEI_IAMTHIF_WRITING;
+ dev->iamthif_current_cb = cb;
+ dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = true;
+ dev->iamthif_msg_buf_size = cb->request_buffer.size;
+ memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
+ cb->request_buffer.size);
+
+ ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+ if (ret < 0)
+ return ret;
+
+ if (ret && dev->mei_host_buffer_is_empty) {
+ ret = 0;
+ dev->mei_host_buffer_is_empty = false;
+ if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
+ mei_hdr.length = mei_hbuf_max_data(dev);
+ mei_hdr.msg_complete = 0;
+ } else {
+ mei_hdr.length = cb->request_buffer.size;
+ mei_hdr.msg_complete = 1;
+ }
+
+ mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
+ mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
+ mei_hdr.reserved = 0;
+ dev->iamthif_msg_buf_index += mei_hdr.length;
+ if (mei_write_message(dev, &mei_hdr,
+ (unsigned char *)(dev->iamthif_msg_buf),
+ mei_hdr.length))
+ return -ENODEV;
+
+ if (mei_hdr.msg_complete) {
+ if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+ return -ENODEV;
+ dev->iamthif_flow_control_pending = true;
+ dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
+ dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+ dev->iamthif_current_cb = cb;
+ dev->iamthif_file_object = cb->file_object;
+ list_add_tail(&cb->list, &dev->write_waiting_list.list);
+ } else {
+ dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+ list_add_tail(&cb->list, &dev->write_list.list);
+ }
+ } else {
+ if (!(dev->mei_host_buffer_is_empty))
+ dev_dbg(&dev->pdev->dev, "host buffer is not empty");
+
+ dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
+ list_add_tail(&cb->list, &dev->write_list.list);
+ }
+ return 0;
+}
+
+/**
+ * mei_amthif_write - write amthif data to amthif client
+ *
+ * @dev: the device structure
+ * @cb: mei call back struct
+ *
+ * returns 0 on success, <0 on failure.
+ *
+ */
+int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
+{
+ int ret;
+
+ if (!dev || !cb)
+ return -ENODEV;
+
+ ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu);
+ if (ret)
+ return ret;
+
+ cb->fop_type = MEI_FOP_IOCTL;
+
+ if (!list_empty(&dev->amthif_cmd_list.list) ||
+ dev->iamthif_state != MEI_IAMTHIF_IDLE) {
+ dev_dbg(&dev->pdev->dev,
+ "amthif state = %d\n", dev->iamthif_state);
+ dev_dbg(&dev->pdev->dev, "AMTHIF: add cb to the wait list\n");
+ list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
+ return 0;
+ }
+ return mei_amthif_send_cmd(dev, cb);
+}
+/**
+ * mei_amthif_run_next_cmd - sends the next queued amthif command
+ *
+ * @dev: the device structure
+ */
+void mei_amthif_run_next_cmd(struct mei_device *dev)
+{
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
+ int status;
+
+ if (!dev)
+ return;
+
+ dev->iamthif_msg_buf_size = 0;
+ dev->iamthif_msg_buf_index = 0;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = true;
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ dev->iamthif_timer = 0;
+ dev->iamthif_file_object = NULL;
+
+ dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+
+ list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
+ list_del(&pos->list);
+
+ if (pos->cl && pos->cl == &dev->iamthif_cl) {
+ status = mei_amthif_send_cmd(dev, pos);
+ if (status) {
+ dev_dbg(&dev->pdev->dev,
+ "amthi write failed status = %d\n",
+ status);
+ return;
+ }
+ break;
+ }
+ }
+}
+
+
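+/**
+ * mei_amthif_poll - the amthif poll function
+ *
+ * @dev: the device structure
+ * @file: pointer to file structure
+ * @wait: pointer to poll_table structure
+ *
+ * returns poll mask
+ */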
+unsigned int mei_amthif_poll(struct mei_device *dev,
+ struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ mutex_unlock(&dev->device_lock);
+ poll_wait(file, &dev->iamthif_cl.wait, wait);
+ mutex_lock(&dev->device_lock);
+ if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+ dev->iamthif_file_object == file) {
+ mask |= (POLLIN | POLLRDNORM);
+ dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+ mei_amthif_run_next_cmd(dev);
+ }
+ return mask;
+}
+
+
+
+/**
+ * mei_amthif_irq_write_complete - writes iamthif message data to the ME
+ *
+ * @dev: the device structure.
+ * @slots: free slots.
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * returns 0, OK; otherwise, error.
+ */
+int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
+ struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
+{
+ struct mei_msg_hdr *mei_hdr;
+ struct mei_cl *cl = cb->cl;
+ size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
+ size_t msg_slots = mei_data2slots(len);
+
+ mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
+ mei_hdr->host_addr = cl->host_client_id;
+ mei_hdr->me_addr = cl->me_client_id;
+ mei_hdr->reserved = 0;
+
+ if (*slots >= msg_slots) {
+ mei_hdr->length = len;
+ mei_hdr->msg_complete = 1;
+ /* Split the message only if we can write the whole host buffer */
+ } else if (*slots == dev->hbuf_depth) {
+ msg_slots = *slots;
+ len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ mei_hdr->length = len;
+ mei_hdr->msg_complete = 0;
+ } else {
+ /* wait for next time the host buffer is empty */
+ return 0;
+ }
+
+ dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
+ mei_hdr->length, mei_hdr->msg_complete);
+
+ *slots -= msg_slots;
+ if (mei_write_message(dev, mei_hdr,
+ dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
+ mei_hdr->length)) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ cl->status = -ENODEV;
+ list_del(&cb->list);
+ return -ENODEV;
+ }
+
+ if (mei_flow_ctrl_reduce(dev, cl))
+ return -ENODEV;
+
+ dev->iamthif_msg_buf_index += mei_hdr->length;
+ cl->status = 0;
+
+ if (mei_hdr->msg_complete) {
+ dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
+ dev->iamthif_flow_control_pending = true;
+
+ /* save iamthif cb sent to amthi client */
+ cb->buf_idx = dev->iamthif_msg_buf_index;
+ dev->iamthif_current_cb = cb;
+
+ list_move_tail(&cb->list, &dev->write_waiting_list.list);
+ }
+
+
+ return 0;
+}
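mei_data2slots() at the top of this function converts a payload length into the number of 32-bit hardware slots, header included; a minimal sketch of the helper as it is assumed to be defined in mei_dev.h (not part of this hunk):

static inline u32 mei_data2slots(size_t length)
{
	/* round the header plus payload up to whole 32-bit slots */
	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
}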
+
+/**
+ * mei_amthif_irq_read_message - read routine after ISR to
+ * handle the read amthi message
+ *
+ * @complete_list: An instance of our list structure
+ * @dev: the device structure
+ * @mei_hdr: header of amthi message
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
+ struct mei_device *dev, struct mei_msg_hdr *mei_hdr)
+{
+ struct mei_cl_cb *cb;
+ unsigned char *buffer;
+
+ BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
+ BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
+
+ buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
+ BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
+
+ mei_read_slots(dev, buffer, mei_hdr->length);
+
+ dev->iamthif_msg_buf_index += mei_hdr->length;
+
+ if (!mei_hdr->msg_complete)
+ return 0;
+
+ dev_dbg(&dev->pdev->dev,
+ "amthi_message_buffer_index =%d\n",
+ mei_hdr->length);
+
+ dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+ if (!dev->iamthif_current_cb)
+ return -ENODEV;
+
+ cb = dev->iamthif_current_cb;
+ dev->iamthif_current_cb = NULL;
+
+ if (!cb->cl)
+ return -ENODEV;
+
+ dev->iamthif_stall_timer = 0;
+ cb->buf_idx = dev->iamthif_msg_buf_index;
+ cb->read_time = jiffies;
+ if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
+ /* found the iamthif cb */
+ dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
+ dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+ list_add_tail(&cb->list, &complete_list->list);
+ }
+ return 0;
+}
+
+/**
+ * mei_amthif_irq_read - prepares to read amthif data.
+ *
+ * @dev: the device structure.
+ * @slots: free slots.
+ *
+ * returns 0, OK; otherwise, error.
+ */
+int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
+{
+
+ if (((*slots) * sizeof(u32)) < (sizeof(struct mei_msg_hdr)
+ + sizeof(struct hbm_flow_control))) {
+ return -EMSGSIZE;
+ }
+ *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
+ if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
+ dev->iamthif_state = MEI_IAMTHIF_READING;
+ dev->iamthif_flow_control_pending = false;
+ dev->iamthif_msg_buf_index = 0;
+ dev->iamthif_msg_buf_size = 0;
+ dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
+ dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+ return 0;
+}
+
+/**
+ * mei_amthif_complete - complete amthif callback.
+ *
+ * @dev: the device structure.
+ * @cb_pos: callback block.
+ */
+void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+{
+ if (dev->iamthif_canceled != 1) {
+ dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
+ dev->iamthif_stall_timer = 0;
+ memcpy(cb->response_buffer.data,
+ dev->iamthif_msg_buf,
+ dev->iamthif_msg_buf_index);
+ list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+ dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+ dev->iamthif_timer = jiffies;
+ dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
+ dev->iamthif_timer);
+ } else {
+ mei_amthif_run_next_cmd(dev);
+ }
+
+ dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+ wake_up_interruptible(&dev->iamthif_cl.wait);
+}
+
+/**
+ * mei_clear_list - removes all callbacks associated with file
+ * from mei_cb_list
+ *
+ * @dev: device structure.
+ * @file: file structure
+ * @mei_cb_list: callbacks list
+ *
+ * mei_clear_list is called to clear resources associated with a file
+ * when the application calls the close function or presses Ctrl-C
+ *
+ * returns true if callback removed from the list, false otherwise
+ */
+static bool mei_clear_list(struct mei_device *dev,
+ const struct file *file, struct list_head *mei_cb_list)
+{
+ struct mei_cl_cb *cb_pos = NULL;
+ struct mei_cl_cb *cb_next = NULL;
+ bool removed = false;
+
+ /* walk all list members */
+ list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) {
+ /* check if the list member is associated with the file */
+ if (file == cb_pos->file_object) {
+ /* remove member from the list */
+ list_del(&cb_pos->list);
+ /* check if the cb is the current iamthif cb */
+ if (dev->iamthif_current_cb == cb_pos) {
+ dev->iamthif_current_cb = NULL;
+ /* send flow control to iamthif client */
+ mei_send_flow_control(dev, &dev->iamthif_cl);
+ }
+ /* free all allocated buffers */
+ mei_io_cb_free(cb_pos);
+ cb_pos = NULL;
+ removed = true;
+ }
+ }
+ return removed;
+}
+
+/**
+ * mei_clear_lists - removes all callbacks associated with file
+ *
+ * @dev: device structure
+ * @file: file structure
+ *
+ * mei_clear_lists is called to clear resources associated with a file
+ * when the application calls the close function or presses Ctrl-C
+ *
+ * returns true if callback removed from the list, false otherwise
+ */
+static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+{
+ bool removed = false;
+
+ /* remove callbacks associated with a file */
+ mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
+ if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+ removed = true;
+
+ mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
+
+ if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list))
+ removed = true;
+
+ if (mei_clear_list(dev, file, &dev->write_waiting_list.list))
+ removed = true;
+
+ if (mei_clear_list(dev, file, &dev->write_list.list))
+ removed = true;
+
+ /* check if iamthif_current_cb is not NULL */
+ if (dev->iamthif_current_cb && !removed) {
+ /* check file and iamthif current cb association */
+ if (dev->iamthif_current_cb->file_object == file) {
+ /* remove cb */
+ mei_io_cb_free(dev->iamthif_current_cb);
+ dev->iamthif_current_cb = NULL;
+ removed = true;
+ }
+ }
+ return removed;
+}
+
+/**
+* mei_amthif_release - the release function
+*
+* @dev: the device structure
+* @file: pointer to file structure
+*
+* returns 0 on success, <0 on error
+*/
+int mei_amthif_release(struct mei_device *dev, struct file *file)
+{
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+
+ if (dev->iamthif_file_object == file &&
+ dev->iamthif_state != MEI_IAMTHIF_IDLE) {
+
+ dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+ dev->iamthif_state);
+ dev->iamthif_canceled = true;
+ if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
+ dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+ mei_amthif_run_next_cmd(dev);
+ }
+ }
+
+ if (mei_clear_lists(dev, file))
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+ return 0;
+}
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 9700532f02f6..be8ca6b333ca 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -20,16 +20,16 @@
#include <linux/uuid.h>
/*
- * Timeouts
+ * Timeouts in Seconds
*/
-#define MEI_INTEROP_TIMEOUT (HZ * 7)
-#define MEI_CONNECT_TIMEOUT 3 /* at least 2 seconds */
+#define MEI_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
-#define CONNECT_TIMEOUT 15 /* HPS definition */
-#define INIT_CLIENTS_TIMEOUT 15 /* HPS definition */
+#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
-#define IAMTHIF_STALL_TIMER 12 /* seconds */
-#define IAMTHIF_READ_TIMER 10000 /* ms */
+#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
+#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
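With the macros now holding plain second counts, each user converts to jiffies at the call site; a sketch of the conversion helper these hunks rely on, assumed to live in mei_dev.h (not shown in this patch):

static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
{
	/* seconds -> milliseconds -> jiffies */
	return msecs_to_jiffies(sec * MSEC_PER_SEC);
}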
/*
* Internal Clients Number
@@ -293,6 +293,14 @@ struct hbm_props_response {
struct mei_client_properties client_properties;
} __packed;
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @reserved
+ */
struct hbm_client_connect_request {
u8 hbm_cmd;
u8 me_addr;
@@ -300,6 +308,14 @@ struct hbm_client_connect_request {
u8 reserved;
} __packed;
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @status - status of the request
+ */
struct hbm_client_connect_response {
u8 hbm_cmd;
u8 me_addr;
@@ -307,12 +323,6 @@ struct hbm_client_connect_response {
u8 status;
} __packed;
-struct hbm_client_disconnect_request {
- u8 hbm_cmd;
- u8 me_addr;
- u8 host_addr;
- u8 reserved[1];
-} __packed;
#define MEI_FC_MESSAGE_RESERVED_LENGTH 5
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 98f1430e3e14..a54cd5567ca2 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -43,21 +43,6 @@ const char *mei_dev_state_str(int state)
}
-const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
- 0xa8, 0x46, 0xe0, 0xff, 0x65,
- 0x81, 0x4c);
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance io list structure
- * @dev: the device structure
- */
-void mei_io_list_init(struct mei_io_list *list)
-{
- /* initialize our queue list */
- INIT_LIST_HEAD(&list->mei_cb.cb_list);
-}
/**
* mei_io_list_flush - removes list entry belonging to cl.
@@ -65,17 +50,15 @@ void mei_io_list_init(struct mei_io_list *list)
* @list: An instance of our list structure
* @cl: private data of the file object
*/
-void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl)
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
struct mei_cl_cb *pos;
struct mei_cl_cb *next;
- list_for_each_entry_safe(pos, next, &list->mei_cb.cb_list, cb_list) {
- if (pos->file_private) {
- struct mei_cl *cl_tmp;
- cl_tmp = (struct mei_cl *)pos->file_private;
- if (mei_cl_cmp_id(cl, cl_tmp))
- list_del(&pos->cb_list);
+ list_for_each_entry_safe(pos, next, &list->list, list) {
+ if (pos->cl) {
+ if (mei_cl_cmp_id(cl, pos->cl))
+ list_del(&pos->list);
}
}
}
@@ -96,31 +79,14 @@ int mei_cl_flush_queues(struct mei_cl *cl)
mei_io_list_flush(&cl->dev->write_waiting_list, cl);
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_flush(&cl->dev->amthi_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthi_read_complete_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
return 0;
}
/**
- * mei_reset_iamthif_params - initializes mei device iamthif
- *
- * @dev: the device structure
- */
-static void mei_reset_iamthif_params(struct mei_device *dev)
-{
- /* reset iamthif parameters. */
- dev->iamthif_current_cb = NULL;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_canceled = false;
- dev->iamthif_ioctl = false;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
-}
-
-/**
* init_mei_device - allocates and initializes the mei device structure
*
* @pdev: The pci device structure
@@ -144,16 +110,14 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->wd_interface_reg = false;
-
mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
- mei_io_list_init(&dev->amthi_cmd_list);
- mei_io_list_init(&dev->amthi_read_complete_list);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
dev->pdev = pdev;
return dev;
}
@@ -196,7 +160,8 @@ int mei_hw_init(struct mei_device *dev)
if (!dev->recvd_msg) {
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
- dev->recvd_msg, MEI_INTEROP_TIMEOUT);
+ dev->recvd_msg,
+ mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
}
@@ -317,15 +282,13 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
cl_pos->timer_count = 0;
}
/* remove entry if already in list */
- dev_dbg(&dev->pdev->dev, "list del iamthif and wd file list.\n");
- mei_remove_client_from_file_list(dev,
- dev->wd_cl.host_client_id);
+ dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
+ mei_me_cl_unlink(dev, &dev->wd_cl);
- mei_remove_client_from_file_list(dev,
- dev->iamthif_cl.host_client_id);
+ mei_me_cl_unlink(dev, &dev->iamthif_cl);
- mei_reset_iamthif_params(dev);
- dev->extra_write_index = 0;
+ mei_amthif_reset_params(dev);
+ memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
}
dev->me_clients_num = 0;
@@ -351,10 +314,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
}
}
/* remove all waiting requests */
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- list_del(&cb_pos->cb_list);
- mei_free_cb_private(cb_pos);
+ list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
+ list_del(&cb_pos->list);
+ mei_io_cb_free(cb_pos);
}
}
@@ -370,31 +332,26 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
void mei_host_start_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
- struct hbm_host_version_request *host_start_req;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
/* host start message */
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_host_version_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- host_start_req =
- (struct hbm_host_version_request *) &dev->wr_msg_buf[1];
- memset(host_start_req, 0, sizeof(struct hbm_host_version_request));
- host_start_req->hbm_cmd = HOST_START_REQ_CMD;
- host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
- host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
+ start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
dev->recvd_msg = false;
- if (mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
- mei_hdr->length)) {
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->dev_state = MEI_DEV_RESETING;
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_START_MESSAGE;
- dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return ;
}
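mei_hbm_hdr() replaces the repeated hand-filled header blocks in this file; a minimal sketch of what it is expected to do, reconstructed from the fields the removed code used to set (assumption; the helper itself is not part of this hunk):

static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
{
	struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;

	/* HBM bus messages use the reserved host/ME address 0 */
	hdr->host_addr = 0;
	hdr->me_addr = 0;
	hdr->length = length;
	hdr->msg_complete = 1;
	hdr->reserved = 0;
	return hdr;
}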
@@ -408,26 +365,22 @@ void mei_host_start_message(struct mei_device *dev)
void mei_host_enum_clients_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
- struct hbm_host_enum_request *host_enum_req;
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
/* enumerate clients */
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_host_enum_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
- memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
- host_enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- if (mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
- mei_hdr->length)) {
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
+
+ enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
+ memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
dev->dev_state = MEI_DEV_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
mei_reset(dev, 1);
}
dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
- dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
return;
}
@@ -470,56 +423,87 @@ void mei_allocate_me_clients_storage(struct mei_device *dev)
dev->me_clients = clients;
return ;
}
-/**
- * host_client_properties - reads properties for client
- *
- * @dev: the device structure
- *
- * returns:
- * < 0 - Error.
- * = 0 - no more clients.
- * = 1 - still have clients to send properties request.
- */
-int mei_host_client_properties(struct mei_device *dev)
+
+void mei_host_client_init(struct work_struct *work)
{
- struct mei_msg_hdr *mei_header;
- struct hbm_props_request *host_cli_req;
- int b;
- u8 client_num = dev->me_client_presentation_num;
-
- b = dev->me_client_index;
- b = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, b);
- if (b < MEI_CLIENTS_MAX) {
- dev->me_clients[client_num].client_id = b;
- dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
- mei_header = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_header->host_addr = 0;
- mei_header->me_addr = 0;
- mei_header->length = sizeof(struct hbm_props_request);
- mei_header->msg_complete = 1;
- mei_header->reserved = 0;
-
- host_cli_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
- memset(host_cli_req, 0, sizeof(struct hbm_props_request));
-
- host_cli_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- host_cli_req->address = b;
-
- if (mei_write_message(dev, mei_header,
- (unsigned char *)host_cli_req,
- mei_header->length)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
- mei_reset(dev, 1);
- return -EIO;
- }
+ struct mei_device *dev = container_of(work,
+ struct mei_device, init_work);
+ struct mei_client_properties *client_props;
+ int i;
+
+ mutex_lock(&dev->device_lock);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first three client IDs
+ * 0: Reserved for MEI Bus Message communications
+ * 1: Reserved for Watchdog
+ * 2: Reserved for AMTHI
+ */
+ bitmap_set(dev->host_clients_map, 0, 3);
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client_props = &dev->me_clients[i].props;
+
+ if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
+ mei_amthif_host_init(dev);
+ else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+ mei_wd_host_init(dev);
+ }
+
+ dev->dev_state = MEI_DEV_ENABLED;
- dev->init_clients_timer = INIT_CLIENTS_TIMEOUT;
- dev->me_client_index = b;
- return 1;
+ mutex_unlock(&dev->device_lock);
+}
+
+int mei_host_client_enumerate(struct mei_device *dev)
+{
+
+ struct mei_msg_hdr *mei_hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ u8 client_num;
+
+
+ client_num = dev->me_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+ dev->me_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == MEI_CLIENTS_MAX) {
+ schedule_work(&dev->init_work);
+
+ return 0;
}
+ dev->me_clients[client_num].client_id = next_client_index;
+ dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
+ prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
+ mei_hdr->length)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_err(&dev->pdev->dev, "Properties request command failed\n");
+ mei_reset(dev, 1);
+
+ return -EIO;
+ }
+
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->me_client_index = next_client_index;
+
return 0;
}
@@ -557,17 +541,20 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
/**
- * mei_me_cl_update_filext - searches for ME client guid
- * sets client_id in mei_file_private if found
+ * mei_me_cl_link - create link between host and me client and add
+ * me_cl to the list
+ *
* @dev: the device structure
- * @cl: private file structure to set client_id in
- * @cuuid: searched uuid of ME client
- * @client_id: id of host client to be set in file private structure
+ * @cl: link between me and host client associated with opened file descriptor
+ * @cuuid: uuid of ME client
+ * @client_id: id of the host client
*
- * returns ME client index
+ * returns ME client index if ME client was found
+ * -EINVAL on incorrect values
+ * -ENOENT if client not found
*/
-int mei_me_cl_update_filext(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cuuid, u8 host_cl_id)
+int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
+ const uuid_le *cuuid, u8 host_cl_id)
{
int i;
@@ -587,54 +574,22 @@ int mei_me_cl_update_filext(struct mei_device *dev, struct mei_cl *cl,
return -ENOENT;
}
-
/**
- * host_init_iamthif - mei initialization iamthif client.
+ * mei_me_cl_unlink - remove me_cl from the list
*
* @dev: the device structure
- *
+ * @cl: host client to be removed
*/
-void mei_host_init_iamthif(struct mei_device *dev)
+void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
{
- int i;
- unsigned char *msg_buf;
-
- mei_cl_init(&dev->iamthif_cl, dev);
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
-
- /* find ME amthi client */
- i = mei_me_cl_update_filext(dev, &dev->iamthif_cl,
- &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
- if (i < 0) {
- dev_dbg(&dev->pdev->dev, "failed to find iamthif client.\n");
- return;
- }
-
- /* Assign iamthif_mtu to the value received from ME */
-
- dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
- dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n",
- dev->me_clients[i].props.max_msg_length);
-
- kfree(dev->iamthif_msg_buf);
- dev->iamthif_msg_buf = NULL;
-
- /* allocate storage for ME message buffer */
- msg_buf = kcalloc(dev->iamthif_mtu,
- sizeof(unsigned char), GFP_KERNEL);
- if (!msg_buf) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
- return;
- }
-
- dev->iamthif_msg_buf = msg_buf;
-
- if (mei_connect(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
- dev->iamthif_cl.host_client_id = 0;
- } else {
- dev->iamthif_cl.timer_count = CONNECT_TIMEOUT;
+ struct mei_cl *pos, *next;
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if (cl->host_client_id == pos->host_client_id) {
+ dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+ pos->host_client_id, pos->me_client_id);
+ list_del_init(&pos->link);
+ break;
+ }
}
}
@@ -671,9 +626,8 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
*/
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
{
- int rets, err;
- long timeout = 15; /* 15 seconds */
struct mei_cl_cb *cb;
+ int rets, err;
if (!dev || !cl)
return -ENODEV;
@@ -681,13 +635,11 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
if (cl->state != MEI_FILE_DISCONNECTING)
return 0;
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
- INIT_LIST_HEAD(&cb->cb_list);
- cb->file_private = cl;
- cb->major_file_operations = MEI_CLOSE;
+ cb->fop_type = MEI_FOP_CLOSE;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_disconnect(dev, cl)) {
@@ -696,17 +648,17 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
goto free;
}
mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->cb_list, &dev->ctrl_rd_list.mei_cb.cb_list);
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
- list_add_tail(&cb->cb_list,
- &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
}
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(dev->wait_recvd_msg,
- (MEI_FILE_DISCONNECTED == cl->state),
- timeout * HZ);
+ MEI_FILE_DISCONNECTED == cl->state,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (MEI_FILE_DISCONNECTED == cl->state) {
@@ -728,29 +680,7 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
- mei_free_cb_private(cb);
+ mei_io_cb_free(cb);
return rets;
}
-/**
- * mei_remove_client_from_file_list -
- * removes file private data from device file list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_remove_client_from_file_list(struct mei_device *dev,
- u8 host_client_id)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (host_client_id == cl_pos->host_client_id) {
- dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- list_del_init(&cl_pos->link);
- break;
- }
- }
-}
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
index 509c3957ff45..8de854785960 100644
--- a/drivers/misc/mei/interface.c
+++ b/drivers/misc/mei/interface.c
@@ -292,28 +292,23 @@ int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
- struct hbm_flow_control *mei_flow_control;
-
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_flow_control);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
- memset(mei_flow_control, 0, sizeof(*mei_flow_control));
- mei_flow_control->host_addr = cl->host_client_id;
- mei_flow_control->me_addr = cl->me_client_id;
- mei_flow_control->hbm_cmd = MEI_FLOW_CONTROL_CMD;
- memset(mei_flow_control->reserved, 0,
- sizeof(mei_flow_control->reserved));
+ struct hbm_flow_control *flow_ctrl;
+ const size_t len = sizeof(struct hbm_flow_control);
+
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
+
+ flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
+ memset(flow_ctrl, 0, len);
+ flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
+ flow_ctrl->host_addr = cl->host_client_id;
+ flow_ctrl->me_addr = cl->me_client_id;
+ /* FIXME: reserved !? */
+ memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
return mei_write_message(dev, mei_hdr,
- (unsigned char *) mei_flow_control,
- sizeof(struct hbm_flow_control));
+ (unsigned char *) flow_ctrl, len);
}
/**
@@ -352,26 +347,19 @@ int mei_other_client_is_connecting(struct mei_device *dev,
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
- struct hbm_client_disconnect_request *mei_cli_disconnect;
-
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_client_disconnect_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- mei_cli_disconnect =
- (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
- memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
- mei_cli_disconnect->host_addr = cl->host_client_id;
- mei_cli_disconnect->me_addr = cl->me_client_id;
- mei_cli_disconnect->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
- mei_cli_disconnect->reserved[0] = 0;
+ struct hbm_client_connect_request *req;
+ const size_t len = sizeof(struct hbm_client_connect_request);
- return mei_write_message(dev, mei_hdr,
- (unsigned char *) mei_cli_disconnect,
- sizeof(struct hbm_client_disconnect_request));
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
+
+ req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
+ memset(req, 0, len);
+ req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
+ req->host_addr = cl->host_client_id;
+ req->me_addr = cl->me_client_id;
+ req->reserved = 0;
+
+ return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
}
/**
@@ -385,23 +373,16 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
int mei_connect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *mei_cli_connect;
-
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_client_connect_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- mei_cli_connect =
- (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
- mei_cli_connect->host_addr = cl->host_client_id;
- mei_cli_connect->me_addr = cl->me_client_id;
- mei_cli_connect->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
- mei_cli_connect->reserved = 0;
+ struct hbm_client_connect_request *req;
+ const size_t len = sizeof(struct hbm_client_connect_request);
- return mei_write_message(dev, mei_hdr,
- (unsigned char *) mei_cli_connect,
- sizeof(struct hbm_client_connect_request));
+ mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
+
+ req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
+ req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
+ req->host_addr = cl->host_client_id;
+ req->me_addr = cl->me_client_id;
+ req->reserved = 0;
+
+ return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
}
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3533edde04a5..04fa2134615e 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -57,14 +57,14 @@ irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
*/
static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
- if (cb_pos->major_file_operations == MEI_WRITE) {
- mei_free_cb_private(cb_pos);
+ if (cb_pos->fop_type == MEI_FOP_WRITE) {
+ mei_io_cb_free(cb_pos);
cb_pos = NULL;
cl->writing_state = MEI_WRITE_COMPLETE;
if (waitqueue_active(&cl->tx_wait))
wake_up_interruptible(&cl->tx_wait);
- } else if (cb_pos->major_file_operations == MEI_READ &&
+ } else if (cb_pos->fop_type == MEI_FOP_READ &&
MEI_READING == cl->reading_state) {
cl->reading_state = MEI_READ_COMPLETE;
if (waitqueue_active(&cl->rx_wait))
@@ -74,94 +74,6 @@ static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
}
/**
- * _mei_cmpl_iamthif - processes completed iamthif operation.
- *
- * @dev: the device structure.
- * @cb_pos: callback block.
- */
-static void _mei_cmpl_iamthif(struct mei_device *dev, struct mei_cl_cb *cb_pos)
-{
- if (dev->iamthif_canceled != 1) {
- dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
- dev->iamthif_stall_timer = 0;
- memcpy(cb_pos->response_buffer.data,
- dev->iamthif_msg_buf,
- dev->iamthif_msg_buf_index);
- list_add_tail(&cb_pos->cb_list,
- &dev->amthi_read_complete_list.mei_cb.cb_list);
- dev_dbg(&dev->pdev->dev, "amthi read completed.\n");
- dev->iamthif_timer = jiffies;
- dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
- } else {
- mei_run_next_iamthif_cmd(dev);
- }
-
- dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
- wake_up_interruptible(&dev->iamthif_cl.wait);
-}
-
-
-/**
- * mei_irq_thread_read_amthi_message - bottom half read routine after ISR to
- * handle the read amthi message data processing.
- *
- * @complete_list: An instance of our list structure
- * @dev: the device structure
- * @mei_hdr: header of amthi message
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
- struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr)
-{
- struct mei_cl *cl;
- struct mei_cl_cb *cb;
- unsigned char *buffer;
-
- BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
- BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
-
- buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index;
- BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
-
- mei_read_slots(dev, buffer, mei_hdr->length);
-
- dev->iamthif_msg_buf_index += mei_hdr->length;
-
- if (!mei_hdr->msg_complete)
- return 0;
-
- dev_dbg(&dev->pdev->dev,
- "amthi_message_buffer_index =%d\n",
- mei_hdr->length);
-
- dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
- if (!dev->iamthif_current_cb)
- return -ENODEV;
-
- cb = dev->iamthif_current_cb;
- dev->iamthif_current_cb = NULL;
-
- cl = (struct mei_cl *)cb->file_private;
- if (!cl)
- return -ENODEV;
-
- dev->iamthif_stall_timer = 0;
- cb->information = dev->iamthif_msg_buf_index;
- cb->read_time = jiffies;
- if (dev->iamthif_ioctl && cl == &dev->iamthif_cl) {
- /* found the iamthif cb */
- dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
- dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
- list_add_tail(&cb->cb_list,
- &complete_list->mei_cb.cb_list);
- }
- return 0;
-}
-
-/**
* _mei_irq_thread_state_ok - checks if mei header matches file private data
*
* @cl: private data of the file object
@@ -188,7 +100,7 @@ static int _mei_irq_thread_state_ok(struct mei_cl *cl,
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
+static int mei_irq_thread_read_client_message(struct mei_cl_cb *complete_list,
struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
@@ -197,36 +109,36 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
- if (list_empty(&dev->read_list.mei_cb.cb_list))
+ if (list_empty(&dev->read_list.list))
goto quit;
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->read_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ list_for_each_entry_safe(cb_pos, cb_next, &dev->read_list.list, list) {
+ cl = cb_pos->cl;
if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
cl->reading_state = MEI_READING;
- buffer = cb_pos->response_buffer.data + cb_pos->information;
+ buffer = cb_pos->response_buffer.data + cb_pos->buf_idx;
if (cb_pos->response_buffer.size <
- mei_hdr->length + cb_pos->information) {
+ mei_hdr->length + cb_pos->buf_idx) {
dev_dbg(&dev->pdev->dev, "message overflow.\n");
- list_del(&cb_pos->cb_list);
+ list_del(&cb_pos->list);
return -ENOMEM;
}
if (buffer)
mei_read_slots(dev, buffer, mei_hdr->length);
- cb_pos->information += mei_hdr->length;
+ cb_pos->buf_idx += mei_hdr->length;
if (mei_hdr->msg_complete) {
cl->status = 0;
- list_del(&cb_pos->cb_list);
+ list_del(&cb_pos->list);
dev_dbg(&dev->pdev->dev,
"completed read H cl = %d, ME cl = %d, length = %lu\n",
cl->host_client_id,
cl->me_client_id,
- cb_pos->information);
- list_add_tail(&cb_pos->cb_list,
- &complete_list->mei_cb.cb_list);
+ cb_pos->buf_idx);
+
+ list_add_tail(&cb_pos->list,
+ &complete_list->list);
}
break;
@@ -246,37 +158,6 @@ quit:
}
/**
- * _mei_irq_thread_iamthif_read - prepares to read iamthif data.
- *
- * @dev: the device structure.
- * @slots: free slots.
- *
- * returns 0, OK; otherwise, error.
- */
-static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
-{
-
- if (((*slots) * sizeof(u32)) < (sizeof(struct mei_msg_hdr)
- + sizeof(struct hbm_flow_control))) {
- return -EMSGSIZE;
- }
- *slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
- return -EIO;
- }
-
- dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
- dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_flow_control_pending = false;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
- return 0;
-}
-
-/**
* _mei_irq_thread_close - processes close related operation.
*
* @dev: the device structure.
@@ -290,26 +171,24 @@ static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
- struct mei_io_list *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
- sizeof(struct hbm_client_disconnect_request)))
+ sizeof(struct hbm_client_connect_request)))
return -EBADMSG;
- *slots -= mei_data2slots(sizeof(struct hbm_client_disconnect_request));
+ *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
if (mei_disconnect(dev, cl)) {
cl->status = 0;
- cb_pos->information = 0;
- list_move_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
+ cb_pos->buf_idx = 0;
+ list_move_tail(&cb_pos->list, &cmpl_list->list);
return -EMSGSIZE;
} else {
cl->state = MEI_FILE_DISCONNECTING;
cl->status = 0;
- cb_pos->information = 0;
- list_move_tail(&cb_pos->cb_list,
- &dev->ctrl_rd_list.mei_cb.cb_list);
+ cb_pos->buf_idx = 0;
+ list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
}
@@ -356,7 +235,7 @@ static void mei_client_connect_response(struct mei_device *dev,
{
struct mei_cl *cl;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"connect_response:\n"
@@ -373,8 +252,6 @@ static void mei_client_connect_response(struct mei_device *dev,
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
mei_watchdog_register(dev);
- /* next step in the state maching */
- mei_host_init_iamthif(dev);
return;
}
@@ -382,17 +259,16 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ cl = pos->cl;
if (!cl) {
- list_del(&cb_pos->cb_list);
+ list_del(&pos->list);
return;
}
- if (MEI_IOCTL == cb_pos->major_file_operations) {
+ if (pos->fop_type == MEI_FOP_IOCTL) {
if (is_treat_specially_client(cl, rs)) {
- list_del(&cb_pos->cb_list);
+ list_del(&pos->list);
cl->status = 0;
cl->timer_count = 0;
break;
@@ -411,7 +287,7 @@ static void mei_client_disconnect_response(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
dev_dbg(&dev->pdev->dev,
"disconnect_response:\n"
@@ -422,12 +298,11 @@ static void mei_client_disconnect_response(struct mei_device *dev,
rs->host_addr,
rs->status);
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ cl = pos->cl;
if (!cl) {
- list_del(&cb_pos->cb_list);
+ list_del(&pos->list);
return;
}
@@ -435,7 +310,7 @@ static void mei_client_disconnect_response(struct mei_device *dev,
if (cl->host_client_id == rs->host_addr &&
cl->me_client_id == rs->me_addr) {
- list_del(&cb_pos->cb_list);
+ list_del(&pos->list);
if (!rs->status)
cl->state = MEI_FILE_DISCONNECTED;
@@ -537,10 +412,10 @@ static void mei_client_flow_control_response(struct mei_device *dev,
* returns !=0, same; 0,not.
*/
static int same_disconn_addr(struct mei_cl *cl,
- struct hbm_client_disconnect_request *disconn)
+ struct hbm_client_connect_request *req)
{
- return (cl->host_client_id == disconn->host_addr &&
- cl->me_client_id == disconn->me_addr);
+ return (cl->host_client_id == req->host_addr &&
+ cl->me_client_id == req->me_addr);
}
/**
@@ -550,49 +425,38 @@ static int same_disconn_addr(struct mei_cl *cl,
* @disconnect_req: disconnect request bus message.
*/
static void mei_client_disconnect_request(struct mei_device *dev,
- struct hbm_client_disconnect_request *disconnect_req)
+ struct hbm_client_connect_request *disconnect_req)
{
- struct mei_msg_hdr *mei_hdr;
struct hbm_client_connect_response *disconnect_res;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
+ struct mei_cl *pos, *next;
+ const size_t len = sizeof(struct hbm_client_connect_response);
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (same_disconn_addr(cl_pos, disconnect_req)) {
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if (same_disconn_addr(pos, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
- cl_pos->state = MEI_FILE_DISCONNECTED;
- cl_pos->timer_count = 0;
- if (cl_pos == &dev->wd_cl)
+ pos->state = MEI_FILE_DISCONNECTED;
+ pos->timer_count = 0;
+ if (pos == &dev->wd_cl)
dev->wd_pending = false;
- else if (cl_pos == &dev->iamthif_cl)
+ else if (pos == &dev->iamthif_cl)
dev->iamthif_timer = 0;
/* prepare disconnect response */
- mei_hdr =
- (struct mei_msg_hdr *) &dev->ext_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length =
- sizeof(struct hbm_client_connect_response);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
+ (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
disconnect_res =
(struct hbm_client_connect_response *)
- &dev->ext_msg_buf[1];
- disconnect_res->host_addr = cl_pos->host_client_id;
- disconnect_res->me_addr = cl_pos->me_client_id;
+ &dev->wr_ext_msg.data;
disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
+ disconnect_res->host_addr = pos->host_client_id;
+ disconnect_res->me_addr = pos->me_client_id;
disconnect_res->status = 0;
- dev->extra_write_index = 2;
break;
}
}
}
-
/**
* mei_irq_thread_read_bus_message - bottom half read routine after ISR to
* handle the read bus message cmd processing.
@@ -604,16 +468,15 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
struct mei_bus_message *mei_msg;
+ struct mei_me_client *me_client;
struct hbm_host_version_response *version_res;
struct hbm_client_connect_response *connect_res;
struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
struct hbm_flow_control *flow_control;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
- struct hbm_client_disconnect_request *disconnect_req;
- struct hbm_host_stop_request *host_stop_req;
- int res;
-
+ struct hbm_host_stop_request *stop_req;
/* read the message to our buffer */
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
@@ -637,26 +500,20 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
return;
}
} else {
+ u32 *buf = dev->wr_msg_buf;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
dev->version = version_res->me_max_version;
+
/* send stop message */
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_host_stop_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- host_stop_req = (struct hbm_host_stop_request *)
- &dev->wr_msg_buf[1];
-
- memset(host_stop_req,
- 0,
- sizeof(struct hbm_host_stop_request));
- host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- host_stop_req->reason = DRIVER_STOP_REQUEST;
+ mei_hdr = mei_hbm_hdr(&buf[0], len);
+ stop_req = (struct hbm_host_stop_request *)&buf[1];
+ memset(stop_req, 0, len);
+ stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
+ stop_req->reason = DRIVER_STOP_REQUEST;
+
mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_stop_req),
- mei_hdr->length);
+ (unsigned char *)stop_req, len);
dev_dbg(&dev->pdev->dev, "version mismatch.\n");
return;
}
@@ -666,16 +523,14 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
break;
case CLIENT_CONNECT_RES_CMD:
- connect_res =
- (struct hbm_client_connect_response *) mei_msg;
+ connect_res = (struct hbm_client_connect_response *) mei_msg;
mei_client_connect_response(dev, connect_res);
dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
- disconnect_res =
- (struct hbm_client_connect_response *) mei_msg;
+ disconnect_res = (struct hbm_client_connect_response *) mei_msg;
mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
@@ -689,64 +544,37 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
case HOST_CLIENT_PROPERTIES_RES_CMD:
props_res = (struct hbm_props_response *)mei_msg;
+ me_client = &dev->me_clients[dev->me_client_presentation_num];
+
if (props_res->status || !dev->me_clients) {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
mei_reset(dev, 1);
return;
}
- if (dev->me_clients[dev->me_client_presentation_num]
- .client_id == props_res->address) {
- dev->me_clients[dev->me_client_presentation_num].props
- = props_res->client_properties;
+ if (me_client->client_id != props_res->address) {
+ dev_err(&dev->pdev->dev,
+ "Host client properties reply mismatch\n");
+ mei_reset(dev, 1);
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state ==
- MEI_CLIENT_PROPERTIES_MESSAGE) {
- dev->me_client_index++;
- dev->me_client_presentation_num++;
-
- /** Send Client Properties request **/
- res = mei_host_client_properties(dev);
- if (res < 0) {
- dev_dbg(&dev->pdev->dev, "mei_host_client_properties() failed");
- return;
- } else if (!res) {
- /*
- * No more clients to send to.
- * Clear Map for indicating now ME clients
- * with associated host client
- */
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
-
- /*
- * Reserving the first three client IDs
- * Client Id 0 - Reserved for MEI Bus Message communications
- * Client Id 1 - Reserved for Watchdog
- * Client ID 2 - Reserved for AMTHI
- */
- bitmap_set(dev->host_clients_map, 0, 3);
- dev->dev_state = MEI_DEV_ENABLED;
-
- /* if wd initialization fails, initialization the AMTHI client,
- * otherwise the AMTHI client will be initialized after the WD client connect response
- * will be received
- */
- if (mei_wd_host_init(dev))
- mei_host_init_iamthif(dev);
- }
+ return;
+ }
- } else {
- dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message");
- mei_reset(dev, 1);
- return;
- }
- } else {
- dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message for wrong client ID\n");
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+ dev_err(&dev->pdev->dev,
+ "Unexpected client properties reply\n");
mei_reset(dev, 1);
+
return;
}
+
+ me_client->props = props_res->client_properties;
+ dev->me_client_index++;
+ dev->me_client_presentation_num++;
+
+ mei_host_client_enumerate(dev);
+
break;
case HOST_ENUM_RES_CMD:
@@ -760,7 +588,8 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
mei_allocate_me_clients_storage(dev);
dev->init_clients_state =
MEI_CLIENT_PROPERTIES_MESSAGE;
- mei_host_client_properties(dev);
+
+ mei_host_client_enumerate(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
mei_reset(dev, 1);
@@ -776,29 +605,23 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
case CLIENT_DISCONNECT_REQ_CMD:
/* search for client */
- disconnect_req =
- (struct hbm_client_disconnect_request *) mei_msg;
+ disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_client_disconnect_request(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
- /* prepare stop request */
- mei_hdr = (struct mei_msg_hdr *) &dev->ext_msg_buf[0];
- mei_hdr->host_addr = 0;
- mei_hdr->me_addr = 0;
- mei_hdr->length = sizeof(struct hbm_host_stop_request);
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
- host_stop_req =
- (struct hbm_host_stop_request *) &dev->ext_msg_buf[1];
- memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request));
- host_stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- host_stop_req->reason = DRIVER_STOP_REQUEST;
- host_stop_req->reserved[0] = 0;
- host_stop_req->reserved[1] = 0;
- dev->extra_write_index = 2;
- break;
+ {
+ /* prepare stop request: sent in next interrupt event */
+
+ const size_t len = sizeof(struct hbm_host_stop_request);
+ mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
+ stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
+ memset(stop_req, 0, len);
+ stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
+ stop_req->reason = DRIVER_STOP_REQUEST;
+ break;
+ }
default:
BUG();
break;
@@ -821,12 +644,12 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
- struct mei_io_list *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
/* return the cancel routine */
- list_del(&cb_pos->cb_list);
+ list_del(&cb_pos->list);
return -EBADMSG;
}
@@ -834,11 +657,11 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
if (mei_send_flow_control(dev, cl)) {
cl->status = -ENODEV;
- cb_pos->information = 0;
- list_move_tail(&cb_pos->cb_list, &cmpl_list->mei_cb.cb_list);
+ cb_pos->buf_idx = 0;
+ list_move_tail(&cb_pos->list, &cmpl_list->list);
return -ENODEV;
}
- list_move_tail(&cb_pos->cb_list, &dev->read_list.mei_cb.cb_list);
+ list_move_tail(&cb_pos->list, &dev->read_list.list);
return 0;
}
@@ -858,12 +681,12 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
- struct mei_io_list *cmpl_list)
+ struct mei_cl_cb *cmpl_list)
{
if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_connect_request))) {
/* return the cancel routine */
- list_del(&cb_pos->cb_list);
+ list_del(&cb_pos->list);
return -EBADMSG;
}
@@ -871,188 +694,73 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
if (mei_connect(dev, cl)) {
cl->status = -ENODEV;
- cb_pos->information = 0;
- list_del(&cb_pos->cb_list);
+ cb_pos->buf_idx = 0;
+ list_del(&cb_pos->list);
return -ENODEV;
} else {
- list_move_tail(&cb_pos->cb_list,
- &dev->ctrl_rd_list.mei_cb.cb_list);
+ list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
}
return 0;
}
/**
- * _mei_irq_thread_cmpl - processes completed and no-iamthif operation.
+ * mei_irq_thread_write_complete - write messages to device.
*
* @dev: the device structure.
* @slots: free slots.
- * @cb_pos: callback block.
- * @cl: private data of the file object.
+ * @cb: callback block.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
-static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
- struct mei_cl_cb *cb_pos,
- struct mei_cl *cl,
- struct mei_io_list *cmpl_list)
+static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
+ struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
struct mei_msg_hdr *mei_hdr;
+ struct mei_cl *cl = cb->cl;
+ size_t len = cb->request_buffer.size - cb->buf_idx;
+ size_t msg_slots = mei_data2slots(len);
+
+ mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
+ mei_hdr->host_addr = cl->host_client_id;
+ mei_hdr->me_addr = cl->me_client_id;
+ mei_hdr->reserved = 0;
- if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
- (cb_pos->request_buffer.size -
- cb_pos->information))) {
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->length = cb_pos->request_buffer.size -
- cb_pos->information;
+ if (*slots >= msg_slots) {
+ mei_hdr->length = len;
mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
- dev_dbg(&dev->pdev->dev, "cb_pos->request_buffer.size =%d"
- "mei_hdr->msg_complete = %d\n",
- cb_pos->request_buffer.size,
- mei_hdr->msg_complete);
- dev_dbg(&dev->pdev->dev, "cb_pos->information =%lu\n",
- cb_pos->information);
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
- mei_hdr->length);
- *slots -= mei_data2slots(mei_hdr->length);
- if (mei_write_message(dev, mei_hdr,
- (unsigned char *)
- (cb_pos->request_buffer.data +
- cb_pos->information),
- mei_hdr->length)) {
- cl->status = -ENODEV;
- list_move_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- return -ENODEV;
- } else {
- if (mei_flow_ctrl_reduce(dev, cl))
- return -ENODEV;
- cl->status = 0;
- cb_pos->information += mei_hdr->length;
- list_move_tail(&cb_pos->cb_list,
- &dev->write_waiting_list.mei_cb.cb_list);
- }
+ /* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
- /* buffer is still empty */
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->length =
- (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ msg_slots = *slots;
+ len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
+ mei_hdr->length = len;
mei_hdr->msg_complete = 0;
- mei_hdr->reserved = 0;
- *slots -= mei_data2slots(mei_hdr->length);
- if (mei_write_message(dev, mei_hdr,
- (unsigned char *)
- (cb_pos->request_buffer.data +
- cb_pos->information),
- mei_hdr->length)) {
- cl->status = -ENODEV;
- list_move_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- return -ENODEV;
- } else {
- cb_pos->information += mei_hdr->length;
- dev_dbg(&dev->pdev->dev,
- "cb_pos->request_buffer.size =%d"
- " mei_hdr->msg_complete = %d\n",
- cb_pos->request_buffer.size,
- mei_hdr->msg_complete);
- dev_dbg(&dev->pdev->dev, "cb_pos->information =%lu\n",
- cb_pos->information);
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
- mei_hdr->length);
- }
- return -EMSGSIZE;
} else {
- return -EBADMSG;
+ /* wait for next time the host buffer is empty */
+ return 0;
}
- return 0;
-}
-
-/**
- * _mei_irq_thread_cmpl_iamthif - processes completed iamthif operation.
- *
- * @dev: the device structure.
- * @slots: free slots.
- * @cb_pos: callback block.
- * @cl: private data of the file object.
- * @cmpl_list: complete list.
- *
- * returns 0, OK; otherwise, error.
- */
-static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
- struct mei_cl_cb *cb_pos,
- struct mei_cl *cl,
- struct mei_io_list *cmpl_list)
-{
- struct mei_msg_hdr *mei_hdr;
-
- if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
- dev->iamthif_msg_buf_size -
- dev->iamthif_msg_buf_index)) {
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->length = dev->iamthif_msg_buf_size -
- dev->iamthif_msg_buf_index;
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
-
- *slots -= mei_data2slots(mei_hdr->length);
-
- if (mei_write_message(dev, mei_hdr,
- (dev->iamthif_msg_buf +
- dev->iamthif_msg_buf_index),
- mei_hdr->length)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- cl->status = -ENODEV;
- list_del(&cb_pos->cb_list);
- return -ENODEV;
- } else {
- if (mei_flow_ctrl_reduce(dev, cl))
- return -ENODEV;
- dev->iamthif_msg_buf_index += mei_hdr->length;
- cb_pos->information = dev->iamthif_msg_buf_index;
- cl->status = 0;
- dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev->iamthif_flow_control_pending = true;
- /* save iamthif cb sent to amthi client */
- dev->iamthif_current_cb = cb_pos;
- list_move_tail(&cb_pos->cb_list,
- &dev->write_waiting_list.mei_cb.cb_list);
+ dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
+ cb->request_buffer.size, cb->buf_idx);
+ dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
+ mei_hdr->length, mei_hdr->msg_complete);
- }
- } else if (*slots == dev->hbuf_depth) {
- /* buffer is still empty */
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->length =
- (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->msg_complete = 0;
- mei_hdr->reserved = 0;
+ *slots -= msg_slots;
+ if (mei_write_message(dev, mei_hdr,
+ cb->request_buffer.data + cb->buf_idx, len)) {
+ cl->status = -ENODEV;
+ list_move_tail(&cb->list, &cmpl_list->list);
+ return -ENODEV;
+ }
- *slots -= mei_data2slots(mei_hdr->length);
+ if (mei_flow_ctrl_reduce(dev, cl))
+ return -ENODEV;
- if (mei_write_message(dev, mei_hdr,
- (dev->iamthif_msg_buf +
- dev->iamthif_msg_buf_index),
- mei_hdr->length)) {
- cl->status = -ENODEV;
- list_del(&cb_pos->cb_list);
- } else {
- dev->iamthif_msg_buf_index += mei_hdr->length;
- }
- return -EMSGSIZE;
- } else {
- return -EBADMSG;
- }
+ cl->status = 0;
+ cb->buf_idx += mei_hdr->length;
+ if (mei_hdr->msg_complete)
+ list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
}
@@ -1067,7 +775,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_read_handler(struct mei_io_list *cmpl_list,
+static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
struct mei_device *dev,
s32 *slots)
{
@@ -1130,8 +838,8 @@ static int mei_irq_thread_read_handler(struct mei_io_list *cmpl_list,
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
mei_hdr->length);
- ret = mei_irq_thread_read_amthi_message(cmpl_list,
- dev, mei_hdr);
+
+ ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
if (ret)
goto end;
@@ -1164,53 +872,51 @@ end:
* mei_irq_thread_write_handler - bottom half write routine after
* ISR to handle the write processing.
*
- * @cmpl_list: An instance of our list structure
* @dev: the device structure
- * @slots: slots to write.
+ * @cmpl_list: An instance of our list structure
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
- struct mei_device *dev,
- s32 *slots)
+static int mei_irq_thread_write_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list)
{
struct mei_cl *cl;
struct mei_cl_cb *pos = NULL, *next = NULL;
- struct mei_io_list *list;
+ struct mei_cl_cb *list;
+ s32 slots;
int ret;
if (!mei_hbuf_is_empty(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
- *slots = mei_hbuf_empty_slots(dev);
- if (*slots <= 0)
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots <= 0)
return -EMSGSIZE;
/* complete all waiting for write CB */
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
list = &dev->write_waiting_list;
- list_for_each_entry_safe(pos, next, &list->mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)pos->file_private;
+ list_for_each_entry_safe(pos, next, &list->list, list) {
+ cl = pos->cl;
if (cl == NULL)
continue;
cl->status = 0;
- list_del(&pos->cb_list);
+ list_del(&pos->list);
if (MEI_WRITING == cl->writing_state &&
- (pos->major_file_operations == MEI_WRITE) &&
- (cl != &dev->iamthif_cl)) {
+ pos->fop_type == MEI_FOP_WRITE &&
+ cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
- list_add_tail(&pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
+ list_add_tail(&pos->list, &cmpl_list->list);
}
if (cl == &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
if (dev->iamthif_flow_control_pending) {
- ret = _mei_irq_thread_iamthif_read(dev, slots);
+ ret = mei_amthif_irq_read(dev, &slots);
if (ret)
return ret;
}
@@ -1222,15 +928,11 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
wake_up_interruptible(&dev->wait_stop_wd);
}
- if (dev->extra_write_index) {
- dev_dbg(&dev->pdev->dev, "extra_write_index =%d.\n",
- dev->extra_write_index);
- mei_write_message(dev,
- (struct mei_msg_hdr *) &dev->ext_msg_buf[0],
- (unsigned char *) &dev->ext_msg_buf[1],
- (dev->extra_write_index - 1) * sizeof(u32));
- *slots -= dev->extra_write_index;
- dev->extra_write_index = 0;
+ if (dev->wr_ext_msg.hdr.length) {
+ mei_write_message(dev, &dev->wr_ext_msg.hdr,
+ dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+ slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
+ dev->wr_ext_msg.hdr.length = 0;
}
if (dev->dev_state == MEI_DEV_ENABLED) {
if (dev->wd_pending &&
@@ -1243,41 +945,43 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
dev->wd_pending = false;
if (dev->wd_state == MEI_WD_RUNNING)
- *slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
+ slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
else
- *slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
+ slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
}
}
/* complete control write list CB */
dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
- list_for_each_entry_safe(pos, next,
- &dev->ctrl_wr_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *) pos->file_private;
+ list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
+ cl = pos->cl;
if (!cl) {
- list_del(&pos->cb_list);
+ list_del(&pos->list);
return -ENODEV;
}
- switch (pos->major_file_operations) {
- case MEI_CLOSE:
+ switch (pos->fop_type) {
+ case MEI_FOP_CLOSE:
/* send disconnect message */
- ret = _mei_irq_thread_close(dev, slots, pos, cl, cmpl_list);
+ ret = _mei_irq_thread_close(dev, &slots, pos,
+ cl, cmpl_list);
if (ret)
return ret;
break;
- case MEI_READ:
+ case MEI_FOP_READ:
/* send flow control message */
- ret = _mei_irq_thread_read(dev, slots, pos, cl, cmpl_list);
+ ret = _mei_irq_thread_read(dev, &slots, pos,
+ cl, cmpl_list);
if (ret)
return ret;
break;
- case MEI_IOCTL:
+ case MEI_FOP_IOCTL:
/* connect message */
if (mei_other_client_is_connecting(dev, cl))
continue;
- ret = _mei_irq_thread_ioctl(dev, slots, pos, cl, cmpl_list);
+ ret = _mei_irq_thread_ioctl(dev, &slots, pos,
+ cl, cmpl_list);
if (ret)
return ret;
@@ -1290,40 +994,26 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
}
/* complete write list CB */
dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
- list_for_each_entry_safe(pos, next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)pos->file_private;
+ list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
+ cl = pos->cl;
if (cl == NULL)
continue;
-
- if (cl != &dev->iamthif_cl) {
- if (mei_flow_ctrl_creds(dev, cl) <= 0) {
- dev_dbg(&dev->pdev->dev,
- "No flow control credentials for client %d, not sending.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl(dev, slots, pos,
- cl, cmpl_list);
- if (ret)
- return ret;
-
- } else if (cl == &dev->iamthif_cl) {
- /* IAMTHIF IOCTL */
- dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
- if (mei_flow_ctrl_creds(dev, cl) <= 0) {
- dev_dbg(&dev->pdev->dev,
- "No flow control credentials for amthi client %d.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl_iamthif(dev, slots, pos,
- cl, cmpl_list);
- if (ret)
- return ret;
-
+ if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control credentials for client %d, not sending.\n",
+ cl->host_client_id);
+ continue;
}
+ if (cl == &dev->iamthif_cl)
+ ret = mei_amthif_irq_write_complete(dev, &slots,
+ pos, cmpl_list);
+ else
+ ret = mei_irq_thread_write_complete(dev, &slots, pos,
+ cmpl_list);
+ if (ret)
+ return ret;
+
}
return 0;
}
@@ -1342,7 +1032,6 @@ void mei_timer(struct work_struct *work)
unsigned long timeout;
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
- struct list_head *amthi_complete_list = NULL;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
@@ -1385,19 +1074,18 @@ void mei_timer(struct work_struct *work)
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
- if (dev->iamthif_current_cb)
- mei_free_cb_private(dev->iamthif_current_cb);
+ mei_io_cb_free(dev->iamthif_current_cb);
+ dev->iamthif_current_cb = NULL;
dev->iamthif_file_object = NULL;
- dev->iamthif_current_cb = NULL;
- mei_run_next_iamthif_cmd(dev);
+ mei_amthif_run_next_cmd(dev);
}
}
if (dev->iamthif_timer) {
timeout = dev->iamthif_timer +
- msecs_to_jiffies(IAMTHIF_READ_TIMER);
+ mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
@@ -1411,25 +1099,22 @@ void mei_timer(struct work_struct *work)
dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");
- amthi_complete_list = &dev->amthi_read_complete_list.
- mei_cb.cb_list;
-
- list_for_each_entry_safe(cb_pos, cb_next, amthi_complete_list, cb_list) {
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->amthif_rd_complete_list.list, list) {
cl_pos = cb_pos->file_object->private_data;
/* Finding the AMTHI entry. */
if (cl_pos == &dev->iamthif_cl)
- list_del(&cb_pos->cb_list);
+ list_del(&cb_pos->list);
}
- if (dev->iamthif_current_cb)
- mei_free_cb_private(dev->iamthif_current_cb);
+ mei_io_cb_free(dev->iamthif_current_cb);
+ dev->iamthif_current_cb = NULL;
dev->iamthif_file_object->private_data = NULL;
dev->iamthif_file_object = NULL;
- dev->iamthif_current_cb = NULL;
dev->iamthif_timer = 0;
- mei_run_next_iamthif_cmd(dev);
+ mei_amthif_run_next_cmd(dev);
}
}
@@ -1451,7 +1136,7 @@ out:
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_io_list complete_list;
+ struct mei_cl_cb complete_list;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
struct mei_cl *cl;
s32 slots;
@@ -1504,17 +1189,17 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
- dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
- slots, dev->extra_write_index);
- while (slots > 0 && !dev->extra_write_index) {
- dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
- slots, dev->extra_write_index);
+ while (slots > 0) {
+ /* we have urgent data to send so break the read */
+ if (dev->wr_ext_msg.hdr.length)
+ break;
+ dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
if (rets)
goto end;
}
- rets = mei_irq_thread_write_handler(&complete_list, dev, &slots);
+ rets = mei_irq_thread_write_handler(dev, &complete_list);
end:
dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
dev->host_hw_state = mei_hcsr_read(dev);
@@ -1531,21 +1216,20 @@ end:
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
- if (list_empty(&complete_list.mei_cb.cb_list))
+ if (list_empty(&complete_list.list))
return IRQ_HANDLED;
- list_for_each_entry_safe(cb_pos, cb_next,
- &complete_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- list_del(&cb_pos->cb_list);
+ list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+ cl = cb_pos->cl;
+ list_del(&cb_pos->list);
if (cl) {
if (cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "completing call back.\n");
_mei_cmpl(cl, cb_pos);
cb_pos = NULL;
} else if (cl == &dev->iamthif_cl) {
- _mei_cmpl_iamthif(dev, cb_pos);
+ mei_amthif_complete(dev, cb_pos);
}
}
}
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
index fcba98eb892e..eb93a1b53b9b 100644
--- a/drivers/misc/mei/iorw.c
+++ b/drivers/misc/mei/iorw.c
@@ -39,6 +39,95 @@
#include "interface.h"
/**
+ * mei_io_cb_free - free mei_cl_cb related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+ if (cb == NULL)
+ return;
+
+ kfree(cb->request_buffer.data);
+ kfree(cb->response_buffer.data);
+ kfree(cb);
+}
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl: mei client
+ * @fp: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+ struct mei_cl_cb *cb;
+
+ cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ mei_io_list_init(cb);
+
+ cb->file_object = fp;
+ cb->cl = cl;
+ cb->buf_idx = 0;
+ return cb;
+}
+
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->request_buffer.data)
+ return -ENOMEM;
+ cb->request_buffer.size = length;
+ return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->response_buffer.data)
+ return -ENOMEM;
+ cb->response_buffer.size = length;
+ return 0;
+}
+
+
+/**
* mei_me_cl_by_id return index to me_clients for client_id
*
* @dev: the device structure
@@ -82,9 +171,7 @@ int mei_ioctl_connect_client(struct file *file,
struct mei_cl_cb *cb;
struct mei_client *client;
struct mei_cl *cl;
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- long timeout = CONNECT_TIMEOUT;
+ long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
int i;
int err;
int rets;
@@ -97,16 +184,14 @@ int mei_ioctl_connect_client(struct file *file,
dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
/* buffered ioctl cb */
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = mei_io_cb_init(cl, file);
if (!cb) {
rets = -ENOMEM;
goto end;
}
- INIT_LIST_HEAD(&cb->cb_list);
- cb->major_file_operations = MEI_IOCTL;
+ cb->fop_type = MEI_FOP_IOCTL;
if (dev->dev_state != MEI_DEV_ENABLED) {
rets = -ENODEV;
@@ -142,21 +227,9 @@ int mei_ioctl_connect_client(struct file *file,
goto end;
}
clear_bit(cl->host_client_id, dev->host_clients_map);
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- if (mei_cl_cmp_id(cl, cl_pos)) {
- dev_dbg(&dev->pdev->dev,
- "remove file private data node host"
- " client = %d, ME client = %d.\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- list_del(&cl_pos->link);
- }
+ mei_me_cl_unlink(dev, cl);
- }
- dev_dbg(&dev->pdev->dev, "free file private data memory.\n");
kfree(cl);
-
cl = NULL;
file->private_data = &dev->iamthif_cl;
@@ -192,25 +265,19 @@ int mei_ioctl_connect_client(struct file *file,
} else {
dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
cl->timer_count = MEI_CONNECT_TIMEOUT;
- cb->file_private = cl;
- list_add_tail(&cb->cb_list,
- &dev->ctrl_rd_list.mei_cb.
- cb_list);
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
}
} else {
dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
- cb->file_private = cl;
dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
- list_add_tail(&cb->cb_list,
- &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(dev->wait_recvd_msg,
(MEI_FILE_CONNECTED == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state),
- timeout * HZ);
+ MEI_FILE_DISCONNECTED == cl->state), timeout);
mutex_lock(&dev->device_lock);
if (MEI_FILE_CONNECTED == cl->state) {
@@ -234,153 +301,7 @@ int mei_ioctl_connect_client(struct file *file,
rets = 0;
end:
dev_dbg(&dev->pdev->dev, "free connect cb memory.");
- kfree(cb);
- return rets;
-}
-
-/**
- * find_amthi_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * returns returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *find_amthi_read_list_entry(
- struct mei_device *dev,
- struct file *file)
-{
- struct mei_cl *cl_temp;
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
-
- list_for_each_entry_safe(pos, next,
- &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
- cl_temp = (struct mei_cl *)pos->file_private;
- if (cl_temp && cl_temp == &dev->iamthif_cl &&
- pos->file_object == file)
- return pos;
- }
- return NULL;
-}
-
-/**
- * amthi_read - read data from AMTHI client
- *
- * @dev: the device structure
- * @if_num: minor number
- * @file: pointer to file object
- * @*ubuf: pointer to user data in user space
- * @length: data length to read
- * @offset: data read offset
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns
- * returned data length on success,
- * zero if no data to read,
- * negative on failure.
- */
-int amthi_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset)
-{
- int rets;
- int wait_ret;
- struct mei_cl_cb *cb = NULL;
- struct mei_cl *cl = file->private_data;
- unsigned long timeout;
- int i;
-
- /* Only Posible if we are in timeout */
- if (!cl || cl != &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "bad file ext.\n");
- return -ETIMEDOUT;
- }
-
- i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
-
- if (i < 0) {
- dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
- return -ENODEV;
- }
- dev_dbg(&dev->pdev->dev, "checking amthi data\n");
- cb = find_amthi_read_list_entry(dev, file);
-
- /* Check for if we can block or not*/
- if (cb == NULL && file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
-
- dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
- while (cb == NULL) {
- /* unlock the Mutex */
- mutex_unlock(&dev->device_lock);
-
- wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
- (cb = find_amthi_read_list_entry(dev, file)));
-
- if (wait_ret)
- return -ERESTARTSYS;
-
- dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
- /* Locking again the Mutex */
- mutex_lock(&dev->device_lock);
- }
-
-
- dev_dbg(&dev->pdev->dev, "Got amthi data\n");
- dev->iamthif_timer = 0;
-
- if (cb) {
- timeout = cb->read_time + msecs_to_jiffies(IAMTHIF_READ_TIMER);
- dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
- timeout);
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(&dev->pdev->dev, "amthi Time out\n");
- /* 15 sec for the message has expired */
- list_del(&cb->cb_list);
- rets = -ETIMEDOUT;
- goto free;
- }
- }
- /* if the whole message will fit remove it from the list */
- if (cb->information >= *offset && length >= (cb->information - *offset))
- list_del(&cb->cb_list);
- else if (cb->information > 0 && cb->information <= *offset) {
- /* end of the message has been reached */
- list_del(&cb->cb_list);
- rets = 0;
- goto free;
- }
- /* else means that not full buffer will be read and do not
- * remove message from deletion list
- */
-
- dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
- cb->response_buffer.size);
- dev_dbg(&dev->pdev->dev, "amthi cb->information - %lu\n",
- cb->information);
-
- /* length is being turncated to PAGE_SIZE, however,
- * the information may be longer */
- length = min_t(size_t, length, (cb->information - *offset));
-
- if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
- rets = -EFAULT;
- else {
- rets = length;
- if ((*offset + length) < cb->information) {
- *offset += length;
- goto out;
- }
- }
-free:
- dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
- *offset = 0;
- mei_free_cb_private(cb);
-out:
+ mei_io_cb_free(cb);
return rets;
}
@@ -396,7 +317,7 @@ out:
int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_cl_cb *cb;
- int rets = 0;
+ int rets;
int i;
if (cl->state != MEI_FILE_CONNECTED)
@@ -405,187 +326,41 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
if (dev->dev_state != MEI_DEV_ENABLED)
return -ENODEV;
- dev_dbg(&dev->pdev->dev, "check if read is pending.\n");
if (cl->read_pending || cl->read_cb) {
dev_dbg(&dev->pdev->dev, "read is pending.\n");
return -EBUSY;
}
+ i = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (i < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ cl->me_client_id);
+ return -ENODEV;
+ }
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ cb = mei_io_cb_init(cl, NULL);
if (!cb)
return -ENOMEM;
- dev_dbg(&dev->pdev->dev, "allocation call back successful. host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
- i = mei_me_cl_by_id(dev, cl->me_client_id);
- if (i < 0) {
- rets = -ENODEV;
- goto unlock;
- }
+ rets = mei_io_cb_alloc_resp_buf(cb,
+ dev->me_clients[i].props.max_msg_length);
+ if (rets)
+ goto err;
- cb->response_buffer.size = dev->me_clients[i].props.max_msg_length;
- cb->response_buffer.data =
- kmalloc(cb->response_buffer.size, GFP_KERNEL);
- if (!cb->response_buffer.data) {
- rets = -ENOMEM;
- goto unlock;
- }
- dev_dbg(&dev->pdev->dev, "allocation call back data success.\n");
- cb->major_file_operations = MEI_READ;
- /* make sure information is zero before we start */
- cb->information = 0;
- cb->file_private = (void *) cl;
+ cb->fop_type = MEI_FOP_READ;
cl->read_cb = cb;
if (dev->mei_host_buffer_is_empty) {
dev->mei_host_buffer_is_empty = false;
if (mei_send_flow_control(dev, cl)) {
rets = -ENODEV;
- goto unlock;
+ goto err;
}
- list_add_tail(&cb->cb_list, &dev->read_list.mei_cb.cb_list);
+ list_add_tail(&cb->list, &dev->read_list.list);
} else {
- list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
return rets;
-unlock:
- mei_free_cb_private(cb);
+err:
+ mei_io_cb_free(cb);
return rets;
}
-/**
- * amthi_write - write iamthif data to amthi client
- *
- * @dev: the device structure
- * @cb: mei call back struct
- *
- * returns 0 on success, <0 on failure.
- */
-int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
-{
- struct mei_msg_hdr mei_hdr;
- int ret;
-
- if (!dev || !cb)
- return -ENODEV;
-
- dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
-
- dev->iamthif_state = MEI_IAMTHIF_WRITING;
- dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
- dev->iamthif_canceled = false;
- dev->iamthif_ioctl = true;
- dev->iamthif_msg_buf_size = cb->request_buffer.size;
- memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
- cb->request_buffer.size);
-
- ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
- if (ret < 0)
- return ret;
-
- if (ret && dev->mei_host_buffer_is_empty) {
- ret = 0;
- dev->mei_host_buffer_is_empty = false;
- if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
- mei_hdr.msg_complete = 0;
- } else {
- mei_hdr.length = cb->request_buffer.size;
- mei_hdr.msg_complete = 1;
- }
-
- mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
- mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
- mei_hdr.reserved = 0;
- dev->iamthif_msg_buf_index += mei_hdr.length;
- if (mei_write_message(dev, &mei_hdr,
- (unsigned char *)(dev->iamthif_msg_buf),
- mei_hdr.length))
- return -ENODEV;
-
- if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
- return -ENODEV;
- dev->iamthif_flow_control_pending = true;
- dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
- dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
- list_add_tail(&cb->cb_list,
- &dev->write_waiting_list.mei_cb.cb_list);
- } else {
- dev_dbg(&dev->pdev->dev, "message does not complete, "
- "so add amthi cb to write list.\n");
- list_add_tail(&cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
- }
- } else {
- if (!(dev->mei_host_buffer_is_empty))
- dev_dbg(&dev->pdev->dev, "host buffer is not empty");
-
- dev_dbg(&dev->pdev->dev, "No flow control credentials, "
- "so add iamthif cb to write list.\n");
- list_add_tail(&cb->cb_list, &dev->write_list.mei_cb.cb_list);
- }
- return 0;
-}
-
-/**
- * iamthif_ioctl_send_msg - send cmd data to amthi client
- *
- * @dev: the device structure
- *
- * returns 0 on success, <0 on failure.
- */
-void mei_run_next_iamthif_cmd(struct mei_device *dev)
-{
- struct mei_cl *cl_tmp;
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
- int status;
-
- if (!dev)
- return;
-
- dev->iamthif_msg_buf_size = 0;
- dev->iamthif_msg_buf_index = 0;
- dev->iamthif_canceled = false;
- dev->iamthif_ioctl = true;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
- dev->iamthif_file_object = NULL;
-
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
-
- list_for_each_entry_safe(pos, next,
- &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
- list_del(&pos->cb_list);
- cl_tmp = (struct mei_cl *)pos->file_private;
-
- if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
- status = amthi_write(dev, pos);
- if (status) {
- dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
- status);
- return;
- }
- break;
- }
- }
-}
-
-/**
- * mei_free_cb_private - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_free_cb_private(struct mei_cl_cb *cb)
-{
- if (cb == NULL)
- return;
-
- kfree(cb->request_buffer.data);
- kfree(cb->response_buffer.data);
- kfree(cb);
-}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e8b0858132c1..43fb52ff98ad 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -90,93 +90,6 @@ static DEFINE_MUTEX(mei_mutex);
/**
- * mei_clear_list - removes all callbacks associated with file
- * from mei_cb_list
- *
- * @dev: device structure.
- * @file: file structure
- * @mei_cb_list: callbacks list
- *
- * mei_clear_list is called to clear resources associated with file
- * when application calls close function or Ctrl-C was pressed
- *
- * returns true if callback removed from the list, false otherwise
- */
-static bool mei_clear_list(struct mei_device *dev,
- struct file *file, struct list_head *mei_cb_list)
-{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
- struct file *file_temp;
- bool removed = false;
-
- /* list all list member */
- list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
- file_temp = (struct file *)cb_pos->file_object;
- /* check if list member associated with a file */
- if (file_temp == file) {
- /* remove member from the list */
- list_del(&cb_pos->cb_list);
- /* check if cb equal to current iamthif cb */
- if (dev->iamthif_current_cb == cb_pos) {
- dev->iamthif_current_cb = NULL;
- /* send flow control to iamthif client */
- mei_send_flow_control(dev, &dev->iamthif_cl);
- }
- /* free all allocated buffers */
- mei_free_cb_private(cb_pos);
- cb_pos = NULL;
- removed = true;
- }
- }
- return removed;
-}
-
-/**
- * mei_clear_lists - removes all callbacks associated with file
- *
- * @dev: device structure
- * @file: file structure
- *
- * mei_clear_lists is called to clear resources associated with file
- * when application calls close function or Ctrl-C was pressed
- *
- * returns true if callback removed from the list, false otherwise
- */
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
-{
- bool removed = false;
-
- /* remove callbacks associated with a file */
- mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
- if (mei_clear_list(dev, file,
- &dev->amthi_read_complete_list.mei_cb.cb_list))
- removed = true;
-
- mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);
-
- if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
- removed = true;
-
- if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
- removed = true;
-
- if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
- removed = true;
-
- /* check if iamthif_current_cb not NULL */
- if (dev->iamthif_current_cb && !removed) {
- /* check file and iamthif current cb association */
- if (dev->iamthif_current_cb->file_object == file) {
- /* remove cb */
- mei_free_cb_private(dev->iamthif_current_cb);
- dev->iamthif_current_cb = NULL;
- removed = true;
- }
- }
- return removed;
-}
-/**
* find_read_list_entry - find read list entry
*
* @dev: device structure
@@ -192,14 +105,9 @@ static struct mei_cl_cb *find_read_list_entry(
struct mei_cl_cb *next = NULL;
dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(pos, next,
- &dev->read_list.mei_cb.cb_list, cb_list) {
- struct mei_cl *cl_temp;
- cl_temp = (struct mei_cl *)pos->file_private;
-
- if (mei_cl_cmp_id(cl, cl_temp))
+ list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
+ if (mei_cl_cmp_id(cl, pos->cl))
return pos;
- }
return NULL;
}
@@ -297,67 +205,51 @@ static int mei_release(struct inode *inode, struct file *file)
dev = cl->dev;
mutex_lock(&dev->device_lock);
- if (cl != &dev->iamthif_cl) {
- if (cl->state == MEI_FILE_CONNECTED) {
- cl->state = MEI_FILE_DISCONNECTING;
- dev_dbg(&dev->pdev->dev,
- "disconnecting client host client = %d, "
- "ME client = %d\n",
- cl->host_client_id,
- cl->me_client_id);
- rets = mei_disconnect_host_client(dev, cl);
- }
- mei_cl_flush_queues(cl);
- dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
+ if (cl == &dev->iamthif_cl) {
+ rets = mei_amthif_release(dev, file);
+ goto out;
+ }
+ if (cl->state == MEI_FILE_CONNECTED) {
+ cl->state = MEI_FILE_DISCONNECTING;
+ dev_dbg(&dev->pdev->dev,
+ "disconnecting client host client = %d, "
+ "ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
+ rets = mei_disconnect_host_client(dev, cl);
+ }
+ mei_cl_flush_queues(cl);
+ dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
+ cl->host_client_id,
+ cl->me_client_id);
+
+ if (dev->open_handle_count > 0) {
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ dev->open_handle_count--;
+ }
+ mei_me_cl_unlink(dev, cl);
- if (dev->open_handle_count > 0) {
- clear_bit(cl->host_client_id, dev->host_clients_map);
- dev->open_handle_count--;
- }
- mei_remove_client_from_file_list(dev, cl->host_client_id);
-
- /* free read cb */
- cb = NULL;
- if (cl->read_cb) {
- cb = find_read_list_entry(dev, cl);
- /* Remove entry from read list */
- if (cb)
- list_del(&cb->cb_list);
-
- cb = cl->read_cb;
- cl->read_cb = NULL;
- }
-
- file->private_data = NULL;
-
- if (cb) {
- mei_free_cb_private(cb);
- cb = NULL;
- }
+ /* free read cb */
+ cb = NULL;
+ if (cl->read_cb) {
+ cb = find_read_list_entry(dev, cl);
+ /* Remove entry from read list */
+ if (cb)
+ list_del(&cb->list);
- kfree(cl);
- } else {
- if (dev->open_handle_count > 0)
- dev->open_handle_count--;
-
- if (dev->iamthif_file_object == file &&
- dev->iamthif_state != MEI_IAMTHIF_IDLE) {
-
- dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
- dev->iamthif_state);
- dev->iamthif_canceled = true;
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
- dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
- mei_run_next_iamthif_cmd(dev);
- }
- }
+ cb = cl->read_cb;
+ cl->read_cb = NULL;
+ }
- if (mei_clear_lists(dev, file))
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ file->private_data = NULL;
+ if (cb) {
+ mei_io_cb_free(cb);
+ cb = NULL;
}
+
+ kfree(cl);
+out:
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -411,20 +303,19 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
}
if (cl == &dev->iamthif_cl) {
- rets = amthi_read(dev, file, ubuf, length, offset);
+ rets = mei_amthif_read(dev, file, ubuf, length, offset);
goto out;
}
- if (cl->read_cb && cl->read_cb->information > *offset) {
+ if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
cb = cl->read_cb;
goto copy_buffer;
- } else if (cl->read_cb && cl->read_cb->information > 0 &&
- cl->read_cb->information <= *offset) {
+ } else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
+ cl->read_cb->buf_idx <= *offset) {
cb = cl->read_cb;
rets = 0;
goto free;
- } else if ((!cl->read_cb || !cl->read_cb->information) &&
- *offset > 0) {
+ } else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
/*Offset needs to be cleaned for contiguous reads*/
*offset = 0;
rets = 0;
@@ -481,16 +372,15 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
copy_buffer:
dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
cb->response_buffer.size);
- dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
- cb->information);
- if (length == 0 || ubuf == NULL || *offset > cb->information) {
+ dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
+ if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
rets = -EMSGSIZE;
goto free;
}
- /* length is being truncated to PAGE_SIZE, however, */
- /* information size may be longer */
- length = min_t(size_t, length, (cb->information - *offset));
+ /* length is being truncated to PAGE_SIZE,
+ * however buf_idx may point beyond that */
+ length = min_t(size_t, length, cb->buf_idx - *offset);
if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
rets = -EFAULT;
@@ -499,15 +389,15 @@ copy_buffer:
rets = length;
*offset += length;
- if ((unsigned long)*offset < cb->information)
+ if ((unsigned long)*offset < cb->buf_idx)
goto out;
free:
cb_pos = find_read_list_entry(dev, cl);
/* Remove entry from read list */
if (cb_pos)
- list_del(&cb_pos->cb_list);
- mei_free_cb_private(cb);
+ list_del(&cb_pos->list);
+ mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
cl->read_pending = 0;
@@ -516,7 +406,6 @@ out:
mutex_unlock(&dev->device_lock);
return rets;
}
-
/**
* mei_write - the write function.
*
@@ -546,23 +435,39 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
- mutex_unlock(&dev->device_lock);
- return -ENODEV;
+ rets = -ENODEV;
+ goto err;
}
+ i = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (i < 0) {
+ rets = -ENODEV;
+ goto err;
+ }
+ if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
+ rets = -EMSGSIZE;
+ goto err;
+ }
+
+ if (cl->state != MEI_FILE_CONNECTED) {
+ rets = -ENODEV;
+ dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
+ cl->host_client_id, cl->me_client_id);
+ goto err;
+ }
if (cl == &dev->iamthif_cl) {
- write_cb = find_amthi_read_list_entry(dev, file);
+ write_cb = mei_amthif_find_read_list_entry(dev, file);
if (write_cb) {
timeout = write_cb->read_time +
- msecs_to_jiffies(IAMTHIF_READ_TIMER);
+ mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
if (time_after(jiffies, timeout) ||
- cl->reading_state == MEI_READ_COMPLETE) {
- *offset = 0;
- list_del(&write_cb->cb_list);
- mei_free_cb_private(write_cb);
- write_cb = NULL;
+ cl->reading_state == MEI_READ_COMPLETE) {
+ *offset = 0;
+ list_del(&write_cb->list);
+ mei_io_cb_free(write_cb);
+ write_cb = NULL;
}
}
}
@@ -572,8 +477,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
*offset = 0;
write_cb = find_read_list_entry(dev, cl);
if (write_cb) {
- list_del(&write_cb->cb_list);
- mei_free_cb_private(write_cb);
+ list_del(&write_cb->list);
+ mei_io_cb_free(write_cb);
write_cb = NULL;
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
@@ -583,24 +488,21 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
*offset = 0;
- write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ write_cb = mei_io_cb_init(cl, file);
if (!write_cb) {
- mutex_unlock(&dev->device_lock);
- return -ENOMEM;
+ dev_err(&dev->pdev->dev, "write cb allocation failed\n");
+ rets = -ENOMEM;
+ goto err;
}
+ rets = mei_io_cb_alloc_req_buf(write_cb, length);
+ if (rets)
+ goto err;
- write_cb->file_object = file;
- write_cb->file_private = cl;
- write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
- rets = -ENOMEM;
- if (!write_cb->request_buffer.data)
- goto unlock_dev;
-
- dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);
+ dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);
- rets = -EFAULT;
- if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
- goto unlock_dev;
+ if (copy_from_user(write_cb->request_buffer.data, ubuf, length)) {
+ rets = -EFAULT;
+ goto err;
+ }
cl->sm_state = 0;
if (length == 4 &&
@@ -612,139 +514,71 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
write_cb->request_buffer.data, 4) == 0)))
cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
- INIT_LIST_HEAD(&write_cb->cb_list);
if (cl == &dev->iamthif_cl) {
- write_cb->response_buffer.data =
- kmalloc(dev->iamthif_mtu, GFP_KERNEL);
- if (!write_cb->response_buffer.data) {
- rets = -ENOMEM;
- goto unlock_dev;
- }
- if (dev->dev_state != MEI_DEV_ENABLED) {
- rets = -ENODEV;
- goto unlock_dev;
- }
- i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
- if (i < 0) {
- rets = -ENODEV;
- goto unlock_dev;
- }
- if (length > dev->me_clients[i].props.max_msg_length ||
- length <= 0) {
- rets = -EMSGSIZE;
- goto unlock_dev;
- }
+ rets = mei_amthif_write(dev, write_cb);
- write_cb->response_buffer.size = dev->iamthif_mtu;
- write_cb->major_file_operations = MEI_IOCTL;
- write_cb->information = 0;
- write_cb->request_buffer.size = length;
- if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
- rets = -ENODEV;
- goto unlock_dev;
- }
-
- if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
- dev->iamthif_state != MEI_IAMTHIF_IDLE) {
- dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
- (int) dev->iamthif_state);
- dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
- list_add_tail(&write_cb->cb_list,
- &dev->amthi_cmd_list.mei_cb.cb_list);
- rets = length;
- } else {
- dev_dbg(&dev->pdev->dev, "call amthi write\n");
- rets = amthi_write(dev, write_cb);
-
- if (rets) {
- dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
- rets);
- goto unlock_dev;
- }
- rets = length;
+ if (rets) {
+ dev_err(&dev->pdev->dev,
+ "amthi write failed with status = %d\n", rets);
+ goto err;
}
mutex_unlock(&dev->device_lock);
- return rets;
+ return length;
}
- write_cb->major_file_operations = MEI_WRITE;
- /* make sure information is zero before we start */
-
- write_cb->information = 0;
- write_cb->request_buffer.size = length;
+ write_cb->fop_type = MEI_FOP_WRITE;
dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
- if (cl->state != MEI_FILE_CONNECTED) {
- rets = -ENODEV;
- dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
- cl->host_client_id,
- cl->me_client_id);
- goto unlock_dev;
- }
- i = mei_me_cl_by_id(dev, cl->me_client_id);
- if (i < 0) {
- rets = -ENODEV;
- goto unlock_dev;
- }
- if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
- rets = -EINVAL;
- goto unlock_dev;
- }
- write_cb->file_private = cl;
-
rets = mei_flow_ctrl_creds(dev, cl);
if (rets < 0)
- goto unlock_dev;
+ goto err;
- if (rets && dev->mei_host_buffer_is_empty) {
- rets = 0;
- dev->mei_host_buffer_is_empty = false;
- if (length > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
- mei_hdr.msg_complete = 0;
- } else {
- mei_hdr.length = length;
- mei_hdr.msg_complete = 1;
- }
- mei_hdr.host_addr = cl->host_client_id;
- mei_hdr.me_addr = cl->me_client_id;
- mei_hdr.reserved = 0;
- dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
- *((u32 *) &mei_hdr));
- if (mei_write_message(dev, &mei_hdr,
- (unsigned char *) (write_cb->request_buffer.data),
- mei_hdr.length)) {
- rets = -ENODEV;
- goto unlock_dev;
- }
+ if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+ write_cb->buf_idx = 0;
+ mei_hdr.msg_complete = 0;
cl->writing_state = MEI_WRITING;
- write_cb->information = mei_hdr.length;
- if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, cl)) {
- rets = -ENODEV;
- goto unlock_dev;
- }
- list_add_tail(&write_cb->cb_list,
- &dev->write_waiting_list.mei_cb.cb_list);
- } else {
- list_add_tail(&write_cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
- }
+ goto out;
+ }
+ dev->mei_host_buffer_is_empty = false;
+ if (length > mei_hbuf_max_data(dev)) {
+ mei_hdr.length = mei_hbuf_max_data(dev);
+ mei_hdr.msg_complete = 0;
} else {
+ mei_hdr.length = length;
+ mei_hdr.msg_complete = 1;
+ }
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
+ dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
+ *((u32 *) &mei_hdr));
+ if (mei_write_message(dev, &mei_hdr,
+ write_cb->request_buffer.data, mei_hdr.length)) {
+ rets = -ENODEV;
+ goto err;
+ }
+ cl->writing_state = MEI_WRITING;
+ write_cb->buf_idx = mei_hdr.length;
- write_cb->information = 0;
- cl->writing_state = MEI_WRITING;
- list_add_tail(&write_cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
+out:
+ if (mei_hdr.msg_complete) {
+ if (mei_flow_ctrl_reduce(dev, cl)) {
+ rets = -ENODEV;
+ goto err;
+ }
+ list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
+ } else {
+ list_add_tail(&write_cb->list, &dev->write_list.list);
}
+
mutex_unlock(&dev->device_lock);
return length;
-unlock_dev:
+err:
mutex_unlock(&dev->device_lock);
- mei_free_cb_private(write_cb);
+ mei_io_cb_free(write_cb);
return rets;
}
@@ -860,15 +694,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
if (cl == &dev->iamthif_cl) {
- mutex_unlock(&dev->device_lock);
- poll_wait(file, &dev->iamthif_cl.wait, wait);
- mutex_lock(&dev->device_lock);
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
- mask |= (POLLIN | POLLRDNORM);
- dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
- mei_run_next_iamthif_cmd(dev);
- }
+ mask = mei_amthif_poll(dev, file, wait);
goto out;
}
@@ -917,7 +743,7 @@ static struct miscdevice mei_misc_device = {
*
* returns true if ME Interface is valid, false otherwise
*/
-static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
+static bool mei_quirk_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u32 reg;
@@ -939,7 +765,7 @@ static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
*
* returns 0 on success, <0 on failure.
*/
-static int __devinit mei_probe(struct pci_dev *pdev,
+static int mei_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mei_device *dev;
@@ -1003,6 +829,8 @@ static int __devinit mei_probe(struct pci_dev *pdev,
goto disable_msi;
}
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+
if (mei_hw_init(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
@@ -1054,7 +882,7 @@ end:
* mei_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
-static void __devexit mei_remove(struct pci_dev *pdev)
+static void mei_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
@@ -1087,8 +915,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
/* remove entry if already in list */
dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
- mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
+ mei_me_cl_unlink(dev, &dev->wd_cl);
+ mei_me_cl_unlink(dev, &dev->iamthif_cl);
dev->iamthif_current_cb = NULL;
dev->me_clients_num = 0;
@@ -1195,8 +1023,8 @@ static struct pci_driver mei_driver = {
.name = KBUILD_MODNAME,
.id_table = mei_pci_tbl,
.probe = mei_probe,
- .remove = __devexit_p(mei_remove),
- .shutdown = __devexit_p(mei_remove),
+ .remove = mei_remove,
+ .shutdown = mei_remove,
.driver.pm = MEI_PM_OPS,
};
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index adb35fb9281c..25da04549d04 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/watchdog.h>
+#include <linux/poll.h>
#include <linux/mei.h>
#include "hw.h"
@@ -125,13 +126,20 @@ enum mei_wd_states {
MEI_WD_STOPPING,
};
-/* MEI CB */
-enum mei_cb_major_types {
- MEI_READ = 0,
- MEI_WRITE,
- MEI_IOCTL,
- MEI_OPEN,
- MEI_CLOSE
+/**
+ * enum mei_cb_file_ops - file operation associated with the callback
+ * @MEI_FOP_READ - read
+ * @MEI_FOP_WRITE - write
+ * @MEI_FOP_IOCTL - ioctl
+ * @MEI_FOP_OPEN - open
+ * @MEI_FOP_CLOSE - close
+ */
+enum mei_cb_file_ops {
+ MEI_FOP_READ = 0,
+ MEI_FOP_WRITE,
+ MEI_FOP_IOCTL,
+ MEI_FOP_OPEN,
+ MEI_FOP_CLOSE
};
/*
@@ -143,13 +151,21 @@ struct mei_message_data {
};
+struct mei_cl;
+
+/**
+ * struct mei_cl_cb - file operation callback structure
+ *
+ * @cl - file client who is running this operation
+ * @fop_type - file operation type
+ */
struct mei_cl_cb {
- struct list_head cb_list;
- enum mei_cb_major_types major_file_operations;
- void *file_private;
+ struct list_head list;
+ struct mei_cl *cl;
+ enum mei_cb_file_ops fop_type;
struct mei_message_data request_buffer;
struct mei_message_data response_buffer;
- unsigned long information;
+ unsigned long buf_idx;
unsigned long read_time;
struct file *file_object;
};
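
With the cb now carrying its owning client and file-operation type directly, a write request can be assembled with the mei_io_cb_* helpers declared below. A hedged sketch of the sequence mei_write() follows above (error values and locking abbreviated):

	struct mei_cl_cb *cb;
	int rets;

	cb = mei_io_cb_init(cl, file);		/* records cb->cl and cb->file_object */
	if (!cb)
		return -ENOMEM;

	rets = mei_io_cb_alloc_req_buf(cb, length);
	if (rets) {
		mei_io_cb_free(cb);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;		/* was major_file_operations */
	list_add_tail(&cb->list, &dev->write_list.list);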
@@ -175,29 +191,23 @@ struct mei_cl {
struct mei_cl_cb *read_cb;
};
-struct mei_io_list {
- struct mei_cl_cb mei_cb;
-};
-
/**
- * struct mei_deive - MEI private device struct
+ * struct mei_device - MEI private device struct
* @hbuf_depth - depth of host(write) buffer
+ * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
/*
* lists of queues
*/
- /* array of pointers to aio lists */
- struct mei_io_list read_list; /* driver read queue */
- struct mei_io_list write_list; /* driver write queue */
- struct mei_io_list write_waiting_list; /* write waiting queue */
- struct mei_io_list ctrl_wr_list; /* managed write IOCTL list */
- struct mei_io_list ctrl_rd_list; /* managed read IOCTL list */
- struct mei_io_list amthi_cmd_list; /* amthi list for cmd waiting */
-
- /* driver managed amthi list for reading completed amthi cmd data */
- struct mei_io_list amthi_read_complete_list;
+ /* array of pointers to aio lists */
+ struct mei_cl_cb read_list; /* driver read queue */
+ struct mei_cl_cb write_list; /* driver write queue */
+ struct mei_cl_cb write_waiting_list; /* write waiting queue */
+ struct mei_cl_cb ctrl_wr_list; /* managed write IOCTL list */
+ struct mei_cl_cb ctrl_rd_list; /* managed read IOCTL list */
+
/*
* list of files
*/
@@ -235,11 +245,13 @@ struct mei_device {
u16 init_clients_timer;
bool need_reset;
- u32 extra_write_index;
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
- u32 wr_msg_buf[128]; /* used for control messages */
- u32 ext_msg_buf[8]; /* for control responses */
u32 rd_msg_hdr;
+ u32 wr_msg_buf[128]; /* used for control messages */
+ struct {
+ struct mei_msg_hdr hdr;
+ unsigned char data[4]; /* All HBM messages are 4 bytes */
+ } wr_ext_msg; /* for control responses */
struct hbm_version version;
@@ -253,12 +265,15 @@ struct mei_device {
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
- bool wd_interface_reg;
bool wd_pending;
u16 wd_timeout;
unsigned char wd_data[MEI_WD_START_MSG_SIZE];
+ /* amthif list for cmd waiting */
+ struct mei_cl_cb amthif_cmd_list;
+ /* driver managed amthif list for reading completed amthif cmd data */
+ struct mei_cl_cb amthif_rd_complete_list;
struct file *iamthif_file_object;
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
@@ -272,8 +287,15 @@ struct mei_device {
bool iamthif_flow_control_pending;
bool iamthif_ioctl;
bool iamthif_canceled;
+
+ struct work_struct init_work;
};
+static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
+{
+ return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
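+
The new mei_secs_to_jiffies() only wraps msecs_to_jiffies() for second-granularity timeouts; the AMTHIF read-timeout check in mei_write() above is the typical caller. Illustrative usage, assuming MEI_IAMTHIF_READ_TIMER is a timeout expressed in seconds:

	unsigned long timeout;

	timeout = cb->read_time + mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
	if (time_after(jiffies, timeout)) {
		/* the pending amthif read went stale, drop it */
		list_del(&cb->list);
		mei_io_cb_free(cb);
	}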
/*
* mei init function prototypes
@@ -284,21 +306,34 @@ int mei_hw_init(struct mei_device *dev);
int mei_task_initialize_clients(void *data);
int mei_initialize_clients(struct mei_device *dev);
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_remove_client_from_file_list(struct mei_device *dev, u8 host_client_id);
-void mei_host_init_iamthif(struct mei_device *dev);
void mei_allocate_me_clients_storage(struct mei_device *dev);
-int mei_me_cl_update_filext(struct mei_device *dev, struct mei_cl *cl,
+int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
const uuid_le *cguid, u8 host_client_id);
+void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
/*
- * MEI IO List Functions
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: an instance of the cl callback structure used as the list head
*/
-void mei_io_list_init(struct mei_io_list *list);
-void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl);
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+ INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
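
Since struct mei_io_list is gone, every driver queue is now just a struct mei_cl_cb acting as a list head. A hedged sketch of how the queues are expected to be set up and drained (the actual init calls live in the device setup path, which is not part of this hunk):

	/* at device setup: turn each queue head into an empty list */
	mei_io_list_init(&dev->read_list);
	mei_io_list_init(&dev->write_list);
	mei_io_list_init(&dev->write_waiting_list);
	mei_io_list_init(&dev->ctrl_wr_list);
	mei_io_list_init(&dev->ctrl_rd_list);

	/* on client disconnect: drop every cb this client still owns */
	mei_io_list_flush(&dev->write_list, cl);
	mei_io_list_flush(&dev->write_waiting_list, cl);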
/*
* MEI ME Client Functions
@@ -330,7 +365,8 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
*/
void mei_host_start_message(struct mei_device *dev);
void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_properties(struct mei_device *dev);
+int mei_host_client_enumerate(struct mei_device *dev);
+void mei_host_client_init(struct work_struct *work);
/*
* MEI interrupt functions prototype
@@ -347,18 +383,40 @@ int mei_ioctl_connect_client(struct file *file,
int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
-int amthi_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
-int amthi_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset);
+/*
+ * AMTHIF - AMT Host Interface Functions
+ */
+void mei_amthif_reset_params(struct mei_device *dev);
+
+void mei_amthif_host_init(struct mei_device *dev);
+
+int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
-struct mei_cl_cb *find_amthi_read_list_entry(struct mei_device *dev,
+int mei_amthif_read(struct mei_device *dev, struct file *file,
+ char __user *ubuf, size_t length, loff_t *offset);
+
+unsigned int mei_amthif_poll(struct mei_device *dev,
+ struct file *file, poll_table *wait);
+
+int mei_amthif_release(struct mei_device *dev, struct file *file);
+
+struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
struct file *file);
-void mei_run_next_iamthif_cmd(struct mei_device *dev);
+void mei_amthif_run_next_cmd(struct mei_device *dev);
+
-void mei_free_cb_private(struct mei_cl_cb *priv_cb);
+int mei_amthif_read_message(struct mei_cl_cb *complete_list,
+ struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
+int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
+ struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
+
+void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
+int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
+ struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
+int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
* Register Access Function
@@ -437,4 +495,15 @@ void mei_csr_clear_his(struct mei_device *dev);
void mei_enable_interrupts(struct mei_device *dev);
void mei_disable_interrupts(struct mei_device *dev);
+static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+{
+ struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
+ hdr->host_addr = 0;
+ hdr->me_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+ return hdr;
+}
+
#endif
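
mei_hbm_hdr() fills a host-bus-message header (host and ME address 0, message complete) in place inside a caller-supplied u32 buffer. A hedged sketch of the intended use with the wr_msg_buf scratch area declared above; the payload layout and the hand-off to mei_write_message() are assumptions based on the callers shown earlier, not part of this hunk:

	struct mei_msg_hdr *hdr;
	unsigned char *payload;

	hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], 4);	/* 4-byte HBM request */
	payload = (unsigned char *)&dev->wr_msg_buf[1];	/* data follows the header */
	/* fill the 4 payload bytes, then pass hdr and payload to mei_write_message() */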
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index d96c537f046f..9299a8c29a6f 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -62,6 +62,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*/
int mei_wd_host_init(struct mei_device *dev)
{
+ int id;
mei_cl_init(&dev->wd_cl, dev);
/* look for WD client and connect to it */
@@ -69,12 +70,11 @@ int mei_wd_host_init(struct mei_device *dev)
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
- /* find ME WD client */
- mei_me_cl_update_filext(dev, &dev->wd_cl,
+ /* Connect WD ME client to the host client */
+ id = mei_me_cl_link(dev, &dev->wd_cl,
&mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
- dev_dbg(&dev->pdev->dev, "wd: check client\n");
- if (MEI_FILE_CONNECTING != dev->wd_cl.state) {
+ if (id < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
return -ENOENT;
}
@@ -85,7 +85,7 @@ int mei_wd_host_init(struct mei_device *dev)
dev->wd_cl.host_client_id = 0;
return -EIO;
}
- dev->wd_cl.timer_count = CONNECT_TIMEOUT;
+ dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
return 0;
}
@@ -360,23 +360,20 @@ void mei_watchdog_register(struct mei_device *dev)
if (watchdog_register_device(&amt_wd_dev)) {
dev_err(&dev->pdev->dev,
"wd: unable to register watchdog device.\n");
- dev->wd_interface_reg = false;
return;
}
dev_dbg(&dev->pdev->dev,
"wd: successfully register watchdog interface.\n");
- dev->wd_interface_reg = true;
watchdog_set_drvdata(&amt_wd_dev, dev);
}
void mei_watchdog_unregister(struct mei_device *dev)
{
- if (!dev->wd_interface_reg)
+ if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
return;
watchdog_set_drvdata(&amt_wd_dev, NULL);
watchdog_unregister_device(&amt_wd_dev);
- dev->wd_interface_reg = false;
}
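
With wd_interface_reg removed, "is the watchdog registered" is inferred from whether amt_wd_dev carries drvdata; collected from the two hunks above, the pattern is roughly:

	/* after a successful watchdog_register_device() */
	watchdog_set_drvdata(&amt_wd_dev, dev);

	/* unregister path: bail out if we never registered */
	if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
		return;
	watchdog_set_drvdata(&amt_wd_dev, NULL);
	watchdog_unregister_device(&amt_wd_dev);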
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index c9f20dae1855..931e635aa491 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -666,7 +666,7 @@ static struct bin_attribute pch_bin_attr = {
.write = pch_phub_bin_write,
};
-static int __devinit pch_phub_probe(struct pci_dev *pdev,
+static int pch_phub_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int retval;
@@ -819,7 +819,7 @@ err_pci_enable_dev:
return ret;
}
-static void __devexit pch_phub_remove(struct pci_dev *pdev)
+static void pch_phub_remove(struct pci_dev *pdev)
{
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
@@ -888,7 +888,7 @@ static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
.id_table = pch_phub_pcidev_id,
.probe = pch_phub_probe,
- .remove = __devexit_p(pch_phub_remove),
+ .remove = pch_phub_remove,
.suspend = pch_phub_suspend,
.resume = pch_phub_resume
};
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 21b28fc6d912..68b7c773d2cf 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -324,7 +324,7 @@ static irqreturn_t phantom_isr(int irq, void *data)
* Init and deinit driver
*/
-static unsigned int __devinit phantom_get_free(void)
+static unsigned int phantom_get_free(void)
{
unsigned int i;
@@ -335,7 +335,7 @@ static unsigned int __devinit phantom_get_free(void)
return i;
}
-static int __devinit phantom_probe(struct pci_dev *pdev,
+static int phantom_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct phantom_device *pht;
@@ -435,7 +435,7 @@ err:
return retval;
}
-static void __devexit phantom_remove(struct pci_dev *pdev)
+static void phantom_remove(struct pci_dev *pdev)
{
struct phantom_device *pht = pci_get_drvdata(pdev);
unsigned int minor = MINOR(pht->cdev.dev);
@@ -487,7 +487,7 @@ static int phantom_resume(struct pci_dev *pdev)
#define phantom_resume NULL
#endif
-static struct pci_device_id phantom_pci_tbl[] __devinitdata = {
+static struct pci_device_id phantom_pci_tbl[] = {
{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
.subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
.class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
@@ -499,7 +499,7 @@ static struct pci_driver phantom_pci_driver = {
.name = "phantom",
.id_table = phantom_pci_tbl,
.probe = phantom_probe,
- .remove = __devexit_p(phantom_remove),
+ .remove = phantom_remove,
.suspend = phantom_suspend,
.resume = phantom_resume
};
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 4999b34b7a60..f84ff0c06035 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -76,7 +76,7 @@ struct pti_dev {
*/
static DEFINE_MUTEX(alloclock);
-static const struct pci_device_id pci_ids[] __devinitconst = {
+static const struct pci_device_id pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
{0}
};
@@ -796,7 +796,7 @@ static const struct tty_port_operations tty_port_ops = {
* 0 for success
* otherwise, error
*/
-static int __devinit pti_pci_probe(struct pci_dev *pdev,
+static int pti_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int a;
@@ -879,14 +879,17 @@ err:
* PCI bus.
* @pdev: variable containing pci info of PTI.
*/
-static void __devexit pti_pci_remove(struct pci_dev *pdev)
+static void pti_pci_remove(struct pci_dev *pdev)
{
struct pti_dev *drv_data = pci_get_drvdata(pdev);
+ unsigned int a;
unregister_console(&pti_console);
- tty_unregister_device(pti_tty_driver, 0);
- tty_unregister_device(pti_tty_driver, 1);
+ for (a = 0; a < PTITTY_MINOR_NUM; a++) {
+ tty_unregister_device(pti_tty_driver, a);
+ tty_port_destroy(&drv_data->port[a]);
+ }
iounmap(drv_data->pti_ioaddr);
pci_set_drvdata(pdev, NULL);
@@ -901,7 +904,7 @@ static struct pci_driver pti_pci_driver = {
.name = PCINAME,
.id_table = pci_ids,
.probe = pti_pci_probe,
- .remove = __devexit_p(pti_pci_remove),
+ .remove = pti_pci_remove,
};
/**
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8d082b46426b..d971817182f7 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -53,6 +53,10 @@
#include <linux/kthread.h>
#include "xpc.h"
+#ifdef CONFIG_X86_64
+#include <asm/traps.h>
+#endif
+
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
return NOTIFY_DONE;
}
+/* Used to only allow one cpu to complete disconnect */
+static unsigned int xpc_die_disconnecting;
+
/*
* Notify other partitions to deactivate from us by first disengaging from all
* references to our memory.
@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
long keep_waiting;
long wait_to_print;
+ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
+ return;
+
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
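
The cmpxchg() above is a one-shot gate: the first CPU that swaps 0 for 1 performs the deactivation, and every later or concurrent caller sees the old non-zero value and returns. The same pattern in isolation, as a sketch:

static unsigned int once_flag;

static void run_once(void)
{
	/* cmpxchg returns the old value: non-zero means another cpu won */
	if (cmpxchg(&once_flag, 0, 1))
		return;

	/* work that must happen on exactly one cpu goes here */
}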
@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
* about the lack of a heartbeat.
*/
static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64 /* !!! temporary kludge */
switch (event) {
@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
break;
}
#else
- xpc_die_deactivate();
+ struct die_args *die_args = _die_args;
+
+ switch (event) {
+ case DIE_TRAP:
+ if (die_args->trapnr == X86_TRAP_DF)
+ xpc_die_deactivate();
+
+ if (((die_args->trapnr == X86_TRAP_MF) ||
+ (die_args->trapnr == X86_TRAP_XF)) &&
+ !user_mode_vm(die_args->regs))
+ xpc_die_deactivate();
+
+ break;
+ case DIE_INT3:
+ case DIE_DEBUG:
+ break;
+ case DIE_OOPS:
+ case DIE_GPF:
+ default:
+ xpc_die_deactivate();
+ }
#endif
return NOTIFY_DONE;
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index 123ed98eec3e..7deb25dc86a7 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -711,7 +711,7 @@ static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}
-static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
+static int spear_pcie_gadget_probe(struct platform_device *pdev)
{
struct resource *res0, *res1;
unsigned int status = 0;
@@ -853,7 +853,7 @@ err_rel_res0:
return status;
}
-static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
+static int spear_pcie_gadget_remove(struct platform_device *pdev)
{
struct resource *res0, *res1;
static struct pcie_gadget_target *target;
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 46937b107261..b90a2241d79c 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -511,7 +511,6 @@ long st_register(struct st_proto_s *new_proto)
unsigned long flags = 0;
st_kim_ref(&st_gdata, 0);
- pr_info("%s(%d) ", __func__, new_proto->chnl_id);
if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
|| new_proto->reg_complete_cb == NULL) {
pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 04a819944f6b..83269f1d16e3 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
if (pdata->chip_enable)
pdata->chip_enable(kim_gdata);
+ /* Configure BT nShutdown to HIGH state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(5); /* FIXME: a proper toggle */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(100);
/* re-initialize the completion */
INIT_COMPLETION(kim_gdata->ldisc_installed);
/* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
* (b) upon failure to either install ldisc or download firmware.
* The function is responsible to (a) notify UIM about un-installation,
* (b) flush UART if the ldisc was installed.
- * (c) invoke platform's chip disabling routine.
+ * (c) reset BT_EN - pull down nshutdown at the end.
+ * (d) invoke platform's chip disabling routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
err = -ETIMEDOUT;
}
+ /* By default configure BT nShutdown to LOW state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
/* platform specific disable */
if (pdata->chip_disable)
pdata->chip_disable(kim_gdata);
@@ -705,9 +718,9 @@ static const struct file_operations list_debugfs_fops = {
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
- long status;
struct kim_data_s *kim_gdata;
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ int err;
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
@@ -724,24 +737,39 @@ static int kim_probe(struct platform_device *pdev)
}
dev_set_drvdata(&pdev->dev, kim_gdata);
- status = st_core_init(&kim_gdata->core_data);
- if (status != 0) {
+ err = st_core_init(&kim_gdata->core_data);
+ if (err != 0) {
pr_err(" ST core init failed");
- return -EIO;
+ err = -EIO;
+ goto err_core_init;
}
/* refer to itself */
kim_gdata->core_data->kim_data = kim_gdata;
+ /* Claim the chip enable nShutdown gpio from the system */
+ kim_gdata->nshutdown = pdata->nshutdown_gpio;
+ err = gpio_request(kim_gdata->nshutdown, "kim");
+ if (unlikely(err)) {
+ pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+ return err;
+ }
+
+ /* Configure nShutdown GPIO as output=0 */
+ err = gpio_direction_output(kim_gdata->nshutdown, 0);
+ if (unlikely(err)) {
+ pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+ return err;
+ }
/* get reference of pdev for request_firmware
*/
kim_gdata->kim_pdev = pdev;
init_completion(&kim_gdata->kim_rcvd);
init_completion(&kim_gdata->ldisc_installed);
- status = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp);
- if (status) {
+ err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp);
+ if (err) {
pr_err("failed to create sysfs entries");
- return status;
+ goto err_sysfs_group;
}
/* copying platform data */
@@ -753,8 +781,8 @@ static int kim_probe(struct platform_device *pdev)
kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
if (IS_ERR(kim_debugfs_dir)) {
pr_err(" debugfs entries creation failed ");
- kim_debugfs_dir = NULL;
- return -EIO;
+ err = -EIO;
+ goto err_debugfs_dir;
}
debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
@@ -763,14 +791,33 @@ static int kim_probe(struct platform_device *pdev)
kim_gdata, &list_debugfs_fops);
pr_info(" debugfs entries created ");
return 0;
+
+err_debugfs_dir:
+ sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
+
+err_sysfs_group:
+ st_core_exit(kim_gdata->core_data);
+
+err_core_init:
+ kfree(kim_gdata);
+
+ return err;
}
static int kim_remove(struct platform_device *pdev)
{
+ /* free the GPIOs requested */
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
kim_gdata = dev_get_drvdata(&pdev->dev);
+ /* Free the Bluetooth/FM/GPIO
+ * nShutdown gpio from the system
+ */
+ gpio_free(pdata->nshutdown_gpio);
+ pr_info("nshutdown GPIO Freed");
+
debugfs_remove_recursive(kim_debugfs_dir);
sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
pr_info("sysfs entries removed");
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
index 5acbba120de0..1d86407189eb 100644
--- a/drivers/misc/ti_dac7512.c
+++ b/drivers/misc/ti_dac7512.c
@@ -54,7 +54,7 @@ static const struct attribute_group dac7512_attr_group = {
.attrs = dac7512_attributes,
};
-static int __devinit dac7512_probe(struct spi_device *spi)
+static int dac7512_probe(struct spi_device *spi)
{
int ret;
@@ -67,7 +67,7 @@ static int __devinit dac7512_probe(struct spi_device *spi)
return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
}
-static int __devexit dac7512_remove(struct spi_device *spi)
+static int dac7512_remove(struct spi_device *spi)
{
sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
return 0;
@@ -79,7 +79,7 @@ static struct spi_driver dac7512_driver = {
.owner = THIS_MODULE,
},
.probe = dac7512_probe,
- .remove = __devexit_p(dac7512_remove),
+ .remove = dac7512_remove,
};
module_spi_driver(dac7512_driver);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 0beb298a17dd..1e7bc0eb081e 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -347,7 +347,7 @@ static int tsl2550_init_client(struct i2c_client *client)
*/
static struct i2c_driver tsl2550_driver;
-static int __devinit tsl2550_probe(struct i2c_client *client,
+static int tsl2550_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
@@ -405,7 +405,7 @@ exit:
return err;
}
-static int __devexit tsl2550_remove(struct i2c_client *client)
+static int tsl2550_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
@@ -450,7 +450,7 @@ static struct i2c_driver tsl2550_driver = {
.suspend = tsl2550_suspend,
.resume = tsl2550_resume,
.probe = tsl2550_probe,
- .remove = __devexit_p(tsl2550_remove),
+ .remove = tsl2550_remove,
.id_table = tsl2550_id,
};
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index d2339ea37815..bd57a11acc79 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -66,8 +66,6 @@ struct uart_icount {
struct sdio_uart_port {
struct tty_port port;
- struct kref kref;
- struct tty_struct *tty;
unsigned int index;
struct sdio_func *func;
struct mutex func_lock;
@@ -93,7 +91,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
{
int index, ret = -EBUSY;
- kref_init(&port->kref);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
@@ -123,23 +120,15 @@ static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
spin_lock(&sdio_uart_table_lock);
port = sdio_uart_table[index];
if (port)
- kref_get(&port->kref);
+ tty_port_get(&port->port);
spin_unlock(&sdio_uart_table_lock);
return port;
}
-static void sdio_uart_port_destroy(struct kref *kref)
-{
- struct sdio_uart_port *port =
- container_of(kref, struct sdio_uart_port, kref);
- kfifo_free(&port->xmit_fifo);
- kfree(port);
-}
-
static void sdio_uart_port_put(struct sdio_uart_port *port)
{
- kref_put(&port->kref, sdio_uart_port_destroy);
+ tty_port_put(&port->port);
}
static void sdio_uart_port_remove(struct sdio_uart_port *port)
@@ -737,6 +726,14 @@ static void sdio_uart_shutdown(struct tty_port *tport)
sdio_uart_release_func(port);
}
+static void sdio_uart_port_destroy(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ kfifo_free(&port->xmit_fifo);
+ kfree(port);
+}
+
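+
The private kref is replaced by the tty_port's own refcount: sdio_uart_port_get()/_put() now map to tty_port_get()/tty_port_put(), and the final put calls back into the .destruct hook below. A hedged sketch of the lifetime (tty_port_init() happens at port allocation, which is outside this hunk):

	tty_port_init(&port->port);
	port->port.ops = &sdio_uart_port_ops;	/* .destruct = sdio_uart_port_destroy */

	/* a table lookup takes a reference ... */
	tty_port_get(&port->port);
	/* ... and the matching put may free the port via .destruct */
	tty_port_put(&port->port);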
/**
* sdio_uart_install - install method
* @driver: the driver in use (sdio_uart in our case)
@@ -1045,6 +1042,7 @@ static const struct tty_port_operations sdio_uart_port_ops = {
.carrier_raised = uart_carrier_raised,
.shutdown = sdio_uart_shutdown,
.activate = sdio_uart_activate,
+ .destruct = sdio_uart_port_destroy,
};
static const struct tty_operations sdio_uart_ops = {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 83eb1e06ff76..8d13c6594520 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,18 @@ config MMC_RICOH_MMC
If unsure, say Y.
+config MMC_SDHCI_ACPI
+ tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+ depends on MMC_SDHCI && ACPI
+ help
+ This selects support for ACPI enumerated SDHCI controllers,
+ identified by ACPI Compatibility ID PNP0D40 or specific
+ ACPI Hardware IDs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_PLTFM
tristate "SDHCI platform and OF driver helper"
depends on MMC_SDHCI
@@ -521,7 +533,7 @@ config MMC_DW_PLTFM
If unsure, say Y.
config MMC_DW_EXYNOS
- tristate "Exynos specific extentions for Synopsys DW Memory Card Interface"
+ tristate "Exynos specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
select MMC_DW_PLTFM
help
@@ -612,3 +624,10 @@ config MMC_WMT
To compile this driver as a module, choose M here: the
module will be called wmt-sdmmc.
+
+config MMC_REALTEK_PCI
+ tristate "Realtek PCI-E SD/MMC Card Interface Driver"
+ depends on MFD_RTSX_PCI
+ help
+ Say Y here to include driver code to support the SD/MMC card interface
+ of Realtek PCI-E card readers.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 39d5e1234709..e4e218c930bd 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
+obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
@@ -46,6 +47,10 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
+obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
+
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index ddf096e3803f..722af1de7967 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
+#include <linux/platform_data/atmel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
@@ -40,7 +41,6 @@
#include <asm/unaligned.h>
#include <mach/cpu.h>
-#include <mach/board.h>
#include "atmel-mci-regs.h"
@@ -511,7 +511,7 @@ static const struct of_device_id atmci_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmci_dt_ids);
-static struct mci_platform_data __devinit*
+static struct mci_platform_data*
atmci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index dbd0c8a4e98a..127a8fade4da 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -943,7 +943,7 @@ static const struct mmc_host_ops au1xmmc_ops = {
.enable_sdio_irq = au1xmmc_enable_sdio_irq,
};
-static int __devinit au1xmmc_probe(struct platform_device *pdev)
+static int au1xmmc_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct au1xmmc_host *host;
@@ -1114,7 +1114,7 @@ out0:
return ret;
}
-static int __devexit au1xmmc_remove(struct platform_device *pdev)
+static int au1xmmc_remove(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index b9b463eca1ec..fb4348c5b6ac 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -522,7 +522,7 @@ static void sdh_reset(void)
SSYNC();
}
-static int __devinit sdh_probe(struct platform_device *pdev)
+static int sdh_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct sdh_host *host;
@@ -617,7 +617,7 @@ out1:
return ret;
}
-static int __devexit sdh_remove(struct platform_device *pdev)
+static int sdh_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -680,7 +680,7 @@ static int sdh_resume(struct platform_device *dev)
static struct platform_driver sdh_driver = {
.probe = sdh_probe,
- .remove = __devexit_p(sdh_remove),
+ .remove = sdh_remove,
.suspend = sdh_suspend,
.resume = sdh_resume,
.driver = {
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 83693fd7c6b3..777ca2046b27 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -690,7 +690,7 @@ static int cb710_mmc_resume(struct platform_device *pdev)
#endif /* CONFIG_PM */
-static int __devinit cb710_mmc_init(struct platform_device *pdev)
+static int cb710_mmc_init(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct cb710_chip *chip = cb710_slot_to_chip(slot);
@@ -746,7 +746,7 @@ err_free_mmc:
return err;
}
-static int __devexit cb710_mmc_exit(struct platform_device *pdev)
+static int cb710_mmc_exit(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -773,7 +773,7 @@ static int __devexit cb710_mmc_exit(struct platform_device *pdev)
static struct platform_driver cb710_mmc_driver = {
.driver.name = "cb710-mmc",
.probe = cb710_mmc_init,
- .remove = __devexit_p(cb710_mmc_exit),
+ .remove = cb710_mmc_exit,
#ifdef CONFIG_PM
.suspend = cb710_mmc_suspend,
.resume = cb710_mmc_resume,
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index 53a09cbb2c7c..083fcd29c9c6 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -37,7 +37,7 @@ static struct dw_mci_board pci_board_data = {
.fifo_depth = 32,
};
-static int __devinit dw_mci_pci_probe(struct pci_dev *pdev,
+static int dw_mci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *entries)
{
struct dw_mci *host;
@@ -85,7 +85,7 @@ err_disable_dev:
return ret;
}
-static void __devexit dw_mci_pci_remove(struct pci_dev *pdev)
+static void dw_mci_pci_remove(struct pci_dev *pdev)
{
struct dw_mci *host = pci_get_drvdata(pdev);
@@ -134,7 +134,7 @@ static struct pci_driver dw_mci_pci_driver = {
.name = "dw_mmc_pci",
.id_table = dw_mci_pci_id,
.probe = dw_mci_pci_probe,
- .remove = __devexit_p(dw_mci_pci_remove),
+ .remove = dw_mci_pci_remove,
.driver = {
.pm = &dw_mci_pci_pmops
},
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 4e133709e33d..5e1fb1d2c422 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -62,12 +62,12 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
-static int __devinit dw_mci_pltfm_probe(struct platform_device *pdev)
+static int dw_mci_pltfm_probe(struct platform_device *pdev)
{
return dw_mci_pltfm_register(pdev, NULL);
}
-static int __devexit dw_mci_pltfm_remove(struct platform_device *pdev)
+static int dw_mci_pltfm_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
static struct platform_driver dw_mci_pltfm_driver = {
.probe = dw_mci_pltfm_probe,
- .remove = __devexit_p(dw_mci_pltfm_remove),
+ .remove = dw_mci_pltfm_remove,
.driver = {
.name = "dw_mmc",
.of_match_table = of_match_ptr(dw_mci_pltfm_match),
diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h
index 2ac37b81de4d..68e7fd2f6148 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.h
+++ b/drivers/mmc/host/dw_mmc-pltfm.h
@@ -14,7 +14,7 @@
extern int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data);
-extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev);
+extern int dw_mci_pltfm_remove(struct platform_device *pdev);
extern const struct dev_pm_ops dw_mci_pltfm_pmops;
#endif /* _DW_MMC_PLTFM_H_ */
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index c8852a8128a9..2391c6b7a4bb 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -702,7 +702,7 @@ static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
JZ_GPIO_BULK_PIN(MSC_DATA3),
};
-static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio,
+static int jz4740_mmc_request_gpio(struct device *dev, int gpio,
const char *name, bool output, int value)
{
int ret;
@@ -724,7 +724,7 @@ static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio,
return 0;
}
-static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev)
+static int jz4740_mmc_request_gpios(struct platform_device *pdev)
{
int ret;
struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
@@ -759,7 +759,7 @@ err:
return ret;
}
-static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev,
+static int jz4740_mmc_request_cd_irq(struct platform_device *pdev,
struct jz4740_mmc_host *host)
{
struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
@@ -802,7 +802,7 @@ static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
return num_pins;
}
-static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
+static int jz4740_mmc_probe(struct platform_device* pdev)
{
int ret;
struct mmc_host *mmc;
@@ -938,7 +938,7 @@ err_free_host:
return ret;
}
-static int __devexit jz4740_mmc_remove(struct platform_device *pdev)
+static int jz4740_mmc_remove(struct platform_device *pdev)
{
struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
@@ -1004,7 +1004,7 @@ const struct dev_pm_ops jz4740_mmc_pm_ops = {
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
- .remove = __devexit_p(jz4740_mmc_remove),
+ .remove = jz4740_mmc_remove,
.driver = {
.name = "jz4740-mmc",
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a600eabbd6c3..74145d1d51f5 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1485,7 +1485,7 @@ nomem:
}
-static int __devexit mmc_spi_remove(struct spi_device *spi)
+static int mmc_spi_remove(struct spi_device *spi)
{
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
struct mmc_spi_host *host;
@@ -1517,7 +1517,7 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
return 0;
}
-static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
+static struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
};
@@ -1529,7 +1529,7 @@ static struct spi_driver mmc_spi_driver = {
.of_match_table = mmc_spi_of_match_table,
},
.probe = mmc_spi_probe,
- .remove = __devexit_p(mmc_spi_remove),
+ .remove = mmc_spi_remove,
};
module_spi_driver(mmc_spi_driver);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index edc3e9baf0e7..150772395cc6 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -33,6 +33,7 @@
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -261,7 +262,7 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
-static void __devinit mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
struct mmci_platform_data *plat = host->plat;
const char *rxname, *txname;
@@ -337,7 +338,7 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
}
/*
- * This is used in __devinit or __devexit so inline it
+ * This is used in probe and remove so inline it
* so it can be discarded.
*/
static inline void mmci_dma_release(struct mmci_host *host)
@@ -654,9 +655,31 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
/* The ST Micro variants has a special bit to enable SDIO */
if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card))
+ if (mmc_card_sdio(host->mmc->card)) {
+ /*
+ * The ST Micro variants has a special bit
+ * to enable SDIO.
+ */
+ u32 clk;
+
datactrl |= MCI_ST_DPSM_SDIOEN;
+ /*
+ * The ST Micro variant for SDIO small write transfers
+ * needs to have clock H/W flow control disabled,
+ * otherwise the transfer will not start. The threshold
+ * depends on the rate of MCLK.
+ */
+ if (data->flags & MMC_DATA_WRITE &&
+ (host->size < 8 ||
+ (host->size <= 8 && host->mclk > 50000000)))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
+
+ mmci_write_clkreg(host, clk);
+ }
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
@@ -840,14 +863,14 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
if (unlikely(count & 0x3)) {
if (count < 4) {
unsigned char buf[4];
- readsl(base + MMCIFIFO, buf, 1);
+ ioread32_rep(base + MMCIFIFO, buf, 1);
memcpy(ptr, buf, count);
} else {
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
count &= ~0x3;
}
} else {
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
}
ptr += count;
@@ -877,22 +900,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
count = min(remain, maxcnt);
/*
- * The ST Micro variant for SDIO transfer sizes
- * less then 8 bytes should have clock H/W flow
- * control disabled.
- */
- if (variant->sdio &&
- mmc_card_sdio(host->mmc->card)) {
- u32 clk;
- if (count < 8)
- clk = host->clk_reg & ~variant->clkreg_enable;
- else
- clk = host->clk_reg | variant->clkreg_enable;
-
- mmci_write_clkreg(host, clk);
- }
-
- /*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accept full 32-bit writes.
@@ -900,7 +907,7 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
* byte become a 32bit write, 7 bytes will be two
* 32bit writes etc.
*/
- writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
+ iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
ptr += count;
remain -= count;
@@ -1255,7 +1262,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
}
#endif
-static int __devinit mmci_probe(struct amba_device *dev,
+static int mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
@@ -1360,6 +1367,23 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ ret = PTR_ERR(host->pinctrl);
+ goto clk_disable;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+
+ /* enable pins to be muxed in and configured */
+ if (!IS_ERR(host->pins_default)) {
+ ret = pinctrl_select_state(host->pinctrl, host->pins_default);
+ if (ret)
+ dev_warn(&dev->dev, "could not set default pins\n");
+ } else
+ dev_warn(&dev->dev, "could not get default pinstate\n");
+
#ifdef CONFIG_REGULATOR
/* If we're using the regulator framework, try to fetch a regulator */
host->vcc = regulator_get(&dev->dev, "vmmc");
@@ -1522,7 +1546,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
return ret;
}
-static int __devexit mmci_remove(struct amba_device *dev)
+static int mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
@@ -1669,7 +1693,7 @@ static struct amba_driver mmci_driver = {
.pm = &mmci_dev_pm_ops,
},
.probe = mmci_probe,
- .remove = __devexit_p(mmci_remove),
+ .remove = mmci_remove,
.id_table = mmci_ids,
};
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d437ccf62d6b..d34d8c0add8e 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -195,6 +195,10 @@ struct mmci_host {
unsigned int size;
struct regulator *vcc;
+ /* pinctrl handles */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
struct dma_chan *dma_current;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index de4c20b3936c..f8dd36102949 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -50,8 +50,6 @@ struct mvsd_host {
struct timer_list timer;
struct mmc_host *mmc;
struct device *dev;
- struct resource *res;
- int irq;
struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (!r || irq < 0 || !mvsd_data)
return -ENXIO;
- r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
- host->res = r;
host->base_clock = mvsd_data->clock / 2;
+ host->clk = ERR_PTR(-EINVAL);
mmc->ops = &mvsd_ops;
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = ioremap(r->start, SZ_4K);
+ host->base = devm_request_and_ioremap(&pdev->dev, r);
if (!host->base) {
ret = -ENOMEM;
goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
mvsd_power_down(host);
- ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
goto out;
- } else
- host->irq = irq;
+ }
/* Not all platforms can gate the clock, so it is not
an error if the clock does not exists. */
- host->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(host->clk)) {
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
- }
if (mvsd_data->gpio_card_detect) {
- ret = gpio_request(mvsd_data->gpio_card_detect,
- DRIVER_NAME " cd");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_card_detect,
+ GPIOF_IN, DRIVER_NAME " cd");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_card_detect);
irq = gpio_to_irq(mvsd_data->gpio_card_detect);
- ret = request_irq(irq, mvsd_card_detect_irq,
- IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING,
- DRIVER_NAME " cd", host);
+ ret = devm_request_irq(&pdev->dev, irq,
+ mvsd_card_detect_irq,
+ IRQ_TYPE_EDGE_RISING |
+ IRQ_TYPE_EDGE_FALLING,
+ DRIVER_NAME " cd", host);
if (ret == 0)
host->gpio_card_detect =
mvsd_data->gpio_card_detect;
else
- gpio_free(mvsd_data->gpio_card_detect);
+ devm_gpio_free(&pdev->dev,
+ mvsd_data->gpio_card_detect);
}
}
if (!host->gpio_card_detect)
mmc->caps |= MMC_CAP_NEEDS_POLL;
if (mvsd_data->gpio_write_protect) {
- ret = gpio_request(mvsd_data->gpio_write_protect,
- DRIVER_NAME " wp");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_write_protect,
+ GPIOF_IN, DRIVER_NAME " wp");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_write_protect);
host->gpio_write_protect =
mvsd_data->gpio_write_protect;
}
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
return 0;
out:
- if (host) {
- if (host->irq)
- free_irq(host->irq, host);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- if (host->base)
- iounmap(host->base);
- }
- if (r)
- release_resource(r);
- if (mmc)
- if (!IS_ERR_OR_NULL(host->clk)) {
+ if (mmc) {
+ if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
mmc_free_host(mmc);
+ }
return ret;
}
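
The probe error path collapses because every devm_* resource (mapping, irq, gpios, clock handle) is released by the driver core when probe fails or the device is unbound; only the clock enable and the mmc host still need explicit unwinding. The shape of the pattern, as a sketch of the calls used above:

	host->base = devm_request_and_ioremap(&pdev->dev, r);	/* unmapped automatically */
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;		/* the irq is released by the driver core */

	/* state the core cannot undo stays explicit */
	if (!IS_ERR(host->clk))
		clk_prepare_enable(host->clk);	/* paired with clk_disable_unprepare() */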
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
- if (mmc) {
- struct mvsd_host *host = mmc_priv(mmc);
+ struct mvsd_host *host = mmc_priv(mmc);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- mmc_remove_host(mmc);
- free_irq(host->irq, host);
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- del_timer_sync(&host->timer);
- mvsd_power_down(host);
- iounmap(host->base);
- release_resource(host->res);
+ mmc_remove_host(mmc);
+ del_timer_sync(&host->timer);
+ mvsd_power_down(host);
+
+ if (!IS_ERR(host->clk))
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
- if (!IS_ERR(host->clk)) {
- clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
- mmc_free_host(mmc);
- }
platform_set_drvdata(pdev, NULL);
return 0;
}
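
The mvsdio conversion above moves the probe over to device-managed (devm_*) resources, which is why both the error path in mvsd_probe() and most of mvsd_remove() shrink: everything requested through devm_* is released automatically when the device is unbound. A minimal sketch of that pattern, using made-up foo_* names rather than the driver's own:

/* Illustrative devm_* sketch only; hypothetical foo_* driver, not part of the patch. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>

struct foo_host {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	struct foo_host *host;
	int ret;

	if (!res || irq < 0)
		return -ENXIO;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* Mapping, IRQ and clock are released automatically on unbind,
	 * so no explicit unwinding is needed here or in remove(). */
	host->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!host->base)
		return -EADDRNOTAVAIL;

	ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", host);
	if (ret)
		return ret;

	/* Clock gating is optional on some platforms, hence no hard failure. */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(host->clk))
		clk_prepare_enable(host->clk);

	platform_set_drvdata(pdev, host);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_host *host = platform_get_drvdata(pdev);

	if (!IS_ERR(host->clk))
		clk_disable_unprepare(host->clk);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");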
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 29e680f193a0..a72936eea6fa 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -41,7 +41,6 @@
#include <linux/platform_data/mmc-mxcmmc.h>
#include <linux/platform_data/dma-imx.h>
-#include <mach/hardware.h>
#define DRIVER_NAME "mxc-mmc"
#define MXCMCI_TIMEOUT_MS 10000
@@ -113,6 +112,11 @@
#define INT_WRITE_OP_DONE_EN (1 << 1)
#define INT_READ_OP_EN (1 << 0)
+enum mxcmci_type {
+ IMX21_MMC,
+ IMX31_MMC,
+};
+
struct mxcmci_host {
struct mmc_host *mmc;
struct resource *res;
@@ -153,7 +157,26 @@ struct mxcmci_host {
struct imx_dma_data dma_data;
struct timer_list watchdog;
+ enum mxcmci_type devtype;
+};
+
+static struct platform_device_id mxcmci_devtype[] = {
+ {
+ .name = "imx21-mmc",
+ .driver_data = IMX21_MMC,
+ }, {
+ .name = "imx31-mmc",
+ .driver_data = IMX31_MMC,
+ }, {
+ /* sentinel */
+ }
};
+MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
+
+static inline int is_imx31_mmc(struct mxcmci_host *host)
+{
+ return host->devtype == IMX31_MMC;
+}
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -843,6 +866,8 @@ static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
+ struct mxcmci_host *mxcmci = mmc_priv(host);
+
/*
* MX3 SoCs have a silicon bug which corrupts CRC calculation of
* multi-block transfers when connected SDIO peripheral doesn't
@@ -850,7 +875,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
* One way to prevent this is to only allow 1-bit transfers.
*/
- if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
+ if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
host->caps &= ~MMC_CAP_4_BIT_DATA;
else
host->caps |= MMC_CAP_4_BIT_DATA;
@@ -948,6 +973,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->mmc = mmc;
host->pdata = pdev->dev.platform_data;
+ host->devtype = pdev->id_entry->driver_data;
spin_lock_init(&host->lock);
mxcmci_init_ocr(host);
@@ -1120,6 +1146,7 @@ static const struct dev_pm_ops mxcmci_pm_ops = {
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
.remove = mxcmci_remove,
+ .id_table = mxcmci_devtype,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
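
The imx21/imx31 split introduced above works because the platform bus stores the matching id_table entry in pdev->id_entry before calling probe, so the variant can be read from driver_data instead of a cpu_is_mx3() check. Stripped down to its bones, with hypothetical bar_* names, the mechanism looks like this:

/* Minimal id_table/driver_data sketch; hypothetical bar_* names, not part of the patch. */
#include <linux/module.h>
#include <linux/platform_device.h>

enum bar_type {
	BAR_V1,
	BAR_V2,
};

static const struct platform_device_id bar_ids[] = {
	{ .name = "bar-v1", .driver_data = BAR_V1 },
	{ .name = "bar-v2", .driver_data = BAR_V2 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, bar_ids);

static int bar_probe(struct platform_device *pdev)
{
	/* The bus core points id_entry at the table row that matched. */
	enum bar_type type = pdev->id_entry->driver_data;

	dev_info(&pdev->dev, "probing variant %d\n", type);
	return 0;
}

static struct platform_driver bar_driver = {
	.probe		= bar_probe,
	.id_table	= bar_ids,
	.driver		= {
		.name	= "bar",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(bar_driver);

MODULE_LICENSE("GPL");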
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 48ad361613ef..4254975f931d 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -28,9 +28,8 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/platform_data/mmc-omap.h>
-#include <plat/mmc.h>
-#include <plat/dma.h>
#define OMAP_MMC_REG_CMD 0x00
#define OMAP_MMC_REG_ARGL 0x01
@@ -72,6 +71,13 @@
#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
+#define mmc_omap7xx() (host->features & MMC_OMAP7XX)
+#define mmc_omap15xx() (host->features & MMC_OMAP15XX)
+#define mmc_omap16xx() (host->features & MMC_OMAP16XX)
+#define MMC_OMAP1_MASK (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX)
+#define mmc_omap1() (host->features & MMC_OMAP1_MASK)
+#define mmc_omap2() (!mmc_omap1())
+
#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
@@ -84,6 +90,16 @@
#define OMAP_MMC_CMDTYPE_AC 2
#define OMAP_MMC_CMDTYPE_ADTC 3
+#define OMAP_DMA_MMC_TX 21
+#define OMAP_DMA_MMC_RX 22
+#define OMAP_DMA_MMC2_TX 54
+#define OMAP_DMA_MMC2_RX 55
+
+#define OMAP24XX_DMA_MMC2_TX 47
+#define OMAP24XX_DMA_MMC2_RX 48
+#define OMAP24XX_DMA_MMC1_TX 61
+#define OMAP24XX_DMA_MMC1_RX 62
+
#define DRIVER_NAME "mmci-omap"
@@ -147,6 +163,7 @@ struct mmc_omap_host {
u32 buffer_bytes_left;
u32 total_bytes_left;
+ unsigned features;
unsigned use_dma:1;
unsigned brs_received:1, dma_done:1;
unsigned dma_in_use:1;
@@ -988,7 +1005,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
* blocksize is at least that large. Blocksize is
* usually 512 bytes; but not for some SD reads.
*/
- burst = cpu_is_omap15xx() ? 32 : 64;
+ burst = mmc_omap15xx() ? 32 : 64;
if (burst > data->blksz)
burst = data->blksz;
@@ -1104,8 +1121,7 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
vdd);
-
- if (cpu_is_omap24xx()) {
+ if (mmc_omap2()) {
u16 w;
if (power_on) {
@@ -1214,7 +1230,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
.set_ios = mmc_omap_set_ios,
};
-static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
struct mmc_omap_slot *slot = NULL;
struct mmc_host *mmc;
@@ -1239,7 +1255,7 @@ static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->ops = &mmc_omap_ops;
mmc->f_min = 400000;
- if (cpu_class_is_omap2())
+ if (mmc_omap2())
mmc->f_max = 48000000;
else
mmc->f_max = 24000000;
@@ -1309,7 +1325,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
mmc_free_host(mmc);
}
-static int __devinit mmc_omap_probe(struct platform_device *pdev)
+static int mmc_omap_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
@@ -1359,6 +1375,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
init_waitqueue_head(&host->slot_wq);
host->pdata = pdata;
+ host->features = host->pdata->slots[0].features;
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
@@ -1391,7 +1408,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
host->dma_tx_burst = -1;
host->dma_rx_burst = -1;
- if (cpu_is_omap24xx())
+ if (mmc_omap2())
sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
else
sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
@@ -1407,7 +1424,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
sig);
#endif
- if (cpu_is_omap24xx())
+ if (mmc_omap2())
sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
else
sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
@@ -1435,7 +1452,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
}
host->nr_slots = pdata->nr_slots;
- host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+ host->reg_shift = (mmc_omap7xx() ? 1 : 2);
host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
if (!host->mmc_omap_wq)
@@ -1478,7 +1495,7 @@ err_free_mem_region:
return ret;
}
-static int __devexit mmc_omap_remove(struct platform_device *pdev)
+static int mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1566,7 +1583,7 @@ static int mmc_omap_resume(struct platform_device *pdev)
static struct platform_driver mmc_omap_driver = {
.probe = mmc_omap_probe,
- .remove = __devexit_p(mmc_omap_remove),
+ .remove = mmc_omap_remove,
.suspend = mmc_omap_suspend,
.resume = mmc_omap_resume,
.driver = {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index d0a912fbad3b..bc5807873b2c 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -39,9 +39,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
-#include <mach/hardware.h>
-#include <plat/mmc.h>
-#include <plat/cpu.h>
+#include <linux/platform_data/mmc-omap.h>
/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS 0x0014
@@ -1761,7 +1759,7 @@ static inline struct omap_mmc_platform_data
}
#endif
-static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
+static int omap_hsmmc_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_host *mmc;
@@ -2037,7 +2035,7 @@ err:
return ret;
}
-static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
+static int omap_hsmmc_remove(struct platform_device *pdev)
{
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
struct resource *res;
@@ -2204,7 +2202,7 @@ static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
static struct platform_driver omap_hsmmc_driver = {
.probe = omap_hsmmc_probe,
- .remove = __devexit_p(omap_hsmmc_remove),
+ .remove = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 3f9d6d577a91..2b2f65ada22e 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -584,7 +584,7 @@ static const struct of_device_id pxa_mmc_dt_ids[] = {
MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
-static int __devinit pxamci_of_init(struct platform_device *pdev)
+static int pxamci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct pxamci_platform_data *pdata;
@@ -614,7 +614,7 @@ static int __devinit pxamci_of_init(struct platform_device *pdev)
return 0;
}
#else
-static int __devinit pxamci_of_init(struct platform_device *pdev)
+static int pxamci_of_init(struct platform_device *pdev)
{
return 0;
}
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
new file mode 100644
index 000000000000..f74b5adca642
--- /dev/null
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -0,0 +1,1328 @@
+/* Realtek PCI-Express SD/MMC Card Interface driver
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/card.h>
+#include <linux/mfd/rtsx_pci.h>
+#include <asm/unaligned.h>
+
+/* SD Tuning Data Structure
+ * Record continuous timing phase path
+ */
+struct timing_phase_path {
+ int start;
+ int end;
+ int mid;
+ int len;
+};
+
+struct realtek_pci_sdmmc {
+ struct platform_device *pdev;
+ struct rtsx_pcr *pcr;
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+
+ struct mutex host_mutex;
+
+ u8 ssc_depth;
+ unsigned int clock;
+ bool vpclk;
+ bool double_clk;
+ bool eject;
+ bool initial_mode;
+ bool ddr_mode;
+};
+
+static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
+{
+ return &(host->pdev->dev);
+}
+
+static inline void sd_clear_error(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, CARD_STOP,
+ SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR);
+}
+
+#ifdef DEBUG
+static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ u16 i;
+ u8 *ptr;
+
+ /* Print SD host internal registers */
+ rtsx_pci_init_cmd(pcr);
+ for (i = 0xFDA0; i <= 0xFDAE; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0);
+ rtsx_pci_send_cmd(pcr, 100);
+
+ ptr = rtsx_pci_get_cmd_data(pcr);
+ for (i = 0xFDA0; i <= 0xFDAE; i++)
+ dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+ for (i = 0xFD52; i <= 0xFD69; i++)
+ dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++));
+}
+#else
+#define sd_print_debug_regs(host)
+#endif /* DEBUG */
+
+static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
+ u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+ u8 trans_mode;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, cmd[0] - 0x40);
+
+ if (!buf)
+ buf_len = 0;
+
+ if ((cmd[0] & 0x3F) == MMC_SEND_TUNING_BLOCK)
+ trans_mode = SD_TM_AUTO_TUNING;
+ else
+ trans_mode = SD_TM_NORMAL_READ;
+
+ rtsx_pci_init_cmd(pcr);
+
+ for (i = 0; i < 5; i++)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0 + i, 0xFF, cmd[i]);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H,
+ 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+ if (trans_mode != SD_TM_AUTO_TUNING)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, trans_mode | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ if (buf && buf_len) {
+ err = rtsx_pci_read_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_read_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
+ u8 *buf, int buf_len, int timeout)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err, i;
+ u8 trans_mode;
+
+ if (!buf)
+ buf_len = 0;
+
+ if (buf && buf_len) {
+ err = rtsx_pci_write_ppbuf(pcr, buf, buf_len);
+ if (err < 0) {
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_write_ppbuf fail (err = %d)\n", err);
+ return err;
+ }
+ }
+
+ trans_mode = cmd ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3;
+ rtsx_pci_init_cmd(pcr);
+
+ if (cmd) {
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d\n", __func__,
+ cmd[0] - 0x40);
+
+ for (i = 0; i < 5; i++)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_CMD0 + i, 0xFF, cmd[i]);
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H,
+ 0xFF, (u8)(byte_cnt >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF,
+ SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd fail (err = %d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ struct mmc_command *cmd)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ u8 cmd_idx = (u8)cmd->opcode;
+ u32 arg = cmd->arg;
+ int err = 0;
+ int timeout = 100;
+ int i;
+ u8 *ptr;
+ int stat_idx = 0;
+ u8 rsp_type;
+ int rsp_len = 5;
+
+ dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
+ __func__, cmd_idx, arg);
+
+ /* Response type:
+ * R0
+ * R1, R5, R6, R7
+ * R1b
+ * R2
+ * R3, R4
+ */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ rsp_type = SD_RSP_TYPE_R0;
+ rsp_len = 0;
+ break;
+ case MMC_RSP_R1:
+ rsp_type = SD_RSP_TYPE_R1;
+ break;
+ case MMC_RSP_R1B:
+ rsp_type = SD_RSP_TYPE_R1b;
+ break;
+ case MMC_RSP_R2:
+ rsp_type = SD_RSP_TYPE_R2;
+ rsp_len = 16;
+ break;
+ case MMC_RSP_R3:
+ rsp_type = SD_RSP_TYPE_R3;
+ break;
+ default:
+ dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R1b)
+ timeout = 3000;
+
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ 0xFF, SD_CLK_TOGGLE_EN);
+ if (err < 0)
+ goto out;
+ }
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, PINGPONG_BUFFER);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER,
+ 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END | SD_STAT_IDLE,
+ SD_TRANSFER_END | SD_STAT_IDLE);
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ /* Read data from ping-pong buffer */
+ for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 16;
+ } else if (rsp_type != SD_RSP_TYPE_R0) {
+ /* Read data from SD_CMDx registers */
+ for (i = SD_CMD0; i <= SD_CMD4; i++)
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 5;
+ }
+
+ rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
+
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd error (err = %d)\n", err);
+ goto out;
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R0) {
+ err = 0;
+ goto out;
+ }
+
+ /* Eliminate returned value of CHECK_REG_CMD */
+ ptr = rtsx_pci_get_cmd_data(pcr) + 1;
+
+ /* Check (Start,Transmission) bit of Response */
+ if ((ptr[0] & 0xC0) != 0) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "Invalid response bit\n");
+ goto out;
+ }
+
+ /* Check CRC7 */
+ if (!(rsp_type & SD_NO_CHECK_CRC7)) {
+ if (ptr[stat_idx] & SD_CRC7_ERR) {
+ err = -EILSEQ;
+ dev_dbg(sdmmc_dev(host), "CRC7 error\n");
+ goto out;
+ }
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+ i, cmd->resp[i]);
+ }
+ } else {
+ cmd->resp[0] = get_unaligned_be32(ptr + 1);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n",
+ cmd->resp[0]);
+ }
+
+out:
+ cmd->error = err;
+}
+
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_card *card = mmc->card;
+ struct mmc_data *data = mrq->data;
+ int uhs = mmc_sd_card_uhs(card);
+ int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
+ u8 cfg2, trans_mode;
+ int err;
+ size_t data_len = data->blksz * data->blocks;
+
+ if (read) {
+ cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
+ trans_mode = SD_TM_AUTO_READ_3;
+ } else {
+ cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
+ SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
+ trans_mode = SD_TM_AUTO_WRITE_3;
+ }
+
+ if (!uhs)
+ cfg2 |= SD_NO_CHECK_WAIT_CRC_TO;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L,
+ 0xFF, (u8)data->blocks);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
+ 0xFF, (u8)(data->blocks >> 8));
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
+ DMA_DONE_INT, DMA_DONE_INT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3,
+ 0xFF, (u8)(data_len >> 24));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2,
+ 0xFF, (u8)(data_len >> 16));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1,
+ 0xFF, (u8)(data_len >> 8));
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len);
+ if (read) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_FROM_CARD | DMA_EN | DMA_512);
+ } else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL,
+ 0x03 | DMA_PACK_SIZE_MASK,
+ DMA_DIR_TO_CARD | DMA_EN | DMA_512);
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
+ 0x01, RING_BUFFER);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
+ trans_mode | SD_TRANSFER_START);
+ rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
+ SD_TRANSFER_END, SD_TRANSFER_END);
+
+ rtsx_pci_send_cmd_no_wait(pcr);
+
+ err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
+ if (err < 0) {
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
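
/*
 * Editorial aside (not part of the patch): a worked example of the register
 * programming in sd_rw_multi() above. For an 8-block, 512-byte read,
 * data_len = 8 * 512 = 4096 = 0x00001000, so the queued writes end up as:
 *
 *   SD_BYTE_CNT_L  = 0x00, SD_BYTE_CNT_H  = 0x02   -> 0x0200 = 512 bytes/block
 *   SD_BLOCK_CNT_L = 0x08, SD_BLOCK_CNT_H = 0x00   -> 8 blocks
 *   DMATC0..DMATC3 = 0x00, 0x10, 0x00, 0x00        -> 0x00001000 total DMA bytes
 *
 * i.e. the byte/block counters describe one block and the block count, while
 * the DMATC registers carry the whole transfer length for the DMA engine.
 */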
+
+static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
+}
+
+static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
+{
+ rtsx_pci_write_register(host->pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
+}
+
+static void sd_normal_rw(struct realtek_pci_sdmmc *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ u8 _cmd[5], *buf;
+
+ _cmd[0] = 0x40 | (u8)cmd->opcode;
+ put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1]));
+
+ buf = kzalloc(data->blksz, GFP_NOIO);
+ if (!buf) {
+ cmd->error = -ENOMEM;
+ return;
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->initial_mode)
+ sd_disable_initial_mode(host);
+
+ cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+
+ if (host->initial_mode)
+ sd_enable_initial_mode(host);
+
+ sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz);
+ } else {
+ sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);
+
+ cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf,
+ data->blksz, 200);
+ }
+
+ kfree(buf);
+}
+
+static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "%s: sample_point = %d\n",
+ __func__, sample_point);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map)
+{
+ struct timing_phase_path path[MAX_PHASE + 1];
+ int i, j, cont_path_cnt;
+ int new_block, max_len, final_path_idx;
+ u8 final_phase = 0xFF;
+
+ /* Parse phase_map, take it as a bit-ring */
+ cont_path_cnt = 0;
+ new_block = 1;
+ j = 0;
+ for (i = 0; i < MAX_PHASE + 1; i++) {
+ if (phase_map & (1 << i)) {
+ if (new_block) {
+ new_block = 0;
+ j = cont_path_cnt++;
+ path[j].start = i;
+ path[j].end = i;
+ } else {
+ path[j].end = i;
+ }
+ } else {
+ new_block = 1;
+ if (cont_path_cnt) {
+ /* Calculate path length and middle point */
+ int idx = cont_path_cnt - 1;
+ path[idx].len =
+ path[idx].end - path[idx].start + 1;
+ path[idx].mid =
+ path[idx].start + path[idx].len / 2;
+ }
+ }
+ }
+
+ if (cont_path_cnt == 0) {
+ dev_dbg(sdmmc_dev(host), "No continuous phase path\n");
+ goto finish;
+ } else {
+ /* Calculate last continuous path length and middle point */
+ int idx = cont_path_cnt - 1;
+ path[idx].len = path[idx].end - path[idx].start + 1;
+ path[idx].mid = path[idx].start + path[idx].len / 2;
+ }
+
+ /* Connect the first and last continuous paths if they are adjacent */
+ if (!path[0].start && (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ /* Using negative index */
+ path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
+ path[0].len += path[cont_path_cnt - 1].len;
+ path[0].mid = path[0].start + path[0].len / 2;
+ /* Convert negative middle point index to positive one */
+ if (path[0].mid < 0)
+ path[0].mid += MAX_PHASE + 1;
+ cont_path_cnt--;
+ }
+
+ /* Choose the longest continuous phase path */
+ max_len = 0;
+ final_phase = 0;
+ final_path_idx = 0;
+ for (i = 0; i < cont_path_cnt; i++) {
+ if (path[i].len > max_len) {
+ max_len = path[i].len;
+ final_phase = (u8)path[i].mid;
+ final_path_idx = i;
+ }
+
+ dev_dbg(sdmmc_dev(host), "path[%d].start = %d\n",
+ i, path[i].start);
+ dev_dbg(sdmmc_dev(host), "path[%d].end = %d\n",
+ i, path[i].end);
+ dev_dbg(sdmmc_dev(host), "path[%d].len = %d\n",
+ i, path[i].len);
+ dev_dbg(sdmmc_dev(host), "path[%d].mid = %d\n",
+ i, path[i].mid);
+ }
+
+finish:
+ dev_dbg(sdmmc_dev(host), "Final chosen phase: %d\n", final_phase);
+ return final_phase;
+}
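
/*
 * Editorial aside (not part of the patch): sd_search_final_phase() treats the
 * 32-bit phase map as a ring and returns the midpoint of the longest run of
 * usable phases, so a window that wraps from phase 31 back to phase 0 is still
 * found. The same idea, as a tiny user-space model for experimentation
 * (assuming 32 phase points, i.e. MAX_PHASE == 31):
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_PHASES 32

static int longest_run_mid(uint32_t map)
{
	int best_len = 0, best_mid = -1;
	int i, len;

	for (i = 0; i < NUM_PHASES; i++) {
		/* Count consecutive usable phases walking forward around the ring. */
		len = 0;
		while (len < NUM_PHASES && (map & (1u << ((i + len) % NUM_PHASES))))
			len++;
		if (len > best_len) {
			best_len = len;
			best_mid = (i + len / 2) % NUM_PHASES;
		}
	}
	return best_mid;	/* -1 if no phase worked at all */
}

int main(void)
{
	/* Phases 28..31 and 0..3 form one wrapped run of 8; the midpoint is 0. */
	printf("final phase = %d\n", longest_run_mid(0xF000000Fu));
	return 0;
}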
+
+static void sd_wait_data_idle(struct realtek_pci_sdmmc *host)
+{
+ int err, i;
+ u8 val = 0;
+
+ for (i = 0; i < 100; i++) {
+ err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val);
+ if (val & SD_DATA_IDLE)
+ return;
+
+ udelay(100);
+ }
+}
+
+static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
+ u8 opcode, u8 sample_point)
+{
+ int err;
+ u8 cmd[5] = {0};
+
+ err = sd_change_phase(host, sample_point);
+ if (err < 0)
+ return err;
+
+ cmd[0] = 0x40 | opcode;
+ err = sd_read_data(host, cmd, 0x40, NULL, 0, 100);
+ if (err < 0) {
+ /* Wait till SD DATA IDLE */
+ sd_wait_data_idle(host);
+ sd_clear_error(host);
+ return err;
+ }
+
+ return 0;
+}
+
+static int sd_tuning_phase(struct realtek_pci_sdmmc *host,
+ u8 opcode, u32 *phase_map)
+{
+ int err, i;
+ u32 raw_phase_map = 0;
+
+ for (i = MAX_PHASE; i >= 0; i--) {
+ err = sd_tuning_rx_cmd(host, opcode, (u8)i);
+ if (err == 0)
+ raw_phase_map |= 1 << i;
+ }
+
+ if (phase_map)
+ *phase_map = raw_phase_map;
+
+ return 0;
+}
+
+static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
+{
+ int err, i;
+ u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map;
+ u8 final_phase;
+
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ err = sd_tuning_phase(host, opcode, &(raw_phase_map[i]));
+ if (err < 0)
+ return err;
+
+ if (raw_phase_map[i] == 0)
+ break;
+ }
+
+ phase_map = 0xFFFFFFFF;
+ for (i = 0; i < RX_TUNING_CNT; i++) {
+ dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n",
+ i, raw_phase_map[i]);
+ phase_map &= raw_phase_map[i];
+ }
+ dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map);
+
+ if (phase_map) {
+ final_phase = sd_search_final_phase(host, phase_map);
+ if (final_phase == 0xFF)
+ return -EINVAL;
+
+ err = sd_change_phase(host, final_phase);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_command *cmd = mrq->cmd;
+ struct mmc_data *data = mrq->data;
+ unsigned int data_size = 0;
+
+ if (host->eject) {
+ cmd->error = -ENOMEDIUM;
+ goto finish;
+ }
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
+ host->initial_mode, host->double_clk, host->vpclk);
+ rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL);
+ rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_SD);
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
+ if (mrq->data)
+ data_size = data->blocks * data->blksz;
+
+ if (!data_size || mmc_op_multi(cmd->opcode) ||
+ (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+ (cmd->opcode == MMC_WRITE_BLOCK)) {
+ sd_send_cmd_get_rsp(host, cmd);
+
+ if (!cmd->error && data_size) {
+ sd_rw_multi(host, mrq);
+
+ if (mmc_op_multi(cmd->opcode) && mrq->stop)
+ sd_send_cmd_get_rsp(host, mrq->stop);
+ }
+ } else {
+ sd_normal_rw(host, mrq);
+ }
+
+ if (mrq->data) {
+ if (cmd->error || data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+finish:
+ if (cmd->error)
+ dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = NULL;
+ mutex_unlock(&host->host_mutex);
+
+ mmc_request_done(mmc, mrq);
+}
+
+static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
+ unsigned char bus_width)
+{
+ int err = 0;
+ u8 width[] = {
+ [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT,
+ [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT,
+ [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT,
+ };
+
+ if (bus_width <= MMC_BUS_WIDTH_8)
+ err = rtsx_pci_write_register(host->pcr, SD_CFG1,
+ 0x03, width[bus_width]);
+
+ return err;
+}
+
+static int sd_power_on(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE,
+ CARD_SHARE_MASK, CARD_SHARE_48_SD);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
+ SD_CLK_EN, SD_CLK_EN);
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int sd_power_off(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0);
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+ if (err < 0)
+ return err;
+
+ err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+}
+
+static int sd_set_power_mode(struct realtek_pci_sdmmc *host,
+ unsigned char power_mode)
+{
+ int err;
+
+ if (power_mode == MMC_POWER_OFF)
+ err = sd_power_off(host);
+ else
+ err = sd_power_on(host);
+
+ return err;
+}
+
+static int sd_set_timing(struct realtek_pci_sdmmc *host,
+ unsigned char timing, bool *ddr_mode)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+
+ *ddr_mode = false;
+
+ rtsx_pci_init_cmd(pcr);
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_NOT_RST,
+ SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ break;
+
+ case MMC_TIMING_UHS_DDR50:
+ *ddr_mode = true;
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C | SD_ASYNC_FIFO_NOT_RST,
+ SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD,
+ DDR_VAR_RX_DAT | DDR_VAR_RX_CMD);
+ break;
+
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1,
+ 0x0C, SD_20_MODE);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL,
+ SD20_TX_SEL_MASK, SD20_TX_14_AHEAD);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_14_DELAY);
+ break;
+
+ default:
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_CFG1, 0x0C, SD_20_MODE);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF,
+ CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+ SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL,
+ SD20_RX_SEL_MASK, SD20_RX_POS_EDGE);
+ break;
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 100);
+
+ return err;
+}
+
+static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+
+ if (host->eject)
+ return;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ sd_set_bus_width(host, ios->bus_width);
+ sd_set_power_mode(host, ios->power_mode);
+ sd_set_timing(host, ios->timing, &host->ddr_mode);
+
+ host->vpclk = false;
+ host->double_clk = true;
+
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ host->ssc_depth = RTSX_SSC_DEPTH_2M;
+ host->vpclk = true;
+ host->double_clk = false;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_UHS_SDR25:
+ host->ssc_depth = RTSX_SSC_DEPTH_1M;
+ break;
+ default:
+ host->ssc_depth = RTSX_SSC_DEPTH_500K;
+ break;
+ }
+
+ host->initial_mode = (ios->clock <= 1000000) ? true : false;
+
+ host->clock = ios->clock;
+ rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth,
+ host->initial_mode, host->double_clk, host->vpclk);
+
+ mutex_unlock(&pcr->pcr_mutex);
+}
+
+static int sdmmc_get_ro(struct mmc_host *mmc)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int ro = 0;
+ u32 val;
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ /* Check SD mechanical write-protect switch */
+ val = rtsx_pci_readl(pcr, RTSX_BIPR);
+ dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val);
+ if (val & SD_WRITE_PROTECT)
+ ro = 1;
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return ro;
+}
+
+static int sdmmc_get_cd(struct mmc_host *mmc)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int cd = 0;
+ u32 val;
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ /* Check SD card detect */
+ val = rtsx_pci_card_exist(pcr);
+ dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val);
+ if (val & SD_EXIST)
+ cd = 1;
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return cd;
+}
+
+static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 stat;
+
+	/* Refer to the Signal Voltage Switch Sequence in the SD spec.
+ * Wait for a period of time so that the card can drive SD_CMD and
+ * SD_DAT[3:0] to low after sending back CMD11 response.
+ */
+ mdelay(1);
+
+	/* The card should now be driving SD_CMD and SD_DAT[3:0] low;
+	 * if any of them is still high, abort the voltage switch
+	 * sequence.
+ */
+ err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat);
+ if (err < 0)
+ return err;
+
+ if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS))
+ return -EINVAL;
+
+ /* Stop toggle SD clock */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ 0xFF, SD_CLK_FORCE_STOP);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int err;
+ u8 stat, mask, val;
+
+	/* Wait for the card's 1.8V regulator output to stabilize */
+ msleep(50);
+
+ /* Toggle SD clock again */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN);
+ if (err < 0)
+ return err;
+
+ /* Wait for a period of time so that the card can drive
+ * SD_DAT[3:0] to high at 1.8V
+ */
+ msleep(20);
+
+ /* SD_CMD, SD_DAT[3:0] should be pulled high by host */
+ err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat);
+ if (err < 0)
+ return err;
+
+ mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS;
+ val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
+ SD_DAT1_STATUS | SD_DAT0_STATUS;
+ if ((stat & mask) != val) {
+ dev_dbg(sdmmc_dev(host),
+ "%s: SD_BUS_STAT = 0x%x\n", __func__, stat);
+ rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+ u8 voltage;
+
+ dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n",
+ __func__, ios->signal_voltage);
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ voltage = OUTPUT_3V3;
+ else
+ voltage = OUTPUT_1V8;
+
+ if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_register(pcr,
+ SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+ if (err < 0)
+ goto out;
+
+ err = sd_wait_voltage_stable_1(host);
+ if (err < 0)
+ goto out;
+ }
+
+ err = rtsx_pci_switch_output_voltage(pcr, voltage);
+ if (err < 0)
+ goto out;
+
+ if (voltage == OUTPUT_1V8) {
+ err = sd_wait_voltage_stable_2(host);
+ if (err < 0)
+ goto out;
+ }
+
+ /* Stop toggle SD clock in idle */
+ err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+
+out:
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return err;
+}
+
+static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ int err = 0;
+
+ if (host->eject)
+ return -ENOMEDIUM;
+
+ mutex_lock(&pcr->pcr_mutex);
+
+ rtsx_pci_start_run(pcr);
+
+ if (!host->ddr_mode)
+ err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK);
+
+ mutex_unlock(&pcr->pcr_mutex);
+
+ return err;
+}
+
+static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
+ .request = sdmmc_request,
+ .set_ios = sdmmc_set_ios,
+ .get_ro = sdmmc_get_ro,
+ .get_cd = sdmmc_get_cd,
+ .start_signal_voltage_switch = sdmmc_switch_voltage,
+ .execute_tuning = sdmmc_execute_tuning,
+};
+
+#ifdef CONFIG_PM
+static int rtsx_pci_sdmmc_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = host->mmc;
+ int err;
+
+ dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
+
+ err = mmc_suspend_host(mmc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rtsx_pci_sdmmc_resume(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = host->mmc;
+
+ dev_dbg(sdmmc_dev(host), "--> %s\n", __func__);
+
+ return mmc_resume_host(mmc);
+}
+#else /* CONFIG_PM */
+#define rtsx_pci_sdmmc_suspend NULL
+#define rtsx_pci_sdmmc_resume NULL
+#endif /* CONFIG_PM */
+
+static void init_extra_caps(struct realtek_pci_sdmmc *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct rtsx_pcr *pcr = host->pcr;
+
+ dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50)
+ mmc->caps |= MMC_CAP_UHS_SDR50;
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ mmc->caps |= MMC_CAP_UHS_SDR104;
+ if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50)
+ mmc->caps |= MMC_CAP_UHS_DDR50;
+ if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR)
+ mmc->caps |= MMC_CAP_1_8V_DDR;
+ if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
+}
+
+static void realtek_init_host(struct realtek_pci_sdmmc *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->f_min = 250000;
+ mmc->f_max = 208000000;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+ mmc->max_current_330 = 400;
+ mmc->max_current_180 = 800;
+ mmc->ops = &realtek_pci_sdmmc_ops;
+
+ init_extra_caps(host);
+
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 65536;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = 524288;
+}
+
+static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+
+ mmc_detect_change(host->mmc, 0);
+}
+
+static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct realtek_pci_sdmmc *host;
+ struct rtsx_pcr *pcr;
+ struct pcr_handle *handle = pdev->dev.platform_data;
+
+ if (!handle)
+ return -ENXIO;
+
+ pcr = handle->pcr;
+ if (!pcr)
+ return -ENXIO;
+
+ dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n");
+
+ mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->pcr = pcr;
+ host->mmc = mmc;
+ host->pdev = pdev;
+ platform_set_drvdata(pdev, host);
+ pcr->slots[RTSX_SD_CARD].p_dev = pdev;
+ pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
+
+ mutex_init(&host->host_mutex);
+
+ realtek_init_host(host);
+
+ mmc_add_host(mmc);
+
+ return 0;
+}
+
+static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
+{
+ struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
+ struct rtsx_pcr *pcr;
+ struct mmc_host *mmc;
+
+ if (!host)
+ return 0;
+
+ pcr = host->pcr;
+ pcr->slots[RTSX_SD_CARD].p_dev = NULL;
+ pcr->slots[RTSX_SD_CARD].card_event = NULL;
+ mmc = host->mmc;
+ host->eject = true;
+
+ mutex_lock(&host->host_mutex);
+ if (host->mrq) {
+ dev_dbg(&(pdev->dev),
+ "%s: Controller removed during transfer\n",
+ mmc_hostname(mmc));
+
+ rtsx_pci_complete_unfinished_transfer(pcr);
+
+ host->mrq->cmd->error = -ENOMEDIUM;
+ if (host->mrq->stop)
+ host->mrq->stop->error = -ENOMEDIUM;
+ mmc_request_done(mmc, host->mrq);
+ }
+ mutex_unlock(&host->host_mutex);
+
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&(pdev->dev),
+ ": Realtek PCI-E SDMMC controller has been removed\n");
+
+ return 0;
+}
+
+static struct platform_device_id rtsx_pci_sdmmc_ids[] = {
+ {
+ .name = DRV_NAME_RTSX_PCI_SDMMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids);
+
+static struct platform_driver rtsx_pci_sdmmc_driver = {
+ .probe = rtsx_pci_sdmmc_drv_probe,
+ .remove = rtsx_pci_sdmmc_drv_remove,
+ .id_table = rtsx_pci_sdmmc_ids,
+ .suspend = rtsx_pci_sdmmc_suspend,
+ .resume = rtsx_pci_sdmmc_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME_RTSX_PCI_SDMMC,
+ },
+};
+module_platform_driver(rtsx_pci_sdmmc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
+MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 4638ddab97b8..63fb265e0da6 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1540,7 +1540,7 @@ static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
#endif /* CONFIG_DEBUG_FS */
-static int __devinit s3cmci_probe(struct platform_device *pdev)
+static int s3cmci_probe(struct platform_device *pdev)
{
struct s3cmci_host *host;
struct mmc_host *mmc;
@@ -1819,7 +1819,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
clk_disable(host->clk);
}
-static int __devexit s3cmci_remove(struct platform_device *pdev)
+static int s3cmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct s3cmci_host *host = mmc_priv(mmc);
@@ -1906,7 +1906,7 @@ static struct platform_driver s3cmci_driver = {
},
.id_table = s3cmci_driver_ids,
.probe = s3cmci_probe,
- .remove = __devexit_p(s3cmci_remove),
+ .remove = s3cmci_remove,
.shutdown = s3cmci_shutdown,
};
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
new file mode 100644
index 000000000000..2592dddbd965
--- /dev/null
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -0,0 +1,312 @@
+/*
+ * Secure Digital Host Controller Interface ACPI driver.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/pm.h>
+#include <linux/mmc/sdhci.h>
+
+#include "sdhci.h"
+
+enum {
+ SDHCI_ACPI_SD_CD = BIT(0),
+ SDHCI_ACPI_RUNTIME_PM = BIT(1),
+};
+
+struct sdhci_acpi_chip {
+ const struct sdhci_ops *ops;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+};
+
+struct sdhci_acpi_slot {
+ const struct sdhci_acpi_chip *chip;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+ unsigned int flags;
+};
+
+struct sdhci_acpi_host {
+ struct sdhci_host *host;
+ const struct sdhci_acpi_slot *slot;
+ struct platform_device *pdev;
+ bool use_runtime_pm;
+};
+
+static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
+{
+ return c->slot && (c->slot->flags & flag);
+}
+
+static int sdhci_acpi_enable_dma(struct sdhci_host *host)
+{
+ return 0;
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_dflt = {
+ .enable_dma = sdhci_acpi_enable_dma,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
+ .flags = SDHCI_ACPI_RUNTIME_PM,
+ .pm_caps = MMC_PM_KEEP_POWER,
+};
+
+static const struct acpi_device_id sdhci_acpi_ids[] = {
+ { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio },
+ { "PNP0D40" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
+{
+ const struct acpi_device_id *id;
+
+ for (id = sdhci_acpi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, hid))
+ return (const struct sdhci_acpi_slot *)id->driver_data;
+ return NULL;
+}
+
+static int sdhci_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *device;
+ struct sdhci_acpi_host *c;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ resource_size_t len;
+ const char *hid;
+ int err;
+
+ if (acpi_bus_get_device(handle, &device))
+ return -ENODEV;
+
+ if (acpi_bus_get_status(device) || !device->status.present)
+ return -ENODEV;
+
+ hid = acpi_device_hid(device);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -ENOMEM;
+
+ len = resource_size(iomem);
+ if (len < 0x100)
+ dev_err(dev, "Invalid iomem size!\n");
+
+ if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
+ return -ENOMEM;
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ c = sdhci_priv(host);
+ c->host = host;
+ c->slot = sdhci_acpi_get_slot(hid);
+ c->pdev = pdev;
+ c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
+
+ platform_set_drvdata(pdev, c);
+
+ host->hw_name = "ACPI";
+ host->ops = &sdhci_acpi_ops_dflt;
+ host->irq = platform_get_irq(pdev, 0);
+
+ host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ resource_size(iomem));
+ if (host->ioaddr == NULL) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (!dev->dma_mask) {
+ u64 dma_mask;
+
+ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) {
+ /* 64-bit DMA is not supported at present */
+ dma_mask = DMA_BIT_MASK(32);
+ } else {
+ dma_mask = DMA_BIT_MASK(32);
+ }
+
+ dev->dma_mask = &dev->coherent_dma_mask;
+ dev->coherent_dma_mask = dma_mask;
+ }
+
+ if (c->slot) {
+ if (c->slot->chip) {
+ host->ops = c->slot->chip->ops;
+ host->quirks |= c->slot->chip->quirks;
+ host->quirks2 |= c->slot->chip->quirks2;
+ host->mmc->caps |= c->slot->chip->caps;
+ host->mmc->caps2 |= c->slot->chip->caps2;
+ host->mmc->pm_caps |= c->slot->chip->pm_caps;
+ }
+ host->quirks |= c->slot->quirks;
+ host->quirks2 |= c->slot->quirks2;
+ host->mmc->caps |= c->slot->caps;
+ host->mmc->caps2 |= c->slot->caps2;
+ host->mmc->pm_caps |= c->slot->pm_caps;
+ }
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_free;
+
+ if (c->use_runtime_pm) {
+ pm_suspend_ignore_children(dev, 1);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return 0;
+
+err_free:
+ platform_set_drvdata(pdev, NULL);
+ sdhci_free_host(c->host);
+ return err;
+}
+
+static int sdhci_acpi_remove(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int dead;
+
+ if (c->use_runtime_pm) {
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ }
+
+ dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
+ sdhci_remove_host(c->host, dead);
+ platform_set_drvdata(pdev, NULL);
+ sdhci_free_host(c->host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int sdhci_acpi_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(c->host);
+}
+
+static int sdhci_acpi_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_resume_host(c->host);
+}
+
+#else
+
+#define sdhci_acpi_suspend NULL
+#define sdhci_acpi_resume NULL
+
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdhci_acpi_runtime_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_suspend_host(c->host);
+}
+
+static int sdhci_acpi_runtime_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_resume_host(c->host);
+}
+
+static int sdhci_acpi_runtime_idle(struct device *dev)
+{
+ return 0;
+}
+
+#else
+
+#define sdhci_acpi_runtime_suspend NULL
+#define sdhci_acpi_runtime_resume NULL
+#define sdhci_acpi_runtime_idle NULL
+
+#endif
+
+static const struct dev_pm_ops sdhci_acpi_pm_ops = {
+ .suspend = sdhci_acpi_suspend,
+ .resume = sdhci_acpi_resume,
+ .runtime_suspend = sdhci_acpi_runtime_suspend,
+ .runtime_resume = sdhci_acpi_runtime_resume,
+ .runtime_idle = sdhci_acpi_runtime_idle,
+};
+
+static struct platform_driver sdhci_acpi_driver = {
+ .driver = {
+ .name = "sdhci-acpi",
+ .owner = THIS_MODULE,
+ .acpi_match_table = sdhci_acpi_ids,
+ .pm = &sdhci_acpi_pm_ops,
+ },
+ .probe = sdhci_acpi_probe,
+ .remove = sdhci_acpi_remove,
+};
+
+module_platform_driver(sdhci_acpi_driver);
+
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL v2");
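
For slots flagged SDHCI_ACPI_RUNTIME_PM above, the probe only arms autosuspend; the matching get/put calls happen elsewhere (in the sdhci core's request handling). The general shape of that runtime-PM pattern, sketched with hypothetical foo_* names rather than the sdhci code itself:

/* Generic runtime-PM autosuspend sketch; hypothetical foo_* names, not sdhci code. */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms of idle before suspending */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume the controller if suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* may power down after the delay */
	return 0;
}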
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 28a870804f60..30bfdc4ae52a 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -95,12 +95,12 @@ static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
SDHCI_QUIRK_NONSTANDARD_CLOCK,
};
-static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev)
+static int sdhci_cns3xxx_probe(struct platform_device *pdev)
{
return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata);
}
-static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev)
+static int sdhci_cns3xxx_remove(struct platform_device *pdev)
{
return sdhci_pltfm_unregister(pdev);
}
@@ -112,7 +112,7 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
- .remove = __devexit_p(sdhci_cns3xxx_remove),
+ .remove = sdhci_cns3xxx_remove,
};
module_platform_driver(sdhci_cns3xxx_driver);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index e6214480bc98..169fab91778e 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -97,7 +97,7 @@ static struct sdhci_pltfm_data sdhci_dove_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT,
};
-static int __devinit sdhci_dove_probe(struct platform_device *pdev)
+static int sdhci_dove_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -178,7 +178,7 @@ err_sdhci_pltfm_init:
return ret;
}
-static int __devexit sdhci_dove_remove(struct platform_device *pdev)
+static int sdhci_dove_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -197,7 +197,7 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = {
+static const struct of_device_id sdhci_dove_of_match_table[] = {
{ .compatible = "marvell,dove-sdhci", },
{}
};
@@ -211,7 +211,7 @@ static struct platform_driver sdhci_dove_driver = {
.of_match_table = of_match_ptr(sdhci_dove_of_match_table),
},
.probe = sdhci_dove_probe,
- .remove = __devexit_p(sdhci_dove_remove),
+ .remove = sdhci_dove_remove,
};
module_platform_driver(sdhci_dove_driver);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1849461c39ee..e07df812ff1e 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -403,7 +403,7 @@ static irqreturn_t cd_irq(int irq, void *data)
};
#ifdef CONFIG_OF
-static int __devinit
+static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
struct esdhc_platform_data *boarddata)
{
@@ -440,7 +440,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
}
#endif
-static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
+static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(imx_esdhc_dt_ids, &pdev->dev);
@@ -590,7 +590,7 @@ free_sdhci:
return err;
}
-static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
+static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -617,7 +617,7 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
- .remove = __devexit_p(sdhci_esdhc_imx_remove),
+ .remove = sdhci_esdhc_imx_remove,
};
module_platform_driver(sdhci_esdhc_imx_driver);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 60de2eeb39b1..f32526d2d966 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -260,12 +260,12 @@ static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
.ops = &sdhci_esdhc_ops,
};
-static int __devinit sdhci_esdhc_probe(struct platform_device *pdev)
+static int sdhci_esdhc_probe(struct platform_device *pdev)
{
return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata);
}
-static int __devexit sdhci_esdhc_remove(struct platform_device *pdev)
+static int sdhci_esdhc_remove(struct platform_device *pdev)
{
return sdhci_pltfm_unregister(pdev);
}
@@ -286,7 +286,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
- .remove = __devexit_p(sdhci_esdhc_remove),
+ .remove = sdhci_esdhc_remove,
};
module_platform_driver(sdhci_esdhc_driver);
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 0ce088ae0228..c3d3715ec3d7 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -66,12 +66,12 @@ static struct sdhci_pltfm_data sdhci_hlwd_pdata = {
.ops = &sdhci_hlwd_ops,
};
-static int __devinit sdhci_hlwd_probe(struct platform_device *pdev)
+static int sdhci_hlwd_probe(struct platform_device *pdev)
{
return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata);
}
-static int __devexit sdhci_hlwd_remove(struct platform_device *pdev)
+static int sdhci_hlwd_remove(struct platform_device *pdev)
{
return sdhci_pltfm_unregister(pdev);
}
@@ -90,7 +90,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
- .remove = __devexit_p(sdhci_hlwd_remove),
+ .remove = sdhci_hlwd_remove,
};
module_platform_driver(sdhci_hlwd_driver);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0777fad997ba..c7dd0cbc99de 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -654,7 +654,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
.probe = via_probe,
};
-static const struct pci_device_id pci_ids[] __devinitconst = {
+static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_RICOH,
.device = PCI_DEVICE_ID_RICOH_R5C822,
@@ -1184,7 +1184,7 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
* *
\*****************************************************************************/
-static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
+static struct sdhci_pci_slot *sdhci_pci_probe_slot(
struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
int slotno)
{
@@ -1339,7 +1339,7 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
sdhci_free_host(slot->host);
}
-static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev)
+static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
pm_runtime_put_noidle(dev);
pm_runtime_allow(dev);
@@ -1348,13 +1348,13 @@ static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev)
pm_suspend_ignore_children(dev, 1);
}
-static void __devexit sdhci_pci_runtime_pm_forbid(struct device *dev)
+static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
pm_runtime_forbid(dev);
pm_runtime_get_noresume(dev);
}
-static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
+static int sdhci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct sdhci_pci_chip *chip;
@@ -1446,7 +1446,7 @@ err:
return ret;
}
-static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
+static void sdhci_pci_remove(struct pci_dev *pdev)
{
int i;
struct sdhci_pci_chip *chip;
@@ -1471,7 +1471,7 @@ static struct pci_driver sdhci_driver = {
.name = "sdhci-pci",
.id_table = pci_ids,
.probe = sdhci_pci_probe,
- .remove = __devexit_p(sdhci_pci_remove),
+ .remove = sdhci_pci_remove,
.driver = {
.pm = &sdhci_pci_pm_ops
},
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 8e63a9c04e31..ac854aa192a8 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -166,7 +166,7 @@ static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
}
#endif
-static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
+static int sdhci_pxav2_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
@@ -247,7 +247,7 @@ err_clk_get:
return ret;
}
-static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
+static int sdhci_pxav2_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -275,7 +275,7 @@ static struct platform_driver sdhci_pxav2_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
- .remove = __devexit_p(sdhci_pxav2_remove),
+ .remove = sdhci_pxav2_remove,
};
module_platform_driver(sdhci_pxav2_driver);
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 60829c92bcfd..fad0966427fd 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -222,7 +222,7 @@ static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
}
#endif
-static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
+static int sdhci_pxav3_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
@@ -324,7 +324,7 @@ err_clk_get:
return ret;
}
-static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
+static int sdhci_pxav3_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -357,7 +357,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
- .remove = __devexit_p(sdhci_pxav3_remove),
+ .remove = sdhci_pxav3_remove,
};
module_platform_driver(sdhci_pxav3_driver);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 82b7a7ad4217..82a8de148a8f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -441,7 +441,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
}
#ifdef CONFIG_OF
-static int __devinit sdhci_s3c_parse_dt(struct device *dev,
+static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
{
struct device_node *node = dev->of_node;
@@ -532,7 +532,7 @@ static int __devinit sdhci_s3c_parse_dt(struct device *dev,
return 0;
}
#else
-static int __devinit sdhci_s3c_parse_dt(struct device *dev,
+static int sdhci_s3c_parse_dt(struct device *dev,
struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)
{
return -EINVAL;
@@ -555,7 +555,7 @@ static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
platform_get_device_id(pdev)->driver_data;
}
-static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
+static int sdhci_s3c_probe(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata;
struct sdhci_s3c_drv_data *drv_data;
@@ -781,7 +781,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+static int sdhci_s3c_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
@@ -911,7 +911,7 @@ MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
- .remove = __devexit_p(sdhci_s3c_remove),
+ .remove = sdhci_s3c_remove,
.id_table = sdhci_s3c_driver_ids,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 87a700944b7d..c6ece0bd03b3 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -71,8 +71,7 @@ static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
}
#ifdef CONFIG_OF
-static struct sdhci_plat_data * __devinit
-sdhci_probe_config_dt(struct platform_device *pdev)
+static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sdhci_plat_data *pdata = NULL;
@@ -96,14 +95,13 @@ sdhci_probe_config_dt(struct platform_device *pdev)
return pdata;
}
#else
-static struct sdhci_plat_data * __devinit
-sdhci_probe_config_dt(struct platform_device *pdev)
+static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev)
{
return ERR_PTR(-ENOSYS);
}
#endif
-static int __devinit sdhci_probe(struct platform_device *pdev)
+static int sdhci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sdhci_host *host;
@@ -273,7 +271,7 @@ err:
return ret;
}
-static int __devexit sdhci_remove(struct platform_device *pdev)
+static int sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
@@ -341,7 +339,7 @@ static struct platform_driver sdhci_driver = {
.of_match_table = of_match_ptr(sdhci_spear_id_table),
},
.probe = sdhci_probe,
- .remove = __devexit_p(sdhci_remove),
+ .remove = sdhci_remove,
};
module_platform_driver(sdhci_driver);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f9eb91623701..3695b2e0cbd2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -206,7 +206,7 @@ static struct sdhci_tegra_soc_data soc_data_tegra30 = {
};
#endif
-static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = {
+static const struct of_device_id sdhci_tegra_dt_match[] = {
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
#endif
@@ -217,7 +217,7 @@ static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
-static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
+static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
struct platform_device *pdev)
{
struct tegra_sdhci_platform_data *plat;
@@ -244,7 +244,7 @@ static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
return plat;
}
-static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
+static int sdhci_tegra_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct sdhci_tegra_soc_data *soc_data;
@@ -370,7 +370,7 @@ err_no_plat:
return rc;
}
-static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
+static int sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -407,7 +407,7 @@ static struct platform_driver sdhci_tegra_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
- .remove = __devexit_p(sdhci_tegra_remove),
+ .remove = sdhci_tegra_remove,
};
module_platform_driver(sdhci_tegra_driver);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ae795233a1d6..9a4c151067dd 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1298,7 +1298,7 @@ static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
-static int __devinit sh_mmcif_probe(struct platform_device *pdev)
+static int sh_mmcif_probe(struct platform_device *pdev)
{
int ret = 0, irq[2];
struct mmc_host *mmc;
@@ -1424,7 +1424,7 @@ ealloch:
return ret;
}
-static int __devexit sh_mmcif_remove(struct platform_device *pdev)
+static int sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index d6ff8531fb35..524a7f773820 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -117,7 +117,7 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
.cd_wakeup = sh_mobile_sdhi_cd_wakeup,
};
-static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
+static int sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
@@ -328,7 +328,7 @@ static struct platform_driver sh_mobile_sdhi_driver = {
.of_match_table = sh_mobile_sdhi_of_match,
},
.probe = sh_mobile_sdhi_probe,
- .remove = __devexit_p(sh_mobile_sdhi_remove),
+ .remove = sh_mobile_sdhi_remove,
};
module_platform_driver(sh_mobile_sdhi_driver);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 113ce6c9cf32..139212e79cde 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -57,7 +57,7 @@ static int tmio_mmc_resume(struct platform_device *dev)
#define tmio_mmc_resume NULL
#endif
-static int __devinit tmio_mmc_probe(struct platform_device *pdev)
+static int tmio_mmc_probe(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct tmio_mmc_data *pdata;
@@ -107,7 +107,7 @@ out:
return ret;
}
-static int __devexit tmio_mmc_remove(struct platform_device *pdev)
+static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -133,7 +133,7 @@ static struct platform_driver tmio_mmc_driver = {
.owner = THIS_MODULE,
},
.probe = tmio_mmc_probe,
- .remove = __devexit_p(tmio_mmc_remove),
+ .remove = tmio_mmc_remove,
.suspend = tmio_mmc_suspend,
.resume = tmio_mmc_resume,
};
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0d8a9bbe30be..50bf495a988b 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -918,7 +918,7 @@ static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
-int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
+int tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
{
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index f18becef156d..4f84586c6e9e 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1082,7 +1082,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
msleep(1);
}
-static int __devinit via_sd_probe(struct pci_dev *pcidev,
+static int via_sd_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
struct mmc_host *mmc;
@@ -1176,7 +1176,7 @@ disable:
return ret;
}
-static void __devexit via_sd_remove(struct pci_dev *pcidev)
+static void via_sd_remove(struct pci_dev *pcidev)
{
struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
unsigned long flags;
@@ -1332,7 +1332,7 @@ static struct pci_driver via_sd_driver = {
.name = DRV_NAME,
.id_table = via_ids,
.probe = via_sd_probe,
- .remove = __devexit_p(via_sd_remove),
+ .remove = via_sd_remove,
.suspend = via_sd_suspend,
.resume = via_sd_resume,
};
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 64acd9ce141c..e954b7758876 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1196,7 +1196,7 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id)
* Allocate/free MMC structure.
*/
-static int __devinit wbsd_alloc_mmc(struct device *dev)
+static int wbsd_alloc_mmc(struct device *dev)
{
struct mmc_host *mmc;
struct wbsd_host *host;
@@ -1288,7 +1288,7 @@ static void wbsd_free_mmc(struct device *dev)
* Scan for known chip id:s
*/
-static int __devinit wbsd_scan(struct wbsd_host *host)
+static int wbsd_scan(struct wbsd_host *host)
{
int i, j, k;
int id;
@@ -1344,7 +1344,7 @@ static int __devinit wbsd_scan(struct wbsd_host *host)
* Allocate/free io port ranges
*/
-static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
+static int wbsd_request_region(struct wbsd_host *host, int base)
{
if (base & 0x7)
return -EINVAL;
@@ -1374,7 +1374,7 @@ static void wbsd_release_regions(struct wbsd_host *host)
* Allocate/free DMA port and buffer
*/
-static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
+static void wbsd_request_dma(struct wbsd_host *host, int dma)
{
if (dma < 0)
return;
@@ -1452,7 +1452,7 @@ static void wbsd_release_dma(struct wbsd_host *host)
* Allocate/free IRQ.
*/
-static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
+static int wbsd_request_irq(struct wbsd_host *host, int irq)
{
int ret;
@@ -1502,7 +1502,7 @@ static void wbsd_release_irq(struct wbsd_host *host)
* Allocate all resources for the host.
*/
-static int __devinit wbsd_request_resources(struct wbsd_host *host,
+static int wbsd_request_resources(struct wbsd_host *host,
int base, int irq, int dma)
{
int ret;
@@ -1644,7 +1644,7 @@ static void wbsd_chip_poweroff(struct wbsd_host *host)
* *
\*****************************************************************************/
-static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
+static int wbsd_init(struct device *dev, int base, int irq, int dma,
int pnp)
{
struct wbsd_host *host = NULL;
@@ -1735,7 +1735,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
return 0;
}
-static void __devexit wbsd_shutdown(struct device *dev, int pnp)
+static void wbsd_shutdown(struct device *dev, int pnp)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct wbsd_host *host;
@@ -1762,13 +1762,13 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)
* Non-PnP
*/
-static int __devinit wbsd_probe(struct platform_device *dev)
+static int wbsd_probe(struct platform_device *dev)
{
/* Use the module parameters for resources */
return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
}
-static int __devexit wbsd_remove(struct platform_device *dev)
+static int wbsd_remove(struct platform_device *dev)
{
wbsd_shutdown(&dev->dev, 0);
@@ -1781,7 +1781,7 @@ static int __devexit wbsd_remove(struct platform_device *dev)
#ifdef CONFIG_PNP
-static int __devinit
+static int
wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
{
int io, irq, dma;
@@ -1801,7 +1801,7 @@ wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
}
-static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
+static void wbsd_pnp_remove(struct pnp_dev *dev)
{
wbsd_shutdown(&dev->dev, 1);
}
@@ -1941,7 +1941,7 @@ static struct platform_device *wbsd_device;
static struct platform_driver wbsd_driver = {
.probe = wbsd_probe,
- .remove = __devexit_p(wbsd_remove),
+ .remove = wbsd_remove,
.suspend = wbsd_platform_suspend,
.resume = wbsd_platform_resume,
@@ -1957,7 +1957,7 @@ static struct pnp_driver wbsd_pnp_driver = {
.name = DRIVER_NAME,
.id_table = pnp_dev_table,
.probe = wbsd_pnp_probe,
- .remove = __devexit_p(wbsd_pnp_remove),
+ .remove = wbsd_pnp_remove,
.suspend = wbsd_pnp_suspend,
.resume = wbsd_pnp_resume,
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 5ba4605e4f80..154f0e8e931c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -766,7 +766,7 @@ static struct of_device_id wmt_mci_dt_ids[] = {
{ /* Sentinel */ },
};
-static int __devinit wmt_mci_probe(struct platform_device *pdev)
+static int wmt_mci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
@@ -892,7 +892,7 @@ fail1:
return ret;
}
-static int __devexit wmt_mci_remove(struct platform_device *pdev)
+static int wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 945393129952..7c057a05adb6 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -26,19 +26,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
-#include <linux/magic.h>
#include <linux/module.h>
+#include <uapi/linux/magic.h>
+
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
-#ifndef SQUASHFS_MAGIC
-#define SQUASHFS_MAGIC 0x73717368
-#endif
-
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 63d2a64331f7..6eeb84c81bc2 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -37,8 +37,7 @@
#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
-#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_CFE_BLOCK_SIZE 0x10000 /* always at least 64KiB */
#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
@@ -79,7 +78,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
unsigned int rootfsaddr, kerneladdr, spareaddr;
unsigned int rootfslen, kernellen, sparelen, totallen;
unsigned int cfelen, nvramlen;
- int namelen = 0;
+ unsigned int cfe_erasesize;
int i;
u32 computed_crc;
bool rootfs_first = false;
@@ -87,8 +86,11 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
if (bcm63xx_detect_cfe(master))
return -EINVAL;
- cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
- nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+ cfe_erasesize = max_t(uint32_t, master->erasesize,
+ BCM63XX_CFE_BLOCK_SIZE);
+
+ cfelen = cfe_erasesize;
+ nvramlen = cfe_erasesize;
/* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
@@ -121,7 +123,6 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
spareaddr = roundup(totallen, master->erasesize) + cfelen;
- sparelen = master->size - spareaddr - nvramlen;
if (rootfsaddr < kerneladdr) {
/* default Broadcom layout */
@@ -139,19 +140,15 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
rootfslen = 0;
rootfsaddr = 0;
spareaddr = cfelen;
- sparelen = master->size - cfelen - nvramlen;
}
+ sparelen = master->size - spareaddr - nvramlen;
/* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
+ if (rootfslen > 0)
nrparts++;
- namelen += 6;
- }
- if (kernellen > 0) {
+
+ if (kernellen > 0)
nrparts++;
- namelen += 6;
- }
/* Ask kernel for more memory */
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
@@ -193,17 +190,16 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
+ curpart++;
/* Global partition "linux" to make easy firmware upgrade */
- curpart++;
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
for (i = 0; i < nrparts; i++)
- pr_info("Partition %d is %s offset %lx and length %lx\n", i,
- parts[i].name, (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
+ pr_info("Partition %d is %s offset %llx and length %llx\n", i,
+ parts[i].name, parts[i].offset, parts[i].size);
pr_info("Spare partition is offset %x and length %x\n", spareaddr,
sparelen);
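For reference, a small userspace-style sketch of the size arithmetic this hunk settles on; the 128 KiB erase size and 8 MiB flash size are invented example values, not taken from the patch. Both the CFE and NVRAM partitions are sized to one erase block, but never less than 64 KiB, and the spare length is now computed once after the layout is known:

/*
 * Editorial illustration only: how cfelen/nvramlen/sparelen relate after
 * the change above, using made-up device sizes.
 */
#include <stdio.h>

#define BCM63XX_CFE_BLOCK_SIZE 0x10000u	/* always at least 64 KiB */

static unsigned int cfe_block(unsigned int erasesize)
{
	return erasesize > BCM63XX_CFE_BLOCK_SIZE ?
	       erasesize : BCM63XX_CFE_BLOCK_SIZE;
}

int main(void)
{
	unsigned int erasesize = 0x20000;	/* e.g. 128 KiB erase blocks */
	unsigned int size = 0x800000;		/* e.g. 8 MiB flash */
	unsigned int cfelen = cfe_block(erasesize);
	unsigned int nvramlen = cfe_block(erasesize);
	unsigned int spareaddr = cfelen;	/* case: no kernel/rootfs found */
	unsigned int sparelen = size - spareaddr - nvramlen;

	printf("cfe=%#x nvram=%#x spare=%#x at %#x\n",
	       cfelen, nvramlen, sparelen, spareaddr);
	return 0;
}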
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 5ff5c4a16943..b86197286f24 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1536,8 +1536,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1);
}
- /* reset on all failures. */
- map_write( map, CMD(0xF0), chip->start );
+ /*
+ * Recovery from write-buffer programming failures requires
+ * the write-to-buffer-reset sequence. Since the last part
+ * of the sequence also works as a normal reset, we can run
+ * the same commands regardless of why we are here.
+ * See e.g.
+ * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
+ */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
xip_enable(map, chip, adr);
/* FIXME - should have reset delay before continuing */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index aed1b8a63c9f..c533f27d863f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -56,8 +56,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING UINT_MAX
-#define OFFSET_CONTINUOUS UINT_MAX
+#define SIZE_REMAINING ULLONG_MAX
+#define OFFSET_CONTINUOUS ULLONG_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -89,7 +89,7 @@ static struct mtd_partition * newpart(char *s,
int extra_mem_size)
{
struct mtd_partition *parts;
- unsigned long size, offset = OFFSET_CONTINUOUS;
+ unsigned long long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -104,7 +104,8 @@ static struct mtd_partition * newpart(char *s,
} else {
size = memparse(s, &s);
if (size < PAGE_SIZE) {
- printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+ printk(KERN_ERR ERRP "partition size too small (%llx)\n",
+ size);
return ERR_PTR(-EINVAL);
}
}
@@ -296,7 +297,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
- unsigned long offset;
+ unsigned long long offset;
int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
@@ -308,48 +309,52 @@ static int parse_cmdline_partitions(struct mtd_info *master,
return err;
}
+ /*
+ * Search for the partition definition matching master->name.
+ * If master->name is not set, stop at first partition definition.
+ */
for (part = partitions; part; part = part->next) {
- if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) {
- for (i = 0, offset = 0; i < part->num_parts; i++) {
- if (part->parts[i].offset == OFFSET_CONTINUOUS)
- part->parts[i].offset = offset;
- else
- offset = part->parts[i].offset;
-
- if (part->parts[i].size == SIZE_REMAINING)
- part->parts[i].size = master->size - offset;
-
- if (part->parts[i].size == 0) {
- printk(KERN_WARNING ERRP
- "%s: skipping zero sized partition\n",
- part->mtd_id);
- part->num_parts--;
- memmove(&part->parts[i],
- &part->parts[i + 1],
- sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
- }
- offset += part->parts[i].size;
- }
-
- *pparts = kmemdup(part->parts,
- sizeof(*part->parts) * part->num_parts,
- GFP_KERNEL);
- if (!*pparts)
- return -ENOMEM;
-
- return part->num_parts;
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ break;
+ }
+
+ if (!part)
+ return 0;
+
+ for (i = 0, offset = 0; i < part->num_parts; i++) {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+
+ if (part->parts[i].size == 0) {
+ printk(KERN_WARNING ERRP
+ "%s: skipping zero sized partition\n",
+ part->mtd_id);
+ part->num_parts--;
+ memmove(&part->parts[i], &part->parts[i + 1],
+ sizeof(*part->parts) * (part->num_parts - i));
+ continue;
}
+
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
}
- return 0;
+ *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
+ GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ return part->num_parts;
}
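To make the rewritten loop above concrete, here is an editorial, userspace-only sketch of how OFFSET_CONTINUOUS and SIZE_REMAINING resolve for a hypothetical 8 MiB device described as mtdparts=demo:256k(boot),64k(env),-(rootfs); the device name, partition names and sizes are invented for illustration:

/*
 * Illustration only: offsets of "continuous" partitions follow the previous
 * one, and a SIZE_REMAINING partition takes whatever is left of the device.
 */
#include <stdio.h>

#define SIZE_REMAINING		(~0ULL)
#define OFFSET_CONTINUOUS	(~0ULL)

struct part { const char *name; unsigned long long offset, size; };

int main(void)
{
	unsigned long long master_size = 8 * 1024 * 1024, offset = 0;
	struct part parts[] = {
		{ "boot",   OFFSET_CONTINUOUS, 256 * 1024 },
		{ "env",    OFFSET_CONTINUOUS,  64 * 1024 },
		{ "rootfs", OFFSET_CONTINUOUS, SIZE_REMAINING },
	};
	int i, n = 3;

	for (i = 0; i < n; i++) {
		if (parts[i].offset == OFFSET_CONTINUOUS)
			parts[i].offset = offset;	/* follow previous part */
		else
			offset = parts[i].offset;

		if (parts[i].size == SIZE_REMAINING)
			parts[i].size = master_size - offset;

		offset += parts[i].size;
		printf("%-6s offset=%#llx size=%#llx\n",
		       parts[i].name, parts[i].offset, parts[i].size);
	}
	return 0;
}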
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 27f80cd8aef3..46dcb54c32ec 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -272,6 +272,7 @@ config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
select BCH_CONST_PARAMS
+ select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 2dc5a6f3fd57..4714584aa993 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -66,7 +66,7 @@ out:
return err;
}
-static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
@@ -77,7 +77,7 @@ static int __devexit bcm47xxsflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcma_sflash_driver = {
- .remove = __devexit_p(bcm47xxsflash_remove),
+ .remove = bcm47xxsflash_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 681e2ee0f2d6..e081bfeaaf7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -62,6 +62,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
memset(page_address(page), 0xff, PAGE_SIZE);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
break;
}
@@ -152,6 +153,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
memcpy(page_address(page) + offset, buf, cpylen);
set_page_dirty(page);
unlock_page(page);
+ balance_dirty_pages_ratelimited(mapping);
}
page_cache_release(page);
@@ -433,7 +435,7 @@ static int __init block2mtd_init(void)
}
-static void __devexit block2mtd_exit(void)
+static void block2mtd_exit(void)
{
struct list_head *pos, *next;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index d34d83b8f9c2..8510ccb9c6f0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1440,7 +1440,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
oobdelta = mtd->ecclayout->oobavail;
break;
default:
- oobdelta = 0;
+ return -EINVAL;
}
if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
(ofs % DOC_LAYOUT_PAGE_SIZE))
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 706b847b46b3..88b3fd3e18a7 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -70,8 +70,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 03838bab1f59..4eeeb2d7f6ea 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,6 @@
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define MAX_CMD_SIZE 5
-#ifdef CONFIG_M25PXX_USE_FAST_READ
-#define OPCODE_READ OPCODE_FAST_READ
-#define FAST_READ_DUMMY_BYTE 1
-#else
-#define OPCODE_READ OPCODE_NORM_READ
-#define FAST_READ_DUMMY_BYTE 0
-#endif
-
#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
/****************************************************************************/
@@ -93,6 +85,7 @@ struct m25p {
u16 addr_width;
u8 erase_opcode;
u8 *command;
+ bool fast_read;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -168,6 +161,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
switch (JEDEC_MFR(jedec_id)) {
case CFI_MFR_MACRONIX:
+ case 0xEF /* winbond */:
flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
return spi_write(flash->spi, flash->command, 1);
default:
@@ -342,6 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
struct m25p *flash = mtd_to_m25p(mtd);
struct spi_transfer t[2];
struct spi_message m;
+ uint8_t opcode;
pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
__func__, (u32)from, len);
@@ -354,7 +349,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
* Should add 1 byte DUMMY_BYTE.
*/
t[0].tx_buf = flash->command;
- t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
+ t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
@@ -376,12 +371,14 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
/* Set up the write data buffer. */
- flash->command[0] = OPCODE_READ;
+ opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
+ flash->command[0] = opcode;
m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
+ *retlen = m.actual_length - m25p_cmdsz(flash) -
+ (flash->fast_read ? 1 : 0);
mutex_unlock(&flash->lock);
@@ -664,7 +661,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
/* Micron */
- { "n25q128", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
@@ -745,6 +743,8 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
@@ -756,7 +756,7 @@ static const struct spi_device_id m25p_ids[] = {
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
-static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
+static const struct spi_device_id *jedec_probe(struct spi_device *spi)
{
int tmp;
u8 code = OPCODE_RDID;
@@ -801,7 +801,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
* matches what the READ command supports, at least until this driver
* understands FAST_READ (for clocks over 25 MHz).
*/
-static int __devinit m25p_probe(struct spi_device *spi)
+static int m25p_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct flash_platform_data *data;
@@ -809,9 +809,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
struct flash_info *info;
unsigned i;
struct mtd_part_parser_data ppdata;
+ struct device_node __maybe_unused *np = spi->dev.of_node;
#ifdef CONFIG_MTD_OF_PARTS
- if (!of_device_is_available(spi->dev.of_node))
+ if (!of_device_is_available(np))
return -ENODEV;
#endif
@@ -863,7 +864,8 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+ flash->command = kmalloc(MAX_CMD_SIZE + (flash->fast_read ? 1 : 0),
+ GFP_KERNEL);
if (!flash->command) {
kfree(flash);
return -ENOMEM;
@@ -920,6 +922,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->page_size = info->page_size;
flash->mtd.writebufsize = flash->page_size;
+ flash->fast_read = false;
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(np, "m25p,fast-read"))
+ flash->fast_read = true;
+#endif
+
+#ifdef CONFIG_M25PXX_USE_FAST_READ
+ flash->fast_read = true;
+#endif
+
if (info->addr_width)
flash->addr_width = info->addr_width;
else {
@@ -961,7 +973,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
}
-static int __devexit m25p_remove(struct spi_device *spi)
+static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -983,7 +995,7 @@ static struct spi_driver m25p80_driver = {
},
.id_table = m25p_ids,
.probe = m25p_probe,
- .remove = __devexit_p(m25p_remove),
+ .remove = m25p_remove,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
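The FAST READ change above replaces the compile-time OPCODE_READ/FAST_READ_DUMMY_BYTE selection with a per-device fast_read flag, settable from the "m25p,fast-read" device-tree property or the existing Kconfig option. A simplified, self-contained sketch of the resulting command construction follows (standard SPI NOR opcodes 0x03 and 0x0B; the helper name is invented and is not from the patch):

/*
 * Editorial sketch: with fast_read set, opcode 0x0B is used and one dummy
 * byte follows the address; otherwise opcode 0x03 is used with no dummy.
 */
#include <stdint.h>
#include <stdio.h>

#define OPCODE_NORM_READ	0x03
#define OPCODE_FAST_READ	0x0b

/* returns command length: 1 opcode + addr_width + optional dummy byte */
static int build_read_cmd(uint8_t *cmd, uint32_t addr, int addr_width,
			  int fast_read)
{
	int i;

	cmd[0] = fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
	for (i = 0; i < addr_width; i++)
		cmd[1 + i] = addr >> (8 * (addr_width - 1 - i));
	if (fast_read)
		cmd[1 + addr_width] = 0x00;	/* dummy cycle byte */

	return 1 + addr_width + (fast_read ? 1 : 0);
}

int main(void)
{
	uint8_t cmd[6];
	int len = build_read_cmd(cmd, 0x10000, 3, 1);

	printf("fast read command is %d bytes, opcode %#x\n",
	       len, (unsigned int)cmd[0]);
	return 0;
}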
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 928fb0e6d73a..945c9f762349 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -618,9 +618,8 @@ static char *otp_setup(struct mtd_info *device, char revision)
/*
* Register DataFlash device with MTD subsystem.
*/
-static int __devinit
-add_dataflash_otp(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset, char revision)
+static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
{
struct dataflash *priv;
struct mtd_info *device;
@@ -679,9 +678,8 @@ add_dataflash_otp(struct spi_device *spi, char *name,
return err;
}
-static inline int __devinit
-add_dataflash(struct spi_device *spi, char *name,
- int nr_pages, int pagesize, int pageoffset)
+static inline int add_dataflash(struct spi_device *spi, char *name,
+ int nr_pages, int pagesize, int pageoffset)
{
return add_dataflash_otp(spi, name, nr_pages, pagesize,
pageoffset, 0);
@@ -705,7 +703,7 @@ struct flash_info {
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
-static struct flash_info __devinitdata dataflash_data [] = {
+static struct flash_info dataflash_data[] = {
/*
* NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
@@ -740,7 +738,7 @@ static struct flash_info __devinitdata dataflash_data [] = {
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
};
-static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+static struct flash_info *jedec_probe(struct spi_device *spi)
{
int tmp;
uint8_t code = OP_READ_ID;
@@ -823,7 +821,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
-static int __devinit dataflash_probe(struct spi_device *spi)
+static int dataflash_probe(struct spi_device *spi)
{
int status;
struct flash_info *info;
@@ -897,7 +895,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
return status;
}
-static int __devexit dataflash_remove(struct spi_device *spi)
+static int dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
@@ -920,7 +918,7 @@ static struct spi_driver dataflash_driver = {
},
.probe = dataflash_probe,
- .remove = __devexit_p(dataflash_remove),
+ .remove = dataflash_remove,
/* FIXME: investigate suspend and resume... */
};
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index dcc3c9511530..2aabd96bf0ff 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -756,8 +756,8 @@ err_probe:
#ifdef CONFIG_OF
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *pp = NULL;
@@ -799,8 +799,8 @@ static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
return 0;
}
#else
-static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int spear_smi_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
return -ENOSYS;
}
@@ -901,7 +901,7 @@ static int spear_smi_setup_banks(struct platform_device *pdev,
* and do proper init for any found one.
* Returns 0 on success, non zero otherwise
*/
-static int __devinit spear_smi_probe(struct platform_device *pdev)
+static int spear_smi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
@@ -1016,7 +1016,7 @@ err:
*
* free all allocations and delete the partitions.
*/
-static int __devexit spear_smi_remove(struct platform_device *pdev)
+static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
@@ -1092,20 +1092,9 @@ static struct platform_driver spear_smi_driver = {
#endif
},
.probe = spear_smi_probe,
- .remove = __devexit_p(spear_smi_remove),
+ .remove = spear_smi_remove,
};
-
-static int spear_smi_init(void)
-{
- return platform_driver_register(&spear_smi_driver);
-}
-module_init(spear_smi_init);
-
-static void spear_smi_exit(void)
-{
- platform_driver_unregister(&spear_smi_driver);
-}
-module_exit(spear_smi_exit);
+module_platform_driver(spear_smi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index ab8a2f4c8d60..8091b0163694 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -64,7 +64,7 @@ struct flash_info {
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
-static struct flash_info __devinitdata sst25l_flash_info[] = {
+static struct flash_info sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
@@ -313,7 +313,7 @@ out:
return ret;
}
-static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
+static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
@@ -353,7 +353,7 @@ static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
return flash_info;
}
-static int __devinit sst25l_probe(struct spi_device *spi)
+static int sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
@@ -411,7 +411,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
return 0;
}
-static int __devexit sst25l_remove(struct spi_device *spi)
+static int sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
int ret;
@@ -428,7 +428,7 @@ static struct spi_driver sst25l_driver = {
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
- .remove = __devexit_p(sst25l_remove),
+ .remove = sst25l_remove,
};
module_spi_driver(sst25l_driver);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 2e47c2ed0a2d..62ba82c396c2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -324,13 +324,6 @@ config MTD_SOLUTIONENGINE
This enables access to the flash chips on the Hitachi SolutionEngine and
similar boards. Say 'Y' if you are building a kernel for such a board.
-config MTD_CDB89712
- tristate "Cirrus CDB89712 evaluation board mappings"
- depends on MTD_CFI && ARCH_CDB89712
- help
- This enables access to the flash or ROM chips on the CDB89712 board.
- If you have such a board, say 'Y'.
-
config MTD_SA1100
tristate "CFI Flash device mapped on StrongARM SA11x0"
depends on MTD_CFI && ARCH_SA1100
@@ -365,13 +358,6 @@ config MTD_IXP2000
IXP2000 based board and would like to use the flash chips on it,
say 'Y'.
-config MTD_FORTUNET
- tristate "CFI Flash device mapped on the FortuNet board"
- depends on MTD_CFI && SA1100_FORTUNET
- help
- This enables access to the Flash on the FortuNet board. If you
- have such a board, say 'Y'.
-
config MTD_AUTCPU12
bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index deb43e9a1e7f..4ded28711bc1 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_MTD) += map_funcs.o
endif
# Chip mappings
-obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
@@ -40,7 +39,6 @@ obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
-obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index e2875d6fe129..f7207b0a76dc 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -100,8 +100,8 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
}
-static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int amd76xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -289,7 +289,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
+static void amd76xrom_remove_one(struct pci_dev *pdev)
{
struct amd76xrom_window *window = &amd76xrom_window;
@@ -347,4 +347,3 @@ module_exit(cleanup_amd76xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
-
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 76fb594bb1d9..a2dc2ae4b24e 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -33,7 +33,7 @@ struct autcpu12_nvram_priv {
struct map_info map;
};
-static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
+static int autcpu12_nvram_probe(struct platform_device *pdev)
{
map_word tmp, save0, save1;
struct resource *res;
@@ -105,7 +105,7 @@ static int __devinit autcpu12_nvram_probe(struct platform_device *pdev)
return -ENOMEM;
}
-static int __devexit autcpu12_nvram_remove(struct platform_device *pdev)
+static int autcpu12_nvram_remove(struct platform_device *pdev)
{
struct autcpu12_nvram_priv *priv = platform_get_drvdata(pdev);
@@ -121,7 +121,7 @@ static struct platform_driver autcpu12_nvram_driver = {
.owner = THIS_MODULE,
},
.probe = autcpu12_nvram_probe,
- .remove = __devexit_p(autcpu12_nvram_remove),
+ .remove = autcpu12_nvram_remove,
};
module_platform_driver(autcpu12_nvram_driver);
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index ef5cde84a8b3..f833edfaab79 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -30,7 +30,8 @@
#include <linux/io.h>
#include <asm/unaligned.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "bfin-async-flash"
@@ -123,7 +124,7 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi
static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit bfin_flash_probe(struct platform_device *pdev)
+static int bfin_flash_probe(struct platform_device *pdev)
{
int ret;
struct physmap_flash_data *pdata = pdev->dev.platform_data;
@@ -172,7 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bfin_flash_remove(struct platform_device *pdev)
+static int bfin_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
gpio_free(state->enet_flash_pin);
@@ -184,7 +185,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
static struct platform_driver bfin_flash_driver = {
.probe = bfin_flash_probe,
- .remove = __devexit_p(bfin_flash_remove),
+ .remove = bfin_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
deleted file mode 100644
index c29cbf87ea0c..000000000000
--- a/drivers/mtd/maps/cdb89712.c
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Flash on Cirrus CDB89712
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-/* dynamic ioremap() areas */
-#define FLASH_START 0x00000000
-#define FLASH_SIZE 0x800000
-#define FLASH_WIDTH 4
-
-#define SRAM_START 0x60000000
-#define SRAM_SIZE 0xc000
-#define SRAM_WIDTH 4
-
-#define BOOTROM_START 0x70000000
-#define BOOTROM_SIZE 0x80
-#define BOOTROM_WIDTH 4
-
-
-static struct mtd_info *flash_mtd;
-
-struct map_info cdb89712_flash_map = {
- .name = "flash",
- .size = FLASH_SIZE,
- .bankwidth = FLASH_WIDTH,
- .phys = FLASH_START,
-};
-
-struct resource cdb89712_flash_resource = {
- .name = "Flash",
- .start = FLASH_START,
- .end = FLASH_START + FLASH_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_flash (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
- if (!cdb89712_flash_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_flash_map);
- flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
- if (!flash_mtd) {
- flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
- if (flash_mtd)
- flash_mtd->erasesize = 0x10000;
- }
- if (!flash_mtd) {
- printk("FLASH probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- flash_mtd->owner = THIS_MODULE;
-
- if (mtd_device_register(flash_mtd, NULL, 0)) {
- printk("FLASH device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(flash_mtd);
- flash_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_flash_map.virt);
-out_resource:
- release_resource (&cdb89712_flash_resource);
-out:
- return err;
-}
-
-
-
-
-
-static struct mtd_info *sram_mtd;
-
-struct map_info cdb89712_sram_map = {
- .name = "SRAM",
- .size = SRAM_SIZE,
- .bankwidth = SRAM_WIDTH,
- .phys = SRAM_START,
-};
-
-struct resource cdb89712_sram_resource = {
- .name = "SRAM",
- .start = SRAM_START,
- .end = SRAM_START + SRAM_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_sram (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
- if (!cdb89712_sram_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_sram_map);
- sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
- if (!sram_mtd) {
- printk("SRAM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- sram_mtd->owner = THIS_MODULE;
- sram_mtd->erasesize = 16;
-
- if (mtd_device_register(sram_mtd, NULL, 0)) {
- printk("SRAM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(sram_mtd);
- sram_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_sram_map.virt);
-out_resource:
- release_resource (&cdb89712_sram_resource);
-out:
- return err;
-}
-
-
-
-
-
-
-
-static struct mtd_info *bootrom_mtd;
-
-struct map_info cdb89712_bootrom_map = {
- .name = "BootROM",
- .size = BOOTROM_SIZE,
- .bankwidth = BOOTROM_WIDTH,
- .phys = BOOTROM_START,
-};
-
-struct resource cdb89712_bootrom_resource = {
- .name = "BootROM",
- .start = BOOTROM_START,
- .end = BOOTROM_START + BOOTROM_SIZE - 1,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
-
-static int __init init_cdb89712_bootrom (void)
-{
- int err;
-
- if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
- printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
- err = -EBUSY;
- goto out;
- }
-
- cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
- if (!cdb89712_bootrom_map.virt) {
- printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
- err = -EIO;
- goto out_resource;
- }
- simple_map_init(&cdb89712_bootrom_map);
- bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
- if (!bootrom_mtd) {
- printk("BootROM probe failed\n");
- err = -ENXIO;
- goto out_ioremap;
- }
-
- bootrom_mtd->owner = THIS_MODULE;
- bootrom_mtd->erasesize = 0x10000;
-
- if (mtd_device_register(bootrom_mtd, NULL, 0)) {
- printk("BootROM device addition failed\n");
- err = -ENOMEM;
- goto out_probe;
- }
-
- return 0;
-
-out_probe:
- map_destroy(bootrom_mtd);
- bootrom_mtd = 0;
-out_ioremap:
- iounmap((void *)cdb89712_bootrom_map.virt);
-out_resource:
- release_resource (&cdb89712_bootrom_resource);
-out:
- return err;
-}
-
-
-
-
-
-static int __init init_cdb89712_maps(void)
-{
-
- printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
- FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
-
- init_cdb89712_flash();
- init_cdb89712_sram();
- init_cdb89712_bootrom();
-
- return 0;
-}
-
-
-static void __exit cleanup_cdb89712_maps(void)
-{
- if (sram_mtd) {
- mtd_device_unregister(sram_mtd);
- map_destroy(sram_mtd);
- iounmap((void *)cdb89712_sram_map.virt);
- release_resource (&cdb89712_sram_resource);
- }
-
- if (flash_mtd) {
- mtd_device_unregister(flash_mtd);
- map_destroy(flash_mtd);
- iounmap((void *)cdb89712_flash_map.virt);
- release_resource (&cdb89712_flash_resource);
- }
-
- if (bootrom_mtd) {
- mtd_device_unregister(bootrom_mtd);
- map_destroy(bootrom_mtd);
- iounmap((void *)cdb89712_bootrom_map.virt);
- release_resource (&cdb89712_bootrom_resource);
- }
-}
-
-module_init(init_cdb89712_maps);
-module_exit(cleanup_cdb89712_maps);
-
-MODULE_AUTHOR("Ray L");
-MODULE_DESCRIPTION("ARM CDB89712 map driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 3d0e762fa5f2..586a1c77e48a 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
@@ -320,7 +320,7 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
+static void ck804xrom_remove_one(struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 08322b1c3e81..f784cf0caa13 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int __devinit esb2rom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
@@ -378,13 +378,13 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
return 0;
}
-static void __devexit esb2rom_remove_one (struct pci_dev *pdev)
+static void esb2rom_remove_one(struct pci_dev *pdev)
{
struct esb2rom_window *window = &esb2rom_window;
esb2rom_cleanup(window);
}
-static struct pci_device_id esb2rom_pci_tbl[] __devinitdata = {
+static struct pci_device_id esb2rom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
deleted file mode 100644
index 956e2e4f30ea..000000000000
--- a/drivers/mtd/maps/fortunet.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/* fortunet.c memory map
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define MAX_NUM_REGIONS 4
-#define MAX_NUM_PARTITIONS 8
-
-#define DEF_WINDOW_ADDR_PHY 0x00000000
-#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
-
-#define MTD_FORTUNET_PK "MTD FortuNet: "
-
-#define MAX_NAME_SIZE 128
-
-struct map_region
-{
- int window_addr_physical;
- int altbankwidth;
- struct map_info map_info;
- struct mtd_info *mymtd;
- struct mtd_partition parts[MAX_NUM_PARTITIONS];
- char map_name[MAX_NAME_SIZE];
- char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
-};
-
-static struct map_region map_regions[MAX_NUM_REGIONS];
-static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
-static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
-
-
-
-struct map_info default_map = {
- .size = DEF_WINDOW_SIZE,
- .bankwidth = 4,
-};
-
-static char * __init get_string_option(char *dest,int dest_size,char *sor)
-{
- if(!dest_size)
- return sor;
- dest_size--;
- while(*sor)
- {
- if(*sor==',')
- {
- sor++;
- break;
- }
- else if(*sor=='\"')
- {
- sor++;
- while(*sor)
- {
- if(*sor=='\"')
- {
- sor++;
- break;
- }
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- else
- {
- *dest = *sor;
- dest++;
- sor++;
- dest_size--;
- if(!dest_size)
- {
- *dest = 0;
- return sor;
- }
- }
- }
- *dest = 0;
- return sor;
-}
-
-static int __init MTD_New_Region(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[6];
- get_options (get_string_option(string,sizeof(string),line),6,params);
- if(params[0]<1)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
- " name,region-number[,base,size,bankwidth,altbankwidth]\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
- memcpy(&map_regions[params[1]].map_info,
- &default_map,sizeof(map_regions[params[1]].map_info));
- map_regions_set[params[1]] = 1;
- map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[params[1]].altbankwidth = 2;
- map_regions[params[1]].mymtd = NULL;
- map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
- strcpy(map_regions[params[1]].map_info.name,string);
- if(params[0]>1)
- {
- map_regions[params[1]].window_addr_physical = params[2];
- }
- if(params[0]>2)
- {
- map_regions[params[1]].map_info.size = params[3];
- }
- if(params[0]>3)
- {
- map_regions[params[1]].map_info.bankwidth = params[4];
- }
- if(params[0]>4)
- {
- map_regions[params[1]].altbankwidth = params[5];
- }
- return 1;
-}
-
-static int __init MTD_New_Partition(char *line)
-{
- char string[MAX_NAME_SIZE];
- int params[4];
- get_options (get_string_option(string,sizeof(string),line),4,params);
- if(params[0]<3)
- {
- printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
- " name,region-number,size,offset\n");
- return 1;
- }
- if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
- {
- printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
- params[1],MAX_NUM_REGIONS-1);
- return 1;
- }
- if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
- {
- printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
- return 1;
- }
- map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
- map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
- strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
- map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
- params[2];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
- params[3];
- map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
- map_regions_parts[params[1]]++;
- return 1;
-}
-
-__setup("MTD_Region=", MTD_New_Region);
-__setup("MTD_Partition=", MTD_New_Partition);
-
-/* Backwards-spelling-compatibility */
-__setup("MTD_Partion=", MTD_New_Partition);
-
-static int __init init_fortunet(void)
-{
- int ix,iy;
- for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_parts[ix]&&(!map_regions_set[ix]))
- {
- printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
- ix);
- memset(&map_regions[ix],0,sizeof(map_regions[ix]));
- memcpy(&map_regions[ix].map_info,&default_map,
- sizeof(map_regions[ix].map_info));
- map_regions_set[ix] = 1;
- map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
- map_regions[ix].altbankwidth = 2;
- map_regions[ix].mymtd = NULL;
- map_regions[ix].map_info.name = map_regions[ix].map_name;
- strcpy(map_regions[ix].map_info.name,"FORTUNET");
- }
- if(map_regions_set[ix])
- {
- iy++;
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physically "
- " address %x size %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
-
- map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
-
- map_regions[ix].map_info.virt =
- ioremap_nocache(
- map_regions[ix].window_addr_physical,
- map_regions[ix].map_info.size);
- if(!map_regions[ix].map_info.virt)
- {
- int j = 0;
- printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
- map_regions[ix].map_info.name);
- for (j = 0 ; j < ix; j++)
- iounmap(map_regions[j].map_info.virt);
- return -ENXIO;
- }
- simple_map_init(&map_regions[ix].map_info);
-
- printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is virtually at: %x\n",
- map_regions[ix].map_info.name,
- map_regions[ix].map_info.virt);
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- if((!map_regions[ix].mymtd)&&(
- map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
- {
- printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
- "for %s flash.\n",
- map_regions[ix].map_info.name);
- map_regions[ix].map_info.bankwidth =
- map_regions[ix].altbankwidth;
- map_regions[ix].mymtd = do_map_probe("cfi_probe",
- &map_regions[ix].map_info);
- }
- map_regions[ix].mymtd->owner = THIS_MODULE;
- mtd_device_register(map_regions[ix].mymtd,
- map_regions[ix].parts,
- map_regions_parts[ix]);
- }
- }
- if(iy)
- return 0;
- return -ENXIO;
-}
-
-static void __exit cleanup_fortunet(void)
-{
- int ix;
- for(ix=0;ix<MAX_NUM_REGIONS;ix++)
- {
- if(map_regions_set[ix])
- {
- if( map_regions[ix].mymtd )
- {
- mtd_device_unregister(map_regions[ix].mymtd);
- map_destroy( map_regions[ix].mymtd );
- }
- iounmap((void *)map_regions[ix].map_info.virt);
- }
- }
-}
-
-module_init(init_fortunet);
-module_exit(cleanup_fortunet);
-
-MODULE_AUTHOR("FortuNet, Inc.");
-MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index e4de96ba52b3..7b643de2500b 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -26,7 +26,8 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "gpio-addr-flash"
#define PFX DRIVER_NAME ": "
@@ -142,7 +143,8 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
*
* See gf_copy_from() caveat.
*/
-static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void gf_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
{
struct async_state *state = gf_map_info_to_state(map);
@@ -185,7 +187,7 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
* ...
* };
*/
-static int __devinit gpio_flash_probe(struct platform_device *pdev)
+static int gpio_flash_probe(struct platform_device *pdev)
{
size_t i, arr_size;
struct physmap_flash_data *pdata;
@@ -258,7 +260,7 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit gpio_flash_remove(struct platform_device *pdev)
+static int gpio_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
size_t i = 0;
@@ -273,7 +275,7 @@ static int __devexit gpio_flash_remove(struct platform_device *pdev)
static struct platform_driver gpio_flash_driver = {
.probe = gpio_flash_probe,
- .remove = __devexit_p(gpio_flash_remove),
+ .remove = gpio_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 6689dcb3124d..c7478e18f485 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int __devinit ichxrom_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
@@ -315,13 +315,13 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
}
-static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
+static void ichxrom_remove_one(struct pci_dev *pdev)
{
struct ichxrom_window *window = &ichxrom_window;
ichxrom_cleanup(window);
}
-static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
+static struct pci_device_id ichxrom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 93f03175c82d..b14053b25026 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -63,24 +63,24 @@ struct vr_nor_mtd {
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
-static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
+static void vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
-static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
+static int vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
-static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
+static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
-static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
+static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char *probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
@@ -96,7 +96,7 @@ static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
return 0;
}
-static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
+static void vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
@@ -116,7 +116,7 @@ static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
-static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
+static int vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
@@ -176,7 +176,7 @@ static struct pci_device_id vr_nor_pci_ids[] = {
{0,}
};
-static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
+static void vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
@@ -189,8 +189,7 @@ static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static int __devinit
-vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
@@ -256,7 +255,7 @@ vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
- .remove = __devexit_p(vr_nor_pci_remove),
+ .remove = vr_nor_pci_remove,
.id_table = vr_nor_pci_ids,
};
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index c03456f17004..3c3c791eb96a 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,7 +45,7 @@ struct ltq_mtd {
};
static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = {
+static const char *ltq_probe_types[] = {
"cmdlinepart", "ofpart", NULL };
static map_word
@@ -109,7 +109,7 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __devinit
+static int
ltq_mtd_probe(struct platform_device *pdev)
{
struct mtd_part_parser_data ppdata;
@@ -185,7 +185,7 @@ err_out:
return err;
}
-static int __devexit
+static int
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
@@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
static struct platform_driver ltq_mtd_driver = {
.probe = ltq_mtd_probe,
- .remove = __devexit_p(ltq_mtd_remove),
+ .remove = ltq_mtd_remove,
.driver = {
.name = "ltq-nor",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 3c7ad17fca78..ab0fead56b83 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -125,7 +125,7 @@ static int latch_addr_flash_remove(struct platform_device *dev)
return 0;
}
-static int __devinit latch_addr_flash_probe(struct platform_device *dev)
+static int latch_addr_flash_probe(struct platform_device *dev)
{
struct latch_addr_flash_data *latch_addr_data;
struct latch_addr_flash_info *info;
@@ -218,7 +218,7 @@ done:
static struct platform_driver latch_addr_flash_driver = {
.probe = latch_addr_flash_probe,
- .remove = __devexit_p(latch_addr_flash_remove),
+ .remove = latch_addr_flash_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1c30c1a307f4..c3aebd5da5d6 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -253,8 +253,7 @@ static struct pci_device_id mtd_pci_ids[] = {
* Generic code follows.
*/
-static int __devinit
-mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct mtd_pci_info *info = (struct mtd_pci_info *)id->driver_data;
struct map_pci_info *map = NULL;
@@ -308,8 +307,7 @@ out:
return err;
}
-static void __devexit
-mtd_pci_remove(struct pci_dev *dev)
+static void mtd_pci_remove(struct pci_dev *dev)
{
struct mtd_info *mtd = pci_get_drvdata(dev);
struct map_pci_info *map = mtd->priv;
@@ -326,7 +324,7 @@ mtd_pci_remove(struct pci_dev *dev)
static struct pci_driver mtd_pci_driver = {
.name = "MTD PCI",
.probe = mtd_pci_probe,
- .remove = __devexit_p(mtd_pci_remove),
+ .remove = mtd_pci_remove,
.id_table = mtd_pci_ids,
};
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 6f19acadb06c..7901d72c9242 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -77,8 +77,8 @@ static int of_flash_remove(struct platform_device *dev)
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
-static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
- struct map_info *map)
+static struct mtd_info *obsolete_probe(struct platform_device *dev,
+ struct map_info *map)
{
struct device_node *dp = dev->dev.of_node;
const char *of_probe;
@@ -116,7 +116,7 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev,
information. */
static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
"ofpart", "ofoldpart", NULL };
-static const char ** __devinit of_get_probes(struct device_node *dp)
+static const char **of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
@@ -145,14 +145,14 @@ static const char ** __devinit of_get_probes(struct device_node *dp)
return res;
}
-static void __devinit of_free_probes(const char **probes)
+static void of_free_probes(const char **probes)
{
if (probes != part_probe_types_def)
kfree(probes);
}
static struct of_device_id of_flash_match[];
-static int __devinit of_flash_probe(struct platform_device *dev)
+static int of_flash_probe(struct platform_device *dev)
{
const char **part_probe_types;
const struct of_device_id *match;
@@ -170,6 +170,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
bool map_indirect;
+ const char *mtd_name = NULL;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
@@ -178,6 +179,8 @@ static int __devinit of_flash_probe(struct platform_device *dev)
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
/*
* Get number of "reg" tuples. Scan for MTD devices on area's
* described by each "reg" region. This makes it possible (including
@@ -234,7 +237,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
goto err_out;
}
- info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
@@ -282,6 +285,7 @@ static int __devinit of_flash_probe(struct platform_device *dev)
}
err = 0;
+ info->cmtd = NULL;
if (info->list_size == 1) {
info->cmtd = info->list[0].mtd;
} else if (info->list_size > 1) {
@@ -290,9 +294,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
*/
info->cmtd = mtd_concat_create(mtd_list, info->list_size,
dev_name(&dev->dev));
- if (info->cmtd == NULL)
- err = -ENXIO;
}
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+
if (err)
goto err_out;
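
Besides the annotation removal, the physmap_of hunks read an optional "linux,mtd-name" device-tree property and fall back to the device name, and they make the concat NULL check unconditional. A hedged sketch of the naming pattern, as a standalone helper whose name and wrapping are assumptions, not the driver's code:

#include <linux/of.h>
#include <linux/device.h>

static const char *pick_map_name(struct device *dev, struct device_node *dp)
{
	const char *name = NULL;

	/* Optional property: on failure, name simply stays NULL */
	of_property_read_string(dp, "linux,mtd-name", &name);

	/* GNU "a ?: b" evaluates a once and falls back to b when a is NULL */
	return name ?: dev_name(dev);
}
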
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 65bd1cd4d627..dc6df9abea0b 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -58,7 +58,7 @@ static void pismo_set_vpp(struct platform_device *pdev, int on)
pismo->vpp(pismo->vpp_data, on);
}
-static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
+static unsigned int pismo_width_to_bytes(unsigned int width)
{
width &= 15;
if (width > 2)
@@ -66,8 +66,8 @@ static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
return 1 << width;
}
-static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
- u8 addr, size_t size)
+static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
+ size_t size)
{
int ret;
struct i2c_msg msg[] = {
@@ -88,8 +88,9 @@ static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
-static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
- struct pismo_mem *region, const char *name, void *pdata, size_t psize)
+static int pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name,
+ void *pdata, size_t psize)
{
struct platform_device *dev;
struct resource res = { };
@@ -129,8 +130,8 @@ static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
return ret;
}
-static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct physmap_flash_data data = {
.width = region->width,
@@ -143,8 +144,8 @@ static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
- struct pismo_mem *region)
+static int pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
{
struct platdata_mtd_ram data = {
.bankwidth = region->width,
@@ -154,8 +155,8 @@ static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
&data, sizeof(data));
}
-static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
- const struct pismo_cs_block *cs, phys_addr_t base)
+static void pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
{
struct device *dev = &pismo->client->dev;
struct pismo_mem region;
@@ -197,7 +198,7 @@ static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int __devexit pismo_remove(struct i2c_client *client)
+static int pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -210,8 +211,8 @@ static int __devexit pismo_remove(struct i2c_client *client)
return 0;
}
-static int __devinit pismo_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pismo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct pismo_pdata *pdata = client->dev.platform_data;
@@ -267,7 +268,7 @@ static struct i2c_driver pismo_driver = {
.owner = THIS_MODULE,
},
.probe = pismo_probe,
- .remove = __devexit_p(pismo_remove),
+ .remove = pismo_remove,
.id_table = pismo_id,
};
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 891558de3ec1..2de66b062f0d 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -219,7 +219,7 @@ static int platram_probe(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RW);
- /* check to see if there are any available partitions, or wether
+ /* check to see if there are any available partitions, or whether
* to add this device whole */
err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 81884c277405..43e3dbb976d9 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -49,7 +49,7 @@ struct pxa2xx_flash_info {
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
+static int pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = pdev->dev.platform_data;
struct pxa2xx_flash_info *info;
@@ -105,7 +105,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
+static int pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -139,7 +139,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
- .remove = __devexit_p(pxa2xx_flash_remove),
+ .remove = pxa2xx_flash_remove,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a675bdbcb0fe..f694417cf7e6 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -149,8 +149,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
-static struct sa_info *__devinit
-sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
+static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ struct flash_platform_data *plat)
{
struct sa_info *info;
int nr, size, i, ret = 0;
@@ -246,7 +246,7 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
+static int sa1100_mtd_probe(struct platform_device *pdev)
{
struct flash_platform_data *plat = pdev->dev.platform_data;
struct sa_info *info;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 9dcbc684abdb..c77b68c9412f 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -69,8 +69,7 @@ static struct map_info scb2_map = {
};
static int region_fail;
-static int __devinit
-scb2_fixup_mtd(struct mtd_info *mtd)
+static int scb2_fixup_mtd(struct mtd_info *mtd)
{
int i;
int done = 0;
@@ -133,8 +132,8 @@ scb2_fixup_mtd(struct mtd_info *mtd)
/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR 0x41
#define CSB5_FCR_DECODE_ALL 0x0e
-static int __devinit
-scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int scb2_flash_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
u8 reg;
@@ -197,8 +196,7 @@ scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
return 0;
}
-static void __devexit
-scb2_flash_remove(struct pci_dev *dev)
+static void scb2_flash_remove(struct pci_dev *dev)
{
if (!scb2_mtd)
return;
@@ -231,7 +229,7 @@ static struct pci_driver scb2_flash_driver = {
.name = "Intel SCB2 BIOS Flash",
.id_table = scb2_flash_pci_ids,
.probe = scb2_flash_probe,
- .remove = __devexit_p(scb2_flash_remove),
+ .remove = scb2_flash_remove,
};
module_pci_driver(scb2_flash_driver);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 175e537b444f..d467f3b11c96 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -108,7 +108,7 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp)
return 0;
}
-static int __devinit uflash_probe(struct platform_device *op)
+static int uflash_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
@@ -121,7 +121,7 @@ static int __devinit uflash_probe(struct platform_device *op)
return uflash_devinit(op, dp);
}
-static int __devexit uflash_remove(struct platform_device *op)
+static int uflash_remove(struct platform_device *op)
{
struct uflash_dev *up = dev_get_drvdata(&op->dev);
@@ -155,7 +155,7 @@ static struct platform_driver uflash_driver = {
.of_match_table = uflash_match,
},
.probe = uflash_probe,
- .remove = __devexit_p(uflash_remove),
+ .remove = uflash_remove,
};
module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 2e2b0945edc7..6b223cfe92b7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -596,7 +596,7 @@ fail_name:
}
/* Handles very basic info about the flash, queries for details */
-static int __devinit vmu_connect(struct maple_device *mdev)
+static int vmu_connect(struct maple_device *mdev)
{
unsigned long test_flash_data, basic_flash_data;
int c, error;
@@ -690,7 +690,7 @@ fail_nomem:
return error;
}
-static void __devexit vmu_disconnect(struct maple_device *mdev)
+static void vmu_disconnect(struct maple_device *mdev)
{
struct memcard *card;
struct mdev_part *mpart;
@@ -772,7 +772,7 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
}
-static int __devinit probe_maple_vmu(struct device *dev)
+static int probe_maple_vmu(struct device *dev)
{
int error;
struct maple_device *mdev = to_maple_dev(dev);
@@ -789,7 +789,7 @@ static int __devinit probe_maple_vmu(struct device *dev)
return 0;
}
-static int __devexit remove_maple_vmu(struct device *dev)
+static int remove_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
@@ -802,7 +802,7 @@ static struct maple_driver vmu_flash_driver = {
.drv = {
.name = "Dreamcast_visual_memory",
.probe = probe_maple_vmu,
- .remove = __devexit_p(remove_maple_vmu),
+ .remove = remove_maple_vmu,
},
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f1f06715d4e0..5ad39bb5ab4c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,7 +32,6 @@
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <asm/uaccess.h>
#include "mtdcore.h"
@@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
- if (kthread_should_stop())
- return 1;
-
return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static int mtd_blktrans_thread(void *arg)
+static void mtd_blktrans_work(struct work_struct *work)
{
- struct mtd_blktrans_dev *dev = arg;
+ struct mtd_blktrans_dev *dev =
+ container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
struct request_queue *rq = dev->rq;
struct request *req = NULL;
@@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
+ while (1) {
int res;
dev->bg_stop = false;
@@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg)
background_done = !dev->bg_stop;
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop())
- set_current_state(TASK_RUNNING);
-
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
+ break;
}
spin_unlock_irq(rq->queue_lock);
@@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
-
- return 0;
}
static void mtd_blktrans_request(struct request_queue *rq)
@@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq)
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
- else {
- dev->bg_stop = true;
- wake_up_process(dev->thread);
- }
+ else
+ queue_work(dev->wq, &dev->work);
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -325,7 +310,7 @@ unlock:
return ret;
}
-static const struct block_device_operations mtd_blktrans_ops = {
+static const struct block_device_operations mtd_block_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
@@ -401,7 +386,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
- gd->fops = &mtd_blktrans_ops;
+ gd->fops = &mtd_block_ops;
if (tr->part_bits)
if (new->devnum < 26)
@@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing thread */
- /* TODO: workqueue ? */
- new->thread = kthread_run(mtd_blktrans_thread, new,
- "%s%d", tr->name, new->mtd->index);
- if (IS_ERR(new->thread)) {
- ret = PTR_ERR(new->thread);
+ /* Create processing workqueue */
+ new->wq = alloc_workqueue("%s%d", 0, 0,
+ tr->name, new->mtd->index);
+ if (!new->wq)
goto error4;
- }
+ INIT_WORK(&new->work, mtd_blktrans_work);
+
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
-
- /* Stop the thread */
- kthread_stop(old->thread);
+ /* Stop workqueue. This will perform any pending request. */
+ destroy_workqueue(old->wq);
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
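
The mtd_blkdevs hunks replace the per-device kthread with a work item that the request function queues whenever there is something to do; teardown relies on destroy_workqueue() flushing pending work before returning. A minimal sketch of that kthread-to-workqueue pattern, with illustrative names rather than the mtd_blktrans ones:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct	*wq;
	struct work_struct	work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* service all requests queued for dev, then simply return */
	pr_debug("servicing %p\n", dev);
}

static int my_init(struct my_dev *dev, const char *name, int index)
{
	dev->wq = alloc_workqueue("%s%d", 0, 0, name, index);
	if (!dev->wq)
		return -ENOMEM;
	INIT_WORK(&dev->work, my_work_fn);
	return 0;
}

static void my_kick(struct my_dev *dev)
{
	queue_work(dev->wq, &dev->work);	/* replaces wake_up_process() */
}

static void my_exit(struct my_dev *dev)
{
	destroy_workqueue(dev->wq);	/* flushes any pending work first */
}
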
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f5b3f91fa1cc..97bb8f6304d4 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -271,7 +271,7 @@ static void find_next_position(struct mtdoops_context *cxt)
if (count[0] == 0xffffffff && count[1] == 0xffffffff)
mark_page_unused(cxt, page);
- if (count[0] == 0xffffffff)
+ if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
@@ -289,14 +289,13 @@ static void find_next_position(struct mtdoops_context *cxt)
}
}
if (maxcount == 0xffffffff) {
- cxt->nextpage = 0;
- cxt->nextcount = 1;
- schedule_work(&cxt->work_erase);
- return;
+ cxt->nextpage = cxt->oops_pages - 1;
+ cxt->nextcount = 0;
+ }
+ else {
+ cxt->nextpage = maxpos;
+ cxt->nextcount = maxcount;
}
-
- cxt->nextpage = maxpos;
- cxt->nextcount = maxcount;
mtdoops_inc_counter(cxt);
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 4883139460be..5819eb575210 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -49,24 +49,31 @@ config MTD_NAND_MUSEUM_IDS
NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
of these chips were reused by later, larger chips.
-config MTD_NAND_AUTCPU12
- tristate "SmartMediaCard on autronix autcpu12 board"
- depends on ARCH_AUTCPU12
- help
- This enables the driver for the autronix autcpu12 board to
- access the SmartMediaCard.
-
config MTD_NAND_DENALI
- depends on PCI
+ tristate "Support Denali NAND controller"
+ help
+ Enable support for the Denali NAND controller. This should be
+ combined with either the PCI or platform drivers to provide device
+ registration.
+
+config MTD_NAND_DENALI_PCI
tristate "Support Denali NAND controller on Intel Moorestown"
+ depends on PCI && MTD_NAND_DENALI
help
Enable the driver for NAND flash on Intel Moorestown, using the
Denali NAND controller core.
-
+
+config MTD_NAND_DENALI_DT
+ tristate "Support Denali NAND controller as a DT device"
+ depends on HAVE_CLK && MTD_NAND_DENALI
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
config MTD_NAND_DENALI_SCRATCH_REG_ADDR
hex "Denali NAND size scratch register address"
default "0xFF108018"
- depends on MTD_NAND_DENALI
+ depends on MTD_NAND_DENALI_PCI
help
Some platforms place the NAND chip size in a scratch register
because (some versions of) the driver aren't able to automatically
@@ -86,12 +93,6 @@ config MTD_NAND_GPIO
help
This enables a GPIO based NAND flash driver.
-config MTD_NAND_SPIA
- tristate "NAND Flash device on SPIA board"
- depends on ARCH_P720T
- help
- If you had to ask, you don't have one. Say 'N'.
-
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
depends on MACH_AMS_DELTA
@@ -446,6 +447,14 @@ config MTD_NAND_GPMI_NAND
block, such as SD card. So pay attention to it when you enable
the GPMI.
+config MTD_NAND_BCM47XXNFLASH
+ tristate "Support for NAND flash on BCM4706 BCMA bus"
+ depends on BCMA_NFLASH
+ help
+	  The BCMA bus can have various flash memories attached; they are
+	  registered by bcma as platform devices. This enables the driver for
+	  NAND flash memories. For now only BCM4706 is supported.
+
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
depends on HAS_IOMEM
@@ -512,12 +521,6 @@ config MTD_NAND_MXC
This enables the driver for the NAND flash controller on the
MXC processors.
-config MTD_NAND_NOMADIK
- tristate "ST Nomadik 8815 NAND support"
- depends on ARCH_NOMADIK
- help
- Driver for the NAND flash controller on the Nomadik, with ECC.
-
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on SUPERH || ARCH_SHMOBILE
@@ -559,7 +562,7 @@ config MTD_NAND_JZ4740
config MTD_NAND_FSMC
tristate "Support for NAND on ST Micros FSMC"
- depends on PLAT_SPEAR || PLAT_NOMADIK || MACH_U300
+ depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
help
Enables support for NAND Flash chips on the ST Microelectronics
Flexible Static Memory Controller (FSMC)
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 2cbd0916b733..d76d91205691 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -9,10 +9,10 @@ obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
-obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
-obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
@@ -47,11 +47,11 @@ obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
-obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e7723aa7acc..f1d71cdc8aac 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -173,7 +173,7 @@ static const struct gpio _mandatory_gpio[] = {
/*
* Main initialization routine
*/
-static int __devinit ams_delta_init(struct platform_device *pdev)
+static int ams_delta_init(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -270,7 +270,7 @@ out_free:
/*
* Clean up routine
*/
-static int __devexit ams_delta_cleanup(struct platform_device *pdev)
+static int ams_delta_cleanup(struct platform_device *pdev)
{
void __iomem *io_base = platform_get_drvdata(pdev);
@@ -289,7 +289,7 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
static struct platform_driver ams_delta_nand_driver = {
.probe = ams_delta_init,
- .remove = __devexit_p(ams_delta_cleanup),
+ .remove = ams_delta_cleanup,
.driver = {
.name = "ams-delta-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 92623ac2015a..c516a9408087 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -331,14 +331,14 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* 12-bits 20-bytes 21-bytes
* 24-bits 39-bytes 42-bytes
*/
-static int __devinit pmecc_get_ecc_bytes(int cap, int sector_size)
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
int m = 12 + sector_size / 512;
return (m * cap + 7) / 8;
}
-static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
- int oobsize, int ecc_len)
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
{
int i;
@@ -353,7 +353,7 @@ static void __devinit pmecc_config_ecc_layout(struct nand_ecclayout *layout,
oobsize - ecc_len - layout->oobfree[0].offset;
}
-static void __devinit __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
{
int table_size;
@@ -375,7 +375,7 @@ static void pmecc_data_free(struct atmel_nand_host *host)
kfree(host->pmecc_delta);
}
-static int __devinit pmecc_data_alloc(struct atmel_nand_host *host)
+static int pmecc_data_alloc(struct atmel_nand_host *host)
{
const int cap = host->pmecc_corr_cap;
@@ -724,6 +724,7 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
struct atmel_nand_host *host = nand_chip->priv;
int i, err_nbr, eccbytes;
uint8_t *buf_pos;
+ int total_err = 0;
eccbytes = nand_chip->ecc.bytes;
for (i = 0; i < eccbytes; i++)
@@ -751,12 +752,13 @@ normal_check:
pmecc_correct_data(mtd, buf_pos, ecc, i,
host->pmecc_bytes_per_sector, err_nbr);
mtd->ecc_stats.corrected += err_nbr;
+ total_err += err_nbr;
}
}
pmecc_stat >>= 1;
}
- return 0;
+ return total_err;
}
static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
@@ -768,6 +770,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
unsigned long end_time;
+ int bitflips = 0;
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -790,11 +793,14 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
}
stat = pmecc_readl_relaxed(host->ecc, ISR);
- if (stat != 0)
- if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
- return -EIO;
+ if (stat != 0) {
+ bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ if (bitflips < 0)
+ /* uncorrectable errors */
+ return 0;
+ }
- return 0;
+ return bitflips;
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -1206,8 +1212,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
#if defined(CONFIG_OF)
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
{
u32 val, table_offset;
u32 offset[2];
@@ -1293,8 +1299,8 @@ static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
return 0;
}
#else
-static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
{
return -EINVAL;
}
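
The atmel_nand hunks also change the PMECC read path to return the number of corrected bitflips instead of a bare 0/-EIO, which matches the convention the NAND core uses for ecc.read_page() handlers when deciding whether to report -EUCLEAN. A rough, hedged sketch of that reporting convention, not the driver's exact code:

#include <linux/mtd/mtd.h>

/*
 * Roughly: a read-page handler returns the corrected-bitflip count, bumps
 * ecc_stats.failed for uncorrectable data and returns 0 for it; the core
 * compares the returned count against the bitflip threshold.
 */
static int report_ecc_result(struct mtd_info *mtd, int stat)
{
	if (stat < 0) {				/* uncorrectable */
		mtd->ecc_stats.failed++;
		return 0;
	}
	mtd->ecc_stats.corrected += stat;
	return stat;
}
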
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 5c47b200045a..217459d02b2f 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -382,7 +382,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-static int __devinit find_nand_cs(unsigned long nand_base)
+static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
@@ -403,7 +403,7 @@ static int __devinit find_nand_cs(unsigned long nand_base)
return -ENODEV;
}
-static int __devinit au1550nd_probe(struct platform_device *pdev)
+static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
@@ -491,7 +491,7 @@ out1:
return ret;
}
-static int __devexit au1550nd_remove(struct platform_device *pdev)
+static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -509,7 +509,7 @@ static struct platform_driver au1550nd_driver = {
.owner = THIS_MODULE,
},
.probe = au1550nd_probe,
- .remove = __devexit_p(au1550nd_remove),
+ .remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
deleted file mode 100644
index 04769a49a7cb..000000000000
--- a/drivers/mtd/nand/autcpu12.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * drivers/mtd/autcpu12.c
- *
- * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
- *
- * Derived from drivers/mtd/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * autronix autcpu12 board, which is a SmartMediaCard. It supports
- * 16MiB, 32MiB and 64MiB cards.
- *
- *
- * 02-12-2002 TG Cleanup of module params
- *
- * 02-20-2002 TG adjusted for different rd/wr address support
- * added support for read device ready/busy line
- * added page_cache
- *
- * 10-06-2002 TG 128K card support added
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <mach/autcpu12.h>
-
-/*
- * MTD structure for AUTCPU12 board
- */
-static struct mtd_info *autcpu12_mtd = NULL;
-static void __iomem *autcpu12_fio_base;
-
-/*
- * Define partitions for flash devices
- */
-static struct mtd_partition partition_info16k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 8 * SZ_1M },
-};
-
-static struct mtd_partition partition_info32k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 8 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 8 * SZ_1M,
- .size = 24 * SZ_1M },
-};
-
-static struct mtd_partition partition_info64k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 48 * SZ_1M },
-};
-
-static struct mtd_partition partition_info128k[] = {
- { .name = "AUTCPU12 flash partition 1",
- .offset = 0,
- .size = 16 * SZ_1M },
- { .name = "AUTCPU12 flash partition 2",
- .offset = 16 * SZ_1M,
- .size = 112 * SZ_1M },
-};
-
-#define NUM_PARTITIONS16K 2
-#define NUM_PARTITIONS32K 2
-#define NUM_PARTITIONS64K 2
-#define NUM_PARTITIONS128K 2
-/*
- * hardware specific access to control-lines
- *
- * ALE bit 4 autcpu12_pedr
- * CLE bit 5 autcpu12_pedr
- * NCE bit 0 fio_ctrl
- *
- */
-static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr;
- unsigned char bits;
-
- bits = clps_readb(AUTCPU12_SMC_PORT_OFFSET) & ~0x30;
- bits |= (ctrl & NAND_CLE) << 4;
- bits |= (ctrl & NAND_ALE) << 2;
- clps_writeb(bits, AUTCPU12_SMC_PORT_OFFSET);
-
- addr = autcpu12_fio_base + AUTCPU12_SMC_SELECT_OFFSET;
- writeb((readb(addr) & ~0x1) | (ctrl & NAND_NCE), addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-int autcpu12_device_ready(struct mtd_info *mtd)
-{
- return clps_readb(AUTCPU12_SMC_PORT_OFFSET) & AUTCPU12_SMC_RDY;
-}
-
-/*
- * Main initialization routine
- */
-static int __init autcpu12_init(void)
-{
- struct nand_chip *this;
- int err = 0;
-
- /* Allocate memory for MTD device structure and private data */
- autcpu12_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!autcpu12_mtd) {
- printk("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
- err = -ENOMEM;
- goto out;
- }
-
- /* map physical address */
- autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
- if (!autcpu12_fio_base) {
- printk("Ioremap autcpu12 SmartMedia Card failed\n");
- err = -EIO;
- goto out_mtd;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&autcpu12_mtd[1]);
-
- /* Initialize structures */
- memset(autcpu12_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- autcpu12_mtd->priv = this;
- autcpu12_mtd->owner = THIS_MODULE;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = autcpu12_fio_base;
- this->IO_ADDR_W = autcpu12_fio_base;
- this->cmd_ctrl = autcpu12_hwcontrol;
- this->dev_ready = autcpu12_device_ready;
- /* 20 us command delay time */
- this->chip_delay = 20;
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Enable the following for a flash based bad block table */
- /*
- this->bbt_options = NAND_BBT_USE_FLASH;
- */
- this->bbt_options = NAND_BBT_USE_FLASH;
-
- /* Scan to find existence of the device */
- if (nand_scan(autcpu12_mtd, 1)) {
- err = -ENXIO;
- goto out_ior;
- }
-
- /* Register the partitions */
- switch (autcpu12_mtd->size) {
- case SZ_16M:
- mtd_device_register(autcpu12_mtd, partition_info16k,
- NUM_PARTITIONS16K);
- break;
- case SZ_32M:
- mtd_device_register(autcpu12_mtd, partition_info32k,
- NUM_PARTITIONS32K);
- break;
- case SZ_64M:
- mtd_device_register(autcpu12_mtd, partition_info64k,
- NUM_PARTITIONS64K);
- break;
- case SZ_128M:
- mtd_device_register(autcpu12_mtd, partition_info128k,
- NUM_PARTITIONS128K);
- break;
- default:
- printk("Unsupported SmartMedia device\n");
- err = -ENXIO;
- goto out_ior;
- }
- goto out;
-
- out_ior:
- iounmap(autcpu12_fio_base);
- out_mtd:
- kfree(autcpu12_mtd);
- out:
- return err;
-}
-
-module_init(autcpu12_init);
-
-/*
- * Clean up routine
- */
-static void __exit autcpu12_cleanup(void)
-{
- /* Release resources, unregister device */
- nand_release(autcpu12_mtd);
-
- /* unmap physical address */
- iounmap(autcpu12_fio_base);
-
- /* Free the MTD device structure */
- kfree(autcpu12_mtd);
-}
-
-module_exit(autcpu12_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
new file mode 100644
index 000000000000..f05b119e134b
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@
+bcm47xxnflash-y += main.o
+bcm47xxnflash-y += ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000000..0bdb2ce4da75
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,22 @@
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+
+struct bcm47xxnflash {
+ struct bcma_drv_cc *cc;
+
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+
+ unsigned curr_command;
+ int curr_page_addr;
+ int curr_column;
+
+ u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* BCM47XXNFLASH */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
new file mode 100644
index 000000000000..8363a9a5fa3f
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -0,0 +1,108 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxnflash *b47n;
+ int err = 0;
+
+ b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
+ if (!b47n) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ b47n->nand_chip.priv = b47n;
+ b47n->mtd.owner = THIS_MODULE;
+ b47n->mtd.priv = &b47n->nand_chip; /* Required */
+ b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+ if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ err = bcm47xxnflash_ops_bcm4706_init(b47n);
+ } else {
+ pr_err("Device not supported\n");
+ err = -ENOTSUPP;
+ }
+ if (err) {
+ pr_err("Initialization failed: %d\n", err);
+ goto err_init;
+ }
+
+ err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
+ if (err) {
+ pr_err("Failed to register MTD device: %d\n", err);
+ goto err_dev_reg;
+ }
+
+ return 0;
+
+err_dev_reg:
+err_init:
+ kfree(b47n);
+out:
+ return err;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+ struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+
+ if (nflash->mtd)
+ mtd_device_unregister(nflash->mtd);
+
+ return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+ .remove = bcm47xxnflash_remove,
+ .driver = {
+ .name = "bcma_nflash",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bcm47xxnflash_init(void)
+{
+ int err;
+
+ /*
+	 * Platform device "bcma_nflash" exists on SoCs and is registered very
+	 * early; it won't be added at runtime (use platform_driver_probe).
+ */
+ err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ if (err)
+		pr_err("Failed to register NAND flash driver: %d\n", err);
+
+ return err;
+}
+
+static void __exit bcm47xxnflash_exit(void)
+{
+ platform_driver_unregister(&bcm47xxnflash_driver);
+}
+
+module_init(bcm47xxnflash_init);
+module_exit(bcm47xxnflash_exit);
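
main.c registers with platform_driver_probe() rather than module_platform_driver() because, as its comment notes, the bcma_nflash platform device already exists when the module loads and will never appear later. A minimal sketch of that one-shot registration style; the driver name is reused from the patch, everything else is illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* the device is known to exist already; bind it exactly once */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "bcma_nflash",
	},
};

static int __init example_init(void)
{
	/*
	 * platform_driver_probe() binds existing devices once and then
	 * discards the probe routine; it returns -ENODEV if nothing bound.
	 */
	return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
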
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000000..595de4012e71
--- /dev/null
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,413 @@
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcm47xxnflash.h"
+
+/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have
+ * shown ~1000 retries as the maximum. */
+#define NFLASH_READY_RETRIES 10000
+
+#define NFLASH_SECTOR_SIZE 512
+
+#define NCTL_CMD0 0x00010000
+#define NCTL_CMD1W 0x00080000
+#define NCTL_READ 0x00100000
+#define NCTL_WRITE 0x00200000
+#define NCTL_SPECADDR 0x01000000
+#define NCTL_READY 0x04000000
+#define NCTL_ERR 0x08000000
+#define NCTL_CSA 0x40000000
+#define NCTL_START 0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+ return ((ns * 1000 * clock) / 1000000) + 1;
+}
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+ int i = 0;
+
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+ i = 0;
+ break;
+ }
+ }
+ if (i) {
+ pr_err("NFLASH control command not ready!\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+ int i;
+
+ for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+ if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+ BCMA_CC_NFLASH_CTL_ERR) {
+ pr_err("Error on polling\n");
+ return -EBUSY;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ pr_err("Polling timeout!\n");
+ return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ u32 ctlcode;
+ u32 *dest = (u32 *)buf;
+ int i;
+ int toread;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ while (len) {
+ /* We can read maximum of 0x200 bytes at once */
+ toread = min(len, 0x200);
+
+ /* Set page and column */
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to read */
+ ctlcode = NCTL_CSA | NCTL_CMD1W | 0x00040000 | 0x00020000 |
+ NCTL_CMD0;
+ ctlcode |= NAND_CMD_READSTART << 8;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+ return;
+ if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+ return;
+
+ /* Eventually read some data :) */
+ for (i = 0; i < toread; i += 4, dest++) {
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+ if (i == toread - 4) /* Last read goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode))
+ return;
+ *dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+ }
+
+ b47n->curr_column += toread;
+ len -= toread;
+ }
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+
+ u32 ctlcode;
+ const u32 *data = (u32 *)buf;
+ int i;
+
+ BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+ /* Don't validate column using nand_chip->page_shift, it may be bigger
+ * when accessing OOB */
+
+ for (i = 0; i < len; i += 4, data++) {
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+ ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4) /* Last write goes without that */
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+ pr_err("%s ctl_cmd didn't work!\n", __func__);
+ return;
+ }
+ }
+
+ b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+ int chip)
+{
+ return;
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
+ * For example, reading chip id is performed in a non-standard way.
+ * Setting column and page is also handled differently; we use special
+ * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
+ * standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+ unsigned command, int column,
+ int page_addr)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 ctlcode;
+ int i;
+
+ if (column != -1)
+ b47n->curr_column = column;
+ if (page_addr != -1)
+ b47n->curr_page_addr = page_addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ pr_warn("Chip reset not implemented yet\n");
+ break;
+ case NAND_CMD_READID:
+ ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+ ctlcode |= NAND_CMD_READID;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+
+ /*
+		 * Reads are special: the last one has to go without the
+		 * NCTL_CSA bit. We don't know how many reads the NAND
+		 * subsystem is going to perform, so cache everything.
+ */
+ for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+ ctlcode = NCTL_CSA | NCTL_READ;
+ if (i == ARRAY_SIZE(b47n->id_data) - 1)
+ ctlcode &= ~NCTL_CSA;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+ ctlcode)) {
+ pr_err("READID error\n");
+ break;
+ }
+ b47n->id_data[i] =
+ bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+ & 0xFF;
+ }
+
+ break;
+ case NAND_CMD_STATUS:
+ ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("STATUS command error\n");
+ break;
+ case NAND_CMD_READ0:
+ break;
+ case NAND_CMD_READOOB:
+ if (page_addr != -1)
+ b47n->curr_column += mtd->writesize;
+ break;
+ case NAND_CMD_ERASE1:
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+ ctlcode = 0x00040000 | NCTL_CMD1W | NCTL_CMD0 |
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("ERASE1 failed\n");
+ break;
+ case NAND_CMD_ERASE2:
+ break;
+ case NAND_CMD_SEQIN:
+ /* Set page and column */
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+ b47n->curr_column);
+ bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+ b47n->curr_page_addr);
+
+ /* Prepare to write */
+ ctlcode = 0x40000000 | 0x00040000 | 0x00020000 | 0x00010000;
+ ctlcode |= NAND_CMD_SEQIN;
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+ pr_err("SEQIN failed\n");
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, 0x00010000 |
+ NAND_CMD_PAGEPROG))
+ pr_err("PAGEPROG failed\n");
+ if (bcm47xxnflash_ops_bcm4706_poll(cc))
+ pr_err("PAGEPROG not ready\n");
+ break;
+ default:
+ pr_err("Command 0x%X unsupported\n", command);
+ break;
+ }
+ b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+ struct bcma_drv_cc *cc = b47n->cc;
+ u32 tmp = 0;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READID:
+ if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+ pr_err("Requested invalid id_data: %d\n",
+ b47n->curr_column);
+ return 0;
+ }
+ return b47n->id_data[b47n->curr_column++];
+ case NAND_CMD_STATUS:
+ if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+ return 0;
+ return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+ return tmp & 0xFF;
+ }
+
+ pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+ return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+ uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *nand_chip = (struct nand_chip *)mtd->priv;
+ struct bcm47xxnflash *b47n = (struct bcm47xxnflash *)nand_chip->priv;
+
+ switch (b47n->curr_command) {
+ case NAND_CMD_SEQIN:
+ bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ return;
+ }
+
+ pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+ int err;
+ u32 freq;
+ u16 clock;
+ u8 w0, w1, w2, w3, w4;
+
+ unsigned long chipsize; /* MiB */
+ u8 tbits, col_bits, col_size, row_bits, row_bsize;
+ u32 val;
+
+ b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+ b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+ /* Enable NAND flash access */
+ bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ BCMA_CC_4706_FLASHSCFG_NF1);
+
+ /* Configure wait counters */
+ if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+ freq = 100000000;
+ } else {
+ freq = bcma_chipco_pll_read(b47n->cc, 4);
+ freq = (freq * 0xFFF) >> 3;
+ freq = (freq * 25000000) >> 3;
+ }
+ clock = freq / 1000000;
+ w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+ w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+ w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+ w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+ (w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+ /* Scan NAND */
+ err = nand_scan(&b47n->mtd, 1);
+ if (err) {
+ pr_err("Could not scan NAND flash: %d\n", err);
+ goto exit;
+ }
+
+ /* Configure FLASH */
+ chipsize = b47n->nand_chip.chipsize >> 20;
+ tbits = ffs(chipsize); /* find first bit set */
+ if (!tbits || tbits != fls(chipsize)) {
+ pr_err("Invalid flash size: 0x%lX\n", chipsize);
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+ col_bits = b47n->nand_chip.page_shift + 1;
+ col_size = (col_bits + 7) / 8;
+
+ row_bits = tbits - col_bits + 1;
+ row_bsize = (row_bits + 7) / 8;
+
+ val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+ bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+ if (err)
+ bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+ ~BCMA_CC_4706_FLASHSCFG_NF1);
+ return err;
+}
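The NFLASH_CONF arithmetic in bcm47xxnflash_ops_bcm4706_init() can be checked in isolation. A user-space sketch, assuming a hypothetical 128 MiB chip with 2 KiB pages (page_shift = 11); the expected register value is 0x52:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		int chipsize = 128;			/* MiB, hypothetical chip */
		int page_shift = 11;			/* 2 KiB pages */
		int tbits, col_bits, col_size, row_bits, row_bsize;
		unsigned int val;

		tbits = ffs(chipsize) + 19;		/* 8 + 19 = 27 address bits total */
		col_bits = page_shift + 1;		/* 12 */
		col_size = (col_bits + 7) / 8;		/* 2 column-address bytes */
		row_bits = tbits - col_bits + 1;	/* 16 */
		row_bsize = (row_bits + 7) / 8;		/* 2 row-address bytes */
		val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;

		printf("BCMA_CC_NFLASH_CONF = 0x%X\n", val);	/* prints 0x52 */
		return 0;
	}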
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index ab0caa74eb43..4271e948d1e2 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -658,7 +658,7 @@ static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
/*
* Device management interface
*/
-static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
{
struct mtd_info *mtd = &info->mtd;
struct mtd_partition *parts = info->platform->partitions;
@@ -667,7 +667,7 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
return mtd_device_register(mtd, parts, nr);
}
-static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
+static int bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
@@ -725,7 +725,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-static int __devinit bf5xx_nand_probe(struct platform_device *pdev)
+static int bf5xx_nand_probe(struct platform_device *pdev)
{
struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
struct bf5xx_nand_info *info = NULL;
@@ -865,7 +865,7 @@ static int bf5xx_nand_resume(struct platform_device *dev)
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
- .remove = __devexit_p(bf5xx_nand_remove),
+ .remove = bf5xx_nand_remove,
.suspend = bf5xx_nand_suspend,
.resume = bf5xx_nand_resume,
.driver = {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2bb7170502c2..010d61266536 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -585,7 +585,7 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
}
/* F_2[X]/(X**6+X+1) */
-static unsigned short __devinit gf64_mul(u8 a, u8 b)
+static unsigned short gf64_mul(u8 a, u8 b)
{
u8 c;
unsigned int i;
@@ -604,7 +604,7 @@ static unsigned short __devinit gf64_mul(u8 a, u8 b)
}
/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
-static u16 __devinit gf4096_mul(u16 a, u16 b)
+static u16 gf4096_mul(u16 a, u16 b)
{
u8 ah, al, bh, bl, ch, cl;
@@ -619,14 +619,14 @@ static u16 __devinit gf4096_mul(u16 a, u16 b)
return (ch << 6) ^ cl;
}
-static int __devinit cafe_mul(int x)
+static int cafe_mul(int x)
{
if (x == 0)
return 1;
return gf4096_mul(x, 0xe01);
}
-static int __devinit cafe_nand_probe(struct pci_dev *pdev,
+static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mtd_info *mtd;
@@ -821,7 +821,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
return err;
}
-static void __devexit cafe_nand_remove(struct pci_dev *pdev)
+static void cafe_nand_remove(struct pci_dev *pdev)
{
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct cafe_priv *cafe = mtd->priv;
@@ -887,7 +887,7 @@ static struct pci_driver cafe_nand_pci_driver = {
.name = "CAFÉ NAND",
.id_table = cafe_nand_tbl,
.probe = cafe_nand_probe,
- .remove = __devexit_p(cafe_nand_remove),
+ .remove = cafe_nand_remove,
.resume = cafe_nand_resume,
};
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index adb6c3ef37fb..2cdeab8bebc4 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
this->ecc.hwctl = cs_enable_hwecc;
this->ecc.calculate = cs_calculate_ecc;
this->ecc.correct = nand_correct_data;
+ this->ecc.strength = 1;
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
@@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_ior;
}
- this->ecc.strength = 1;
-
new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
cs553x_mtd[cs] = new_mtd;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 945047ad0952..feae55c7b880 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata
@@ -821,9 +821,16 @@ syndrome_done:
if (ret < 0)
goto err_scan;
- ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
- pdata->nr_parts);
+ if (pdata->parts)
+ ret = mtd_device_parse_register(&info->mtd, NULL, NULL,
+ pdata->parts, pdata->nr_parts);
+ else {
+ struct mtd_part_parser_data ppdata;
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
+ NULL, 0);
+ }
if (ret < 0)
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index e706a237170f..0c8bb6bf8424 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -16,14 +16,12 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
@@ -89,13 +87,6 @@ MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
* format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
-/* List of platforms this NAND controller has be integrated into */
-static const struct pci_device_id denali_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
- { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
- { /* end: all zeroes */ }
-};
-
/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
@@ -699,7 +690,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
if (comp_res == 0) {
/* timeout */
- printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
intr_status, irq_mask);
intr_status = 0;
@@ -1305,8 +1296,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
/* TODO: Read OOB data */
break;
default:
- printk(KERN_ERR ": unsupported command"
- " received 0x%x\n", cmd);
+ pr_err(": unsupported command received 0x%x\n", cmd);
break;
}
}
@@ -1425,107 +1415,48 @@ void denali_drv_init(struct denali_nand_info *denali)
denali->irq_status = 0;
}
-/* driver entry point */
-static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int denali_init(struct denali_nand_info *denali)
{
- int ret = -ENODEV;
- resource_size_t csr_base, mem_base;
- unsigned long csr_len, mem_len;
- struct denali_nand_info *denali;
-
- denali = kzalloc(sizeof(*denali), GFP_KERNEL);
- if (!denali)
- return -ENOMEM;
+ int ret;
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_alloc_memery;
- }
-
- if (id->driver_data == INTEL_CE4100) {
+ if (denali->platform == INTEL_CE4100) {
/* Due to a silicon limitation, we can only support
* ONFI timing mode 1 and below.
*/
if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
- printk(KERN_ERR "Intel CE4100 only supports"
- " ONFI timing mode 1 or below\n");
- ret = -EINVAL;
- goto failed_enable_dev;
- }
- denali->platform = INTEL_CE4100;
- mem_base = pci_resource_start(dev, 0);
- mem_len = pci_resource_len(dev, 1);
- csr_base = pci_resource_start(dev, 1);
- csr_len = pci_resource_len(dev, 1);
- } else {
- denali->platform = INTEL_MRST;
- csr_base = pci_resource_start(dev, 0);
- csr_len = pci_resource_len(dev, 0);
- mem_base = pci_resource_start(dev, 1);
- mem_len = pci_resource_len(dev, 1);
- if (!mem_len) {
- mem_base = csr_base + csr_len;
- mem_len = csr_len;
+ pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+ return -EINVAL;
}
}
/* Is 32-bit DMA supported? */
- ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
if (ret) {
- printk(KERN_ERR "Spectra: no usable DMA configuration\n");
- goto failed_enable_dev;
+ pr_err("Spectra: no usable DMA configuration\n");
+ return ret;
}
- denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
+ denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
DENALI_BUF_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
- dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
- goto failed_enable_dev;
- }
-
- pci_set_master(dev);
- denali->dev = &dev->dev;
- denali->mtd.dev.parent = &dev->dev;
-
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request memory regions\n");
- goto failed_dma_map;
- }
-
- denali->flash_reg = ioremap_nocache(csr_base, csr_len);
- if (!denali->flash_reg) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_req_regions;
- }
-
- denali->flash_mem = ioremap_nocache(mem_base, mem_len);
- if (!denali->flash_mem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- ret = -ENOMEM;
- goto failed_remap_reg;
+ if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+ dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+ return -EIO;
}
-
+ denali->mtd.dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
/* denali_isr register is done after all the hardware
 	 * initialization is finished */
- if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
DENALI_NAND_NAME, denali)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- goto failed_remap_mem;
+ pr_err("Spectra: Unable to allocate IRQ\n");
+ return -ENODEV;
}
/* now that our ISR is registered, we can enable interrupts */
denali_set_intr_modes(denali, true);
-
- pci_set_drvdata(dev, denali);
-
denali->mtd.name = "denali-nand";
denali->mtd.owner = THIS_MODULE;
denali->mtd.priv = &denali->nand;
@@ -1549,8 +1480,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
*/
if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
ret = -ENODEV;
- printk(KERN_ERR "Spectra: device size not supported by this "
- "version of MTD.");
+ pr_err("Spectra: device size not supported by this version of MTD.");
goto failed_req_irq;
}
@@ -1602,8 +1532,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
ECC_8BITS * (denali->mtd.writesize /
ECC_SECTOR_SIZE))) {
- printk(KERN_ERR "Your NAND chip OOB is not large enough to"
- " contain 8bit ECC correction codes");
+		pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
goto failed_req_irq;
} else {
denali->nand.ecc.strength = 8;
@@ -1655,56 +1585,24 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
ret = mtd_device_register(&denali->mtd, NULL, 0);
if (ret) {
- dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
+ dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
ret);
goto failed_req_irq;
}
return 0;
failed_req_irq:
- denali_irq_cleanup(dev->irq, denali);
-failed_remap_mem:
- iounmap(denali->flash_mem);
-failed_remap_reg:
- iounmap(denali->flash_reg);
-failed_req_regions:
- pci_release_regions(dev);
-failed_dma_map:
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
-failed_enable_dev:
- pci_disable_device(dev);
-failed_alloc_memery:
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+
return ret;
}
+EXPORT_SYMBOL(denali_init);
/* driver exit point */
-static void denali_pci_remove(struct pci_dev *dev)
+void denali_remove(struct denali_nand_info *denali)
{
- struct denali_nand_info *denali = pci_get_drvdata(dev);
-
- nand_release(&denali->mtd);
-
- denali_irq_cleanup(dev->irq, denali);
-
- iounmap(denali->flash_reg);
- iounmap(denali->flash_mem);
- pci_release_regions(dev);
- pci_disable_device(dev);
- dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- pci_set_drvdata(dev, NULL);
- kfree(denali);
+ denali_irq_cleanup(denali->irq, denali);
+ dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
}
-
-MODULE_DEVICE_TABLE(pci, denali_pci_ids);
-
-static struct pci_driver denali_pci_driver = {
- .name = DENALI_NAND_NAME,
- .id_table = denali_pci_ids,
- .probe = denali_pci_probe,
- .remove = denali_pci_remove,
-};
-
-module_pci_driver(denali_pci_driver);
+EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index fabb9d56b39e..cec5712862c9 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -466,6 +466,7 @@ struct nand_buf {
#define INTEL_CE4100 1
#define INTEL_MRST 2
+#define DT 3
struct denali_nand_info {
struct mtd_info mtd;
@@ -487,6 +488,7 @@ struct denali_nand_info {
uint32_t irq_status;
int irq_debug_array[32];
int idx;
+ int irq;
uint32_t devnum; /* represent how many nands connected */
uint32_t fwblks; /* represent how many blocks FW used */
@@ -496,4 +498,7 @@ struct denali_nand_info {
uint32_t max_banks;
};
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
#endif /*_LLD_NAND_*/
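With denali_init()/denali_remove() exported, each bus front end only has to map the two register windows, fill in the new irq/dev/platform fields and hand the structure to the core. A hedged sketch of that contract (the example_* names and the already-mapped parameters are assumptions; the real users are the DT and PCI glue below):

	static int example_glue_attach(struct device *dev, void __iomem *csr,
				       void __iomem *mem, int irq)
	{
		struct denali_nand_info *denali;
		int ret;

		denali = kzalloc(sizeof(*denali), GFP_KERNEL);
		if (!denali)
			return -ENOMEM;

		denali->platform = DT;		/* or INTEL_CE4100 / INTEL_MRST */
		denali->dev = dev;		/* used for DMA mapping in denali_init() */
		denali->irq = irq;
		denali->flash_reg = csr;	/* controller registers, already ioremapped */
		denali->flash_mem = mem;	/* data window, already ioremapped */

		ret = denali_init(denali);	/* hw init, ISR, nand_scan, mtd register */
		if (ret)
			kfree(denali);
		return ret;
	}

Teardown mirrors this: call denali_remove() first, then undo the glue's own mapping and allocation, as denali_pci_remove() does below.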
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
new file mode 100644
index 000000000000..546f8cb5688d
--- /dev/null
+++ b/drivers/mtd/nand/denali_dt.c
@@ -0,0 +1,167 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+ struct denali_nand_info denali;
+ struct clk *clk;
+};
+
+static void __iomem *request_and_map(struct device *dev,
+ const struct resource *res)
+{
+ void __iomem *ptr;
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ "denali-dt")) {
+ dev_err(dev, "unable to request %s\n", res->name);
+ return NULL;
+ }
+
+ ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!ptr)
+		dev_err(dev, "ioremap_nocache of %s failed!\n", res->name);
+
+ return ptr;
+}
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+ { .compatible = "denali,denali-nand-dt" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+ struct resource *denali_reg, *nand_data;
+ struct denali_dt *dt;
+ struct denali_nand_info *denali;
+ int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+ if (of_id) {
+ ofdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
+ dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+ if (!dt)
+ return -ENOMEM;
+ denali = &dt->denali;
+
+ denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+ nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+ if (!denali_reg || !nand_data) {
+ dev_err(&ofdev->dev, "resources not completely defined\n");
+ return -EINVAL;
+ }
+
+ denali->platform = DT;
+ denali->dev = &ofdev->dev;
+ denali->irq = platform_get_irq(ofdev, 0);
+ if (denali->irq < 0) {
+ dev_err(&ofdev->dev, "no irq defined\n");
+ return -ENXIO;
+ }
+
+ denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
+ if (!denali->flash_reg)
+ return -ENOMEM;
+
+ denali->flash_mem = request_and_map(&ofdev->dev, nand_data);
+ if (!denali->flash_mem)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(ofdev->dev.of_node,
+ "dma-mask", (u32 *)&denali_dma_mask)) {
+ denali->dev->dma_mask = &denali_dma_mask;
+ } else {
+ denali->dev->dma_mask = NULL;
+ }
+
+ dt->clk = clk_get(&ofdev->dev, NULL);
+ if (IS_ERR(dt->clk)) {
+ dev_err(&ofdev->dev, "no clk available\n");
+ return PTR_ERR(dt->clk);
+ }
+ clk_prepare_enable(dt->clk);
+
+ ret = denali_init(denali);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(ofdev, dt);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+ struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+ denali_remove(&dt->denali);
+	clk_disable_unprepare(dt->clk);
+ clk_put(dt->clk);
+
+ return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+ .probe = denali_dt_probe,
+ .remove = denali_dt_remove,
+ .driver = {
+ .name = "denali-nand-dt",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(denali_nand_dt_ids),
+ },
+};
+
+static int __init denali_init_dt(void)
+{
+ return platform_driver_register(&denali_dt_driver);
+}
+module_init(denali_init_dt);
+
+static void __exit denali_exit_dt(void)
+{
+ platform_driver_unregister(&denali_dt_driver);
+}
+module_exit(denali_exit_dt);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
new file mode 100644
index 000000000000..e3e46623b2b4
--- /dev/null
+++ b/drivers/mtd/nand/denali_pci.c
@@ -0,0 +1,144 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand-pci"
+
+/* List of platforms this NAND controller has been integrated into */
+static DEFINE_PCI_DEVICE_TABLE(denali_pci_ids) = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ pr_err("Spectra: pci_enable_device failed.\n");
+ goto failed_alloc_memery;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ }
+ }
+
+ pci_set_master(dev);
+ denali->dev = &dev->dev;
+ denali->irq = dev->irq;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ pr_err("Spectra: Unable to request memory regions\n");
+ goto failed_enable_dev;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ pr_err("Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_req_regions;
+ }
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ pr_err("Spectra: ioremap_nocache failed!");
+ ret = -ENOMEM;
+ goto failed_remap_reg;
+ }
+
+ ret = denali_init(denali);
+ if (ret)
+ goto failed_remap_mem;
+
+ pci_set_drvdata(dev, denali);
+
+ return 0;
+
+failed_remap_mem:
+ iounmap(denali->flash_mem);
+failed_remap_reg:
+ iounmap(denali->flash_reg);
+failed_req_regions:
+ pci_release_regions(dev);
+failed_enable_dev:
+ pci_disable_device(dev);
+failed_alloc_memery:
+ kfree(denali);
+
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ denali_remove(denali);
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int denali_init_pci(void)
+{
+ pr_info("Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+module_init(denali_init_pci);
+
+static void denali_exit_pci(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+module_exit(denali_exit_pci);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 256eb30f6180..81fa5784f98b 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -53,8 +53,6 @@ static unsigned long __initdata doc_locations[] = {
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#else
-#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
0xffffffff };
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 799da5d1c857..18fa4489e52e 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -46,6 +46,25 @@
#include <linux/bitrev.h>
/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data. The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk. Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode. The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address). This module parameter enables you to use this
+ * driver to write the SPL. When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ... Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
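/*
 * A minimal sketch of the constraint above: a reliable-mode write may start
 * only inside an even-numbered 2 KiB page, so every offset with bit 11 set
 * (0x800, 0xa00, 0x1800, ...) is rejected while 0x0, 0x1000, ... are fine.
 * The helper name is hypothetical; docg4_command() below only warns.
 */
static inline bool docg4_reliable_start_ok(loff_t off)
{
	return !((off >> 11) & 1);
}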
+
+/*
* You'll want to ignore badblocks if you're reading a partition that contains
* data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
* it does not use mtd nand's method for marking bad blocks (using oob area).
@@ -113,6 +132,7 @@ struct docg4_priv {
#define DOCG4_SEQ_PAGEWRITE 0x16
#define DOCG4_SEQ_PAGEPROG 0x1e
#define DOCG4_SEQ_BLOCKERASE 0x24
+#define DOCG4_SEQ_SETMODE 0x45
/* DOC_FLASHCOMMAND register commands */
#define DOCG4_CMD_PAGE_READ 0x00
@@ -122,6 +142,8 @@ struct docg4_priv {
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOCG4_CMD_PAGEWRITE 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_RESET 0xff
/* DOC_POWERMODE register bits */
@@ -190,17 +212,20 @@ struct docg4_priv {
#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
/*
- * Oob bytes 0 - 6 are available to the user.
- * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
* Byte 15 (the last) is used by the driver as a "page written" flag.
*/
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobavail = 7,
- .oobfree = { {0, 7} }
+ .oobavail = 5,
+ .oobfree = { {.offset = 2, .length = 5} }
};
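/*
 * Consequence of the layout above for users of the OOB area: with
 * MTD_OPS_AUTO_OOB the request is remapped through oobfree, so at most 5
 * bytes per page are available and they land in oob bytes 2..6. A sketch,
 * assuming "mtd", "page_offs" and "buf" exist in the caller:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = 5,
 *		.oobbuf = buf,
 *	};
 *	mtd_read_oob(mtd, page_offs, &ops);
 */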
/*
@@ -611,6 +636,14 @@ static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
dev_dbg(doc->dev,
"docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
sequence_reset(mtd);
+
+ if (unlikely(reliable_mode)) {
+ writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+ writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+ writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+ write_nop(docptr);
+ }
+
writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
@@ -691,6 +724,15 @@ static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
break;
case NAND_CMD_SEQIN:
+ if (unlikely(reliable_mode)) {
+ uint16_t g4_page = g4_addr >> 16;
+
+ /* writes to odd-numbered 2k pages are invalid */
+ if (g4_page & 0x01)
+ dev_warn(doc->dev,
+ "invalid reliable mode address\n");
+ }
+
write_page_prologue(mtd, g4_addr);
/* hack for deferred write of oob bytes */
@@ -979,16 +1021,15 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
struct docg4_priv *doc = nand->priv;
uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
uint8_t *buf;
- int i, block, status;
+ int i, block;
+ __u32 eccfailed_stats = mtd->ecc_stats.failed;
buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
- status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
- if (status)
- goto exit;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
/*
* If no memory-based bbt was created, exit. This will happen if module
@@ -1000,6 +1041,20 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
if (nand->bbt == NULL) /* no memory-based bbt */
goto exit;
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ /*
+		 * Whoops, an ECC failure occurred reading the factory bbt.
+ * It is stored redundantly, so we get another chance.
+ */
+ eccfailed_stats = mtd->ecc_stats.failed;
+ docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+ if (mtd->ecc_stats.failed > eccfailed_stats) {
+ dev_warn(doc->dev,
+ "The factory bbt could not be read!\n");
+ goto exit;
+ }
+ }
+
/*
* Parse factory bbt and update memory-based bbt. Factory bbt format is
* simple: one bit per block, block numbers increase left to right (msb
@@ -1019,7 +1074,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
}
exit:
kfree(buf);
- return status;
+ return 0;
}
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index cc1480a5e4c1..20657209a472 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -109,20 +109,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
};
/*
- * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset
- * 1, so we have to adjust bad block pattern. This pattern should be used for
- * x8 chips only. So far hardware does not support x16 chips anyway.
- */
-static u8 scan_ff_pattern[] = { 0xff, };
-
-static struct nand_bbt_descr largepage_memorybased = {
- .options = 0,
- .offs = 0,
- .len = 1,
- .pattern = scan_ff_pattern,
-};
-
-/*
* ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt,
* interfere with ECC positions, that's why we implement our own descriptors.
* OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
@@ -699,7 +685,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
&fsl_elbc_oob_lp_eccm1 :
&fsl_elbc_oob_lp_eccm0;
- chip->badblock_pattern = &largepage_memorybased;
}
} else {
dev_err(priv->dev,
@@ -814,7 +799,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
static DEFINE_MUTEX(fsl_elbc_nand_mutex);
-static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev)
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
{
struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 3551a99076ba..ad6222627fed 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -389,7 +389,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
timing = IFC_FIR_OP_RBCD;
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(timing << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -754,7 +754,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
/* READID */
out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
@@ -922,7 +922,7 @@ static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
static DEFINE_MUTEX(fsl_ifc_nand_mutex);
-static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
+static int fsl_ifc_nand_probe(struct platform_device *dev)
{
struct fsl_ifc_regs __iomem *ifc;
struct fsl_ifc_mtd *priv;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 45df542b9c61..04e07252d74b 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -152,9 +152,9 @@ static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
fun_wait_rnb(fun);
}
-static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
- const struct device_node *upm_np,
- const struct resource *io_res)
+static int fun_chip_init(struct fsl_upm_nand *fun,
+ const struct device_node *upm_np,
+ const struct resource *io_res)
{
int ret;
struct device_node *flash_np;
@@ -201,7 +201,7 @@ err:
return ret;
}
-static int __devinit fun_probe(struct platform_device *ofdev)
+static int fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource io_res;
@@ -318,7 +318,7 @@ err1:
return ret;
}
-static int __devexit fun_remove(struct platform_device *ofdev)
+static int fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
int i;
@@ -350,7 +350,7 @@ static struct platform_driver of_fun_driver = {
.of_match_table = of_fun_match,
},
.probe = fun_probe,
- .remove = __devexit_p(fun_remove),
+ .remove = fun_remove,
};
module_platform_driver(of_fun_driver);
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 38d26240d8b1..67e62d3d495c 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -361,7 +361,7 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
struct nand_chip *this = mtd->priv;
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
- void *__iomem *regs = host->regs_va;
+ void __iomem *regs = host->regs_va;
unsigned int bank = host->bank;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -383,13 +383,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
pc |= FSMC_ENABLE;
else
pc &= ~FSMC_ENABLE;
- writel(pc, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
}
mb();
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb_relaxed(cmd, this->IO_ADDR_W);
}
/*
@@ -426,14 +426,18 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (busw)
- writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_16,
+ FSMC_NAND_REG(regs, bank, PC));
else
- writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
+ writel_relaxed(value | FSMC_DEVWID_8,
+ FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
FSMC_NAND_REG(regs, bank, PC));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
- writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, COMM));
+ writel_relaxed(thiz | thold | twait | tset,
+ FSMC_NAND_REG(regs, bank, ATTRIB));
}
/*
@@ -446,11 +450,11 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
void __iomem *regs = host->regs_va;
uint32_t bank = host->bank;
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
- writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+ writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
FSMC_NAND_REG(regs, bank, PC));
}
@@ -470,7 +474,7 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
- if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+ if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
break;
else
cond_resched();
@@ -481,25 +485,25 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
return -ETIMEDOUT;
}
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
ecc[3] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
ecc[4] = (uint8_t) (ecc_tmp >> 0);
ecc[5] = (uint8_t) (ecc_tmp >> 8);
ecc[6] = (uint8_t) (ecc_tmp >> 16);
ecc[7] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
ecc[8] = (uint8_t) (ecc_tmp >> 0);
ecc[9] = (uint8_t) (ecc_tmp >> 8);
ecc[10] = (uint8_t) (ecc_tmp >> 16);
ecc[11] = (uint8_t) (ecc_tmp >> 24);
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
ecc[12] = (uint8_t) (ecc_tmp >> 16);
return 0;
@@ -519,7 +523,7 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
uint32_t bank = host->bank;
uint32_t ecc_tmp;
- ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
ecc[0] = (uint8_t) (ecc_tmp >> 0);
ecc[1] = (uint8_t) (ecc_tmp >> 8);
ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -601,7 +605,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_async_issue_pending(chan);
ret =
- wait_for_completion_interruptible_timeout(&host->dma_access_complete,
+ wait_for_completion_timeout(&host->dma_access_complete,
msecs_to_jiffies(3000));
if (ret <= 0) {
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -628,10 +632,10 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- writel(p[i], chip->IO_ADDR_W);
+ writel_relaxed(p[i], chip->IO_ADDR_W);
} else {
for (i = 0; i < len; i++)
- writeb(buf[i], chip->IO_ADDR_W);
+ writeb_relaxed(buf[i], chip->IO_ADDR_W);
}
}
@@ -651,10 +655,10 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
uint32_t *p = (uint32_t *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
- p[i] = readl(chip->IO_ADDR_R);
+ p[i] = readl_relaxed(chip->IO_ADDR_R);
} else {
for (i = 0; i < len; i++)
- buf[i] = readb(chip->IO_ADDR_R);
+ buf[i] = readb_relaxed(chip->IO_ADDR_R);
}
}
@@ -783,7 +787,7 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
uint32_t num_err, i;
uint32_t ecc1, ecc2, ecc3, ecc4;
- num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+ num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
/* no bit flipping */
if (likely(num_err == 0))
@@ -826,10 +830,10 @@ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
* uint64_t array and error offset indexes are populated in err_idx
* array
*/
- ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
- ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
- ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
- ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
+ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+ ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+ ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+ ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
err_idx[0] = (ecc1 >> 0) & 0x1FFF;
err_idx[1] = (ecc1 >> 13) & 0x1FFF;
@@ -860,8 +864,8 @@ static bool filter(struct dma_chan *chan, void *slave)
}
#ifdef CONFIG_OF
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 val;
@@ -876,16 +880,14 @@ static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
return -EINVAL;
}
}
- of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
- of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
if (of_get_property(np, "nand-skip-bbtscan", NULL))
pdata->options = NAND_SKIP_BBTSCAN;
return 0;
}
#else
-static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
- struct device_node *np)
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ struct device_node *np)
{
return -ENOSYS;
}
@@ -935,41 +937,28 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory data resourse\n");
- return -ENOENT;
- }
-
- host->data_pa = (dma_addr_t)res->start;
- host->data_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ host->data_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->data_va) {
dev_err(&pdev->dev, "data ioremap failed\n");
return -ENOMEM;
}
+ host->data_pa = (dma_addr_t)res->start;
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory ale resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ if (!res)
+ return -EINVAL;
- host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
- resource_size(res));
+ host->addr_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->addr_va) {
dev_err(&pdev->dev, "ale ioremap failed\n");
return -ENOMEM;
}
- if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory cle resourse\n");
- return -ENOENT;
- }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ if (!res)
+ return -EINVAL;
- host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
- resource_size(res));
+ host->cmd_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->cmd_va) {
dev_err(&pdev->dev, "ale ioremap failed\n");
return -ENOMEM;
@@ -979,14 +968,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name)) {
- dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
- return -ENOENT;
- }
-
- host->regs_va = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+ host->regs_va = devm_request_and_ioremap(&pdev->dev, res);
if (!host->regs_va) {
dev_err(&pdev->dev, "regs ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index bc73bc5f2713..e789e3f51710 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -90,14 +90,14 @@ static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- writesb(this->IO_ADDR_W, buf, len);
+ iowrite8_rep(this->IO_ADDR_W, buf, len);
}
static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
- readsb(this->IO_ADDR_R, buf, len);
+ ioread8_rep(this->IO_ADDR_R, buf, len);
}
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
@@ -106,7 +106,7 @@ static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- writesw(this->IO_ADDR_W, buf, len>>1);
+ iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -121,7 +121,7 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
- readsw(this->IO_ADDR_R, buf, len>>1);
+ ioread16_rep(this->IO_ADDR_R, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
@@ -134,7 +134,11 @@ static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ return gpio_get_value(gpiomtd->plat.gpio_rdy);
+
+ return 1;
}
#ifdef CONFIG_OF
@@ -227,7 +231,7 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
-static int __devexit gpio_nand_remove(struct platform_device *dev)
+static int gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
struct resource *res;
@@ -252,7 +256,8 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
gpio_free(gpiomtd->plat.gpio_nce);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_free(gpiomtd->plat.gpio_nwp);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
kfree(gpiomtd);
@@ -277,7 +282,7 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
return ptr;
}
-static int __devinit gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *dev)
{
struct gpiomtd *gpiomtd;
struct nand_chip *this;
@@ -336,10 +341,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
if (ret)
goto err_cle;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
- ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
- if (ret)
- goto err_rdy;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+ ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+ if (ret)
+ goto err_rdy;
+ gpio_direction_input(gpiomtd->plat.gpio_rdy);
+ }
this->IO_ADDR_W = this->IO_ADDR_R;
@@ -386,7 +393,8 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- gpio_free(gpiomtd->plat.gpio_rdy);
+ if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
+ gpio_free(gpiomtd->plat.gpio_rdy);
err_rdy:
gpio_free(gpiomtd->plat.gpio_cle);
err_cle:
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 3502accd4bc3..d84699c7968e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -18,7 +18,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#include <linux/mtd/gpmi-nand.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -166,6 +165,15 @@ int gpmi_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+ * See later BCH reset for explanation of MX23 handling
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
/* Choose NAND mode. */
writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d79696b2f19b..e9b1c47e3cf9 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
@@ -33,6 +32,12 @@
#include <linux/of_mtd.h>
#include "gpmi-nand.h"
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
+
/* add our owner bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -222,7 +227,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
- pr_err("map failed.\n");
+ pr_err("DMA mapping failed.\n");
this->direct_dma_map_ok = false;
}
@@ -314,8 +319,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
return 0;
}
-static int __devinit
-acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
+static int acquire_register_block(struct gpmi_nand_data *this,
+ const char *res_name)
{
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
@@ -355,8 +360,7 @@ static void release_register_block(struct gpmi_nand_data *this)
res->bch_regs = NULL;
}
-static int __devinit
-acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
@@ -422,7 +426,7 @@ static void release_dma_channels(struct gpmi_nand_data *this)
}
}
-static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct resource *r_dma;
@@ -456,7 +460,7 @@ static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
if (!dma_chan) {
- pr_err("dma_request_channel failed.\n");
+ pr_err("Failed to request DMA channel.\n");
goto acquire_err;
}
@@ -487,7 +491,7 @@ static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
-static int __devinit gpmi_get_clks(struct gpmi_nand_data *this)
+static int gpmi_get_clks(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
char **extra_clks = NULL;
@@ -533,7 +537,7 @@ err_clock:
return -ENOMEM;
}
-static int __devinit acquire_resources(struct gpmi_nand_data *this)
+static int acquire_resources(struct gpmi_nand_data *this)
{
struct pinctrl *pinctrl;
int ret;
@@ -583,7 +587,7 @@ static void release_resources(struct gpmi_nand_data *this)
release_dma_channels(this);
}
-static int __devinit init_hardware(struct gpmi_nand_data *this)
+static int init_hardware(struct gpmi_nand_data *this)
{
int ret;
@@ -625,7 +629,8 @@ static int read_page_prepare(struct gpmi_nand_data *this,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dest_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -675,7 +680,8 @@ static int send_page_prepare(struct gpmi_nand_data *this,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, source_phys)) {
if (alt_size < length) {
- pr_err("Alternate buffer is too small\n");
+ pr_err("%s, Alternate buffer is too small\n",
+ __func__);
return -ENOMEM;
}
goto map_failed;
@@ -763,7 +769,7 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
error_alloc:
gpmi_free_dma_buffer(this);
- pr_err("allocate DMA buffer ret!!\n");
+ pr_err("Error allocating DMA buffers!\n");
return -ENOMEM;
}
@@ -1474,7 +1480,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this)
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
- pr_err("set geometry ret : %d\n", ret);
+ pr_err("Error setting BCH geometry : %d\n", ret);
return ret;
}
@@ -1535,7 +1541,7 @@ static void gpmi_nfc_exit(struct gpmi_nand_data *this)
gpmi_free_dma_buffer(this);
}
-static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+static int gpmi_nfc_init(struct gpmi_nand_data *this)
{
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
@@ -1618,7 +1624,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
-static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+static int gpmi_nand_probe(struct platform_device *pdev)
{
struct gpmi_nand_data *this;
const struct of_device_id *of_id;
@@ -1668,7 +1674,7 @@ exit_acquire_resources:
return ret;
}
-static int __devexit gpmi_nand_remove(struct platform_device *pdev)
+static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
@@ -1685,7 +1691,7 @@ static struct platform_driver gpmi_nand_driver = {
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
- .remove = __devexit_p(gpmi_nand_remove),
+ .remove = gpmi_nand_remove,
.id_table = gpmi_ids,
};
module_platform_driver(gpmi_nand_driver);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 7ac25c1e58f9..3d93a5e39090 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -130,7 +130,6 @@ struct gpmi_nand_data {
/* System Interface */
struct device *dev;
struct platform_device *pdev;
- struct gpmi_nand_platform_data *pdata;
/* Resources */
struct resources resources;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 100b6775e175..b76460eeaf22 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -316,13 +316,18 @@ err:
return ret;
}
-static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base)
+static inline void jz_nand_iounmap_resource(struct resource *res,
+ void __iomem *base)
{
iounmap(base);
release_mem_region(res->start, resource_size(res));
}
-static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) {
+static int jz_nand_detect_bank(struct platform_device *pdev,
+ struct jz_nand *nand, unsigned char bank,
+ size_t chipnr, uint8_t *nand_maf_id,
+ uint8_t *nand_dev_id)
+{
int ret;
int gpio;
char gpio_name[9];
@@ -400,7 +405,7 @@ notfound_gpio:
return ret;
}
-static int __devinit jz_nand_probe(struct platform_device *pdev)
+static int jz_nand_probe(struct platform_device *pdev)
{
int ret;
struct jz_nand *nand;
@@ -541,7 +546,7 @@ err_free:
return ret;
}
-static int __devexit jz_nand_remove(struct platform_device *pdev)
+static int jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
@@ -573,7 +578,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
static struct platform_driver jz_nand_driver = {
.probe = jz_nand_probe,
- .remove = __devexit_p(jz_nand_remove),
+ .remove = jz_nand_remove,
.driver = {
.name = "jz4740-nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c29b7ac1f6af..f182befa7360 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -655,7 +655,7 @@ static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -845,7 +845,7 @@ err_exit1:
/*
* Remove NAND device
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
struct mtd_info *mtd = &host->mtd;
@@ -907,7 +907,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 32409c45d479..030b78c62895 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -755,7 +755,7 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
/*
* Probe for NAND controller
*/
-static int __devinit lpc32xx_nand_probe(struct platform_device *pdev)
+static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
@@ -949,7 +949,7 @@ err_exit1:
/*
* Remove NAND device.
*/
-static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
+static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
@@ -1021,7 +1021,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
- .remove = __devexit_p(lpc32xx_nand_remove),
+ .remove = lpc32xx_nand_remove,
.resume = lpc32xx_nand_resume,
.suspend = lpc32xx_nand_suspend,
.driver = {
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index f776c8577b8c..3c9cdcbc4cba 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
iounmap(prv->csreg);
}
-static int __devinit mpc5121_nfc_probe(struct platform_device *op)
+static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *rootnode, *dn = op->dev.of_node;
struct device *dev = &op->dev;
@@ -827,7 +827,7 @@ error:
return retval;
}
-static int __devexit mpc5121_nfc_remove(struct platform_device *op)
+static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
@@ -841,14 +841,14 @@ static int __devexit mpc5121_nfc_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+static struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
- .remove = __devexit_p(mpc5121_nfc_remove),
+ .remove = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 72e31d86030d..45204e41a028 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -37,15 +37,9 @@
#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-mxc_nand.h>
-#include <mach/hardware.h>
#define DRIVER_NAME "mxc_nand"
-#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2a() cpu_is_mx51()
-#define nfc_is_v3_2b() cpu_is_mx53()
-
/* Addresses for NFC registers */
#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
@@ -272,7 +266,8 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
+static const char * const part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", NULL };
static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
@@ -1283,6 +1278,53 @@ static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
.ppb_shift = 8,
};
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static inline int is_imx51_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx51_nand_devtype_data;
+}
+
+static inline int is_imx53_nfc(struct mxc_nand_host *host)
+{
+ return host->devtype_data == &imx53_nand_devtype_data;
+}
+
+static struct platform_device_id mxcnd_devtype[] = {
+ {
+ .name = "imx21-nand",
+ .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
+ }, {
+ .name = "imx27-nand",
+ .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
+ }, {
+ .name = "imx25-nand",
+ .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
+ }, {
+ .name = "imx51-nand",
+ .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
+ }, {
+ .name = "imx53-nand",
+ .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
+
#ifdef CONFIG_OF_MTD
static const struct of_device_id mxcnd_dt_ids[] = {
{
@@ -1337,33 +1379,7 @@ static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
}
#endif
-static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
-{
- struct mxc_nand_platform_data *pdata = host->dev->platform_data;
-
- if (!pdata)
- return -ENODEV;
-
- host->pdata = *pdata;
-
- if (nfc_is_v1()) {
- if (cpu_is_mx21())
- host->devtype_data = &imx21_nand_devtype_data;
- else
- host->devtype_data = &imx27_nand_devtype_data;
- } else if (nfc_is_v21()) {
- host->devtype_data = &imx25_nand_devtype_data;
- } else if (nfc_is_v3_2a()) {
- host->devtype_data = &imx51_nand_devtype_data;
- } else if (nfc_is_v3_2b()) {
- host->devtype_data = &imx53_nand_devtype_data;
- } else
- BUG();
-
- return 0;
-}
-
-static int __devinit mxcnd_probe(struct platform_device *pdev)
+static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
@@ -1404,8 +1420,16 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
return PTR_ERR(host->clk);
err = mxcnd_probe_dt(host);
- if (err > 0)
- err = mxcnd_probe_pdata(host);
+ if (err > 0) {
+ struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata) {
+ host->pdata = *pdata;
+ host->devtype_data = (struct mxc_nand_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ }
+ }
if (err < 0)
return err;
@@ -1494,7 +1518,7 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
+ if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -1508,7 +1532,7 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
this->ecc.layout = host->devtype_data->ecclayout_4k;
if (this->ecc.mode == NAND_ECC_HW) {
- if (nfc_is_v1())
+ if (is_imx21_nfc(host) || is_imx27_nfc(host))
this->ecc.strength = 1;
else
this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
@@ -1533,12 +1557,13 @@ static int __devinit mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- clk_disable_unprepare(host->clk);
+ if (host->clk_act)
+ clk_disable_unprepare(host->clk);
return err;
}
-static int __devexit mxcnd_remove(struct platform_device *pdev)
+static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
@@ -1555,8 +1580,9 @@ static struct platform_driver mxcnd_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxcnd_dt_ids),
},
+ .id_table = mxcnd_devtype,
.probe = mxcnd_probe,
- .remove = __devexit_p(mxcnd_remove),
+ .remove = mxcnd_remove,
};
module_platform_driver(mxcnd_driver);
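With <mach/hardware.h> and the cpu_is_mx*() helpers gone, mxc_nand tells the controller variants apart through the mxcnd_devtype[] platform_device_id table added above: the entry whose name matches the registered device ends up in pdev->id_entry, and its driver_data points at the per-variant devtype structure. The DT path is unchanged and still goes through mxcnd_dt_ids. A stripped-down sketch of the non-DT lookup pattern, using hypothetical names rather than the driver's own:

    #include <linux/platform_device.h>

    struct foo_devtype_data {
            int ecc_strength;       /* any per-variant parameter */
    };

    static const struct foo_devtype_data imx27_data = { .ecc_strength = 1 };
    static const struct foo_devtype_data imx25_data = { .ecc_strength = 4 };

    static const struct platform_device_id foo_ids[] = {
            { .name = "imx27-foo", .driver_data = (kernel_ulong_t)&imx27_data },
            { .name = "imx25-foo", .driver_data = (kernel_ulong_t)&imx25_data },
            { /* sentinel */ }
    };

    static int foo_probe(struct platform_device *pdev)
    {
            /* id_entry is the platform_device_id entry that matched the name */
            const struct foo_devtype_data *data =
                    (const struct foo_devtype_data *)pdev->id_entry->driver_data;

            dev_info(&pdev->dev, "ECC strength %d\n", data->ecc_strength);
            return 0;
    }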
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1a03b7f673ce..3766682a0289 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -93,8 +93,7 @@ static struct nand_ecclayout nand_oob_128 = {
.length = 78} }
};
-static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
- int new_state);
+static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
@@ -130,15 +129,12 @@ static int check_offs_len(struct mtd_info *mtd,
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
*
- * Deselect, release chip lock and wake up anyone waiting on the device.
+ * Release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- /* De-select the NAND device */
- chip->select_chip(mtd, -1);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
@@ -160,7 +156,7 @@ static uint8_t nand_read_byte(struct mtd_info *mtd)
}
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* @mtd: MTD device structure
*
@@ -303,7 +299,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
if (getchip) {
chipnr = (int)(ofs >> chip->chip_shift);
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
@@ -333,8 +329,10 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
i++;
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip)
+ if (getchip) {
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
+ }
return res;
}
@@ -383,7 +381,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_oob_ops ops;
loff_t wr_ofs = ofs;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.datbuf = NULL;
ops.oobbuf = buf;
@@ -492,7 +490,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
void nand_wait_ready(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- unsigned long timeo = jiffies + 2;
+ unsigned long timeo = jiffies + msecs_to_jiffies(20);
/* 400ms timeout */
if (in_interrupt() || oops_in_progress)
@@ -750,15 +748,15 @@ static void panic_nand_get_device(struct nand_chip *chip,
/**
* nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
static int
-nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
+nand_get_device(struct mtd_info *mtd, int new_state)
{
+ struct nand_chip *chip = mtd->priv;
spinlock_t *lock = &chip->controller->lock;
wait_queue_head_t *wq = &chip->controller->wq;
DECLARE_WAITQUEUE(wait, current);
@@ -865,6 +863,8 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
return status;
}
@@ -899,7 +899,7 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -932,7 +932,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (ofs + len == mtd->size)
len -= mtd->erasesize;
- nand_get_device(chip, mtd, FL_UNLOCKING);
+ nand_get_device(mtd, FL_UNLOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -950,6 +950,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -981,7 +982,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
if (check_offs_len(mtd, ofs, len))
ret = -EINVAL;
- nand_get_device(chip, mtd, FL_LOCKING);
+ nand_get_device(mtd, FL_LOCKING);
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
@@ -1004,7 +1005,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Call wait ready function */
status = chip->waitfunc(mtd, chip);
/* See if device thinks it succeeded */
- if (status & 0x01) {
+ if (status & NAND_STATUS_FAIL) {
pr_debug("%s: error status = 0x%08x\n",
__func__, status);
ret = -EIO;
@@ -1014,6 +1015,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
ret = __nand_unlock(mtd, ofs, len, 0x1);
out:
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
return ret;
@@ -1550,6 +1552,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -1577,11 +1580,10 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
ops.len = len;
ops.datbuf = buf;
ops.oobbuf = NULL;
@@ -1804,6 +1806,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, chipnr);
}
}
+ chip->select_chip(mtd, -1);
ops->oobretlen = ops->ooblen - readlen;
@@ -1827,7 +1830,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -1839,7 +1841,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_READING);
+ nand_get_device(mtd, FL_READING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2186,8 +2188,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
- return -EIO;
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -2199,8 +2203,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
/* Don't allow multipage oob writes with offset */
- if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
- return -EINVAL;
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
while (1) {
int bytes = mtd->writesize;
@@ -2251,6 +2257,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->retlen = ops->len - writelen;
if (unlikely(oob))
ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
return ret;
}
@@ -2302,11 +2311,10 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
- struct nand_chip *chip = mtd->priv;
struct mtd_oob_ops ops;
int ret;
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.oobbuf = NULL;
@@ -2377,8 +2385,10 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
/* Check, if it is write protected */
- if (nand_check_wp(mtd))
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
return -EROFS;
+ }
/* Invalidate the page cache, if we write to the cached page */
if (page == chip->pagebuf)
@@ -2391,6 +2401,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
else
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+ chip->select_chip(mtd, -1);
+
if (status)
return status;
@@ -2408,7 +2420,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- struct nand_chip *chip = mtd->priv;
int ret = -ENOTSUPP;
ops->retlen = 0;
@@ -2420,7 +2431,7 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- nand_get_device(chip, mtd, FL_WRITING);
+ nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -2513,7 +2524,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_ERASING);
+ nand_get_device(mtd, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -2623,6 +2634,7 @@ erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
nand_release_device(mtd);
/* Do call back function */
@@ -2658,12 +2670,10 @@ erase_exit:
*/
static void nand_sync(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(chip, mtd, FL_SYNCING);
+ nand_get_device(mtd, FL_SYNCING);
/* Release it and go back */
nand_release_device(mtd);
}
@@ -2749,9 +2759,7 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
*/
static int nand_suspend(struct mtd_info *mtd)
{
- struct nand_chip *chip = mtd->priv;
-
- return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
@@ -2849,6 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
+ /* ONFI must be probed in 8-bit mode; 16-bit mode should be selected with NAND_BUSWIDTH_AUTO */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+ return 0;
+ }
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
@@ -2913,7 +2926,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
*
* Check if an ID string is repeated within a given sequence of bytes at
* specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 2). This is a helper function for nand_id_len(). Returns non-zero
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
* if the repetition has a period of @period; otherwise, returns zero.
*/
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
@@ -3242,11 +3255,15 @@ ident_done:
break;
}
- /*
- * Check, if buswidth is correct. Hardware drivers should set
- * chip correct!
- */
- if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check, if buswidth is correct. Hardware drivers should set
+ * chip correct!
+ */
pr_info("NAND device: Manufacturer ID:"
" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
@@ -3285,10 +3302,10 @@ ident_done:
chip->cmdfunc = nand_command_lp;
pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " page size: %d, OOB size: %d\n",
+ " %dMiB, page size: %d, OOB size: %d\n",
*maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
chip->onfi_version ? chip->onfi_params.model : type->name,
- mtd->writesize, mtd->oobsize);
+ (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
return type;
}
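The two preceding hunks add NAND_BUSWIDTH_AUTO support: identification always runs in 8-bit mode (the ONFI probe now refuses to start if NAND_BUSWIDTH_16 is already set), and once the chip is known the detected bus width is merged into chip->options and the default accessors are re-selected. A controller driver that cannot know the width up front would use it roughly like this (hypothetical call site, not part of this patch):

    /* Let nand_scan_ident() determine the bus width of the attached chip. */
    chip->options |= NAND_BUSWIDTH_AUTO;    /* must not set NAND_BUSWIDTH_16 too */
    if (nand_scan_ident(mtd, 1, NULL))
            return -ENXIO;
    /* NAND_BUSWIDTH_16 is now set in chip->options only if the part is x16. */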
@@ -3327,6 +3344,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ chip->select_chip(mtd, -1);
+
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
chip->select_chip(mtd, i);
@@ -3336,8 +3355,11 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd))
+ nand_dev_id != chip->read_byte(mtd)) {
+ chip->select_chip(mtd, -1);
break;
+ }
+ chip->select_chip(mtd, -1);
}
if (i > 1)
pr_info("%d NAND chips detected\n", i);
@@ -3596,9 +3618,6 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Initialize state */
chip->state = FL_READY;
- /* De-select the device */
- chip->select_chip(mtd, -1);
-
/* Invalidate the pagebuffer reference */
chip->pagebuf = -1;
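Taken together, the nand_base changes above make chip deselection explicit at every call site and let nand_get_device() derive the chip from mtd->priv instead of taking it as an argument. The resulting access pattern, reduced to a skeleton (illustrative only, error handling omitted):

    static int nand_do_something(struct mtd_info *mtd, int chipnr)
    {
            struct nand_chip *chip = mtd->priv;
            int ret = 0;

            nand_get_device(mtd, FL_WRITING);   /* lock the controller          */
            chip->select_chip(mtd, chipnr);     /* caller selects the die ...   */

            /* ... performs its I/O ... */

            chip->select_chip(mtd, -1);         /* ... and deselects explicitly */
            nand_release_device(mtd);           /* now only drops the lock      */
            return ret;
    }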
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a932c485eb04..818b65c85d12 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -42,6 +42,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -105,7 +107,6 @@ static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
-static unsigned int rptwear = 0;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
@@ -130,7 +131,6 @@ module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
-module_param(rptwear, uint, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
@@ -162,7 +162,6 @@ MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (z
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
-MODULE_PARM_DESC(rptwear, "Number of erases between reporting wear, if not zero");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
@@ -286,6 +285,11 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
+struct nandsim_debug_info {
+ struct dentry *dfs_root;
+ struct dentry *dfs_wear_report;
+};
+
/*
* A union to represent flash memory contents and flash buffer.
*/
@@ -365,6 +369,8 @@ struct nandsim {
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
+
+ struct nandsim_debug_info dbg;
};
/*
@@ -442,11 +448,123 @@ static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
-static unsigned int rptwear_cnt = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+ unsigned long wmin = -1, wmax = 0, avg;
+ unsigned long deciles[10], decile_max[10], tot = 0;
+ unsigned int i;
+
+ /* Calc wear stats */
+ for (i = 0; i < wear_eb_count; ++i) {
+ unsigned long wear = erase_block_wear[i];
+ if (wear < wmin)
+ wmin = wear;
+ if (wear > wmax)
+ wmax = wear;
+ tot += wear;
+ }
+
+ for (i = 0; i < 9; ++i) {
+ deciles[i] = 0;
+ decile_max[i] = (wmax * (i + 1) + 5) / 10;
+ }
+ deciles[9] = 0;
+ decile_max[9] = wmax;
+ for (i = 0; i < wear_eb_count; ++i) {
+ int d;
+ unsigned long wear = erase_block_wear[i];
+ for (d = 0; d < 10; ++d)
+ if (wear <= decile_max[d]) {
+ deciles[d] += 1;
+ break;
+ }
+ }
+ avg = tot / wear_eb_count;
+
+ /* Output wear report */
+ seq_printf(m, "Total numbers of erases: %lu\n", tot);
+ seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
+ seq_printf(m, "Average number of erases: %lu\n", avg);
+ seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+ seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+ for (i = 0; i < 10; ++i) {
+ unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+ if (from > decile_max[i])
+ continue;
+ seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+ from,
+ decile_max[i],
+ deciles[i]);
+ }
+
+ return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+ .open = nandsim_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for the nandsim device @dev. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+ struct nandsim_debug_info *dbg = &dev->dbg;
+ struct dentry *dent;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dent = debugfs_create_dir("nandsim", NULL);
+ if (IS_ERR_OR_NULL(dent)) {
+ int err = dent ? -ENODEV : PTR_ERR(dent);
+
+ NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+ err);
+ return err;
+ }
+ dbg->dfs_root = dent;
+
+ dent = debugfs_create_file("wear_report", S_IRUSR,
+ dbg->dfs_root, dev, &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dbg->dfs_wear_report = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dbg->dfs_root);
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ns->dbg.dfs_root);
+}
+
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
@@ -911,8 +1029,6 @@ static int setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
- if (!rptwear)
- return 0;
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
@@ -929,64 +1045,18 @@ static int setup_wear_reporting(struct mtd_info *mtd)
static void update_wear(unsigned int erase_block_no)
{
- unsigned long wmin = -1, wmax = 0, avg;
- unsigned long deciles[10], decile_max[10], tot = 0;
- unsigned int i;
-
if (!erase_block_wear)
return;
total_wear += 1;
+ /*
+ * TODO: Notify this through a debugfs entry,
+ * instead of showing an error message.
+ */
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
- rptwear_cnt += 1;
- if (rptwear_cnt < rptwear)
- return;
- rptwear_cnt = 0;
- /* Calc wear stats */
- for (i = 0; i < wear_eb_count; ++i) {
- unsigned long wear = erase_block_wear[i];
- if (wear < wmin)
- wmin = wear;
- if (wear > wmax)
- wmax = wear;
- tot += wear;
- }
- for (i = 0; i < 9; ++i) {
- deciles[i] = 0;
- decile_max[i] = (wmax * (i + 1) + 5) / 10;
- }
- deciles[9] = 0;
- decile_max[9] = wmax;
- for (i = 0; i < wear_eb_count; ++i) {
- int d;
- unsigned long wear = erase_block_wear[i];
- for (d = 0; d < 10; ++d)
- if (wear <= decile_max[d]) {
- deciles[d] += 1;
- break;
- }
- }
- avg = tot / wear_eb_count;
- /* Output wear report */
- NS_INFO("*** Wear Report ***\n");
- NS_INFO("Total numbers of erases: %lu\n", tot);
- NS_INFO("Number of erase blocks: %u\n", wear_eb_count);
- NS_INFO("Average number of erases: %lu\n", avg);
- NS_INFO("Maximum number of erases: %lu\n", wmax);
- NS_INFO("Minimum number of erases: %lu\n", wmin);
- for (i = 0; i < 10; ++i) {
- unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
- if (from > decile_max[i])
- continue;
- NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
- from,
- decile_max[i],
- deciles[i]);
- }
- NS_INFO("*** End of Wear Report ***\n");
}
/*
@@ -1397,10 +1467,7 @@ int do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
+ prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
@@ -2330,6 +2397,9 @@ static int __init ns_init_module(void)
if ((retval = setup_wear_reporting(nsmtd)) != 0)
goto err_exit;
+ if ((retval = nandsim_debugfs_create(nand)) != 0)
+ goto err_exit;
+
if ((retval = init_nandsim(nsmtd)) != 0)
goto err_exit;
@@ -2369,6 +2439,7 @@ static void __exit ns_cleanup_module(void)
struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
int i;
+ nandsim_debugfs_remove(ns);
free_nandsim(ns); /* Free nandsim private resources */
nand_release(nsmtd); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
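With the rptwear parameter gone, nandsim no longer dumps wear statistics to the kernel log every N erases; the same report is produced on demand through the debugfs file created above. Assuming debugfs is mounted at the usual location, it can be read with cat /sys/kernel/debug/nandsim/wear_report.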
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 5fd3f010e3ae..8e148f1478fd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -197,7 +197,7 @@ err:
return ret;
}
-static int __devinit ndfc_probe(struct platform_device *ofdev)
+static int ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc;
const __be32 *reg;
@@ -256,7 +256,7 @@ static int __devinit ndfc_probe(struct platform_device *ofdev)
return 0;
}
-static int __devexit ndfc_remove(struct platform_device *ofdev)
+static int ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
@@ -279,7 +279,7 @@ static struct platform_driver ndfc_driver = {
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
- .remove = __devexit_p(ndfc_remove),
+ .remove = ndfc_remove,
};
module_platform_driver(ndfc_driver);
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
deleted file mode 100644
index 9ee0c4edfacf..000000000000
--- a/drivers/mtd/nand/nomadik_nand.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * drivers/mtd/nand/nomadik_nand.c
- *
- * Overview:
- * Driver for on-board NAND flash on Nomadik Platforms
- *
- * Copyright © 2007 STMicroelectronics Pvt. Ltd.
- * Author: Sachin Verma <sachin.verma@st.com>
- *
- * Copyright © 2009 Alessandro Rubini
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/platform_data/mtd-nomadik-nand.h>
-#include <mach/fsmc.h>
-
-#include <mtd/mtd-abi.h>
-
-struct nomadik_nand_host {
- struct mtd_info mtd;
- struct nand_chip nand;
- void __iomem *data_va;
- void __iomem *cmd_va;
- void __iomem *addr_va;
- struct nand_bbt_descr *bbt_desc;
-};
-
-static struct nand_ecclayout nomadik_ecc_layout = {
- .eccbytes = 3 * 4,
- .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
- 0x02, 0x03, 0x04,
- 0x12, 0x13, 0x14,
- 0x22, 0x23, 0x24,
- 0x32, 0x33, 0x34},
- /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
- .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
-};
-
-static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
-{
- /* No need to enable hw ecc, it's on by default */
-}
-
-static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *nand = mtd->priv;
- struct nomadik_nand_host *host = nand->priv;
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, host->cmd_va);
- else
- writeb(cmd, host->addr_va);
-}
-
-static int nomadik_nand_probe(struct platform_device *pdev)
-{
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
- struct nomadik_nand_host *host;
- struct mtd_info *mtd;
- struct nand_chip *nand;
- struct resource *res;
- int ret = 0;
-
- /* Allocate memory for the device structure (and zero it) */
- host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
- if (!host) {
- dev_err(&pdev->dev, "Failed to allocate device structure.\n");
- return -ENOMEM;
- }
-
- /* Call the client's init function, if any */
- if (pdata->init)
- ret = pdata->init();
- if (ret < 0) {
- dev_err(&pdev->dev, "Init function failed\n");
- goto err;
- }
-
- /* ioremap three regions */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->addr_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->data_va = ioremap(res->start, resource_size(res));
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
- if (!res) {
- ret = -EIO;
- goto err_unmap;
- }
- host->cmd_va = ioremap(res->start, resource_size(res));
-
- if (!host->addr_va || !host->data_va || !host->cmd_va) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- /* Link all private pointers */
- mtd = &host->mtd;
- nand = &host->nand;
- mtd->priv = nand;
- nand->priv = host;
-
- host->mtd.owner = THIS_MODULE;
- nand->IO_ADDR_R = host->data_va;
- nand->IO_ADDR_W = host->data_va;
- nand->cmd_ctrl = nomadik_cmd_ctrl;
-
- /*
- * This stanza declares ECC_HW but uses soft routines. It's because
- * HW claims to make the calculation but not the correction. However,
- * I haven't managed to get the desired data out of it until now.
- */
- nand->ecc.mode = NAND_ECC_SOFT;
- nand->ecc.layout = &nomadik_ecc_layout;
- nand->ecc.hwctl = nomadik_ecc_control;
- nand->ecc.size = 512;
- nand->ecc.bytes = 3;
-
- nand->options = pdata->options;
-
- /*
- * Scan to find existence of the device
- */
- if (nand_scan(&host->mtd, 1)) {
- ret = -ENXIO;
- goto err_unmap;
- }
-
- mtd_device_register(&host->mtd, pdata->parts, pdata->nparts);
-
- platform_set_drvdata(pdev, host);
- return 0;
-
- err_unmap:
- if (host->cmd_va)
- iounmap(host->cmd_va);
- if (host->data_va)
- iounmap(host->data_va);
- if (host->addr_va)
- iounmap(host->addr_va);
- err:
- kfree(host);
- return ret;
-}
-
-/*
- * Clean up routine
- */
-static int nomadik_nand_remove(struct platform_device *pdev)
-{
- struct nomadik_nand_host *host = platform_get_drvdata(pdev);
- struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
-
- if (pdata->exit)
- pdata->exit();
-
- if (host) {
- nand_release(&host->mtd);
- iounmap(host->cmd_va);
- iounmap(host->data_va);
- iounmap(host->addr_va);
- kfree(host);
- }
- return 0;
-}
-
-static int nomadik_nand_suspend(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- int ret = 0;
- if (host)
- ret = mtd_suspend(&host->mtd);
- return ret;
-}
-
-static int nomadik_nand_resume(struct device *dev)
-{
- struct nomadik_nand_host *host = dev_get_drvdata(dev);
- if (host)
- mtd_resume(&host->mtd);
- return 0;
-}
-
-static const struct dev_pm_ops nomadik_nand_pm_ops = {
- .suspend = nomadik_nand_suspend,
- .resume = nomadik_nand_resume,
-};
-
-static struct platform_driver nomadik_nand_driver = {
- .probe = nomadik_nand_probe,
- .remove = nomadik_nand_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "nomadik_nand",
- .pm = &nomadik_nand_pm_ops,
- },
-};
-
-module_platform_driver(nomadik_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
-MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 94dc46bc118c..a6191198d259 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -246,7 +246,7 @@ static void nuc900_nand_enable(struct nuc900_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit nuc900_nand_probe(struct platform_device *pdev)
+static int nuc900_nand_probe(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
@@ -317,7 +317,7 @@ fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit nuc900_nand_remove(struct platform_device *pdev)
+static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
@@ -340,7 +340,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev)
static struct platform_driver nuc900_nand_driver = {
.probe = nuc900_nand_probe,
- .remove = __devexit_p(nuc900_nand_remove),
+ .remove = nuc900_nand_remove,
.driver = {
.name = "nuc900-fmi",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5b3138620646..0002d5e94f0d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -27,8 +27,6 @@
#include <linux/bch.h>
#endif
-#include <plat/dma.h>
-#include <plat/gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
@@ -106,10 +104,18 @@
#define CS_MASK 0x7
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE0_SHIFT 12
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
#define ECC1 0x1
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define STATUS_BUFF_EMPTY 0x00000001
+
+#define OMAP24XX_DMA_GPMC 4
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
@@ -269,7 +275,7 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
/* wait until buffer is available for write */
do {
status = readl(info->reg.gpmc_status) &
- GPMC_STATUS_BUFF_EMPTY;
+ STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -307,7 +313,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
/* wait until buffer is available for write */
do {
status = readl(info->reg.gpmc_status) &
- GPMC_STATUS_BUFF_EMPTY;
+ STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -348,7 +354,7 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
- r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
+ r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
@@ -395,7 +401,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
- w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
+ w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -407,7 +413,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
do {
cpu_relax();
val = readl(info->reg.gpmc_prefetch_status);
- val = GPMC_PREFETCH_STATUS_COUNT(val);
+ val = PREFETCH_STATUS_COUNT(val);
} while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
@@ -493,7 +499,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
do {
cpu_relax();
val = readl(info->reg.gpmc_prefetch_status);
- val = GPMC_PREFETCH_STATUS_COUNT(val);
+ val = PREFETCH_STATUS_COUNT(val);
} while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
@@ -556,7 +562,7 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
u32 bytes;
bytes = readl(info->reg.gpmc_prefetch_status);
- bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
+ bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
if (this_irq == info->gpmc_irq_count)
@@ -682,7 +688,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
do {
val = readl(info->reg.gpmc_prefetch_status);
- val = GPMC_PREFETCH_STATUS_COUNT(val);
+ val = PREFETCH_STATUS_COUNT(val);
cpu_relax();
} while (val && (tim++ < limit));
@@ -996,7 +1002,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
cond_resched();
}
- status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ status = readb(info->reg.gpmc_nand_data);
return status;
}
@@ -1029,19 +1035,45 @@ static int omap_dev_ready(struct mtd_info *mtd)
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
int nerrors;
- unsigned int dev_width;
+ unsigned int dev_width, nsectors;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_chip *chip = mtd->priv;
+ u32 val;
nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ nsectors = 1;
/*
* Program GPMC to perform correction on one 512-byte sector at a time.
* Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
* gives a slight (5%) performance gain (but requires additional code).
*/
- (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+
+ writel(ECC1, info->reg.gpmc_ecc_control);
+
+ /*
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Here we are using wrapping mode 6 both for reading and writing, with:
+ * size0 = 0 (no additional protected byte in spare area)
+ * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+ val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ /* BCH configuration */
+ val = ((1 << 16) | /* enable BCH */
+ (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
+ (0x06 << 8) | /* wrap mode = 6 */
+ (dev_width << 7) | /* bus width */
+ (((nsectors-1) & 0x7) << 4) | /* number of sectors */
+ (info->gpmc_cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+
+ writel(val, info->reg.gpmc_ecc_config);
+
+ /* clear ecc and enable bits */
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
/**
@@ -1055,7 +1087,32 @@ static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+ unsigned long nsectors, val1, val2;
+ int i;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+
+ for (i = 0; i < nsectors; i++) {
+
+ /* Read hw-computed remainder */
+ val1 = readl(info->reg.gpmc_bch_result0[i]);
+ val2 = readl(info->reg.gpmc_bch_result1[i]);
+
+ /*
+ * Add constant polynomial to remainder, in order to get an ecc
+ * sequence of 0xFFs for a buffer filled with 0xFFs; and
+ * left-justify the resulting polynomial.
+ */
+ *ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF);
+ *ecc_code++ = 0x13 ^ ((val2 >> 4) & 0xFF);
+ *ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
+ *ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF);
+ *ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF);
+ *ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF);
+ *ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4);
+ }
+
+ return 0;
}
/**
@@ -1069,7 +1126,39 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+ unsigned long nsectors, val1, val2, val3, val4;
+ int i;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+
+ for (i = 0; i < nsectors; i++) {
+
+ /* Read hw-computed remainder */
+ val1 = readl(info->reg.gpmc_bch_result0[i]);
+ val2 = readl(info->reg.gpmc_bch_result1[i]);
+ val3 = readl(info->reg.gpmc_bch_result2[i]);
+ val4 = readl(info->reg.gpmc_bch_result3[i]);
+
+ /*
+ * Add constant polynomial to remainder, in order to get an ecc
+ * sequence of 0xFFs for a buffer filled with 0xFFs.
+ */
+ *ecc_code++ = 0xef ^ (val4 & 0xFF);
+ *ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF);
+ *ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF);
+ *ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF);
+ *ecc_code++ = 0xed ^ (val3 & 0xFF);
+ *ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF);
+ *ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF);
+ *ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
+ *ecc_code++ = 0x97 ^ (val2 & 0xFF);
+ *ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF);
+ *ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
+ *ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF);
+ *ecc_code++ = 0xb5 ^ (val1 & 0xFF);
+ }
+
+ return 0;
}
/**
@@ -1125,7 +1214,7 @@ static void omap3_free_bch(struct mtd_info *mtd)
*/
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
- int ret, max_errors;
+ int max_errors;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
@@ -1142,11 +1231,6 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
goto fail;
}
- /* initialize GPMC BCH engine */
- ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
- if (ret)
- goto fail;
-
/* software bch library is only used to detect and locate errors */
info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
if (!info->bch)
@@ -1239,7 +1323,7 @@ static void omap3_free_bch(struct mtd_info *mtd)
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */
-static int __devinit omap_nand_probe(struct platform_device *pdev)
+static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
@@ -1513,7 +1597,7 @@ static int omap_nand_remove(struct platform_device *pdev)
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
- release_mem_region(info->phys_base, NAND_IO_SIZE);
+ release_mem_region(info->phys_base, info->mem_size);
kfree(info);
return 0;
}
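The BCH hunks above program the GPMC ECC engine registers directly instead of going through the removed plat/gpmc helpers. As a worked example of the two configuration words written by omap3_enable_hwecc_bch(), using the values from the patch (one 512-byte sector, 8-bit correction, 16-bit bus, chip-select 0 assumed):

    /* gpmc_ecc_size_config: size1 = 32 nibbles, size0 = 0 */
    unsigned int size_cfg = (32u << 22) | (0u << 12);       /* = 0x08000000 */

    /* gpmc_ecc_config, assembled bit by bit as in the patch */
    unsigned int ecc_cfg = (1u << 16)       /* enable BCH           */
                         | (1u << 12)       /* 8-bit correction     */
                         | (0x06u << 8)     /* wrap mode 6          */
                         | (1u << 7)        /* 16-bit device        */
                         | (0u << 4)        /* nsectors - 1 = 0     */
                         | (0u << 1)        /* ECC on CS0 (assumed) */
                         | 0x1u;            /* enable ECC           */
                                            /* = 0x00011681         */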
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index aefaf8cd31ef..cd72b9299f6b 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -194,7 +194,7 @@ no_res:
return ret;
}
-static int __devexit orion_nand_remove(struct platform_device *pdev)
+static int orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
@@ -223,7 +223,7 @@ static struct of_device_id orion_nand_of_match_table[] = {
#endif
static struct platform_driver orion_nand_driver = {
- .remove = __devexit_p(orion_nand_remove),
+ .remove = orion_nand_remove,
.driver = {
.name = "orion_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 1440e51cedcc..5a67082c07ee 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -89,7 +89,7 @@ int pasemi_device_ready(struct mtd_info *mtd)
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
-static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
+static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct pci_dev *pdev;
struct device_node *np = ofdev->dev.of_node;
@@ -184,7 +184,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit pasemi_nand_remove(struct platform_device *ofdev)
+static int pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a47ee68a0cfa..c004566a9ad2 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -28,7 +28,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
/*
* Probe for the NAND device.
*/
-static int __devinit plat_nand_probe(struct platform_device *pdev)
+static int plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct mtd_part_parser_data ppdata;
@@ -134,7 +134,7 @@ out_free:
/*
* Remove a NAND device.
*/
-static int __devexit plat_nand_remove(struct platform_device *pdev)
+static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = pdev->dev.platform_data;
@@ -160,7 +160,7 @@ MODULE_DEVICE_TABLE(of, plat_nand_match);
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
+ .remove = plat_nand_remove,
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 295e4bedad96..df954b4dcba2 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -730,11 +730,14 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
- if (set)
+ if (set) {
mtd->mtd.name = set->name;
- return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
+ return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
set->partitions, set->nr_partitions);
+ }
+
+ return -ENODEV;
}
/**
@@ -879,7 +882,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
if (chip->ecc.mode != NAND_ECC_HW)
return;
- /* change the behaviour depending on wether we are using
+ /* change the behaviour depending on whether we are using
* the large or small page nand device */
if (chip->page_shift > 10) {
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 4fbfe96e37a1..57b3971c9c0a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -23,11 +23,18 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -106,6 +113,84 @@ static void wait_completion(struct sh_flctl *flctl)
writeb(0x0, FLTRCR(flctl));
}
+static void flctl_dma_complete(void *param)
+{
+ struct sh_flctl *flctl = param;
+
+ complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+ if (flctl->chan_fifo0_rx) {
+ dma_release_channel(flctl->chan_fifo0_rx);
+ flctl->chan_fifo0_rx = NULL;
+ }
+ if (flctl->chan_fifo0_tx) {
+ dma_release_channel(flctl->chan_fifo0_tx);
+ flctl->chan_fifo0_tx = NULL;
+ }
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+ dma_cap_mask_t mask;
+ struct dma_slave_config cfg;
+ struct platform_device *pdev = flctl->pdev;
+ struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+ return;
+
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_tx);
+ dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+ flctl->chan_fifo0_tx);
+
+ if (!flctl->chan_fifo0_tx)
+ return;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.slave_id = pdata->slave_id_fifo0_tx;
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_fifo0_rx);
+ dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+ flctl->chan_fifo0_rx);
+
+ if (!flctl->chan_fifo0_rx)
+ goto err;
+
+ cfg.slave_id = pdata->slave_id_fifo0_rx;
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
+ ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ if (ret < 0)
+ goto err;
+
+ init_completion(&flctl->dma_complete);
+
+ return;
+
+err:
+ flctl_release_dma(flctl);
+}
+
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -225,7 +310,7 @@ static enum flctl_ecc_res_t wait_recfifo_ready
for (i = 0; i < 3; i++) {
uint8_t org;
- int index;
+ unsigned int index;
data = readl(ecc_reg[i]);
@@ -261,6 +346,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl)
timeout_error(flctl, __func__);
}
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+ int len, enum dma_data_direction dir)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan;
+ enum dma_transfer_direction tr_dir;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie = -EINVAL;
+ uint32_t reg;
+ int ret;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = flctl->chan_fifo0_rx;
+ tr_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = flctl->chan_fifo0_tx;
+ tr_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+ if (dma_addr)
+ desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+ tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (desc) {
+ reg = readl(FLINTDMACR(flctl));
+ reg |= DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ desc->callback = flctl_dma_complete;
+ desc->callback_param = flctl;
+ cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(chan);
+ } else {
+ /* DMA failed, fall back to PIO */
+ flctl_release_dma(flctl);
+ dev_warn(&flctl->pdev->dev,
+ "DMA failed, falling back to PIO\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret =
+ wait_for_completion_timeout(&flctl->dma_complete,
+ msecs_to_jiffies(3000));
+
+ if (ret <= 0) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ }
+
+out:
+ reg = readl(FLINTDMACR(flctl));
+ reg &= ~DREQ0EN;
+ writel(reg, FLINTDMACR(flctl));
+
+ dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+ /* ret > 0 is success */
+ return ret;
+}
+
static void read_datareg(struct sh_flctl *flctl, int offset)
{
unsigned long data;
@@ -279,11 +428,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
len_4align = (rlen + 3) / 4;
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_rx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ goto convert; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
buf[i] = readl(FLDTFIFO(flctl));
- buf[i] = be32_to_cpu(buf[i]);
}
+
+convert:
+ for (i = 0; i < len_4align; i++)
+ buf[i] = be32_to_cpu(buf[i]);
}
static enum flctl_ecc_res_t read_ecfiforeg
@@ -305,28 +463,39 @@ static enum flctl_ecc_res_t read_ecfiforeg
return res;
}
-static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
- void *fifo_addr = (void *)FLDTFIFO(flctl);
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_wfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), fifo_addr);
+ writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
}
}
-static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ unsigned int offset)
{
int i, len_4align;
- unsigned long *data = (unsigned long *)&flctl->done_buff[offset];
+ unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
+
+ for (i = 0; i < len_4align; i++)
+ buf[i] = cpu_to_be32(buf[i]);
+
+ /* initiate DMA transfer */
+ if (flctl->chan_fifo0_tx && rlen >= 32 &&
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ return; /* DMA success */
+
+ /* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_wecfifo_ready(flctl);
- writel(cpu_to_be32(data[i]), FLECFIFO(flctl));
+ writel(buf[i], FLECFIFO(flctl));
}
}
@@ -727,7 +896,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
- &flctl->pm_qos, 100);
+ &flctl->pm_qos,
+ DEV_PM_QOS_LATENCY,
+ 100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
"PM QoS request failed: %d\n", ret);
@@ -748,41 +919,35 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(&flctl->done_buff[index], buf, len);
+ memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
uint8_t data;
- data = flctl->done_buff[index];
+ data = flctl->done_buff[flctl->index];
flctl->index++;
return data;
}
static uint16_t flctl_read_word(struct mtd_info *mtd)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- uint16_t data;
- uint16_t *buf = (uint16_t *)&flctl->done_buff[index];
+ struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
- data = *buf;
- flctl->index += 2;
- return data;
+ flctl->index += 2;
+ return *buf;
}
static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
- int index = flctl->index;
- memcpy(buf, &flctl->done_buff[index], len);
+ memcpy(buf, &flctl->done_buff[flctl->index], len);
flctl->index += len;
}
@@ -856,7 +1021,74 @@ static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit flctl_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+struct flctl_soc_config {
+ unsigned long flcmncr_val;
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+ .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+ .has_hwecc = 1,
+ .use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+ { .compatible = "renesas,shmobile-flctl-sh7372",
+ .data = &flctl_sh7372_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct flctl_soc_config *config;
+ struct sh_flctl_platform_data *pdata;
+ struct device_node *dn = dev->of_node;
+ int ret;
+
+ match = of_match_device(of_flctl_match, dev);
+ if (match)
+ config = (struct flctl_soc_config *)match->data;
+ else {
+ dev_err(dev, "%s: no OF configuration attached\n", __func__);
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "%s: failed to allocate config data\n", __func__);
+ return NULL;
+ }
+
+ /* set SoC specific options */
+ pdata->flcmncr_val = config->flcmncr_val;
+ pdata->has_hwecc = config->has_hwecc;
+ pdata->use_holden = config->use_holden;
+
+ /* parse user defined options */
+ ret = of_get_nand_bus_width(dn);
+ if (ret == 16)
+ pdata->flcmncr_val |= SEL_16BIT;
+ else if (ret != 8) {
+ dev_err(dev, "%s: invalid bus width\n", __func__);
+ return NULL;
+ }
+
+ return pdata;
+}
+#else /* CONFIG_OF */
+#define of_flctl_match NULL
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static int flctl_probe(struct platform_device *pdev)
{
struct resource *res;
struct sh_flctl *flctl;
@@ -865,12 +1097,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
struct sh_flctl_platform_data *pdata;
int ret = -ENXIO;
int irq;
-
- pdata = pdev->dev.platform_data;
- if (pdata == NULL) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
+ struct mtd_part_parser_data ppdata = {};
flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
if (!flctl) {
@@ -902,6 +1129,17 @@ static int __devinit flctl_probe(struct platform_device *pdev)
goto err_flste;
}
+ if (pdev->dev.of_node)
+ pdata = flctl_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no setup data defined\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+
platform_set_drvdata(pdev, flctl);
flctl_mtd = &flctl->mtd;
nand = &flctl->chip;
@@ -930,6 +1168,8 @@ static int __devinit flctl_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ flctl_setup_dma(flctl);
+
ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err_chip;
@@ -942,12 +1182,16 @@ static int __devinit flctl_probe(struct platform_device *pdev)
if (ret)
goto err_chip;
- mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ ret = mtd_device_parse_register(flctl_mtd, NULL, &ppdata, pdata->parts,
+ pdata->nr_parts);
return 0;
err_chip:
+ flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
+err_pdata:
free_irq(irq, flctl);
err_flste:
iounmap(flctl->reg);
@@ -956,10 +1200,11 @@ err_iomap:
return ret;
}
-static int __devexit flctl_remove(struct platform_device *pdev)
+static int flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ flctl_release_dma(flctl);
nand_release(&flctl->mtd);
pm_runtime_disable(&pdev->dev);
free_irq(platform_get_irq(pdev, 0), flctl);
@@ -974,6 +1219,7 @@ static struct platform_driver flctl_driver = {
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
+ .of_match_table = of_flctl_match,
},
};
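
The DMA path added above only kicks in when the board's platform data carries positive shdma slave IDs; otherwise flctl_setup_dma() bails out and the driver stays on PIO. A rough sketch of the board-side wiring, assuming the SoC defines FLCTL DMA slave IDs (the SHDMA_SLAVE_* names below are placeholders; the field names come from the hunks above):

/* Hypothetical board glue - illustration only, not part of this patch set. */
static struct sh_flctl_platform_data flctl_pdata = {
	.flcmncr_val		= CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
	.has_hwecc		= 1,
	.use_holden		= 1,
	/* assumed SoC-specific slave IDs; values <= 0 keep the driver in PIO mode */
	.slave_id_fifo0_tx	= SHDMA_SLAVE_FLCTL0_TX,
	.slave_id_fifo0_rx	= SHDMA_SLAVE_FLCTL0_RX,
};

With this in place, flctl_setup_dma() requests one TX and one RX channel through shdma_chan_filter() and falls back to PIO if either request or slave config fails, which is what the error path calling flctl_release_dma() handles.
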
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 3421e3762a5a..127bc4271821 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -106,7 +106,7 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat,
/*
* Main initialization routine
*/
-static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
+static int sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct resource *r;
@@ -205,7 +205,7 @@ err_get_res:
/*
* Clean up routine
*/
-static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
+static int sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
@@ -228,7 +228,7 @@ static struct platform_driver sharpsl_nand_driver = {
.owner = THIS_MODULE,
},
.probe = sharpsl_nand_probe,
- .remove = __devexit_p(sharpsl_nand_remove),
+ .remove = sharpsl_nand_remove,
};
module_platform_driver(sharpsl_nand_driver);
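
For context on the __devinit/__devexit removals in this and the following drivers: those annotations placed probe/remove code in discardable sections when hotplug support was compiled out, so remove callbacks had to be referenced through __devexit_p(), which degraded to NULL in that configuration. With hotplug now always built in, the annotations are no-ops and get dropped tree-wide. A minimal sketch of the old pattern versus what the hunks leave behind (illustrative only):

/* Old style: section annotation plus pointer wrapper. */
static int __devexit foo_remove(struct platform_device *pdev);

static struct platform_driver foo_driver_old = {
	.remove	= __devexit_p(foo_remove),	/* NULL if the section was discarded */
};

/* New style, as in the hunks above: plain function, plain pointer. */
static int foo_remove_new(struct platform_device *pdev);

static struct platform_driver foo_driver_new = {
	.remove	= foo_remove_new,
};
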
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index f3f28fafbf7a..09dde7d27178 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -140,7 +140,7 @@ static int socrates_nand_device_ready(struct mtd_info *mtd)
/*
* Probe for the NAND device.
*/
-static int __devinit socrates_nand_probe(struct platform_device *ofdev)
+static int socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
@@ -220,7 +220,7 @@ out:
/*
* Remove a NAND device.
*/
-static int __devexit socrates_nand_remove(struct platform_device *ofdev)
+static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = &host->mtd;
@@ -251,7 +251,7 @@ static struct platform_driver socrates_nand_driver = {
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
- .remove = __devexit_p(socrates_nand_remove),
+ .remove = socrates_nand_remove,
};
module_platform_driver(socrates_nand_driver);
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
deleted file mode 100644
index bef76cd7c24c..000000000000
--- a/drivers/mtd/nand/spia.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * drivers/mtd/nand/spia.c
- *
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- *
- * 10-29-2001 TG change to support hardwarespecific access
- * to controllines (due to change in nand.c)
- * page_cache added
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * SPIA board which utilizes the Toshiba TC58V64AFT part. This is
- * a 64Mibit (8MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for SPIA board
- */
-static struct mtd_info *spia_mtd = NULL;
-
-/*
- * Values specific to the SPIA board (used with EP7212 processor)
- */
-#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
-#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
-#define SPIA_PEDR 0x0080 /*
- * IO offset to Port E data register
- * where the CLE, ALE and NCE pins
- * are wired to.
- */
-#define SPIA_PEDDR 0x00c0 /*
- * IO offset to Port E data direction
- * register so we can control the IO
- * lines.
- */
-
-/*
- * Module stuff
- */
-
-static int spia_io_base = SPIA_IO_BASE;
-static int spia_fio_base = SPIA_FIO_BASE;
-static int spia_pedr = SPIA_PEDR;
-static int spia_peddr = SPIA_PEDDR;
-
-module_param(spia_io_base, int, 0);
-module_param(spia_fio_base, int, 0);
-module_param(spia_pedr, int, 0);
-module_param(spia_peddr, int, 0);
-
-/*
- * Define partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "SPIA flash partition 1",
- .offset = 0,
- .size = 2 * 1024 * 1024},
- {
- .name = "SPIA flash partition 2",
- .offset = 2 * 1024 * 1024,
- .size = 6 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 2
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_CNE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 0
- * NAND_ALE: bit 2 -> bit 1
- */
-static void spia_hwcontrol(struct mtd_info *mtd, int cmd)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr = spia_io_base + spia_pedr;
- unsigned char bits;
-
- bits = (ctrl & NAND_CNE) << 2;
- bits |= (ctrl & NAND_CLE | NAND_ALE) >> 1;
- writeb((readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * Main initialization routine
- */
-static int __init spia_init(void)
-{
- struct nand_chip *this;
-
- /* Allocate memory for MTD device structure and private data */
- spia_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!spia_mtd) {
- printk("Unable to allocate SPIA NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&spia_mtd[1]);
-
- /* Initialize structures */
- memset(spia_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- spia_mtd->priv = this;
- spia_mtd->owner = THIS_MODULE;
-
- /*
- * Set GPIO Port E control register so that the pins are configured
- * to be outputs for controlling the NAND flash.
- */
- (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = (void __iomem *)spia_fio_base;
- this->IO_ADDR_W = (void __iomem *)spia_fio_base;
- /* Set address of hardware control function */
- this->cmd_ctrl = spia_hwcontrol;
- /* 15 us command delay time */
- this->chip_delay = 15;
-
- /* Scan to find existence of the device */
- if (nand_scan(spia_mtd, 1)) {
- kfree(spia_mtd);
- return -ENXIO;
- }
-
- /* Register the partitions */
- mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(spia_init);
-
-/*
- * Clean up routine
- */
-static void __exit spia_cleanup(void)
-{
- /* Release resources, unregister device */
- nand_release(spia_mtd);
-
- /* Free the MTD device structure */
- kfree(spia_mtd);
-}
-
-module_exit(spia_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com");
-MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index d9127e2ed808..dbd3aa574eaf 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -71,7 +71,10 @@ static int parse_ofpart_partitions(struct mtd_info *master,
(*pparts)[i].name = (char *)partname;
if (of_get_property(pp, "read-only", &len))
- (*pparts)[i].mask_flags = MTD_WRITEABLE;
+ (*pparts)[i].mask_flags |= MTD_WRITEABLE;
+
+ if (of_get_property(pp, "lock", &len))
+ (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
i++;
}
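
The ofpart change is small but easy to misread: mask_flags lists capabilities to strip from the parent device, so a "read-only" property sets MTD_WRITEABLE there to refuse writes, and the new "lock" property sets MTD_POWERUP_LOCK to mark the partition as locked at power-up. A rough illustration of how the bits are consumed later (paraphrased from the partition setup code, not part of this patch):

/*
 * In a partition node:
 *   read-only;  ->  mask_flags |= MTD_WRITEABLE     (writes to this partition refused)
 *   lock;       ->  mask_flags |= MTD_POWERUP_LOCK  (partition must be unlocked before use)
 *
 * The partition core then roughly does:
 *   slave->mtd.flags = master->flags & ~part->mask_flags;
 */
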
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 1c4f97c63e62..9f11562f849d 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -35,7 +35,7 @@ struct onenand_info {
struct onenand_chip onenand;
};
-static int __devinit generic_onenand_probe(struct platform_device *pdev)
+static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
struct onenand_platform_data *pdata = pdev->dev.platform_data;
@@ -88,7 +88,7 @@ out_free_info:
return err;
}
-static int __devexit generic_onenand_remove(struct platform_device *pdev)
+static int generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
@@ -112,7 +112,7 @@ static struct platform_driver generic_onenand_driver = {
.owner = THIS_MODULE,
},
.probe = generic_onenand_probe,
- .remove = __devexit_p(generic_onenand_remove),
+ .remove = generic_onenand_remove,
};
module_platform_driver(generic_onenand_driver);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 1961be985171..065f3fe02a2f 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -38,12 +38,10 @@
#include <linux/regulator/consumer.h>
#include <asm/mach/flash.h>
-#include <plat/gpmc.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>
-#include <plat/dma.h>
-#include <plat/cpu.h>
+#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -63,6 +61,7 @@ struct omap2_onenand {
int freq;
int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
+ u8 flags;
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
@@ -155,7 +154,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (cpu_is_omap34xx())
+ if (c->flags & ONENAND_IN_OMAP34XX)
/* Add a delay to let GPIO settle */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
@@ -446,13 +445,19 @@ out_copy:
#else
-int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count);
+static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ return -ENOSYS;
+}
-int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count);
+static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ return -ENOSYS;
+}
#endif
@@ -550,13 +555,19 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
#else
-int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count);
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ return -ENOSYS;
+}
-int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count);
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer,
+ int offset, size_t count)
+{
+ return -ENOSYS;
+}
#endif
@@ -619,7 +630,7 @@ static int omap2_onenand_disable(struct mtd_info *mtd)
return ret;
}
-static int __devinit omap2_onenand_probe(struct platform_device *pdev)
+static int omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
@@ -639,6 +650,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
init_completion(&c->irq_done);
init_completion(&c->dma_done);
+ c->flags = pdata->flags;
c->gpmc_cs = pdata->cs;
c->gpio_irq = pdata->gpio_irq;
c->dma_channel = pdata->dma_channel;
@@ -729,7 +741,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
this = &c->onenand;
if (c->dma_channel >= 0) {
this->wait = omap2_onenand_wait;
- if (cpu_is_omap34xx()) {
+ if (c->flags & ONENAND_IN_OMAP34XX) {
this->read_bufferram = omap3_onenand_read_bufferram;
this->write_bufferram = omap3_onenand_write_bufferram;
} else {
@@ -787,7 +799,7 @@ err_kfree:
return r;
}
-static int __devexit omap2_onenand_remove(struct platform_device *pdev)
+static int omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -803,7 +815,6 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
}
iounmap(c->onenand.base);
release_mem_region(c->phys_base, c->mem_size);
- gpmc_cs_free(c->gpmc_cs);
kfree(c);
return 0;
@@ -811,7 +822,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = __devexit_p(omap2_onenand_remove),
+ .remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
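
The omap2 hunks replace runtime cpu_is_omap34xx() checks with a flag passed in through platform data, which is what lets the <plat/*> includes go away. A sketch of the platform side, assuming typical OMAP3 board glue (ONENAND_IN_OMAP34XX and the pdata field names are taken from the hunks; the concrete values are made up):

/* Hypothetical OMAP3 board data - illustration only. */
static struct omap_onenand_platform_data board_onenand_data = {
	.cs		= 0,
	.gpio_irq	= 65,
	.dma_channel	= -1,			/* negative value disables the DMA paths */
	.flags		= ONENAND_IN_OMAP34XX,	/* what c->flags is tested against above */
};
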
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 8e4b3f2742ba..33f2a8fb8df9 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1053,7 +1053,7 @@ onenand_fail:
return err;
}
-static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+static int s3c_onenand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
@@ -1130,7 +1130,7 @@ static struct platform_driver s3c_onenand_driver = {
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
- .remove = __devexit_p(s3c_onenand_remove),
+ .remove = s3c_onenand_remove,
};
module_platform_driver(s3c_onenand_driver);
diff --git a/drivers/mtd/tests/mtd_nandbiterrs.c b/drivers/mtd/tests/mtd_nandbiterrs.c
index cc8d62cb280c..207bf9a9972f 100644
--- a/drivers/mtd/tests/mtd_nandbiterrs.c
+++ b/drivers/mtd/tests/mtd_nandbiterrs.c
@@ -39,6 +39,9 @@
* this program; see the file COPYING. If not, write to the Free Software
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -47,8 +50,6 @@
#include <linux/mtd/nand.h>
#include <linux/slab.h>
-#define msg(FMT, VA...) pr_info("mtd_nandbiterrs: "FMT, ##VA)
-
static int dev;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -103,7 +104,7 @@ static int erase_block(void)
struct erase_info ei;
loff_t addr = eraseblock * mtd->erasesize;
- msg("erase_block\n");
+ pr_info("erase_block\n");
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = mtd;
@@ -112,7 +113,7 @@ static int erase_block(void)
err = mtd_erase(mtd, &ei);
if (err || ei.state == MTD_ERASE_FAILED) {
- msg("error %d while erasing\n", err);
+ pr_err("error %d while erasing\n", err);
if (!err)
err = -EIO;
return err;
@@ -128,11 +129,11 @@ static int write_page(int log)
size_t written;
if (log)
- msg("write_page\n");
+ pr_info("write_page\n");
err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
if (err || written != mtd->writesize) {
- msg("error: write failed at %#llx\n", (long long)offset);
+ pr_err("error: write failed at %#llx\n", (long long)offset);
if (!err)
err = -EIO;
}
@@ -147,7 +148,7 @@ static int rewrite_page(int log)
struct mtd_oob_ops ops;
if (log)
- msg("rewrite page\n");
+ pr_info("rewrite page\n");
ops.mode = MTD_OPS_RAW; /* No ECC */
ops.len = mtd->writesize;
@@ -160,7 +161,7 @@ static int rewrite_page(int log)
err = mtd_write_oob(mtd, offset, &ops);
if (err || ops.retlen != mtd->writesize) {
- msg("error: write_oob failed (%d)\n", err);
+ pr_err("error: write_oob failed (%d)\n", err);
if (!err)
err = -EIO;
}
@@ -177,7 +178,7 @@ static int read_page(int log)
struct mtd_ecc_stats oldstats;
if (log)
- msg("read_page\n");
+ pr_info("read_page\n");
/* Saving last mtd stats */
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
@@ -187,7 +188,7 @@ static int read_page(int log)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
- msg("error: read failed at %#llx\n", (long long)offset);
+ pr_err("error: read failed at %#llx\n", (long long)offset);
if (err >= 0)
err = -EIO;
}
@@ -201,11 +202,11 @@ static int verify_page(int log)
unsigned i, errs = 0;
if (log)
- msg("verify_page\n");
+ pr_info("verify_page\n");
for (i = 0; i < mtd->writesize; i++) {
if (rbuffer[i] != hash(i+seed)) {
- msg("Error: page offset %u, expected %02x, got %02x\n",
+ pr_err("Error: page offset %u, expected %02x, got %02x\n",
i, hash(i+seed), rbuffer[i]);
errs++;
}
@@ -230,13 +231,13 @@ static int insert_biterror(unsigned byte)
for (bit = 7; bit >= 0; bit--) {
if (CBIT(wbuffer[byte], bit)) {
BCLR(wbuffer[byte], bit);
- msg("Inserted biterror @ %u/%u\n", byte, bit);
+ pr_info("Inserted biterror @ %u/%u\n", byte, bit);
return 0;
}
}
byte++;
}
- msg("biterror: Failed to find a '1' bit\n");
+ pr_err("biterror: Failed to find a '1' bit\n");
return -EIO;
}
@@ -248,7 +249,7 @@ static int incremental_errors_test(void)
unsigned i;
unsigned errs_per_subpage = 0;
- msg("incremental biterrors test\n");
+ pr_info("incremental biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -265,9 +266,9 @@ static int incremental_errors_test(void)
err = read_page(1);
if (err > 0)
- msg("Read reported %d corrected bit errors\n", err);
+ pr_info("Read reported %d corrected bit errors\n", err);
if (err < 0) {
- msg("After %d biterrors per subpage, read reported error %d\n",
+ pr_err("After %d biterrors per subpage, read reported error %d\n",
errs_per_subpage, err);
err = 0;
goto exit;
@@ -275,11 +276,11 @@ static int incremental_errors_test(void)
err = verify_page(1);
if (err) {
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_err("ECC failure, read data is incorrect despite read success\n");
goto exit;
}
- msg("Successfully corrected %d bit errors per subpage\n",
+ pr_info("Successfully corrected %d bit errors per subpage\n",
errs_per_subpage);
for (i = 0; i < subcount; i++) {
@@ -311,7 +312,7 @@ static int overwrite_test(void)
memset(bitstats, 0, sizeof(bitstats));
- msg("overwrite biterrors test\n");
+ pr_info("overwrite biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
@@ -329,18 +330,18 @@ static int overwrite_test(void)
err = read_page(0);
if (err >= 0) {
if (err >= MAXBITS) {
- msg("Implausible number of bit errors corrected\n");
+ pr_info("Implausible number of bit errors corrected\n");
err = -EIO;
break;
}
bitstats[err]++;
if (err > max_corrected) {
max_corrected = err;
- msg("Read reported %d corrected bit errors\n",
+ pr_info("Read reported %d corrected bit errors\n",
err);
}
} else { /* err < 0 */
- msg("Read reported error %d\n", err);
+ pr_info("Read reported error %d\n", err);
err = 0;
break;
}
@@ -348,7 +349,7 @@ static int overwrite_test(void)
err = verify_page(0);
if (err) {
bitstats[max_corrected] = opno;
- msg("ECC failure, read data is incorrect despite read success\n");
+ pr_info("ECC failure, read data is incorrect despite read success\n");
break;
}
@@ -357,9 +358,9 @@ static int overwrite_test(void)
/* At this point bitstats[0] contains the number of ops with no bit
* errors, bitstats[1] the number of ops with 1 bit error, etc. */
- msg("Bit error histogram (%d operations total):\n", opno);
+ pr_info("Bit error histogram (%d operations total):\n", opno);
for (i = 0; i < max_corrected; i++)
- msg("Page reads with %3d corrected bit errors: %d\n",
+ pr_info("Page reads with %3d corrected bit errors: %d\n",
i, bitstats[i]);
exit:
@@ -370,36 +371,36 @@ static int __init mtd_nandbiterrs_init(void)
{
int err = 0;
- msg("\n");
- msg("==================================================\n");
- msg("MTD device: %d\n", dev);
+ printk("\n");
+ printk(KERN_INFO "==================================================\n");
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- msg("error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
goto exit_mtddev;
}
if (mtd->type != MTD_NANDFLASH) {
- msg("this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
}
- msg("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
+ pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, mtd->oobsize);
subsize = mtd->writesize >> mtd->subpage_sft;
subcount = mtd->writesize / subsize;
- msg("Device uses %d subpages of %d bytes\n", subcount, subsize);
+ pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
offset = page_offset * mtd->writesize;
eraseblock = mtd_div_by_eb(offset, mtd);
- msg("Using page=%u, offset=%llu, eraseblock=%u\n",
+ pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
page_offset, offset, eraseblock);
wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
@@ -432,8 +433,8 @@ static int __init mtd_nandbiterrs_init(void)
goto exit_error;
err = -EIO;
- msg("finished successfully.\n");
- msg("==================================================\n");
+ pr_info("finished successfully.\n");
+ printk(KERN_INFO "==================================================\n");
exit_error:
kfree(rbuffer);
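
The mtd test conversions above and in the following files all use the same recipe: define pr_fmt() before any include and replace the per-file PRINT_PREF/msg() macros with pr_info()/pr_err()/pr_crit(). A rough sketch of why the output keeps its module prefix (the pr_info() definition is paraphrased from include/linux/printk.h):

/* Simplified illustration - not the literal kernel definitions. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

/* So in mtd_nandbiterrs.c: */
pr_info("MTD device: %d\n", dev);
/* expands to roughly: printk(KERN_INFO "mtd_nandbiterrs" ": " "MTD device: %d\n", dev); */
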
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index b437fa425077..1eee264509a8 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -264,13 +266,13 @@ static int nand_ecc_test_run(const size_t size)
correct_data, size);
if (err) {
- pr_err("mtd_nandecctest: not ok - %s-%zd\n",
+ pr_err("not ok - %s-%zd\n",
nand_ecc_test[i].name, size);
dump_data_ecc(error_data, error_ecc,
correct_data, correct_ecc, size);
break;
}
- pr_info("mtd_nandecctest: ok - %s-%zd\n",
+ pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
}
error:
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index ed9b62827f1b..e827fa8cd844 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_oobtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -80,13 +80,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
- ebnum);
+ pr_err("some erase error occurred at EB %d\n", ebnum);
return -EIO;
}
@@ -98,7 +97,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -107,7 +106,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -141,9 +140,9 @@ static int write_eraseblock(int ebnum)
ops.oobbuf = writebuf;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: writeoob failed at %#llx\n",
+ pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
+ pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
@@ -160,7 +159,7 @@ static int write_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -168,10 +167,10 @@ static int write_whole_device(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
return 0;
}
@@ -194,17 +193,17 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -221,29 +220,28 @@ static int verify_eraseblock(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
- printk(PRINT_PREF "error: readoob failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: readoob failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf + use_offset, writebuf, use_len)) {
- printk(PRINT_PREF "error: verify failed at "
- "%#llx\n", (long long)addr);
+ pr_err("error: verify failed at %#llx\n",
+ (long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many "
- "errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
for (k = 0; k < use_offset; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -251,12 +249,12 @@ static int verify_eraseblock(int ebnum)
for (k = use_offset + use_len;
k < mtd->ecclayout->oobavail; ++k)
if (readbuf[k] != 0xff) {
- printk(PRINT_PREF "error: verify 0xff "
+ pr_err("error: verify 0xff "
"failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too "
+ pr_err("error: too "
"many errors\n");
return -1;
}
@@ -286,17 +284,17 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
- printk(PRINT_PREF "error: readoob failed at %#llx\n",
+ pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
if (memcmp(readbuf, writebuf, len)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
return -1;
}
}
@@ -309,7 +307,7 @@ static int verify_all_eraseblocks(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -317,10 +315,10 @@ static int verify_all_eraseblocks(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -331,7 +329,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -341,18 +339,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kmalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -368,22 +366,22 @@ static int __init mtd_oobtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -392,7 +390,7 @@ static int __init mtd_oobtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -401,12 +399,12 @@ static int __init mtd_oobtest_init(void)
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -420,7 +418,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
- printk(PRINT_PREF "test 1 of 5\n");
+ pr_info("test 1 of 5\n");
err = erase_whole_device();
if (err)
@@ -440,7 +438,7 @@ static int __init mtd_oobtest_init(void)
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
- printk(PRINT_PREF "test 2 of 5\n");
+ pr_info("test 2 of 5\n");
err = erase_whole_device();
if (err)
@@ -453,7 +451,7 @@ static int __init mtd_oobtest_init(void)
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -461,16 +459,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
- printk(PRINT_PREF "test 3 of 5\n");
+ pr_info("test 3 of 5\n");
err = erase_whole_device();
if (err)
@@ -503,7 +501,7 @@ static int __init mtd_oobtest_init(void)
vary_offset = 0;
/* Fourth test: try to write off end of device */
- printk(PRINT_PREF "test 4 of 5\n");
+ pr_info("test 4 of 5\n");
err = erase_whole_device();
if (err)
@@ -522,14 +520,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to start write past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start write past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can write past end of OOB\n");
+ pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
@@ -542,19 +540,19 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = mtd->ecclayout->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to start read past end of OOB\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to start read past end of OOB\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: can read past end of OOB\n");
+ pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
- printk(PRINT_PREF "skipping end of device tests because last "
+ pr_info("skipping end of device tests because last "
"block is bad\n");
else {
/* Attempt to write off end of device */
@@ -566,14 +564,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -586,14 +584,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
@@ -610,14 +608,14 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- printk(PRINT_PREF "attempting to write past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to write past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: wrote past end of device\n");
+ pr_err("error: wrote past end of device\n");
errcnt += 1;
}
@@ -630,20 +628,20 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- printk(PRINT_PREF "attempting to read past end of device\n");
- printk(PRINT_PREF "an error is expected...\n");
+ pr_info("attempting to read past end of device\n");
+ pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
- printk(PRINT_PREF "error occurred as expected\n");
+ pr_info("error occurred as expected\n");
err = 0;
} else {
- printk(PRINT_PREF "error: read past end of device\n");
+ pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
- printk(PRINT_PREF "test 5 of 5\n");
+ pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
err = erase_whole_device();
@@ -652,7 +650,7 @@ static int __init mtd_oobtest_init(void)
/* Write all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "writing OOBs of whole device\n");
+ pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
@@ -674,17 +672,16 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock "
- "%u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
addr += mtd->writesize;
}
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(11);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
@@ -702,28 +699,28 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
- printk(PRINT_PREF "error: too many errors\n");
+ pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 252ddb092fb2..f93a76f88113 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -28,8 +30,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_pagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -79,12 +79,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -102,7 +102,7 @@ static int write_eraseblock(int ebnum)
cond_resched();
err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
return err;
@@ -131,7 +131,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -139,7 +139,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -148,12 +148,12 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
break;
}
if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -166,7 +166,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err;
}
@@ -174,7 +174,7 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)(addrn - bufsize));
return err;
}
@@ -183,14 +183,14 @@ static int verify_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err;
}
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
set_random_data(boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -206,10 +206,10 @@ static int crosstest(void)
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
- printk(PRINT_PREF "crosstest\n");
+ pr_info("crosstest\n");
pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
if (!pp1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
pp2 = pp1 + pgsize;
@@ -231,7 +231,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -243,7 +243,7 @@ static int crosstest(void)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -251,12 +251,12 @@ static int crosstest(void)
/* Read first page to pp2 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -264,12 +264,12 @@ static int crosstest(void)
/* Read last page to pp3 */
addr = addrn - pgsize;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
@@ -277,25 +277,25 @@ static int crosstest(void)
/* Read first page again to pp4 */
addr = addr0;
- printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+ pr_info("reading page at %#llx\n", (long long)addr);
err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
kfree(pp1);
return err;
}
/* pp2 and pp4 should be the same */
- printk(PRINT_PREF "verifying pages read at %#llx match\n",
+ pr_info("verifying pages read at %#llx match\n",
(long long)addr0);
if (memcmp(pp2, pp4, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
} else if (!err)
- printk(PRINT_PREF "crosstest ok\n");
+ pr_info("crosstest ok\n");
kfree(pp1);
return err;
}
@@ -307,7 +307,7 @@ static int erasecrosstest(void)
loff_t addr0;
char *readbuf = twopages;
- printk(PRINT_PREF "erasecrosstest\n");
+ pr_info("erasecrosstest\n");
ebnum = 0;
addr0 = 0;
@@ -320,79 +320,79 @@ static int erasecrosstest(void)
while (ebnum2 && bbt[ebnum2])
ebnum2 -= 1;
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_info("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum2);
+ pr_info("erasing block %d\n", ebnum2);
err = erase_eraseblock(ebnum2);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+ pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
- printk(PRINT_PREF "verify failed!\n");
+ pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
if (!err)
- printk(PRINT_PREF "erasecrosstest ok\n");
+ pr_info("erasecrosstest ok\n");
return err;
}
@@ -402,7 +402,7 @@ static int erasetest(void)
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
- printk(PRINT_PREF "erasetest\n");
+ pr_info("erasetest\n");
ebnum = 0;
addr0 = 0;
@@ -411,40 +411,40 @@ static int erasetest(void)
ebnum += 1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+ pr_info("writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "erasing block %d\n", ebnum);
+ pr_info("erasing block %d\n", ebnum);
err = erase_eraseblock(ebnum);
if (err)
return err;
- printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+ pr_info("reading 1st page of block %d\n", ebnum);
err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr0);
return err ? err : -1;
}
- printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
+ pr_info("verifying 1st page of block %d is all 0xff\n",
ebnum);
for (i = 0; i < pgsize; ++i)
if (twopages[i] != 0xff) {
- printk(PRINT_PREF "verifying all 0xff failed at %d\n",
+ pr_err("verifying all 0xff failed at %d\n",
i);
errcnt += 1;
ok = 0;
@@ -452,7 +452,7 @@ static int erasetest(void)
}
if (ok && !err)
- printk(PRINT_PREF "erasetest ok\n");
+ pr_info("erasetest ok\n");
return err;
}
@@ -464,7 +464,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -474,18 +474,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -499,22 +499,22 @@ static int __init mtd_pagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -524,7 +524,7 @@ static int __init mtd_pagetest_init(void)
pgcnt = mtd->erasesize / mtd->writesize;
pgsize = mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -534,17 +534,17 @@ static int __init mtd_pagetest_init(void)
bufsize = pgsize * 2;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
twopages = kmalloc(bufsize, GFP_KERNEL);
if (!twopages) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
boundary = kmalloc(bufsize, GFP_KERNEL);
if (!boundary) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -553,7 +553,7 @@ static int __init mtd_pagetest_init(void)
goto out;
/* Erase all eraseblocks */
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -562,11 +562,11 @@ static int __init mtd_pagetest_init(void)
goto out;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
/* Write all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -574,14 +574,14 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -589,10 +589,10 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = crosstest();
if (err)
@@ -606,7 +606,7 @@ static int __init mtd_pagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -615,7 +615,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 121aba189cec..266de04b6d29 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_readtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,12 +51,12 @@ static int read_eraseblock_by_page(int ebnum)
void *oobbuf = iobuf1;
for (i = 0; i < pgcnt; i++) {
- memset(buf, 0 , pgcnt);
+ memset(buf, 0 , pgsize);
ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
if (!err)
err = ret;
@@ -77,7 +77,7 @@ static int read_eraseblock_by_page(int ebnum)
ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
- printk(PRINT_PREF "error: read oob failed at "
+ pr_err("error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
err = ret;
@@ -99,7 +99,7 @@ static void dump_eraseblock(int ebnum)
char line[128];
int pg, oob;
- printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
+ pr_info("dumping eraseblock %d\n", ebnum);
n = mtd->erasesize;
for (i = 0; i < n;) {
char *p = line;
@@ -112,7 +112,7 @@ static void dump_eraseblock(int ebnum)
}
if (!mtd->oobsize)
return;
- printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
+ pr_info("dumping oob from eraseblock %d\n", ebnum);
n = mtd->oobsize;
for (pg = 0, i = 0; pg < pgcnt; pg++)
for (oob = 0; oob < n;) {
@@ -134,7 +134,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -144,21 +144,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -171,21 +171,21 @@ static int __init mtd_readtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: Cannot get MTD device\n");
+ pr_err("error: Cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -196,7 +196,7 @@ static int __init mtd_readtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -205,12 +205,12 @@ static int __init mtd_readtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf1) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -219,7 +219,7 @@ static int __init mtd_readtest_init(void)
goto out;
/* Read all eraseblocks 1 page at a time */
- printk(PRINT_PREF "testing page read\n");
+ pr_info("testing page read\n");
for (i = 0; i < ebcnt; ++i) {
int ret;
@@ -235,9 +235,9 @@ static int __init mtd_readtest_init(void)
}
if (err)
- printk(PRINT_PREF "finished with errors\n");
+ pr_info("finished with errors\n");
else
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
@@ -246,7 +246,7 @@ out:
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
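
Note: the conversion above works because defining pr_fmt() before the first include makes every pr_info()/pr_err() in the file pick up the module-name prefix at compile time, which is what lets the hand-rolled PRINT_PREF macro disappear. A minimal userspace sketch of that token-pasting idea (these macro stand-ins are illustrative, not the real printk.h definitions):

#include <stdio.h>

/* Illustrative stand-ins for the kernel macros; names are assumptions. */
#define KBUILD_MODNAME "mtd_readtest"

/* Per-file prefix, analogous to: #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Simplified pr_info/pr_err that expand the format through pr_fmt(). */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "mtd_readtest: scanning for bad eraseblocks" */
	pr_info("scanning for bad eraseblocks\n");
	/* Prints "mtd_readtest: error: cannot allocate memory" on stderr */
	pr_err("error: cannot allocate memory\n");
	return 0;
}

Every message therefore keeps the same "mtd_readtest: " prefix the removed PRINT_PREF provided, while the log level now comes from the pr_*() helper instead of a KERN_* string baked into the macro.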
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 42b0f7456fc4..596cbea8df4c 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -28,8 +30,6 @@
#include <linux/sched.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_speedtest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -70,12 +70,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -96,13 +96,13 @@ static int multiblock_erase(int ebnum, int blocks)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
+ pr_err("error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d,"
+ pr_err("some erase error occurred at EB %d,"
"blocks %d\n", ebnum, blocks);
return -EIO;
}
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
- printk(PRINT_PREF "error: write failed at %#llx\n", addr);
+ pr_err("error: write failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -152,7 +152,7 @@ static int write_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -175,7 +175,7 @@ static int write_eraseblock_by_2pages(int ebnum)
for (i = 0; i < n; i++) {
err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -187,7 +187,7 @@ static int write_eraseblock_by_2pages(int ebnum)
if (pgcnt % 2) {
err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -208,7 +208,7 @@ static int read_eraseblock(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != mtd->erasesize) {
- printk(PRINT_PREF "error: read failed at %#llx\n", addr);
+ pr_err("error: read failed at %#llx\n", addr);
if (!err)
err = -EINVAL;
}
@@ -229,7 +229,7 @@ static int read_eraseblock_by_page(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -255,7 +255,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != sz) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -270,7 +270,7 @@ static int read_eraseblock_by_2pages(int ebnum)
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
addr);
if (!err)
err = -EINVAL;
@@ -287,7 +287,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -321,21 +321,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
goto out;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
out:
goodebcnt = ebcnt - bad;
return 0;
@@ -351,25 +351,25 @@ static int __init mtd_speedtest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
if (count)
- printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count);
+ pr_info("MTD device: %d count: %d\n", dev, count);
else
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -380,7 +380,7 @@ static int __init mtd_speedtest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -392,7 +392,7 @@ static int __init mtd_speedtest_init(void)
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
@@ -407,7 +407,7 @@ static int __init mtd_speedtest_init(void)
goto out;
/* Write all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock write speed\n");
+ pr_info("testing eraseblock write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -419,10 +419,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 eraseblock at a time */
- printk(PRINT_PREF "testing eraseblock read speed\n");
+ pr_info("testing eraseblock read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -434,14 +434,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
+ pr_info("eraseblock read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page write speed\n");
+ pr_info("testing page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -453,10 +453,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
+ pr_info("page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 page at a time */
- printk(PRINT_PREF "testing page read speed\n");
+ pr_info("testing page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -468,14 +468,14 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
+ pr_info("page read speed is %ld KiB/s\n", speed);
err = erase_whole_device();
if (err)
goto out;
/* Write all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page write speed\n");
+ pr_info("testing 2 page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -487,10 +487,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
+ pr_info("2 page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 2 pages at a time */
- printk(PRINT_PREF "testing 2 page read speed\n");
+ pr_info("testing 2 page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -502,10 +502,10 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
+ pr_info("2 page read speed is %ld KiB/s\n", speed);
/* Erase all eraseblocks */
- printk(PRINT_PREF "Testing erase speed\n");
+ pr_info("Testing erase speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -517,12 +517,12 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+ pr_info("erase speed is %ld KiB/s\n", speed);
/* Multi-block erase all eraseblocks */
for (k = 1; k < 7; k++) {
blocks = 1 << k;
- printk(PRINT_PREF "Testing %dx multi-block erase speed\n",
+ pr_info("Testing %dx multi-block erase speed\n",
blocks);
start_timing();
for (i = 0; i < ebcnt; ) {
@@ -541,16 +541,16 @@ static int __init mtd_speedtest_init(void)
}
stop_timing();
speed = calc_speed();
- printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n",
+ pr_info("%dx multi-block erase speed is %ld KiB/s\n",
blocks, speed);
}
- printk(PRINT_PREF "finished\n");
+ pr_info("finished\n");
out:
kfree(iobuf);
kfree(bbt);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
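
Note: start_timing(), stop_timing() and calc_speed() are outside these hunks; a rough userspace sketch of the throughput figure they imply (KiB moved divided by elapsed milliseconds, reported as KiB/s), where the block count, block size and the use of clock_gettime() are all assumptions:

#include <stdio.h>
#include <time.h>

/* Hypothetical figures standing in for goodebcnt and mtd->erasesize. */
#define GOODEBCNT 1024
#define ERASESIZE (128 * 1024)

static struct timespec start, finish;

static void start_timing(void) { clock_gettime(CLOCK_MONOTONIC, &start); }
static void stop_timing(void)  { clock_gettime(CLOCK_MONOTONIC, &finish); }

/* KiB transferred over elapsed time, as a long like the test prints. */
static long calc_speed(void)
{
	long ms = (finish.tv_sec - start.tv_sec) * 1000 +
		  (finish.tv_nsec - start.tv_nsec) / 1000000;
	if (ms == 0)
		ms = 1;
	return (long)GOODEBCNT * (ERASESIZE / 1024) * 1000 / ms;
}

int main(void)
{
	start_timing();
	/* ... the timed read/write/erase loop would run here ... */
	stop_timing();
	printf("eraseblock write speed is %ld KiB/s\n", calc_speed());
	return 0;
}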
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index cb268cebf01a..3729f679ae5d 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -19,6 +19,8 @@
* Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -29,8 +31,6 @@
#include <linux/vmalloc.h>
#include <linux/random.h>
-#define PRINT_PREF KERN_INFO "mtd_stresstest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -94,12 +94,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (unlikely(ei.state == MTD_ERASE_FAILED)) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -114,7 +114,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -137,7 +137,7 @@ static int do_read(void)
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
- printk(PRINT_PREF "error: read failed at 0x%llx\n",
+ pr_err("error: read failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -174,7 +174,7 @@ static int do_write(void)
addr = eb * mtd->erasesize + offs;
err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
- printk(PRINT_PREF "error: write failed at 0x%llx\n",
+ pr_err("error: write failed at 0x%llx\n",
(long long)addr);
if (!err)
err = -EINVAL;
@@ -203,21 +203,21 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
if (!mtd_can_have_bb(mtd))
return 0;
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -231,22 +231,22 @@ static int __init mtd_stresstest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
@@ -257,14 +257,14 @@ static int __init mtd_stresstest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (ebcnt < 2) {
- printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ pr_err("error: need at least 2 eraseblocks\n");
err = -ENOSPC;
goto out_put_mtd;
}
@@ -277,7 +277,7 @@ static int __init mtd_stresstest_init(void)
writebuf = vmalloc(bufsize);
offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out;
}
for (i = 0; i < ebcnt; i++)
@@ -290,16 +290,16 @@ static int __init mtd_stresstest_init(void)
goto out;
/* Do operations */
- printk(PRINT_PREF "doing operations\n");
+ pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
- printk(PRINT_PREF "%d operations done\n", op);
+ pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
cond_resched();
}
- printk(PRINT_PREF "finished, %d operations done\n", op);
+ pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
@@ -309,7 +309,7 @@ out:
out_put_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 9667bf535282..c880c2229c59 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -27,8 +29,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
-
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -82,12 +82,12 @@ static int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -100,7 +100,7 @@ static int erase_whole_device(void)
int err;
unsigned int i;
- printk(PRINT_PREF "erasing whole device\n");
+ pr_info("erasing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -109,7 +109,7 @@ static int erase_whole_device(void)
return err;
cond_resched();
}
- printk(PRINT_PREF "erased %u eraseblocks\n", i);
+ pr_info("erased %u eraseblocks\n", i);
return 0;
}
@@ -122,11 +122,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -136,11 +136,11 @@ static int write_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n", subpgsize);
- printk(PRINT_PREF " written: %#zx\n", written);
+ pr_err(" write size: %#x\n", subpgsize);
+ pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
@@ -160,12 +160,12 @@ static int write_eraseblock2(int ebnum)
set_random_data(writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
- printk(PRINT_PREF "error: write failed at %#llx\n",
+ pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
- printk(PRINT_PREF " write size: %#x\n",
+ pr_err(" write size: %#x\n",
subpgsize * k);
- printk(PRINT_PREF " written: %#08zx\n",
+ pr_err(" written: %#08zx\n",
written);
}
return err ? err : -1;
@@ -198,23 +198,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -225,23 +225,23 @@ static int verify_eraseblock(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at %#llx\n",
+ pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_info("error: verify failed at %#llx\n",
(long long)addr);
- printk(PRINT_PREF "------------- written----------------\n");
+ pr_info("------------- written----------------\n");
print_subpage(writebuf);
- printk(PRINT_PREF "------------- read ------------------\n");
+ pr_info("------------- read ------------------\n");
print_subpage(readbuf);
- printk(PRINT_PREF "-------------------------------------\n");
+ pr_info("-------------------------------------\n");
errcnt += 1;
}
@@ -262,17 +262,17 @@ static int verify_eraseblock2(int ebnum)
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
- printk(PRINT_PREF "error: verify failed at %#llx\n",
+ pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
@@ -295,17 +295,17 @@ static int verify_eraseblock_ff(int ebnum)
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
- printk(PRINT_PREF "ECC correction at %#llx\n",
+ pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
- printk(PRINT_PREF "error: read failed at "
+ pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
- printk(PRINT_PREF "error: verify 0xff failed at "
+ pr_err("error: verify 0xff failed at "
"%#llx\n", (long long)addr);
errcnt += 1;
}
@@ -320,7 +320,7 @@ static int verify_all_eraseblocks_ff(void)
int err;
unsigned int i;
- printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
+ pr_info("verifying all eraseblocks for 0xff\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -328,10 +328,10 @@ static int verify_all_eraseblocks_ff(void)
if (err)
return err;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
return 0;
}
@@ -342,7 +342,7 @@ static int is_block_bad(int ebnum)
ret = mtd_block_isbad(mtd, addr);
if (ret)
- printk(PRINT_PREF "block %d is bad\n", ebnum);
+ pr_info("block %d is bad\n", ebnum);
return ret;
}
@@ -352,18 +352,18 @@ static int scan_for_bad_eraseblocks(void)
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
return -ENOMEM;
}
- printk(PRINT_PREF "scanning for bad eraseblocks\n");
+ pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
- printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+ pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
@@ -377,22 +377,22 @@ static int __init mtd_subpagetest_init(void)
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
+ pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->type != MTD_NANDFLASH) {
- printk(PRINT_PREF "this test requires NAND flash\n");
+ pr_info("this test requires NAND flash\n");
goto out;
}
@@ -402,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
- printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+ pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
@@ -412,12 +412,12 @@ static int __init mtd_subpagetest_init(void)
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
if (!writebuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
readbuf = kmalloc(bufsize, GFP_KERNEL);
if (!readbuf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_info("error: cannot allocate memory\n");
goto out;
}
@@ -429,7 +429,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
simple_srand(1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
@@ -438,13 +438,13 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
simple_srand(1);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -452,10 +452,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -467,7 +467,7 @@ static int __init mtd_subpagetest_init(void)
/* Write all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "writing whole device\n");
+ pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -475,14 +475,14 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
+ pr_info("written up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+ pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
simple_srand(3);
- printk(PRINT_PREF "verifying all eraseblocks\n");
+ pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
@@ -490,10 +490,10 @@ static int __init mtd_subpagetest_init(void)
if (unlikely(err))
goto out;
if (i % 256 == 0)
- printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+ pr_info("verified up to eraseblock %u\n", i);
cond_resched();
}
- printk(PRINT_PREF "verified %u eraseblocks\n", i);
+ pr_info("verified %u eraseblocks\n", i);
err = erase_whole_device();
if (err)
@@ -503,7 +503,7 @@ static int __init mtd_subpagetest_init(void)
if (err)
goto out;
- printk(PRINT_PREF "finished with %d errors\n", errcnt);
+ pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
@@ -511,7 +511,7 @@ out:
kfree(writebuf);
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred\n", err);
+ pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index b65861bc7b8e..c4cde1e9eddb 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -23,6 +23,8 @@
* damage caused by this program.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -31,7 +33,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
-#define PRINT_PREF KERN_INFO "mtd_torturetest: "
#define RETRIES 3
static int eb = 8;
@@ -107,12 +108,12 @@ static inline int erase_eraseblock(int ebnum)
err = mtd_erase(mtd, &ei);
if (err) {
- printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+ pr_err("error %d while erasing EB %d\n", err, ebnum);
return err;
}
if (ei.state == MTD_ERASE_FAILED) {
- printk(PRINT_PREF "some erase error occurred at EB %d\n",
+ pr_err("some erase error occurred at EB %d\n",
ebnum);
return -EIO;
}
@@ -139,40 +140,40 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
retry:
err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
- printk(PRINT_PREF "single bit flip occurred at EB %d "
+ pr_err("single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
- printk(PRINT_PREF "error %d while reading EB %d, "
+ pr_err("error %d while reading EB %d, "
"read %zd\n", err, ebnum, read);
return err;
}
if (read != len) {
- printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
+ pr_err("failed to read %zd bytes from EB %d, "
"read only %zd, but no error reported\n",
len, ebnum, read);
return -EIO;
}
if (memcmp(buf, check_buf, len)) {
- printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
+ pr_err("read wrong data from EB %d\n", ebnum);
report_corrupt(check_buf, buf);
if (retries++ < RETRIES) {
/* Try read again */
yield();
- printk(PRINT_PREF "re-try reading data from EB %d\n",
+ pr_info("re-try reading data from EB %d\n",
ebnum);
goto retry;
} else {
- printk(PRINT_PREF "retried %d times, still errors, "
+ pr_info("retried %d times, still errors, "
"give-up\n", RETRIES);
return -EINVAL;
}
}
if (retries != 0)
- printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
+ pr_info("only attempt number %d was OK (!!!)\n",
retries);
return 0;
@@ -191,12 +192,12 @@ static inline int write_pattern(int ebnum, void *buf)
}
err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
- printk(PRINT_PREF "error %d while writing EB %d, written %zd"
+ pr_err("error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
return err;
}
if (written != len) {
- printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
+ pr_info("written only %zd bytes of %zd, but no error"
" reported\n", written, len);
return -EIO;
}
@@ -211,64 +212,64 @@ static int __init tort_init(void)
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
- printk(PRINT_PREF "Warning: this program is trying to wear out your "
+ pr_info("Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
if (dev < 0) {
- printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n");
- printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+ pr_info("Please specify a valid mtd-device via module parameter\n");
+ pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
- printk(PRINT_PREF "MTD device: %d\n", dev);
- printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
+ pr_info("MTD device: %d\n", dev);
+ pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
if (pgcnt)
- printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
+ pr_info("torturing just %d pages per eraseblock\n",
pgcnt);
- printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
+ pr_info("write verify %s\n", check ? "enabled" : "disabled");
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- printk(PRINT_PREF "error: cannot get MTD device\n");
+ pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
- printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+ pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
- printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
+ pr_err("error: invalid pgcnt value %d\n", pgcnt);
goto out_mtd;
}
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_5A5) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_mtd;
}
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_A5A) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_5A5;
}
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_FF) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_A5A;
}
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!check_buf) {
- printk(PRINT_PREF "error: cannot allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto out_patt_FF;
}
@@ -295,13 +296,13 @@ static int __init tort_init(void)
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
- printk(PRINT_PREF "block_isbad() returned %d "
+ pr_info("block_isbad() returned %d "
"for EB %d\n", err, i);
goto out;
}
if (err) {
- printk("EB %d is bad. Skip it.\n", i);
+ pr_err("EB %d is bad. Skip it.\n", i);
bad_ebs[i - eb] = 1;
}
}
@@ -329,7 +330,7 @@ static int __init tort_init(void)
continue;
err = check_eraseblock(i, patt_FF);
if (err) {
- printk(PRINT_PREF "verify failed"
+ pr_info("verify failed"
" for 0xFF... pattern\n");
goto out;
}
@@ -362,7 +363,7 @@ static int __init tort_init(void)
patt = patt_A5A;
err = check_eraseblock(i, patt);
if (err) {
- printk(PRINT_PREF "verify failed for %s"
+ pr_info("verify failed for %s"
" pattern\n",
((eb + erase_cycles) & 1) ?
"0x55AA55..." : "0xAA55AA...");
@@ -380,7 +381,7 @@ static int __init tort_init(void)
stop_timing();
ms = (finish.tv_sec - start.tv_sec) * 1000 +
(finish.tv_usec - start.tv_usec) / 1000;
- printk(PRINT_PREF "%08u erase cycles done, took %lu "
+ pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
start_timing();
@@ -391,7 +392,7 @@ static int __init tort_init(void)
}
out:
- printk(PRINT_PREF "finished after %u erase cycles\n",
+ pr_info("finished after %u erase cycles\n",
erase_cycles);
kfree(check_buf);
out_patt_FF:
@@ -403,7 +404,7 @@ out_patt_5A5:
out_mtd:
put_mtd_device(mtd);
if (err)
- printk(PRINT_PREF "error %d occurred during torturing\n", err);
+ pr_info("error %d occurred during torturing\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
@@ -441,9 +442,9 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
&bits) >= 0)
pages++;
- printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
+ pr_info("verify fails on %d pages, %d bytes/%d bits\n",
pages, bytes, bits);
- printk(PRINT_PREF "The following is a list of all differences between"
+ pr_info("The following is a list of all differences between"
" what was read from flash and what was expected\n");
for (i = 0; i < check_len; i += pgsize) {
@@ -457,7 +458,7 @@ static void report_corrupt(unsigned char *read, unsigned char *written)
printk("-------------------------------------------------------"
"----------------------------------\n");
- printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
+ pr_info("Page %zd has %d bytes/%d bits failing verify,"
" starting at offset 0x%x\n",
(mtd->erasesize - check_len + i) / pgsize,
bytes, bits, first);
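
Note: check_eraseblock() in this file re-reads a mismatching eraseblock up to RETRIES times before giving up, which is why the retry/give-up messages above exist. A compact standalone sketch of that bounded-retry shape, with the actual mtd_read() and pattern compare stubbed out:

#include <stdio.h>

#define RETRIES 3

/* Stub standing in for mtd_read() plus the memcmp() against the pattern. */
static int read_back_matches(int ebnum)
{
	(void)ebnum;
	return 1; /* pretend the data verified fine */
}

static int check_eraseblock(int ebnum)
{
	int retries = 0;

retry:
	if (!read_back_matches(ebnum)) {
		if (retries++ < RETRIES) {
			printf("re-try reading data from EB %d\n", ebnum);
			goto retry;
		}
		printf("retried %d times, still errors, give-up\n", RETRIES);
		return -1;
	}
	if (retries != 0)
		printf("only attempt number %d was OK (!!!)\n", retries);
	return 0;
}

int main(void)
{
	return check_eraseblock(8) ? 1 : 0;
}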
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index fec406b4553d..c071d410488f 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -322,7 +322,6 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
- void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
@@ -393,18 +392,14 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
- goto out_free_buf;
+ goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
@@ -415,8 +410,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err;
}
+ mutex_unlock(&ubi->buf_mutex);
- vfree(buf);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
@@ -426,8 +421,8 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
-out_free_buf:
- vfree(buf);
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
return err;
@@ -1453,7 +1448,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
- if (ubi->fm && ubi->dbg->chk_gen) {
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
@@ -1503,7 +1498,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
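
Note: the ubi_compare_lebs() change above drops a per-call vmalloc() in favour of the device-wide ubi->peb_buf serialized by buf_mutex, trading an allocation on a hot attach path for a short critical section. A minimal sketch of that pattern, one preallocated buffer sized for the largest unit and guarded by a lock, using pthreads in place of the kernel mutex; every name here is illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PEB_SIZE (128 * 1024)	/* hypothetical physical eraseblock size */

struct fake_ubi {
	pthread_mutex_t buf_mutex;	/* serializes users of peb_buf */
	uint8_t *peb_buf;		/* one buffer shared by all slow paths */
};

/* Stand-in for ubi_io_read_data(): just fills the shared buffer. */
static int read_data(struct fake_ubi *ubi, int pnum, int len)
{
	(void)pnum;
	memset(ubi->peb_buf, 0xff, len);
	return 0;
}

static int compare_leb_data(struct fake_ubi *ubi, int pnum, int len)
{
	int err;

	pthread_mutex_lock(&ubi->buf_mutex);
	err = read_data(ubi, pnum, len);
	/* ... the CRC check over ubi->peb_buf would go here ... */
	pthread_mutex_unlock(&ubi->buf_mutex);
	return err;
}

int main(void)
{
	struct fake_ubi ubi;

	pthread_mutex_init(&ubi.buf_mutex, NULL);
	ubi.peb_buf = malloc(PEB_SIZE);
	if (!ubi.peb_buf)
		return 1;
	compare_leb_data(&ubi, 42, 4096);
	free(ubi.peb_buf);
	return 0;
}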
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 344b4cb49d4e..a56133585e92 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -825,8 +825,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -986,14 +985,10 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_debugging_init_dev(ubi);
- if (err)
- goto out_free;
-
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
- goto out_debugging;
+ goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
@@ -1060,8 +1055,6 @@ out_detach:
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
-out_debugging:
- ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
@@ -1139,7 +1132,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 26908a59506b..63cb1d7236ce 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -217,32 +217,6 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
pr_err("\t1st 16 characters of name: %s\n", nm);
}
-/**
- * ubi_debugging_init_dev - initialize debugging for an UBI device.
- * @ubi: UBI device description object
- *
- * This function initializes debugging-related data for UBI device @ubi.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-int ubi_debugging_init_dev(struct ubi_device *ubi)
-{
- ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
- if (!ubi->dbg)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ubi_debugging_exit_dev - free debugging data for an UBI device.
- * @ubi: UBI device description object
- */
-void ubi_debugging_exit_dev(struct ubi_device *ubi)
-{
- kfree(ubi->dbg);
-}
-
/*
* Root directory for UBI stuff in debugfs. Contains sub-directories which
* contain the stuff specific to particular UBI devices.
@@ -295,7 +269,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
@@ -341,7 +315,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
@@ -398,7 +372,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
unsigned long ubi_num = ubi->ubi_num;
const char *fname;
struct dentry *dent;
- struct ubi_debug_info *d = ubi->dbg;
+ struct ubi_debug_info *d = &ubi->dbg;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
@@ -471,5 +445,5 @@ out:
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3dbc877d9663..33f8f3b2c9b2 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -60,51 +60,11 @@ void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len);
-int ubi_debugging_init_dev(struct ubi_device *ubi);
-void ubi_debugging_exit_dev(struct ubi_device *ubi);
int ubi_debugfs_init(void);
void ubi_debugfs_exit(void);
int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
-/*
- * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
- * + 2 for the number plus 1 for the trailing zero byte.
- */
-#define UBI_DFS_DIR_NAME "ubi%d"
-#define UBI_DFS_DIR_LEN (3 + 2 + 1)
-
-/**
- * struct ubi_debug_info - debugging information for an UBI device.
- *
- * @chk_gen: if UBI general extra checks are enabled
- * @chk_io: if UBI I/O extra checks are enabled
- * @disable_bgt: disable the background task for testing purposes
- * @emulate_bitflips: emulate bit-flips for testing purposes
- * @emulate_io_failures: emulate write/erase failures for testing purposes
- * @dfs_dir_name: name of debugfs directory containing files of this UBI device
- * @dfs_dir: direntry object of the UBI device debugfs directory
- * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
- * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
- * @dfs_disable_bgt: debugfs knob to disable the background task
- * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
- * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
- */
-struct ubi_debug_info {
- unsigned int chk_gen:1;
- unsigned int chk_io:1;
- unsigned int disable_bgt:1;
- unsigned int emulate_bitflips:1;
- unsigned int emulate_io_failures:1;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
- struct dentry *dfs_dir;
- struct dentry *dfs_chk_gen;
- struct dentry *dfs_chk_io;
- struct dentry *dfs_disable_bgt;
- struct dentry *dfs_emulate_bitflips;
- struct dentry *dfs_emulate_io_failures;
-};
-
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
* @ubi: UBI device description object
@@ -114,7 +74,7 @@ struct ubi_debug_info {
*/
static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi->dbg->disable_bgt;
+ return ubi->dbg.disable_bgt;
}
/**
@@ -125,7 +85,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_bitflips)
+ if (ubi->dbg.emulate_bitflips)
return !(random32() % 200);
return 0;
}
@@ -139,7 +99,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 500);
return 0;
}
@@ -153,9 +113,18 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
return 0;
}
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
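
Note: with struct ubi_debug_info embedded in struct ubi_device instead of being allocated separately, the chk_gen/chk_io flags are now reached through small inline accessors rather than ubi->dbg-> dereferences, so the separate init/exit allocation pair can go away. A stripped-down sketch of the resulting shape, independent of the real UBI headers:

#include <stdio.h>

struct debug_info {
	unsigned int chk_gen:1;
	unsigned int chk_io:1;
};

struct device_state {
	/* Embedded directly: no separate kzalloc()/kfree() lifecycle needed. */
	struct debug_info dbg;
};

/* Accessors hide the layout; switching back to a pointer later would
 * only touch these helpers, not every call site. */
static inline int dbg_chk_gen(const struct device_state *d) { return d->dbg.chk_gen; }
static inline int dbg_chk_io(const struct device_state *d)  { return d->dbg.chk_io; }

int main(void)
{
	struct device_state dev = { .dbg = { .chk_gen = 1 } };

	if (dbg_chk_gen(&dev))
		printf("general self-checks enabled\n");
	if (!dbg_chk_io(&dev))
		printf("I/O self-checks disabled\n");
	return 0;
}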
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 1a5f53c090d4..0648c6996d43 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -814,10 +814,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
- list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- list_del(&tmp_aeb->u.list);
- list_add_tail(&tmp_aeb->u.list, &ai->free);
- }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
/*
* If fastmap is leaking PEBs (must not happen), raise a
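
Note: list_move_tail() is simply list_del() followed by list_add_tail() on the same node, so the fastmap hunk above is behaviour-preserving and just shorter. A small self-contained sketch of the same move using open-coded list helpers (only the minimum of <linux/list.h> is imitated here, this is not the real implementation):

#include <stdio.h>

/* Minimal intrusive list, shaped like <linux/list.h> but not the real thing. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Equivalent of the open-coded pair the fastmap patch replaces. */
static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add_tail(n, h);
}

struct peb { int pnum; struct list_head node; };

int main(void)
{
	struct list_head scratch, free_list;
	struct peb a = { .pnum = 7 };

	INIT_LIST_HEAD(&scratch);
	INIT_LIST_HEAD(&free_list);
	list_add_tail(&a.node, &scratch);

	/* One call instead of list_del() + list_add_tail(). */
	list_move_tail(&a.node, &free_list);

	printf("PEB %d now on the free list: %s\n", a.pnum,
	       free_list.next == &a.node ? "yes" : "no");
	return 0;
}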
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 4bd4db8c84c9..b93807b4c459 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -171,17 +171,17 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -189,11 +189,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
@@ -211,7 +211,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
@@ -220,12 +220,12 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
@@ -233,11 +233,11 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
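
Note: beyond the total_read/total_written rename, gluebi_read() and gluebi_write() walk a byte range one logical eraseblock at a time: the first chunk is capped by the in-block offset, every later chunk starts at offset 0, and retlen reports how much was actually transferred. A standalone sketch of that chunking loop over an in-memory "volume", with the sizes chosen arbitrarily for the example:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ERASESIZE 64		/* hypothetical LEB size, tiny for the example */
#define LEB_COUNT 4

static uint8_t volume[LEB_COUNT][ERASESIZE];

/* Stand-in for ubi_read(): copy from one logical eraseblock. */
static int leb_read(int lnum, uint8_t *buf, int offs, int len)
{
	if (lnum >= LEB_COUNT || offs + len > ERASESIZE)
		return -1;
	memcpy(buf, &volume[lnum][offs], len);
	return 0;
}

static int chunked_read(uint64_t from, uint8_t *buf, size_t len, size_t *retlen)
{
	int err = 0;
	int lnum = from / ERASESIZE;
	int offs = from % ERASESIZE;
	size_t bytes_left = len;

	while (bytes_left) {
		size_t to_read = ERASESIZE - offs;

		if (to_read > bytes_left)
			to_read = bytes_left;
		err = leb_read(lnum, buf, offs, to_read);
		if (err)
			break;
		lnum += 1;
		offs = 0;		/* only the first chunk starts mid-block */
		bytes_left -= to_read;
		buf += to_read;
	}
	*retlen = len - bytes_left;	/* what actually got transferred */
	return err;
}

int main(void)
{
	uint8_t out[100];
	size_t got = 0;

	memset(volume, 0xab, sizeof(volume));
	chunked_read(50, out, sizeof(out), &got);	/* spans two LEBs */
	printf("read %zu of %zu bytes\n", got, sizeof(out));
	return 0;
}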
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 78a1dcbf2107..bf79def40126 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1132,7 +1132,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
@@ -1159,7 +1159,7 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1197,7 +1197,7 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1241,7 +1241,7 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1282,7 +1282,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1334,7 +1334,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1398,7 +1398,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7d57469723cf..8ea6297a208f 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -85,6 +85,13 @@
#define UBI_UNKNOWN -1
/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
@@ -342,6 +349,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
@@ -545,7 +583,7 @@ struct ubi_device {
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct ubi_debug_info *dbg;
+ struct ubi_debug_info dbg;
};
/**
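The self-check hunks above stop dereferencing ubi->dbg->chk_io / ubi->dbg->chk_gen directly and go through ubi_dbg_chk_io() / ubi_dbg_chk_gen() instead, matching the change of @dbg from a pointer to an embedded struct ubi_debug_info. A minimal sketch of how such accessors would look in debug.h (assumed for illustration; that file is not part of the hunks shown):

static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
{
	return ubi->dbg.chk_io;
}

static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
{
	return ubi->dbg.chk_gen;
}

The dfs_dir_name member would then presumably be filled in with something like snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, ubi->ubi_num).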
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 9f2ebd8750e7..ec2c2dc1c1ca 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 9169e58c262e..8330703c098f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -535,7 +535,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -847,7 +847,7 @@ static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 926e3df14fb2..d77b1c1d7c72 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -858,7 +858,7 @@ out_free:
*/
static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2144f611196e..5df49d3cb5c7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,5 +1,4 @@
/*
- * @ubi: UBI device description object
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -2050,7 +2049,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -2090,7 +2089,7 @@ out_free:
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
@@ -2116,7 +2115,7 @@ static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *p;
int i;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index d427493997b6..cbc44f53755a 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,7 +61,7 @@ module_param(clockp, int, 0);
module_param(clockm, int, 0);
MODULE_LICENSE("GPL");
-static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct net_device *dev;
struct arcnet_local *lp;
@@ -135,7 +135,7 @@ out_dev:
return err;
}
-static void __devexit com20020pci_remove(struct pci_dev *pdev)
+static void com20020pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
unregister_netdev(dev);
@@ -178,7 +178,7 @@ static struct pci_driver com20020pci_driver = {
.name = "com20020",
.id_table = com20020pci_id_table,
.probe = com20020pci_probe,
- .remove = __devexit_p(com20020pci_remove),
+ .remove = com20020pci_remove,
};
static int __init com20020pci_init(void)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e15cc11edbbe..7c9d136e74be 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -84,6 +84,10 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
+static void rlb_src_unlink(struct bonding *bond, u32 index);
+static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
+ u32 ip_dst_hash);
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
@@ -354,6 +358,18 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
if (!arp)
goto out;
+ /* We received an ARP from arp->ip_src.
+ * We might have used this IP address previously (on the bonding host
+ * itself or on a system that is bridged together with the bond).
+ * However, if arp->mac_src is different than what is stored in
+ * rx_hashtbl, some other host is now using the IP and we must prevent
+ * sending out client updates with this IP address and the old MAC
+ * address.
+ * Clean up all hash table entries that have this address as ip_src but
+ * have a different mac_src.
+ */
+ rlb_purge_src_ip(bond, arp);
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
rlb_update_entry_from_arp(bond, arp);
@@ -432,9 +448,9 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
_lock_rx_hashtbl_bh(bond);
rx_hash_table = bond_info->rx_hashtbl;
- index = bond_info->rx_hashtbl_head;
+ index = bond_info->rx_hashtbl_used_head;
for (; index != RLB_NULL_INDEX; index = next_index) {
- next_index = rx_hash_table[index].next;
+ next_index = rx_hash_table[index].used_next;
if (rx_hash_table[index].slave == slave) {
struct slave *assigned_slave = rlb_next_rx_slave(bond);
@@ -519,8 +535,9 @@ static void rlb_update_rx_clients(struct bonding *bond)
_lock_rx_hashtbl_bh(bond);
- hash_index = bond_info->rx_hashtbl_head;
- for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ hash_index = bond_info->rx_hashtbl_used_head;
+ for (; hash_index != RLB_NULL_INDEX;
+ hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
@@ -548,8 +565,9 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
_lock_rx_hashtbl_bh(bond);
- hash_index = bond_info->rx_hashtbl_head;
- for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ hash_index = bond_info->rx_hashtbl_used_head;
+ for (; hash_index != RLB_NULL_INDEX;
+ hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->slave == slave) &&
@@ -578,8 +596,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
_lock_rx_hashtbl(bond);
- hash_index = bond_info->rx_hashtbl_head;
- for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ hash_index = bond_info->rx_hashtbl_used_head;
+ for (; hash_index != RLB_NULL_INDEX;
+ hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (!client_info->slave) {
@@ -625,6 +644,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
/* update mac address from arp */
memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
}
+ memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
assigned_slave = client_info->slave;
if (assigned_slave) {
@@ -647,6 +667,17 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave) {
+ if (!(client_info->assigned &&
+ client_info->ip_src == arp->ip_src)) {
+ /* ip_src is going to be updated,
+ * fix the src hash list
+ */
+ u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
+ sizeof(arp->ip_src));
+ rlb_src_unlink(bond, hash_index);
+ rlb_src_link(bond, hash_src, hash_index);
+ }
+
client_info->ip_src = arp->ip_src;
client_info->ip_dst = arp->ip_dst;
/* arp->mac_dst is broadcast for arp requests.
@@ -654,6 +685,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* upon receiving an arp reply.
*/
memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
client_info->slave = assigned_slave;
if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -669,11 +701,11 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
}
if (!client_info->assigned) {
- u32 prev_tbl_head = bond_info->rx_hashtbl_head;
- bond_info->rx_hashtbl_head = hash_index;
- client_info->next = prev_tbl_head;
+ u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
+ bond_info->rx_hashtbl_used_head = hash_index;
+ client_info->used_next = prev_tbl_head;
if (prev_tbl_head != RLB_NULL_INDEX) {
- bond_info->rx_hashtbl[prev_tbl_head].prev =
+ bond_info->rx_hashtbl[prev_tbl_head].used_prev =
hash_index;
}
client_info->assigned = 1;
@@ -694,6 +726,12 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
+ /* Don't modify or load balance ARPs that do not originate locally
+ * (e.g., arrive via a bridge).
+ */
+ if (!bond_slave_has_mac(bond, arp->mac_src))
+ return NULL;
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected
* rx channel
@@ -740,8 +778,9 @@ static void rlb_rebalance(struct bonding *bond)
_lock_rx_hashtbl_bh(bond);
ntt = 0;
- hash_index = bond_info->rx_hashtbl_head;
- for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ hash_index = bond_info->rx_hashtbl_used_head;
+ for (; hash_index != RLB_NULL_INDEX;
+ hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
assigned_slave = rlb_next_rx_slave(bond);
if (assigned_slave && (client_info->slave != assigned_slave)) {
@@ -759,11 +798,113 @@ static void rlb_rebalance(struct bonding *bond)
}
/* Caller must hold rx_hashtbl lock */
+static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
+{
+ entry->used_next = RLB_NULL_INDEX;
+ entry->used_prev = RLB_NULL_INDEX;
+ entry->assigned = 0;
+ entry->slave = NULL;
+ entry->tag = 0;
+}
+static void rlb_init_table_entry_src(struct rlb_client_info *entry)
+{
+ entry->src_first = RLB_NULL_INDEX;
+ entry->src_prev = RLB_NULL_INDEX;
+ entry->src_next = RLB_NULL_INDEX;
+}
+
static void rlb_init_table_entry(struct rlb_client_info *entry)
{
memset(entry, 0, sizeof(struct rlb_client_info));
- entry->next = RLB_NULL_INDEX;
- entry->prev = RLB_NULL_INDEX;
+ rlb_init_table_entry_dst(entry);
+ rlb_init_table_entry_src(entry);
+}
+
+static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ u32 next_index = bond_info->rx_hashtbl[index].used_next;
+ u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
+
+ if (index == bond_info->rx_hashtbl_used_head)
+ bond_info->rx_hashtbl_used_head = next_index;
+ if (prev_index != RLB_NULL_INDEX)
+ bond_info->rx_hashtbl[prev_index].used_next = next_index;
+ if (next_index != RLB_NULL_INDEX)
+ bond_info->rx_hashtbl[next_index].used_prev = prev_index;
+}
+
+/* unlink a rlb hash table entry from the src list */
+static void rlb_src_unlink(struct bonding *bond, u32 index)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ u32 next_index = bond_info->rx_hashtbl[index].src_next;
+ u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
+
+ bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
+ bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
+
+ if (next_index != RLB_NULL_INDEX)
+ bond_info->rx_hashtbl[next_index].src_prev = prev_index;
+
+ if (prev_index == RLB_NULL_INDEX)
+ return;
+
+ /* is prev_index pointing to the head of this list? */
+ if (bond_info->rx_hashtbl[prev_index].src_first == index)
+ bond_info->rx_hashtbl[prev_index].src_first = next_index;
+ else
+ bond_info->rx_hashtbl[prev_index].src_next = next_index;
+
+}
+
+static void rlb_delete_table_entry(struct bonding *bond, u32 index)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
+
+ rlb_delete_table_entry_dst(bond, index);
+ rlb_init_table_entry_dst(entry);
+
+ rlb_src_unlink(bond, index);
+}
+
+/* add the rx_hashtbl[ip_dst_hash] entry to the list
+ * of entries with identical ip_src_hash
+ */
+static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ u32 next;
+
+ bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
+ next = bond_info->rx_hashtbl[ip_src_hash].src_first;
+ bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
+ if (next != RLB_NULL_INDEX)
+ bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
+ bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
+}
+
+/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
+ * not match arp->mac_src */
+static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ u32 index;
+
+ _lock_rx_hashtbl_bh(bond);
+
+ index = bond_info->rx_hashtbl[ip_src_hash].src_first;
+ while (index != RLB_NULL_INDEX) {
+ struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
+ u32 next_index = entry->src_next;
+ if (entry->ip_src == arp->ip_src &&
+ !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
+ rlb_delete_table_entry(bond, index);
+ index = next_index;
+ }
+ _unlock_rx_hashtbl_bh(bond);
}
static int rlb_initialize(struct bonding *bond)
@@ -781,7 +922,7 @@ static int rlb_initialize(struct bonding *bond)
bond_info->rx_hashtbl = new_hashtbl;
- bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
+ bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
rlb_init_table_entry(bond_info->rx_hashtbl + i);
@@ -803,7 +944,7 @@ static void rlb_deinitialize(struct bonding *bond)
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
- bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
+ bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
_unlock_rx_hashtbl_bh(bond);
}
@@ -815,25 +956,13 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
_lock_rx_hashtbl_bh(bond);
- curr_index = bond_info->rx_hashtbl_head;
+ curr_index = bond_info->rx_hashtbl_used_head;
while (curr_index != RLB_NULL_INDEX) {
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
- u32 next_index = bond_info->rx_hashtbl[curr_index].next;
- u32 prev_index = bond_info->rx_hashtbl[curr_index].prev;
-
- if (curr->tag && (curr->vlan_id == vlan_id)) {
- if (curr_index == bond_info->rx_hashtbl_head) {
- bond_info->rx_hashtbl_head = next_index;
- }
- if (prev_index != RLB_NULL_INDEX) {
- bond_info->rx_hashtbl[prev_index].next = next_index;
- }
- if (next_index != RLB_NULL_INDEX) {
- bond_info->rx_hashtbl[next_index].prev = prev_index;
- }
+ u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
- rlb_init_table_entry(curr);
- }
+ if (curr->tag && (curr->vlan_id == vlan_id))
+ rlb_delete_table_entry(bond, curr_index);
curr_index = next_index;
}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 90f140a2d197..e7a5b8b37ea3 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -94,15 +94,35 @@ struct tlb_client_info {
/* -------------------------------------------------------------------------
* struct rlb_client_info contains all info related to a specific rx client
- * connection. This is the Clients Hash Table entry struct
+ * connection. This is the Clients Hash Table entry struct.
+ * Note that this is not a proper hash table; if a new client's IP address
+ * hash collides with an existing client entry, the old entry is replaced.
+ *
+ * There is a linked list (linked by the used_next and used_prev members)
+ * linking all the used entries of the hash table. This allows updating
+ * all the clients without walking over all the unused elements of the table.
+ *
+ * There are also linked lists of entries with identical hash(ip_src). These
+ * allow cleaning up the table from ip_src<->mac_src associations that have
+ * become outdated and would cause sending out invalid ARP updates to the
+ * network. These are linked by the src_next and src_prev members.
* -------------------------------------------------------------------------
*/
struct rlb_client_info {
__be32 ip_src; /* the server IP address */
__be32 ip_dst; /* the client IP address */
+ u8 mac_src[ETH_ALEN]; /* the server MAC address */
u8 mac_dst[ETH_ALEN]; /* the client MAC address */
- u32 next; /* The next Hash table entry index */
- u32 prev; /* The previous Hash table entry index */
+
+ /* list of used hash table entries, starting at rx_hashtbl_used_head */
+ u32 used_next;
+ u32 used_prev;
+
+ /* ip_src based hashing */
+ u32 src_next; /* next entry with same hash(ip_src) */
+ u32 src_prev; /* prev entry with same hash(ip_src) */
+ u32 src_first; /* first entry with hash(ip_src) == this entry's index */
+
u8 assigned; /* checking whether this entry is assigned */
u8 ntt; /* flag - need to transmit client info */
struct slave *slave; /* the slave assigned to this client */
@@ -131,7 +151,7 @@ struct alb_bond_info {
int rlb_enabled;
struct rlb_client_info *rx_hashtbl; /* Receive hash table */
spinlock_t rx_hashtbl_lock;
- u32 rx_hashtbl_head;
+ u32 rx_hashtbl_used_head;
u8 rx_ntt; /* flag - need to transmit
* to all rx clients
*/
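For illustration only (not part of the patch): entries in rx_hashtbl are chained by array indices rather than pointers, with RLB_NULL_INDEX terminating a chain, so a walk over all assigned clients looks like the loops patched above:

	u32 idx = bond_info->rx_hashtbl_used_head;

	while (idx != RLB_NULL_INDEX) {
		struct rlb_client_info *ci = &bond_info->rx_hashtbl[idx];

		/* ... inspect or update the client entry ... */
		idx = ci->used_next;
	}

The src_first/src_next/src_prev chains are walked the same way, which is what rlb_purge_src_ip() above relies on to drop stale ip_src/mac_src pairs.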
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2cf084eb9d52..5fc4c2351478 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -31,8 +31,9 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
- hash_index = bond_info->rx_hashtbl_head;
- for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ hash_index = bond_info->rx_hashtbl_used_head;
+ for (; hash_index != RLB_NULL_INDEX;
+ hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
&client_info->ip_src,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a7d47350ea4b..b7d45f367d4a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -615,15 +615,9 @@ static int bond_check_dev_link(struct bonding *bond,
return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
/* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops) {
- if (slave_dev->ethtool_ops->get_link) {
- u32 link;
-
- link = slave_dev->ethtool_ops->get_link(slave_dev);
-
- return link ? BMSR_LSTATUS : 0;
- }
- }
+ if (slave_dev->ethtool_ops->get_link)
+ return slave_dev->ethtool_ops->get_link(slave_dev) ?
+ BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
ioctl = slave_ops->ndo_do_ioctl;
@@ -1510,8 +1504,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
int link_reporting;
int res = 0;
- if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
- slave_ops->ndo_do_ioctl == NULL) {
+ if (!bond->params.use_carrier &&
+ slave_dev->ethtool_ops->get_link == NULL &&
+ slave_ops->ndo_do_ioctl == NULL) {
pr_warning("%s: Warning: no link monitoring support for %s\n",
bond_dev->name, slave_dev->name);
}
@@ -1838,7 +1833,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* anyway (it holds no special properties of the bond device),
* so we can change it without calling change_active_interface()
*/
- if (!bond->curr_active_slave)
+ if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
bond->curr_active_slave = new_slave;
break;
@@ -4436,8 +4431,6 @@ static void bond_uninit(struct net_device *bond_dev)
list_del(&bond->bond_list);
- bond_work_cancel_all(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
pr_info("%s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f8af2fcd3d16..21b68e5c14fd 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,6 +22,7 @@
#include <linux/in6.h>
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -244,7 +245,7 @@ struct bonding {
struct delayed_work ad_work;
struct delayed_work mcast_work;
#ifdef CONFIG_DEBUG_FS
- /* debugging suport via debugfs */
+ /* debugging support via debugfs */
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
};
@@ -450,6 +451,18 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
}
#endif
+static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ const u8 *mac)
+{
+ int i = 0;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, i)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return tmp;
+
+ return NULL;
+}
/* exported from bond_main.c */
extern int bond_net_id;
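bond_slave_has_mac() returns the matching slave (or NULL), so a caller can use it both as a boolean test and to identify which slave sourced a frame. A hedged usage sketch (the surrounding names are hypothetical; rlb_arp_xmit() above uses it in exactly this boolean way):

	struct slave *src = bond_slave_has_mac(bond, eth_hdr(skb)->h_source);

	if (!src)
		return NULL;	/* not locally generated, leave the frame alone */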
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index bb709fd66993..b56bd9e80957 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -110,6 +110,15 @@ config PCH_CAN
is an IOH for x86 embedded processor (Intel Atom E6xx series).
This driver can access CAN bus.
+config CAN_GRCAN
+ tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
+ depends on CAN_DEV && OF
+ ---help---
+ Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
+ Note that the driver supports little endian, even though little
+ endian syntheses of the cores would need some modifications on
+ the hardware level to work.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 938be37b670c..7de59862bbe9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -22,5 +22,6 @@ obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
+obj-$(CONFIG_CAN_GRCAN) += grcan.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index fcff73a73b1d..81baefda037b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -33,12 +33,11 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/platform_data/atmel.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <mach/board.h>
-
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
/* Common registers */
@@ -155,7 +154,7 @@ struct at91_priv {
canid_t mb0_id;
};
-static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
+static const struct at91_devtype_data at91_devtype_data[] = {
[AT91_DEVTYPE_SAM9263] = {
.rx_first = 1,
.rx_split = 8,
@@ -1242,7 +1241,7 @@ static struct attribute_group at91_sysfs_attr_group = {
.attrs = at91_sysfs_attrs,
};
-static int __devinit at91_can_probe(struct platform_device *pdev)
+static int at91_can_probe(struct platform_device *pdev)
{
const struct at91_devtype_data *devtype_data;
enum at91_devtype devtype;
@@ -1339,7 +1338,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
return err;
}
-static int __devexit at91_can_remove(struct platform_device *pdev)
+static int at91_can_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_priv *priv = netdev_priv(dev);
@@ -1372,10 +1371,11 @@ static const struct platform_device_id at91_can_id_table[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(platform, at91_can_id_table);
static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
- .remove = __devexit_p(at91_can_remove),
+ .remove = at91_can_remove,
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index f2d6d258a286..6a0532176b69 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -531,7 +531,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
.ndo_start_xmit = bfin_can_start_xmit,
};
-static int __devinit bfin_can_probe(struct platform_device *pdev)
+static int bfin_can_probe(struct platform_device *pdev)
{
int err;
struct net_device *dev;
@@ -611,7 +611,7 @@ exit:
return err;
}
-static int __devexit bfin_can_remove(struct platform_device *pdev)
+static int bfin_can_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct bfin_can_priv *priv = netdev_priv(dev);
@@ -677,7 +677,7 @@ static int bfin_can_resume(struct platform_device *pdev)
static struct platform_driver bfin_can_driver = {
.probe = bfin_can_probe,
- .remove = __devexit_p(bfin_can_remove),
+ .remove = bfin_can_remove,
.suspend = bfin_can_suspend,
.resume = bfin_can_resume,
.driver = {
@@ -691,3 +691,4 @@ module_platform_driver(bfin_can_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e5180dfddba5..2282b1ae9765 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -233,6 +233,12 @@ static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
pm_runtime_put_sync(priv->device);
}
+static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
+{
+ if (priv->raminit)
+ priv->raminit(priv, enable);
+}
+
static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -482,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in the IFx_MASK2 register is fixed to 1
+ */
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
@@ -954,7 +964,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL);
break;
case LEC_BIT1_ERROR:
@@ -967,7 +977,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL);
break;
default:
@@ -1090,6 +1100,7 @@ static int c_can_open(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
c_can_pm_runtime_get_sync(priv);
+ c_can_reset_ram(priv, true);
/* open the can device */
err = open_candev(dev);
@@ -1118,6 +1129,7 @@ static int c_can_open(struct net_device *dev)
exit_irq_fail:
close_candev(dev);
exit_open_fail:
+ c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
return err;
}
@@ -1131,6 +1143,8 @@ static int c_can_close(struct net_device *dev)
c_can_stop(dev);
free_irq(dev->irq, dev);
close_candev(dev);
+
+ c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
return 0;
@@ -1188,6 +1202,7 @@ int c_can_power_down(struct net_device *dev)
c_can_stop(dev);
+ c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
return 0;
@@ -1206,6 +1221,7 @@ int c_can_power_up(struct net_device *dev)
WARN_ON(priv->type != BOSCH_D_CAN);
c_can_pm_runtime_get_sync(priv);
+ c_can_reset_ram(priv, true);
/* Clear PDR and INIT bits */
val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index e5ed41dafa1b..d2e1c21b143f 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -169,6 +169,9 @@ struct c_can_priv {
void *priv; /* for board-specific data */
u16 irqstatus;
enum c_can_dev_id type;
+ u32 __iomem *raminit_ctrlreg;
+ unsigned int instance;
+ void (*raminit) (const struct c_can_priv *priv, bool enable);
};
struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 3d7830bcd2bf..b374be7891a2 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -63,8 +63,8 @@ static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
-static int __devinit c_can_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int c_can_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
struct c_can_priv *priv;
@@ -174,7 +174,7 @@ out:
return ret;
}
-static void __devexit c_can_pci_remove(struct pci_dev *pdev)
+static void c_can_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
@@ -210,7 +210,7 @@ static struct pci_driver c_can_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = c_can_pci_tbl,
.probe = c_can_pci_probe,
- .remove = __devexit_p(c_can_pci_remove),
+ .remove = c_can_pci_remove,
};
module_pci_driver(c_can_pci_driver);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index ee1416132aba..d63b91904f82 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -38,6 +38,8 @@
#include "c_can.h"
+#define CAN_RAMINIT_START_MASK(i) (1 << (i))
+
/*
* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
@@ -68,6 +70,18 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->raminit_ctrlreg);
+ if (enable)
+ val |= CAN_RAMINIT_START_MASK(priv->instance);
+ else
+ val &= ~CAN_RAMINIT_START_MASK(priv->instance);
+ writel(val, priv->raminit_ctrlreg);
+}
+
static struct platform_device_id c_can_id_table[] = {
[BOSCH_C_CAN_PLATFORM] = {
.name = KBUILD_MODNAME,
@@ -83,14 +97,16 @@ static struct platform_device_id c_can_id_table[] = {
}, {
}
};
+MODULE_DEVICE_TABLE(platform, c_can_id_table);
static const struct of_device_id c_can_of_table[] = {
{ .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
{ .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, c_can_of_table);
-static int __devinit c_can_plat_probe(struct platform_device *pdev)
+static int c_can_plat_probe(struct platform_device *pdev)
{
int ret;
void __iomem *addr;
@@ -99,7 +115,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
const struct of_device_id *match;
const struct platform_device_id *id;
struct pinctrl *pinctrl;
- struct resource *mem;
+ struct resource *mem, *res;
int irq;
struct clk *clk;
@@ -178,6 +194,18 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+
+ if (pdev->dev.of_node)
+ priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
+ else
+ priv->instance = pdev->id;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->raminit_ctrlreg || priv->instance < 0)
+ dev_info(&pdev->dev, "control memory is not used for raminit\n");
+ else
+ priv->raminit = c_can_hw_raminit;
break;
default:
ret = -EINVAL;
@@ -220,7 +248,7 @@ exit:
return ret;
}
-static int __devexit c_can_plat_remove(struct platform_device *pdev)
+static int c_can_plat_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
@@ -306,7 +334,7 @@ static struct platform_driver c_can_plat_driver = {
.of_match_table = of_match_ptr(c_can_of_table),
},
.probe = c_can_plat_probe,
- .remove = __devexit_p(c_can_plat_remove),
+ .remove = c_can_plat_remove,
.suspend = c_can_suspend,
.resume = c_can_resume,
.id_table = c_can_id_table,
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 9f3a25ccd665..8eaaac81f320 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -75,12 +75,12 @@ MODULE_LICENSE("GPL v2");
static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
-static int __devinitdata irq[MAXDEV];
-static int __devinitdata clk[MAXDEV];
-static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
-static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
-static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
-static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static int irq[MAXDEV];
+static int clk[MAXDEV];
+static u8 cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -166,7 +166,7 @@ static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
}
-static int __devinit cc770_isa_probe(struct platform_device *pdev)
+static int cc770_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct cc770_priv *priv;
@@ -291,7 +291,7 @@ static int __devinit cc770_isa_probe(struct platform_device *pdev)
return err;
}
-static int __devexit cc770_isa_remove(struct platform_device *pdev)
+static int cc770_isa_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct cc770_priv *priv = netdev_priv(dev);
@@ -316,7 +316,7 @@ static int __devexit cc770_isa_remove(struct platform_device *pdev)
static struct platform_driver cc770_isa_driver = {
.probe = cc770_isa_probe,
- .remove = __devexit_p(cc770_isa_remove),
+ .remove = cc770_isa_remove,
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 688371cda37a..d0f6bfc45aea 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -60,6 +60,7 @@
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
#define CC770_PLATFORM_CAN_CLOCK 16000000
@@ -74,8 +75,8 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
iowrite8(val, priv->reg_base + reg);
}
-static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
- struct cc770_priv *priv)
+static int cc770_get_of_node_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
{
struct device_node *np = pdev->dev.of_node;
const u32 *prop;
@@ -147,8 +148,8 @@ static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
return 0;
}
-static int __devinit cc770_get_platform_data(struct platform_device *pdev,
- struct cc770_priv *priv)
+static int cc770_get_platform_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
{
struct cc770_platform_data *pdata = pdev->dev.platform_data;
@@ -163,7 +164,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
return 0;
}
-static int __devinit cc770_platform_probe(struct platform_device *pdev)
+static int cc770_platform_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct cc770_priv *priv;
@@ -237,7 +238,7 @@ exit_release_mem:
return err;
}
-static int __devexit cc770_platform_remove(struct platform_device *pdev)
+static int cc770_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct cc770_priv *priv = netdev_priv(dev);
@@ -253,11 +254,12 @@ static int __devexit cc770_platform_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id __devinitdata cc770_platform_table[] = {
+static struct of_device_id cc770_platform_table[] = {
{.compatible = "bosch,cc770"}, /* CC770 from Bosch */
{.compatible = "intc,82527"}, /* AN82527 from Intel CP */
{},
};
+MODULE_DEVICE_TABLE(of, cc770_platform_table);
static struct platform_driver cc770_platform_driver = {
.driver = {
@@ -266,7 +268,7 @@ static struct platform_driver cc770_platform_driver = {
.of_match_table = cc770_platform_table,
},
.probe = cc770_platform_probe,
- .remove = __devexit_p(cc770_platform_remove),
+ .remove = cc770_platform_remove,
};
module_platform_driver(cc770_platform_driver);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 963e2ccd10db..8233e5ed2939 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -609,8 +609,7 @@ void close_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- if (del_timer_sync(&priv->restart_timer))
- dev_put(dev);
+ del_timer_sync(&priv->restart_timer);
can_flush_echo_skb(dev);
}
EXPORT_SYMBOL_GPL(close_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a412bf6d73ef..0289a6d86f66 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -922,7 +922,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_start_xmit = flexcan_start_xmit,
};
-static int __devinit register_flexcandev(struct net_device *dev)
+static int register_flexcandev(struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
@@ -968,7 +968,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
return err;
}
-static void __devexit unregister_flexcandev(struct net_device *dev)
+static void unregister_flexcandev(struct net_device *dev)
{
unregister_candev(dev);
}
@@ -979,13 +979,15 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, flexcan_of_match);
static const struct platform_device_id flexcan_id_table[] = {
{ .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
-static int __devinit flexcan_probe(struct platform_device *pdev)
+static int flexcan_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
@@ -1107,7 +1109,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
return err;
}
-static int __devexit flexcan_remove(struct platform_device *pdev)
+static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct flexcan_priv *priv = netdev_priv(dev);
@@ -1168,7 +1170,7 @@ static struct platform_driver flexcan_driver = {
.of_match_table = flexcan_of_match,
},
.probe = flexcan_probe,
- .remove = __devexit_p(flexcan_remove),
+ .remove = flexcan_remove,
.suspend = flexcan_suspend,
.resume = flexcan_resume,
.id_table = flexcan_id_table,
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
new file mode 100644
index 000000000000..17fbc7a09224
--- /dev/null
+++ b/drivers/net/can/grcan.c
@@ -0,0 +1,1756 @@
+/*
+ * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
+ *
+ * 2012 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
+ * VHDL IP core library.
+ *
+ * Full documentation of the GRCAN core can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
+ * open firmware properties.
+ *
+ * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
+ * sysfs interface.
+ *
+ * See "Documentation/kernel-parameters.txt" for information on the module
+ * parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Andreas Larsson <andreas@gaisler.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/can/dev.h>
+#include <linux/spinlock.h>
+
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+#include <linux/of_irq.h>
+
+#include <linux/dma-mapping.h>
+
+#define DRV_NAME "grcan"
+
+#define GRCAN_NAPI_WEIGHT 32
+
+#define GRCAN_RESERVE_SIZE(slot1, slot2) (((slot2) - (slot1)) / 4 - 1)
+
+struct grcan_registers {
+ u32 conf; /* 0x00 */
+ u32 stat; /* 0x04 */
+ u32 ctrl; /* 0x08 */
+ u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
+ u32 smask; /* 0x18 - CanMASK */
+ u32 scode; /* 0x1c - CanCODE */
+ u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
+ u32 pimsr; /* 0x100 */
+ u32 pimr; /* 0x104 */
+ u32 pisr; /* 0x108 */
+ u32 pir; /* 0x10C */
+ u32 imr; /* 0x110 */
+ u32 picr; /* 0x114 */
+ u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
+ u32 txctrl; /* 0x200 */
+ u32 txaddr; /* 0x204 */
+ u32 txsize; /* 0x208 */
+ u32 txwr; /* 0x20C */
+ u32 txrd; /* 0x210 */
+ u32 txirq; /* 0x214 */
+ u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
+ u32 rxctrl; /* 0x300 */
+ u32 rxaddr; /* 0x304 */
+ u32 rxsize; /* 0x308 */
+ u32 rxwr; /* 0x30C */
+ u32 rxrd; /* 0x310 */
+ u32 rxirq; /* 0x314 */
+ u32 rxmask; /* 0x318 */
+ u32 rxcode; /* 0x31C */
+};
+
+#define GRCAN_CONF_ABORT 0x00000001
+#define GRCAN_CONF_ENABLE0 0x00000002
+#define GRCAN_CONF_ENABLE1 0x00000004
+#define GRCAN_CONF_SELECT 0x00000008
+#define GRCAN_CONF_SILENT 0x00000010
+#define GRCAN_CONF_SAM 0x00000020 /* Available in some hardware */
+#define GRCAN_CONF_BPR 0x00000300 /* Note: not BRP */
+#define GRCAN_CONF_RSJ 0x00007000
+#define GRCAN_CONF_PS1 0x00f00000
+#define GRCAN_CONF_PS2 0x000f0000
+#define GRCAN_CONF_SCALER 0xff000000
+#define GRCAN_CONF_OPERATION \
+ (GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1 \
+ | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
+#define GRCAN_CONF_TIMING \
+ (GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1 \
+ | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)
+
+#define GRCAN_CONF_RSJ_MIN 1
+#define GRCAN_CONF_RSJ_MAX 4
+#define GRCAN_CONF_PS1_MIN 1
+#define GRCAN_CONF_PS1_MAX 15
+#define GRCAN_CONF_PS2_MIN 2
+#define GRCAN_CONF_PS2_MAX 8
+#define GRCAN_CONF_SCALER_MIN 0
+#define GRCAN_CONF_SCALER_MAX 255
+#define GRCAN_CONF_SCALER_INC 1
+
+#define GRCAN_CONF_BPR_BIT 8
+#define GRCAN_CONF_RSJ_BIT 12
+#define GRCAN_CONF_PS1_BIT 20
+#define GRCAN_CONF_PS2_BIT 16
+#define GRCAN_CONF_SCALER_BIT 24
+
+#define GRCAN_STAT_PASS 0x000001
+#define GRCAN_STAT_OFF 0x000002
+#define GRCAN_STAT_OR 0x000004
+#define GRCAN_STAT_AHBERR 0x000008
+#define GRCAN_STAT_ACTIVE 0x000010
+#define GRCAN_STAT_RXERRCNT 0x00ff00
+#define GRCAN_STAT_TXERRCNT 0xff0000
+
+#define GRCAN_STAT_ERRCTR_RELATED (GRCAN_STAT_PASS | GRCAN_STAT_OFF)
+
+#define GRCAN_STAT_RXERRCNT_BIT 8
+#define GRCAN_STAT_TXERRCNT_BIT 16
+
+#define GRCAN_STAT_ERRCNT_WARNING_LIMIT 96
+#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT 127
+
+#define GRCAN_CTRL_RESET 0x2
+#define GRCAN_CTRL_ENABLE 0x1
+
+#define GRCAN_TXCTRL_ENABLE 0x1
+#define GRCAN_TXCTRL_ONGOING 0x2
+#define GRCAN_TXCTRL_SINGLE 0x4
+
+#define GRCAN_RXCTRL_ENABLE 0x1
+#define GRCAN_RXCTRL_ONGOING 0x2
+
+/* Relative offset of IRQ sources to AMBA Plug&Play */
+#define GRCAN_IRQIX_IRQ 0
+#define GRCAN_IRQIX_TXSYNC 1
+#define GRCAN_IRQIX_RXSYNC 2
+
+#define GRCAN_IRQ_PASS 0x00001
+#define GRCAN_IRQ_OFF 0x00002
+#define GRCAN_IRQ_OR 0x00004
+#define GRCAN_IRQ_RXAHBERR 0x00008
+#define GRCAN_IRQ_TXAHBERR 0x00010
+#define GRCAN_IRQ_RXIRQ 0x00020
+#define GRCAN_IRQ_TXIRQ 0x00040
+#define GRCAN_IRQ_RXFULL 0x00080
+#define GRCAN_IRQ_TXEMPTY 0x00100
+#define GRCAN_IRQ_RX 0x00200
+#define GRCAN_IRQ_TX 0x00400
+#define GRCAN_IRQ_RXSYNC 0x00800
+#define GRCAN_IRQ_TXSYNC 0x01000
+#define GRCAN_IRQ_RXERRCTR 0x02000
+#define GRCAN_IRQ_TXERRCTR 0x04000
+#define GRCAN_IRQ_RXMISS 0x08000
+#define GRCAN_IRQ_TXLOSS 0x10000
+
+#define GRCAN_IRQ_NONE 0
+#define GRCAN_IRQ_ALL \
+ (GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR \
+ | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR \
+ | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ \
+ | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY \
+ | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC \
+ | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR \
+ | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS \
+ | GRCAN_IRQ_TXLOSS)
+
+#define GRCAN_IRQ_ERRCTR_RELATED (GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
+ | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
+#define GRCAN_IRQ_ERRORS (GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR \
+ | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR \
+ | GRCAN_IRQ_TXLOSS)
+#define GRCAN_IRQ_DEFAULT (GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)
+
+#define GRCAN_MSG_SIZE 16
+
+#define GRCAN_MSG_IDE 0x80000000
+#define GRCAN_MSG_RTR 0x40000000
+#define GRCAN_MSG_BID 0x1ffc0000
+#define GRCAN_MSG_EID 0x1fffffff
+#define GRCAN_MSG_IDE_BIT 31
+#define GRCAN_MSG_RTR_BIT 30
+#define GRCAN_MSG_BID_BIT 18
+#define GRCAN_MSG_EID_BIT 0
+
+#define GRCAN_MSG_DLC 0xf0000000
+#define GRCAN_MSG_TXERRC 0x00ff0000
+#define GRCAN_MSG_RXERRC 0x0000ff00
+#define GRCAN_MSG_DLC_BIT 28
+#define GRCAN_MSG_TXERRC_BIT 16
+#define GRCAN_MSG_RXERRC_BIT 8
+#define GRCAN_MSG_AHBERR 0x00000008
+#define GRCAN_MSG_OR 0x00000004
+#define GRCAN_MSG_OFF 0x00000002
+#define GRCAN_MSG_PASS 0x00000001
+
+#define GRCAN_MSG_DATA_SLOT_INDEX(i) (2 + (i) / 4)
+#define GRCAN_MSG_DATA_SHIFT(i) ((3 - (i) % 4) * 8)
+
+#define GRCAN_BUFFER_ALIGNMENT 1024
+#define GRCAN_DEFAULT_BUFFER_SIZE 1024
+#define GRCAN_VALID_TR_SIZE_MASK 0x001fffc0
+
+#define GRCAN_INVALID_BUFFER_SIZE(s) \
+ ((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))
+
+#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
+#error "Invalid default buffer size"
+#endif
+
+struct grcan_dma_buffer {
+ size_t size;
+ void *buf;
+ dma_addr_t handle;
+};
+
+struct grcan_dma {
+ size_t base_size;
+ void *base_buf;
+ dma_addr_t base_handle;
+ struct grcan_dma_buffer tx;
+ struct grcan_dma_buffer rx;
+};
+
+/* GRCAN configuration parameters */
+struct grcan_device_config {
+ unsigned short enable0;
+ unsigned short enable1;
+ unsigned short select;
+ unsigned int txsize;
+ unsigned int rxsize;
+};
+
+#define GRCAN_DEFAULT_DEVICE_CONFIG { \
+ .enable0 = 0, \
+ .enable1 = 0, \
+ .select = 0, \
+ .txsize = GRCAN_DEFAULT_BUFFER_SIZE, \
+ .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
+ }
+
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRLIB_VERSION_MASK 0xffff
+
+/* GRCAN private data structure */
+struct grcan_priv {
+ struct can_priv can; /* must be the first member */
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ struct grcan_registers __iomem *regs; /* ioremap'ed registers */
+ struct grcan_device_config config;
+ struct grcan_dma dma;
+
+ struct sk_buff **echo_skb; /* We allocate this on our own */
+ u8 *txdlc; /* Length of queued frames */
+
+ /* The echo skb pointer, pointing into echo_skb and indicating which
+ * frames can be echoed back. See the "Notes on the tx cyclic buffer
+ * handling"-comment for grcan_start_xmit for more details.
+ */
+ u32 eskbp;
+
+ /* Lock for controlling changes to the netif tx queue state, accesses to
+ * the echo_skb pointer eskbp and for making sure that a running reset
+ * and/or a close of the interface is done without interference from
+ * other parts of the code.
+ *
+ * The echo_skb pointer, eskbp, should only be accessed under this lock
+ * as it can be changed in several places and together with decisions on
+ * whether to wake up the tx queue.
+ *
+ * The tx queue must never be woken up if there is a running reset or
+ * close in progress.
+ *
+ * A running reset (see below on need_txbug_workaround) should never be
+ * done if the interface is closing down and several running resets
+ * should never be scheduled simultaneously.
+ */
+ spinlock_t lock;
+
+ /* Whether a workaround is needed due to a bug in older hardware. In
+ * this case, the driver both tries to prevent the bug from being
+ * triggered and recovers, if the bug nevertheless happens, by doing a
+ * running reset. A running reset, resets the device and continues from
+ * where it were without being noticeable from outside the driver (apart
+ * from slight delays).
+ */
+ bool need_txbug_workaround;
+
+ /* To trigger initialization of a running reset and to trigger a running reset
+ * respectively, in the case of a hung device due to a txbug.
+ */
+ struct timer_list hang_timer;
+ struct timer_list rr_timer;
+
+ /* To avoid waking up the netif queue and restarting timers
+ * when a reset is scheduled or when a close of the device is
+ * in progress
+ */
+ bool resetting;
+ bool closing;
+};
+
+/* Wait time for a short wait for ongoing to clear */
+#define GRCAN_SHORTWAIT_USECS 10
+
+/* Limit on the number of transmitted bits of an eff frame according to the CAN
+ * specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
+ * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and 7
+ * bits end of frame
+ */
+#define GRCAN_EFF_FRAME_MAX_BITS (1+32+6+8*8+16+2+7)
+
+#if defined(__BIG_ENDIAN)
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+ iowrite32be(val, reg);
+}
+#else
+static inline u32 grcan_read_reg(u32 __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+#endif
+
+static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
+{
+ grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
+}
+
+static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
+{
+ grcan_write_reg(reg, grcan_read_reg(reg) | mask);
+}
+
+static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
+{
+ return grcan_read_reg(reg) & mask;
+}
+
+static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
+{
+ u32 old = grcan_read_reg(reg);
+
+ grcan_write_reg(reg, (old & ~mask) | (value & mask));
+}
+
+/* a and b should both be in [0,size] and a == b == size should not hold */
+static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
+{
+ u32 sum = a + b;
+
+ if (sum < size)
+ return sum;
+ else
+ return sum - size;
+}
+
+/* a and b should both be in [0,size) */
+static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
+{
+ return grcan_ring_add(a, size - b, size);
+}
+
+/* Available slots for new transmissions */
+static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
+{
+ u32 slots = txsize / GRCAN_MSG_SIZE - 1;
+ u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;
+
+ return slots - used;
+}
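/* Worked example (editorial note, not part of the patch): with the default
 * txsize of 1024 bytes and GRCAN_MSG_SIZE of 16, the tx ring holds
 * 1024 / 16 = 64 message slots, of which 63 are usable (one slot is
 * presumably kept free so that txwr == eskbp unambiguously means "empty").
 * If txwr = 48 and eskbp = 16, grcan_ring_sub(48, 16, 1024) = 32, i.e. two
 * 16-byte messages are still queued, and grcan_txspace() returns 63 - 2 = 61.
 */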
+
+/* Configuration parameters that can be set via module parameters */
+static struct grcan_device_config grcan_module_config =
+ GRCAN_DEFAULT_DEVICE_CONFIG;
+
+static const struct can_bittiming_const grcan_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = GRCAN_CONF_PS1_MIN + 1,
+ .tseg1_max = GRCAN_CONF_PS1_MAX + 1,
+ .tseg2_min = GRCAN_CONF_PS2_MIN,
+ .tseg2_max = GRCAN_CONF_PS2_MAX,
+ .sjw_max = GRCAN_CONF_RSJ_MAX,
+ .brp_min = GRCAN_CONF_SCALER_MIN + 1,
+ .brp_max = GRCAN_CONF_SCALER_MAX + 1,
+ .brp_inc = GRCAN_CONF_SCALER_INC,
+};
+
+static int grcan_set_bittiming(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 timing = 0;
+ int bpr, rsj, ps1, ps2, scaler;
+
+ /* Should never happen - function will not be called when
+ * device is up
+ */
+ if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
+ return -EBUSY;
+
+ bpr = 0; /* Note bpr and brp are different concepts */
+ rsj = bt->sjw;
+ ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
+ ps2 = bt->phase_seg2;
+ scaler = (bt->brp - 1);
+ netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
+ bpr, rsj, ps1, ps2, scaler);
+ if (!(ps1 > ps2)) {
+ netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
+ ps1, ps2);
+ return -EINVAL;
+ }
+ if (!(ps2 >= rsj)) {
+ netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
+ ps2, rsj);
+ return -EINVAL;
+ }
+
+ timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
+ timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
+ timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
+ timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
+ timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
+ netdev_info(dev, "setting timing=0x%x\n", timing);
+ grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);
+
+ return 0;
+}
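/* Worked example (editorial note, not part of the patch): for hypothetical
 * bit-timing values sjw = 1, prop_seg + phase_seg1 = 6, phase_seg2 = 4 and
 * brp = 4, the code above computes bpr = 0, rsj = 1, ps1 = 5, ps2 = 4 and
 * scaler = 3, so the packed word is
 *   (1 << 12) | (5 << 20) | (4 << 16) | (3 << 24) = 0x03541000,
 * which is then written into the timing bits of the CONF register.
 */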
+
+static int grcan_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 status = grcan_read_reg(&regs->stat);
+
+ bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
+ bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
+ return 0;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget);
+
+/* Reset device, but keep configuration information */
+static void grcan_reset(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 config = grcan_read_reg(&regs->conf);
+
+ grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+ grcan_write_reg(&regs->conf, config);
+
+ priv->eskbp = grcan_read_reg(&regs->txrd);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Turn off hardware filtering - regs->rxcode set to 0 by reset */
+ grcan_write_reg(&regs->rxmask, 0);
+}
+
+/* stop device without changing any configurations */
+static void grcan_stop_hardware(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+
+ grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
+ grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
+}
+
+/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
+ * is true and free them otherwise.
+ *
+ * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
+ * continue until priv->eskbp catches up to regs->txrd.
+ *
+ * priv->lock *must* be held when calling this function
+ */
+static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ int i, work_done;
+
+ /* Updates to priv->eskbp and wake-ups of the queue need to
+ * be atomic with respect to the reads of priv->eskbp and shut-downs
+ * of the queue in grcan_start_xmit.
+ */
+ u32 txrd = grcan_read_reg(&regs->txrd);
+
+ for (work_done = 0; work_done < budget || budget < 0; work_done++) {
+ if (priv->eskbp == txrd)
+ break;
+ i = priv->eskbp / GRCAN_MSG_SIZE;
+ if (echo) {
+ /* Normal echo of messages */
+ stats->tx_packets++;
+ stats->tx_bytes += priv->txdlc[i];
+ priv->txdlc[i] = 0;
+ can_get_echo_skb(dev, i);
+ } else {
+ /* For cleanup of untransmitted messages */
+ can_free_echo_skb(dev, i);
+ }
+
+ priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
+ dma->tx.size);
+ txrd = grcan_read_reg(&regs->txrd);
+ }
+ return work_done;
+}
+
+static void grcan_lost_one_shot_frame(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ u32 txrd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ catch_up_echo_skb(dev, -1, true);
+
+ if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
+ /* Should never happen */
+ netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
+ } else {
+ /* By the time a GRCAN_IRQ_TXLOSS is generated in
+ * one-shot mode there is no problem in writing
+ * to TXRD even in versions of the hardware in
+ * which GRCAN_TXCTRL_ONGOING is not cleared properly
+ * in one-shot mode.
+ */
+
+ /* Skip message and discard echo-skb */
+ txrd = grcan_read_reg(&regs->txrd);
+ txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
+ grcan_write_reg(&regs->txrd, txrd);
+ catch_up_echo_skb(dev, -1, false);
+
+ if (!priv->resetting && !priv->closing &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
+ netif_wake_queue(dev);
+ grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_err(struct net_device *dev, u32 sources, u32 status)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame cf;
+
+ /* Zero potential error_frame */
+ memset(&cf, 0, sizeof(cf));
+
+ /* Message lost interrupt. This might be due to an arbitration error,
+ * but is also triggered when there is no one else on the can bus or
+ * when there is a problem with the hardware interface or the bus
+ * itself. As arbitration errors cannot be singled out, no error frames
+ * are generated reporting this event as an arbitration error.
+ */
+ if (sources & GRCAN_IRQ_TXLOSS) {
+ /* Take care of failed one-shot transmit */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ grcan_lost_one_shot_frame(dev);
+
+ /* Stop printing as soon as error passive or bus off is in
+ * effect to limit the amount of txloss debug printouts.
+ */
+ if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
+ netdev_dbg(dev, "tx message lost\n");
+ stats->tx_errors++;
+ }
+ }
+
+ /* Conditions dealing with the error counters. There is no interrupt for
+ * error warning, but there are interrupts for increases of the error
+ * counters.
+ */
+ if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
+ (status & GRCAN_STAT_ERRCTR_RELATED)) {
+ enum can_state state = priv->can.state;
+ enum can_state oldstate = state;
+ u32 txerr = (status & GRCAN_STAT_TXERRCNT)
+ >> GRCAN_STAT_TXERRCNT_BIT;
+ u32 rxerr = (status & GRCAN_STAT_RXERRCNT)
+ >> GRCAN_STAT_RXERRCNT_BIT;
+
+ /* Figure out current state */
+ if (status & GRCAN_STAT_OFF) {
+ state = CAN_STATE_BUS_OFF;
+ } else if (status & GRCAN_STAT_PASS) {
+ state = CAN_STATE_ERROR_PASSIVE;
+ } else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
+ rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
+ state = CAN_STATE_ERROR_WARNING;
+ } else {
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ /* Handle and report state changes */
+ if (state != oldstate) {
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ netdev_dbg(dev, "bus-off\n");
+ netif_carrier_off(dev);
+ priv->can.can_stats.bus_off++;
+
+ /* Prevent the hardware from recovering from bus
+ * off on its own if restart is disabled.
+ */
+ if (!priv->can.restart_ms)
+ grcan_stop_hardware(dev);
+
+ cf.can_id |= CAN_ERR_BUSOFF;
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ netdev_dbg(dev, "Error passive condition\n");
+ priv->can.can_stats.error_passive++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ netdev_dbg(dev, "Error warning condition\n");
+ priv->can.can_stats.error_warning++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
+ cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ break;
+
+ case CAN_STATE_ERROR_ACTIVE:
+ netdev_dbg(dev, "Error active condition\n");
+ cf.can_id |= CAN_ERR_CRTL;
+ break;
+
+ default:
+ /* There are no others at this point */
+ break;
+ }
+ cf.data[6] = txerr;
+ cf.data[7] = rxerr;
+ priv->can.state = state;
+ }
+
+ /* Report automatic restarts */
+ if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
+ unsigned long flags;
+
+ cf.can_id |= CAN_ERR_RESTARTED;
+ netdev_dbg(dev, "restarted\n");
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(dev);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!priv->resetting && !priv->closing) {
+ u32 txwr = grcan_read_reg(&regs->txwr);
+
+ if (grcan_txspace(dma->tx.size, txwr,
+ priv->eskbp))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ /* Data overrun interrupt */
+ if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
+ netdev_dbg(dev, "got data overrun interrupt\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ cf.can_id |= CAN_ERR_CRTL;
+ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ /* AHB bus error interrupts (not CAN bus errors) - shut down the
+ * device.
+ */
+ if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
+ (status & GRCAN_STAT_AHBERR)) {
+ char *txrx = "";
+ unsigned long flags;
+
+ if (sources & GRCAN_IRQ_TXAHBERR) {
+ txrx = "on tx ";
+ stats->tx_errors++;
+ } else if (sources & GRCAN_IRQ_RXAHBERR) {
+ txrx = "on rx ";
+ stats->rx_errors++;
+ }
+ netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
+ txrx);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Prevent anything from being enabled again and halt the device */
+ priv->closing = true;
+ netif_stop_queue(dev);
+ grcan_stop_hardware(dev);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ /* Pass on the error frame if there is something to report,
+ * i.e. the id contains some information
+ */
+ if (cf.can_id) {
+ struct can_frame *skb_cf;
+ struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);
+
+ if (skb == NULL) {
+ netdev_dbg(dev, "could not allocate error frame\n");
+ return;
+ }
+ skb_cf->can_id |= cf.can_id;
+ memcpy(skb_cf->data, cf.data, sizeof(cf.data));
+
+ netif_rx(skb);
+ }
+}
+
+static irqreturn_t grcan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 sources, status;
+
+ /* Find out the source */
+ sources = grcan_read_reg(&regs->pimsr);
+ if (!sources)
+ return IRQ_NONE;
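+ /* Acknowledge the pending sources right away by writing them back to
+ * the clear register, so that new events of the same kind occurring
+ * while the handler runs are not lost.
+ */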
+ grcan_write_reg(&regs->picr, sources);
+ status = grcan_read_reg(&regs->stat);
+
+ /* If we got TX progress, the device has not hung,
+ * so disable the hang timer
+ */
+ if (priv->need_txbug_workaround &&
+ (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
+ del_timer(&priv->hang_timer);
+ }
+
+ /* Frame(s) received or transmitted */
+ if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
+ /* Disable tx/rx interrupts and schedule poll(). No need for
+ * locking as interference from a running reset at worst leads
+ * to an extra interrupt.
+ */
+ grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+ napi_schedule(&priv->napi);
+ }
+
+ /* (Potential) error conditions to take care of */
+ if (sources & GRCAN_IRQ_ERRORS)
+ grcan_err(dev, sources, status);
+
+ return IRQ_HANDLED;
+}
+
+/* Reset device and restart operations from where they were.
+ *
+ * This assumes that TXCTRL & RXCTRL are properly disabled and that RX
+ * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
+ * in single-shot mode)
+ */
+static void grcan_running_reset(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+
+ /* This temporarily messes with eskbp, so we need to lock
+ * priv->lock
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->resetting = false;
+ del_timer(&priv->hang_timer);
+ del_timer(&priv->rr_timer);
+
+ if (!priv->closing) {
+ /* Save and reset - config register preserved by grcan_reset */
+ u32 imr = grcan_read_reg(&regs->imr);
+
+ u32 txaddr = grcan_read_reg(&regs->txaddr);
+ u32 txsize = grcan_read_reg(&regs->txsize);
+ u32 txwr = grcan_read_reg(&regs->txwr);
+ u32 txrd = grcan_read_reg(&regs->txrd);
+ u32 eskbp = priv->eskbp;
+
+ u32 rxaddr = grcan_read_reg(&regs->rxaddr);
+ u32 rxsize = grcan_read_reg(&regs->rxsize);
+ u32 rxwr = grcan_read_reg(&regs->rxwr);
+ u32 rxrd = grcan_read_reg(&regs->rxrd);
+
+ grcan_reset(dev);
+
+ /* Restore */
+ grcan_write_reg(&regs->txaddr, txaddr);
+ grcan_write_reg(&regs->txsize, txsize);
+ grcan_write_reg(&regs->txwr, txwr);
+ grcan_write_reg(&regs->txrd, txrd);
+ priv->eskbp = eskbp;
+
+ grcan_write_reg(&regs->rxaddr, rxaddr);
+ grcan_write_reg(&regs->rxsize, rxsize);
+ grcan_write_reg(&regs->rxwr, rxwr);
+ grcan_write_reg(&regs->rxrd, rxrd);
+
+ /* Turn on device again */
+ grcan_write_reg(&regs->imr, imr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE
+ | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+ ? GRCAN_TXCTRL_SINGLE : 0));
+ grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+ /* Start queue if there is space and listen-only mode is not
+ * enabled
+ */
+ if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ netdev_err(dev, "Device reset and restored\n");
+}
+
+/* Waiting time in usecs corresponding to the transmission of three maximum
+ * sized can frames at the given bitrate (in bits/sec). Waiting for this amount
+ * of time makes sure that the can controller has time to finish sending or
+ * receiving a frame with a good margin.
+ *
+ * usecs/sec * number of frames * bits/frame / bits/sec
+ */
+static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
+{
+ return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
+}
+
+/* Set timer so that it will not fire until after a period in which the can
+ * controller has a good margin to finish transmitting a frame unless it has
+ * hung
+ */
+static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
+{
+ u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));
+
+ mod_timer(timer, jiffies + wait_jiffies);
+}
+
+/* Disable channels and schedule a running reset */
+static void grcan_initiate_running_reset(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+
+ netdev_err(dev, "Device seems hanged - reset scheduled\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* The main body of this function must never be executed again
+ * until after an execution of grcan_running_reset
+ */
+ if (!priv->resetting && !priv->closing) {
+ priv->resetting = true;
+ netif_stop_queue(dev);
+ grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
+ grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void grcan_free_dma_buffers(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+
+ dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma->base_handle);
+ memset(dma, 0, sizeof(*dma));
+}
+
+static int grcan_allocate_dma_buffers(struct net_device *dev,
+ size_t tsize, size_t rsize)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+ struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
+ struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
+ size_t shift;
+
+ /* Need a whole multiple of GRCAN_BUFFER_ALIGNMENT for the large,
+ * i.e. the first, buffer
+ */
+ size_t maxs = max(tsize, rsize);
+ size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);
+
+ /* Put the small buffer after that */
+ size_t ssize = min(tsize, rsize);
+
+ /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
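+ /* Resulting layout within the single coherent allocation: up to
+ * GRCAN_BUFFER_ALIGNMENT - 1 bytes of padding, then the larger buffer
+ * (aligned), immediately followed by the smaller buffer.
+ */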
+ dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
+ dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_size,
+ &dma->base_handle,
+ GFP_KERNEL);
+
+ if (!dma->base_buf)
+ return -ENOMEM;
+
+ dma->tx.size = tsize;
+ dma->rx.size = rsize;
+
+ large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
+ small->handle = large->handle + lsize;
+ shift = large->handle - dma->base_handle;
+
+ large->buf = dma->base_buf + shift;
+ small->buf = large->buf + lsize;
+
+ return 0;
+}
+
+/* priv->lock *must* be held when calling this function */
+static int grcan_start(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ u32 confop, txctrl;
+
+ grcan_reset(dev);
+
+ grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
+ grcan_write_reg(&regs->txsize, priv->dma.tx.size);
+ /* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */
+
+ grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
+ grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
+ /* regs->rxwr and regs->rxrd already set to 0 by reset */
+
+ /* Enable interrupts */
+ grcan_read_reg(&regs->pir);
+ grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);
+
+ /* Enable interfaces, channels and device */
+ confop = GRCAN_CONF_ABORT
+ | (priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0)
+ | (priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0)
+ | (priv->config.select ? GRCAN_CONF_SELECT : 0)
+ | (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
+ GRCAN_CONF_SILENT : 0)
+ | (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+ GRCAN_CONF_SAM : 0);
+ grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
+ txctrl = GRCAN_TXCTRL_ENABLE
+ | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
+ ? GRCAN_TXCTRL_SINGLE : 0);
+ grcan_write_reg(&regs->txctrl, txctrl);
+ grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
+
+static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err = 0;
+
+ if (mode == CAN_MODE_START) {
+ /* This might be called to restart the device to recover from
+ * bus off errors
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->closing || priv->resetting) {
+ err = -EBUSY;
+ } else {
+ netdev_info(dev, "Restarting device\n");
+ grcan_start(dev);
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return err;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int grcan_open(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_dma *dma = &priv->dma;
+ unsigned long flags;
+ int err;
+
+ /* Allocate memory */
+ err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
+ priv->config.rxsize);
+ if (err) {
+ netdev_err(dev, "could not allocate DMA buffers\n");
+ return err;
+ }
+
+ priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb),
+ GFP_KERNEL);
+ if (!priv->echo_skb) {
+ err = -ENOMEM;
+ goto exit_free_dma_buffers;
+ }
+ priv->can.echo_skb_max = dma->tx.size;
+ priv->can.echo_skb = priv->echo_skb;
+
+ priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL);
+ if (!priv->txdlc) {
+ err = -ENOMEM;
+ goto exit_free_echo_skb;
+ }
+
+ /* Get can device up */
+ err = open_candev(dev);
+ if (err)
+ goto exit_free_txdlc;
+
+ err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (err)
+ goto exit_close_candev;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ napi_enable(&priv->napi);
+ grcan_start(dev);
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(dev);
+ priv->resetting = false;
+ priv->closing = false;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+
+exit_close_candev:
+ close_candev(dev);
+exit_free_txdlc:
+ kfree(priv->txdlc);
+exit_free_echo_skb:
+ kfree(priv->echo_skb);
+exit_free_dma_buffers:
+ grcan_free_dma_buffers(dev);
+ return err;
+}
+
+static int grcan_close(struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ napi_disable(&priv->napi);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->closing = true;
+ if (priv->need_txbug_workaround) {
+ del_timer_sync(&priv->hang_timer);
+ del_timer_sync(&priv->rr_timer);
+ }
+ netif_stop_queue(dev);
+ grcan_stop_hardware(dev);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ grcan_free_dma_buffers(dev);
+ priv->can.echo_skb_max = 0;
+ priv->can.echo_skb = NULL;
+ kfree(priv->echo_skb);
+ kfree(priv->txdlc);
+
+ return 0;
+}
+
+static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ int work_done;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ work_done = catch_up_echo_skb(dev, budget, true);
+ if (work_done) {
+ if (!priv->resetting && !priv->closing &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_wake_queue(dev);
+
+ /* With napi we don't get TX interrupts for a while,
+ * so prevent a running reset while catching up
+ */
+ if (priv->need_txbug_workaround)
+ del_timer(&priv->hang_timer);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return work_done;
+}
+
+static int grcan_receive(struct net_device *dev, int budget)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 wr, rd, startrd;
+ u32 *slot;
+ u32 i, rtr, eff, j, shift;
+ int work_done = 0;
+
+ rd = grcan_read_reg(&regs->rxrd);
+ startrd = rd;
+ for (work_done = 0; work_done < budget; work_done++) {
+ /* Check for packet to receive */
+ wr = grcan_read_reg(&regs->rxwr);
+ if (rd == wr)
+ break;
+
+ /* Take care of packet */
+ skb = alloc_can_skb(dev, &cf);
+ if (skb == NULL) {
+ netdev_err(dev,
+ "dropping frame: skb allocation failed\n");
+ stats->rx_dropped++;
+ continue;
+ }
+
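+ /* Decode the slot using the same layout that grcan_start_xmit
+ * writes: slot[0] holds IDE/RTR and the identifier, slot[1] the
+ * DLC, and slot[2..3] the packed data bytes.
+ */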
+ slot = dma->rx.buf + rd;
+ eff = slot[0] & GRCAN_MSG_IDE;
+ rtr = slot[0] & GRCAN_MSG_RTR;
+ if (eff) {
+ cf->can_id = ((slot[0] & GRCAN_MSG_EID)
+ >> GRCAN_MSG_EID_BIT);
+ cf->can_id |= CAN_EFF_FLAG;
+ } else {
+ cf->can_id = ((slot[0] & GRCAN_MSG_BID)
+ >> GRCAN_MSG_BID_BIT);
+ }
+ cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
+ >> GRCAN_MSG_DLC_BIT);
+ if (rtr) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++) {
+ j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+ shift = GRCAN_MSG_DATA_SHIFT(i);
+ cf->data[i] = (u8)(slot[j] >> shift);
+ }
+ }
+ netif_receive_skb(skb);
+
+ /* Update statistics and read pointer */
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
+ }
+
+ /* Make sure everything is read before allowing hardware to
+ * use the memory
+ */
+ mb();
+
+ /* Update read pointer - no need to check for ongoing */
+ if (likely(rd != startrd))
+ grcan_write_reg(&regs->rxrd, rd);
+
+ return work_done;
+}
+
+static int grcan_poll(struct napi_struct *napi, int budget)
+{
+ struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
+ struct net_device *dev = priv->dev;
+ struct grcan_registers __iomem *regs = priv->regs;
+ unsigned long flags;
+ int tx_work_done, rx_work_done;
+ int rx_budget = budget / 2;
+ int tx_budget = budget - rx_budget;
+
+ /* Half of the budget for receiving messages */
+ rx_work_done = grcan_receive(dev, rx_budget);
+
+ /* Half of the budget for transmitting messages as that can trigger echo
+ * frames being received
+ */
+ tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+
+ if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ napi_complete(napi);
+
+ /* Guarantee no interference with a running reset that otherwise
+ * could turn off interrupts.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Enable tx and rx interrupts again. No need to check
+ * priv->closing as napi_disable in grcan_close is waiting for
+ * scheduled napi calls to finish.
+ */
+ grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return rx_work_done + tx_work_done;
+}
+
+/* Work around the tx bug by waiting a while for the risky situation to clear.
+ * If that fails, drop a frame in one-shot mode or indicate a busy device
+ * otherwise.
+ *
+ * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
+ * value that should be returned by grcan_start_xmit when aborting the xmit.
+ */
+static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
+ u32 txwr, u32 oneshotmode,
+ netdev_tx_t *netdev_tx_status)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ int i;
+ unsigned long flags;
+
+ /* Wait a while for ongoing to be cleared or read pointer to catch up to
+ * write pointer. The latter is needed due to a bug in older versions of
+ * GRCAN in which ONGOING is not cleared properly in one-shot mode when a
+ * transmission fails.
+ */
+ for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
+ udelay(1);
+ if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
+ grcan_read_reg(&regs->txrd) == txwr) {
+ return 0;
+ }
+ }
+
+ /* Clean up, in case the situation was not resolved */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!priv->resetting && !priv->closing) {
+ /* Queue might have been stopped earlier in grcan_start_xmit */
+ if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
+ netif_wake_queue(dev);
+ /* Set a timer to resolve a hung tx controller */
+ if (!timer_pending(&priv->hang_timer))
+ grcan_reset_timer(&priv->hang_timer,
+ priv->can.bittiming.bitrate);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (oneshotmode) {
+ /* In one-shot mode we should never end up here because
+ * then the interrupt handler increases txrd on TXLOSS,
+ * but it is consistent with one-shot mode to drop the
+ * frame in this case.
+ */
+ kfree_skb(skb);
+ *netdev_tx_status = NETDEV_TX_OK;
+ } else {
+ /* In normal mode the socket-can transmission queue gets
+ * to keep the frame so that it can be retransmitted
+ * later
+ */
+ *netdev_tx_status = NETDEV_TX_BUSY;
+ }
+ return -EBUSY;
+}
+
+/* Notes on the tx cyclic buffer handling:
+ *
+ * regs->txwr - the next slot for the driver to put data to be sent
+ * regs->txrd - the next slot for the device to read data
+ * priv->eskbp - the next slot for the driver to call can_put_echo_skb for
+ *
+ * grcan_start_xmit can enter more messages as long as regs->txwr does
+ * not reach priv->eskbp (within 1 message gap)
+ *
+ * The device sends messages until regs->txrd reaches regs->txwr
+ *
+ * The interrupt handler calls can_get_echo_skb until
+ * priv->eskbp reaches regs->txrd
+ */
+static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_registers __iomem *regs = priv->regs;
+ struct grcan_dma *dma = &priv->dma;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 id, txwr, txrd, space, txctrl;
+ int slotindex;
+ u32 *slot;
+ u32 i, rtr, eff, dlc, tmp, err;
+ int j, shift;
+ unsigned long flags;
+ u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* Trying to transmit in silent mode will generate error interrupts, but
+ * this should never happen - the queue should not have been started.
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ return NETDEV_TX_BUSY;
+
+ /* Reads of priv->eskbp and shut-downs of the queue need to
+ * be atomic with respect to the updates to priv->eskbp and wake-ups
+ * of the queue in the interrupt handler.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ txwr = grcan_read_reg(&regs->txwr);
+ space = grcan_txspace(dma->tx.size, txwr, priv->eskbp);
+
+ slotindex = txwr / GRCAN_MSG_SIZE;
+ slot = dma->tx.buf + txwr;
+
+ if (unlikely(space == 1))
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /* End of critical section*/
+
+ /* This should never happen. If the circular buffer is full, the
+ * queue should already have been stopped by netif_stop_queue.
+ */
+ if (unlikely(!space)) {
+ netdev_err(dev, "No buffer space, but queue is non-stopped.\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Convert and write CAN message to DMA buffer */
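+ /* Slot layout: slot[0] holds the IDE/RTR flags and the identifier,
+ * slot[1] the DLC, and slot[2..3] the packed data bytes (see
+ * GRCAN_MSG_DATA_SLOT_INDEX and GRCAN_MSG_DATA_SHIFT).
+ */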
+ eff = cf->can_id & CAN_EFF_FLAG;
+ rtr = cf->can_id & CAN_RTR_FLAG;
+ id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
+ dlc = cf->can_dlc;
+ if (eff)
+ tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
+ else
+ tmp = (id << GRCAN_MSG_BID_BIT) & GRCAN_MSG_BID;
+ slot[0] = (eff ? GRCAN_MSG_IDE : 0) | (rtr ? GRCAN_MSG_RTR : 0) | tmp;
+
+ slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC);
+ slot[2] = 0;
+ slot[3] = 0;
+ for (i = 0; i < dlc; i++) {
+ j = GRCAN_MSG_DATA_SLOT_INDEX(i);
+ shift = GRCAN_MSG_DATA_SHIFT(i);
+ slot[j] |= cf->data[i] << shift;
+ }
+
+ /* Check that the channel has not been disabled. These cases
+ * should never happen
+ */
+ txctrl = grcan_read_reg(&regs->txctrl);
+ if (!(txctrl & GRCAN_TXCTRL_ENABLE))
+ netdev_err(dev, "tx channel spuriously disabled\n");
+
+ if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE))
+ netdev_err(dev, "one-shot mode spuriously disabled\n");
+
+ /* Bug workaround for old versions of grcan where updating txwr
+ * in the same clock cycle as the controller updates txrd to
+ * the current txwr could hang the can controller
+ */
+ if (priv->need_txbug_workaround) {
+ txrd = grcan_read_reg(&regs->txrd);
+ if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) {
+ netdev_tx_t txstatus;
+
+ err = grcan_txbug_workaround(dev, skb, txwr,
+ oneshotmode, &txstatus);
+ if (err)
+ return txstatus;
+ }
+ }
+
+ /* Prepare skb for echoing. This must be after the bug workaround above
+ * as ownership of the skb is passed on by calling can_put_echo_skb.
+ * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to
+ * can_put_echo_skb would be an error unless other measures are
+ * taken.
+ */
+ priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
+ can_put_echo_skb(skb, dev, slotindex);
+
+ /* Make sure everything is written before allowing hardware to
+ * read from the memory
+ */
+ wmb();
+
+ /* Update write pointer to start transmission */
+ grcan_write_reg(&regs->txwr,
+ grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size));
+
+ return NETDEV_TX_OK;
+}
+
+/* ========== Setting up sysfs interface and module parameters ========== */
+
+#define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1)
+
+#define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc) \
+ static void grcan_sanitize_##name(struct platform_device *pd) \
+ { \
+ struct grcan_device_config grcan_default_config \
+ = GRCAN_DEFAULT_DEVICE_CONFIG; \
+ if (valcheckf(grcan_module_config.name)) { \
+ dev_err(&pd->dev, \
+ "Invalid module parameter value for " \
+ #name " - setting default\n"); \
+ grcan_module_config.name = \
+ grcan_default_config.name; \
+ } \
+ } \
+ module_param_named(name, grcan_module_config.name, \
+ mtype, S_IRUGO); \
+ MODULE_PARM_DESC(name, desc)
+
+#define GRCAN_CONFIG_ATTR(name, desc) \
+ static ssize_t grcan_store_##name(struct device *sdev, \
+ struct device_attribute *att, \
+ const char *buf, \
+ size_t count) \
+ { \
+ struct net_device *dev = to_net_dev(sdev); \
+ struct grcan_priv *priv = netdev_priv(dev); \
+ u8 val; \
+ int ret; \
+ if (dev->flags & IFF_UP) \
+ return -EBUSY; \
+ ret = kstrtou8(buf, 0, &val); \
+ if (ret < 0 || val > 1) \
+ return -EINVAL; \
+ priv->config.name = val; \
+ return count; \
+ } \
+ static ssize_t grcan_show_##name(struct device *sdev, \
+ struct device_attribute *att, \
+ char *buf) \
+ { \
+ struct net_device *dev = to_net_dev(sdev); \
+ struct grcan_priv *priv = netdev_priv(dev); \
+ return sprintf(buf, "%d\n", priv->config.name); \
+ } \
+ static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
+ grcan_show_##name, \
+ grcan_store_##name); \
+ GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
+
+/* The following configuration options are made available both via module
+ * parameters and writable sysfs files. See the chapter about GRCAN in the
+ * documentation for the GRLIB VHDL library for further details.
+ */
+GRCAN_CONFIG_ATTR(enable0,
+ "Configuration of physical interface 0. Determines\n" \
+ "the \"Enable 0\" bit of the configuration register.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(enable1,
+ "Configuration of physical interface 1. Determines\n" \
+ "the \"Enable 1\" bit of the configuration register.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+GRCAN_CONFIG_ATTR(select,
+ "Select which physical interface to use.\n" \
+ "Format: 0 | 1\nDefault: 0\n");
+
+/* The tx and rx buffer size configuration options are only available via module
+ * parameters.
+ */
+GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+ "Sets the size of the tx buffer.\n" \
+ "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \
+ "Default: 1024\n");
+GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE,
+ "Sets the size of the rx buffer.\n" \
+ "Format: <unsigned int> where (size & ~0x1fffc0) == 0\n" \
+ "Default: 1024\n");
+
+/* Function that makes sure that the configuration done using
+ * module parameters is set to valid values
+ */
+static void grcan_sanitize_module_config(struct platform_device *ofdev)
+{
+ grcan_sanitize_enable0(ofdev);
+ grcan_sanitize_enable1(ofdev);
+ grcan_sanitize_select(ofdev);
+ grcan_sanitize_txsize(ofdev);
+ grcan_sanitize_rxsize(ofdev);
+}
+
+static const struct attribute *const sysfs_grcan_attrs[] = {
+ /* Config attrs */
+ &dev_attr_enable0.attr,
+ &dev_attr_enable1.attr,
+ &dev_attr_select.attr,
+ NULL,
+};
+
+static const struct attribute_group sysfs_grcan_group = {
+ .name = "grcan",
+ .attrs = (struct attribute **)sysfs_grcan_attrs,
+};
+
+/* ========== Setting up the driver ========== */
+
+static const struct net_device_ops grcan_netdev_ops = {
+ .ndo_open = grcan_open,
+ .ndo_stop = grcan_close,
+ .ndo_start_xmit = grcan_start_xmit,
+};
+
+static int grcan_setup_netdev(struct platform_device *ofdev,
+ void __iomem *base,
+ int irq, u32 ambafreq, bool txbug)
+{
+ struct net_device *dev;
+ struct grcan_priv *priv;
+ struct grcan_registers __iomem *regs;
+ int err;
+
+ dev = alloc_candev(sizeof(struct grcan_priv), 0);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->flags |= IFF_ECHO;
+ dev->netdev_ops = &grcan_netdev_ops;
+ dev->sysfs_groups[0] = &sysfs_grcan_group;
+
+ priv = netdev_priv(dev);
+ memcpy(&priv->config, &grcan_module_config,
+ sizeof(struct grcan_device_config));
+ priv->dev = dev;
+ priv->regs = base;
+ priv->can.bittiming_const = &grcan_bittiming_const;
+ priv->can.do_set_bittiming = grcan_set_bittiming;
+ priv->can.do_set_mode = grcan_set_mode;
+ priv->can.do_get_berr_counter = grcan_get_berr_counter;
+ priv->can.clock.freq = ambafreq;
+ priv->can.ctrlmode_supported =
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT;
+ priv->need_txbug_workaround = txbug;
+
+ /* Discover if triple sampling is supported by hardware */
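+ /* The SAM bit is written after asserting reset and then read back; on
+ * cores without triple-sampling support the bit does not stick, so a
+ * successful read-back indicates that the feature is available.
+ */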
+ regs = priv->regs;
+ grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
+ grcan_set_bits(&regs->conf, GRCAN_CONF_SAM);
+ if (grcan_read_bits(&regs->conf, GRCAN_CONF_SAM)) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n");
+ }
+
+ spin_lock_init(&priv->lock);
+
+ if (priv->need_txbug_workaround) {
+ init_timer(&priv->rr_timer);
+ priv->rr_timer.function = grcan_running_reset;
+ priv->rr_timer.data = (unsigned long)dev;
+
+ init_timer(&priv->hang_timer);
+ priv->hang_timer.function = grcan_initiate_running_reset;
+ priv->hang_timer.data = (unsigned long)dev;
+ }
+
+ netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+ dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
+ priv->regs, dev->irq, priv->can.clock.freq);
+
+ err = register_candev(dev);
+ if (err)
+ goto exit_free_candev;
+
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* Reset device to allow bit-timing to be set. No need to call
+ * grcan_reset at this stage. That is done in grcan_open.
+ */
+ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_RESET);
+
+ return 0;
+exit_free_candev:
+ free_candev(dev);
+ return err;
+}
+
+static int grcan_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource *res;
+ u32 sysid, ambafreq;
+ int irq, err;
+ void __iomem *base;
+ bool txbug = true;
+
+ /* Compare GRLIB version number with the first that does not
+ * have the tx bug (see start_xmit)
+ */
+ err = of_property_read_u32(np, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK)
+ >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+
+ err = of_property_read_u32(np, "freq", &ambafreq);
+ if (err) {
+ dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n");
+ goto exit_error;
+ }
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ base = devm_request_and_ioremap(&ofdev->dev, res);
+ if (!base) {
+ dev_err(&ofdev->dev, "couldn't map IO resource\n");
+ err = -EADDRNOTAVAIL;
+ goto exit_error;
+ }
+
+ irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ);
+ if (!irq) {
+ dev_err(&ofdev->dev, "no irq found\n");
+ err = -ENODEV;
+ goto exit_error;
+ }
+
+ grcan_sanitize_module_config(ofdev);
+
+ err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug);
+ if (err)
+ goto exit_dispose_irq;
+
+ return 0;
+
+exit_dispose_irq:
+ irq_dispose_mapping(irq);
+exit_error:
+ dev_err(&ofdev->dev,
+ "%s socket CAN driver initialization failed with error %d\n",
+ DRV_NAME, err);
+ return err;
+}
+
+static int grcan_remove(struct platform_device *ofdev)
+{
+ struct net_device *dev = dev_get_drvdata(&ofdev->dev);
+ struct grcan_priv *priv = netdev_priv(dev);
+
+ unregister_candev(dev); /* Will in turn call grcan_close */
+
+ irq_dispose_mapping(dev->irq);
+ dev_set_drvdata(&ofdev->dev, NULL);
+ netif_napi_del(&priv->napi);
+ free_candev(dev);
+
+ return 0;
+}
+
+static struct of_device_id grcan_match[] = {
+ {.name = "GAISLER_GRCAN"},
+ {.name = "01_03d"},
+ {.name = "GAISLER_GRHCAN"},
+ {.name = "01_034"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, grcan_match);
+
+static struct platform_driver grcan_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = grcan_match,
+ },
+ .probe = grcan_probe,
+ .remove = grcan_remove,
+};
+
+module_platform_driver(grcan_driver);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 7edadee487ba..c4bc1d2e2033 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -365,7 +365,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
* ICAN3 "new-style" Host Interface Setup
*/
-static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
+static void ican3_init_new_host_interface(struct ican3_dev *mod)
{
struct ican3_new_desc desc;
unsigned long flags;
@@ -444,7 +444,7 @@ static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
* ICAN3 Fast Host Interface Setup
*/
-static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
+static void ican3_init_fast_host_interface(struct ican3_dev *mod)
{
struct ican3_fast_desc desc;
unsigned long flags;
@@ -631,7 +631,7 @@ static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
* Quick Pre-constructed Messages
*/
-static int __devinit ican3_msg_connect(struct ican3_dev *mod)
+static int ican3_msg_connect(struct ican3_dev *mod)
{
struct ican3_msg msg;
@@ -642,7 +642,7 @@ static int __devinit ican3_msg_connect(struct ican3_dev *mod)
return ican3_send_msg(mod, &msg);
}
-static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
+static int ican3_msg_disconnect(struct ican3_dev *mod)
{
struct ican3_msg msg;
@@ -653,7 +653,7 @@ static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
return ican3_send_msg(mod, &msg);
}
-static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
+static int ican3_msg_newhostif(struct ican3_dev *mod)
{
struct ican3_msg msg;
int ret;
@@ -674,7 +674,7 @@ static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
return 0;
}
-static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
+static int ican3_msg_fasthostif(struct ican3_dev *mod)
{
struct ican3_msg msg;
unsigned int addr;
@@ -707,7 +707,7 @@ static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
* Setup the CAN filter to either accept or reject all
* messages from the CAN bus.
*/
-static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
+static int ican3_set_id_filter(struct ican3_dev *mod, bool accept)
{
struct ican3_msg msg;
int ret;
@@ -1421,7 +1421,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
return -ETIMEDOUT;
}
-static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
+static void ican3_shutdown_module(struct ican3_dev *mod)
{
ican3_msg_disconnect(mod);
ican3_reset_module(mod);
@@ -1430,7 +1430,7 @@ static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
/*
* Startup an ICAN module, bringing it into fast mode
*/
-static int __devinit ican3_startup_module(struct ican3_dev *mod)
+static int ican3_startup_module(struct ican3_dev *mod)
{
int ret;
@@ -1692,7 +1692,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
return ret;
ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
- if (ret <= 0) {
+ if (ret == 0) {
dev_info(mod->dev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1718,7 +1718,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
return ret;
ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
- if (ret <= 0) {
+ if (ret == 0) {
dev_info(mod->dev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1760,7 +1760,7 @@ static struct attribute_group ican3_sysfs_attr_group = {
* PCI Subsystem
*/
-static int __devinit ican3_probe(struct platform_device *pdev)
+static int ican3_probe(struct platform_device *pdev)
{
struct janz_platform_data *pdata;
struct net_device *ndev;
@@ -1898,7 +1898,7 @@ out_return:
return ret;
}
-static int __devexit ican3_remove(struct platform_device *pdev)
+static int ican3_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ican3_dev *mod = netdev_priv(ndev);
@@ -1927,7 +1927,7 @@ static struct platform_driver ican3_driver = {
.owner = THIS_MODULE,
},
.probe = ican3_probe,
- .remove = __devexit_p(ican3_remove),
+ .remove = ican3_remove,
};
module_platform_driver(ican3_driver);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 26e7129332ab..5eaf47b8e37b 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -981,7 +981,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_start_xmit = mcp251x_hard_start_xmit,
};
-static int __devinit mcp251x_can_probe(struct spi_device *spi)
+static int mcp251x_can_probe(struct spi_device *spi)
{
struct net_device *net;
struct mcp251x_priv *priv;
@@ -1100,7 +1100,7 @@ error_out:
return ret;
}
-static int __devexit mcp251x_can_remove(struct spi_device *spi)
+static int mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1198,7 +1198,7 @@ static struct spi_driver mcp251x_can_driver = {
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
- .remove = __devexit_p(mcp251x_can_remove),
+ .remove = mcp251x_can_remove,
.suspend = mcp251x_can_suspend,
.resume = mcp251x_can_resume,
};
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 799c354083c4..668850e441dc 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -43,14 +43,13 @@ struct mpc5xxx_can_data {
};
#ifdef CONFIG_PPC_MPC52xx
-static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
+static struct of_device_id mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
- const char *clock_name,
- int *mscan_clksrc)
+static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
+ const char *clock_name, int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -101,9 +100,8 @@ static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
return freq;
}
#else /* !CONFIG_PPC_MPC52xx */
-static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
- const char *clock_name,
- int *mscan_clksrc)
+static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
+ const char *clock_name, int *mscan_clksrc)
{
return 0;
}
@@ -124,14 +122,13 @@ struct mpc512x_clockctl {
u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
};
-static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+static struct of_device_id mpc512x_clock_ids[] = {
{ .compatible = "fsl,mpc5121-clock", },
{}
};
-static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
- const char *clock_name,
- int *mscan_clksrc)
+static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
+ const char *clock_name, int *mscan_clksrc)
{
struct mpc512x_clockctl __iomem *clockctl;
struct device_node *np_clock;
@@ -239,16 +236,15 @@ exit_put:
return freq;
}
#else /* !CONFIG_PPC_MPC512x */
-static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
- const char *clock_name,
- int *mscan_clksrc)
+static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
+ const char *clock_name, int *mscan_clksrc)
{
return 0;
}
#endif /* CONFIG_PPC_MPC512x */
static const struct of_device_id mpc5xxx_can_table[];
-static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
+static int mpc5xxx_can_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
const struct mpc5xxx_can_data *data;
@@ -323,7 +319,7 @@ exit_unmap_mem:
return err;
}
-static int __devexit mpc5xxx_can_remove(struct platform_device *ofdev)
+static int mpc5xxx_can_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
@@ -380,22 +376,23 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev)
}
#endif
-static const struct mpc5xxx_can_data __devinitconst mpc5200_can_data = {
+static const struct mpc5xxx_can_data mpc5200_can_data = {
.type = MSCAN_TYPE_MPC5200,
.get_clock = mpc52xx_can_get_clock,
};
-static const struct mpc5xxx_can_data __devinitconst mpc5121_can_data = {
+static const struct mpc5xxx_can_data mpc5121_can_data = {
.type = MSCAN_TYPE_MPC5121,
.get_clock = mpc512x_can_get_clock,
};
-static const struct of_device_id __devinitconst mpc5xxx_can_table[] = {
+static const struct of_device_id mpc5xxx_can_table[] = {
{ .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
/* Note that only MPC5121 Rev. 2 (and later) is supported */
{ .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
+MODULE_DEVICE_TABLE(of, mpc5xxx_can_table);
static struct platform_driver mpc5xxx_can_driver = {
.driver = {
@@ -404,7 +401,7 @@ static struct platform_driver mpc5xxx_can_driver = {
.of_match_table = mpc5xxx_can_table,
},
.probe = mpc5xxx_can_probe,
- .remove = __devexit_p(mpc5xxx_can_remove),
+ .remove = mpc5xxx_can_remove,
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 2b104d5f422c..e6b40954e204 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -517,12 +517,8 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
- struct mscan_priv *priv = netdev_priv(dev);
int ret = 0;
- if (!priv->open_time)
- return -EINVAL;
-
switch (mode) {
case CAN_MODE_START:
ret = mscan_restart(dev);
@@ -590,8 +586,6 @@ static int mscan_open(struct net_device *dev)
goto exit_napi_disable;
}
- priv->open_time = jiffies;
-
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
setbits8(&regs->canctl1, MSCAN_LISTEN);
else
@@ -606,7 +600,6 @@ static int mscan_open(struct net_device *dev)
return 0;
exit_free_irq:
- priv->open_time = 0;
free_irq(dev->irq, dev);
exit_napi_disable:
napi_disable(&priv->napi);
@@ -627,7 +620,6 @@ static int mscan_close(struct net_device *dev)
mscan_set_mode(dev, MSCAN_INIT_MODE);
close_candev(dev);
free_irq(dev->irq, dev);
- priv->open_time = 0;
return 0;
}
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index b43e9f5d3268..af2ed8baf0a3 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -281,7 +281,6 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
unsigned int type; /* MSCAN type variants */
- long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
u8 shadow_statflg;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 48b3d62b34cb..5c314a961970 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
stats->rx_errors++;
break;
case PCH_CRC_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
@@ -954,7 +954,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_start_xmit = pch_xmit,
};
-static void __devexit pch_can_remove(struct pci_dev *pdev)
+static void pch_can_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct pch_can_priv *priv = netdev_priv(ndev);
@@ -1178,7 +1178,7 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
return 0;
}
-static int __devinit pch_can_probe(struct pci_dev *pdev,
+static int pch_can_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct net_device *ndev;
@@ -1269,7 +1269,7 @@ static struct pci_driver pch_can_pci_driver = {
.name = "pch_can",
.id_table = pch_pci_tbl,
.probe = pch_can_probe,
- .remove = __devexit_p(pch_can_remove),
+ .remove = pch_can_remove,
.suspend = pch_can_suspend,
.resume = pch_can_resume,
};
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 03df9a8f2bbf..92f73c708a3d 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -21,7 +21,7 @@ config CAN_SJA1000_PLATFORM
config CAN_SJA1000_OF_PLATFORM
tristate "Generic OF Platform Bus based SJA1000 driver"
- depends on PPC_OF
+ depends on OF
---help---
This driver adds support for the SJA1000 chips connected to
the OpenFirmware "platform bus" found on embedded systems with
@@ -93,6 +93,7 @@ config CAN_PLX_PCI
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
- IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
+ - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5c6d412bafb5..036a326836b2 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -220,8 +220,8 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
-static int __devinit ems_pci_add_card(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ems_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct net_device *dev;
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 075a5457a190..5c2f3fbbf5ae 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -166,8 +166,7 @@ static void ems_pcmcia_del_card(struct pcmcia_device *pdev)
* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
-static int __devinit ems_pcmcia_add_card(struct pcmcia_device *pdev,
- unsigned long base)
+static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
{
struct sja1000_priv *priv;
struct net_device *dev;
@@ -256,7 +255,7 @@ failure_cleanup:
/*
* Setup PCMCIA socket and probe for EMS CPC-CARD
*/
-static int __devinit ems_pcmcia_probe(struct pcmcia_device *dev)
+static int ems_pcmcia_probe(struct pcmcia_device *dev)
{
int csval;
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 23ed6ea4c7c3..37b0381f532e 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -290,8 +290,8 @@ failure:
return err;
}
-static int __devinit kvaser_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int kvaser_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int err;
struct net_device *master_dev = NULL;
@@ -379,7 +379,7 @@ failure:
}
-static void __devexit kvaser_pci_remove_one(struct pci_dev *pdev)
+static void kvaser_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -394,7 +394,7 @@ static struct pci_driver kvaser_pci_driver = {
.name = DRV_NAME,
.id_table = kvaser_pci_tbl,
.probe = kvaser_pci_init_one,
- .remove = __devexit_p(kvaser_pci_remove_one),
+ .remove = kvaser_pci_remove_one,
};
module_pci_driver(kvaser_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6525dbcca4e3..d84888f03d92 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,8 +551,7 @@ static void peak_pci_post_irq(const struct sja1000_priv *priv)
writew(chan->icr_mask, chan->cfg_base + PITA_ICR);
}
-static int __devinit peak_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
@@ -717,7 +716,7 @@ failure_disable_pci:
return err;
}
-static void __devexit peak_pci_remove(struct pci_dev *pdev)
+static void peak_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
struct sja1000_priv *priv = netdev_priv(dev);
@@ -757,7 +756,7 @@ static struct pci_driver peak_pci_driver = {
.name = DRV_NAME,
.id_table = peak_pci_tbl,
.probe = peak_pci_probe,
- .remove = __devexit_p(peak_pci_remove),
+ .remove = peak_pci_remove,
};
module_pci_driver(peak_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 272a85f32b14..f1175142b0a0 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -632,7 +632,7 @@ static void pcan_free(struct pcmcia_device *pdev)
/*
* setup PCMCIA socket and probe for PEAK-System PC-CARD
*/
-static int __devinit pcan_probe(struct pcmcia_device *pdev)
+static int pcan_probe(struct pcmcia_device *pdev)
{
struct pcan_pccard *card;
int err;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8bc95982840f..11d1062a9449 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -44,6 +44,7 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"esd CAN-PCI/CPCI/PCI104/200, "
"esd CAN-PCI/PMC/266, "
"esd CAN-PCIe/2000, "
+ "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
"IXXAT PC-I 04/PCI")
MODULE_LICENSE("GPL v2");
@@ -131,6 +132,9 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+#define CTI_PCI_VENDOR_ID 0x12c4
+#define CTI_PCI_DEVICE_ID_CRG001 0x0900
+
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
@@ -158,7 +162,7 @@ struct plx_pci_card_info {
void (*reset_func)(struct pci_dev *pdev);
};
-static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_adlink = {
"Adlink PCI-7841/cPCI-7841", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
@@ -166,7 +170,7 @@ static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
/* based on PLX9052 */
};
-static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_adlink_se = {
"Adlink PCI-7841/cPCI-7841 SE", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
@@ -174,7 +178,7 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
/* based on PLX9052 */
};
-static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_esd200 = {
"esd CAN-PCI/CPCI/PCI104/200", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -182,7 +186,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
/* based on PLX9030/9050 */
};
-static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_esd266 = {
"esd CAN-PCI/PMC/266", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -190,7 +194,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
/* based on PLX9056 */
};
-static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_esd2000 = {
"esd CAN-PCIe/2000", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -198,7 +202,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
/* based on PEX8311 */
};
-static struct plx_pci_card_info plx_pci_card_info_ixxat __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_ixxat = {
"IXXAT PC-I 04/PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x200, 0x80} },
@@ -206,7 +210,7 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat __devinitdata = {
/* based on PLX9050 */
};
-static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_marathon = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
@@ -214,7 +218,7 @@ static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
/* based on PLX9052 */
};
-static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
+static struct plx_pci_card_info plx_pci_card_info_tews = {
"TEWS TECHNOLOGIES TPMC810", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
@@ -222,6 +226,14 @@ static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_cti = {
+ "Connect Tech Inc. CANpro/104-Plus Opto (CRG001)", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -300,6 +312,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_tews
},
+ {
+ /* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_cti
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
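The new CRG001 table entry matches the generic PLX PCI9030 bridge IDs and narrows the match with the card's subsystem vendor/device IDs; its last field stores a pointer to plx_pci_card_info_cti as driver_data. A minimal sketch of how a probe routine recovers that per-card description (illustrative only, not the driver's actual probe; the first struct member is assumed to be called "name" based on the initializers above):

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		/* .driver_data carries the struct plx_pci_card_info pointer */
		struct plx_pci_card_info *ci = (struct plx_pci_card_info *)ent->driver_data;

		dev_info(&pdev->dev, "matched card: %s\n", ci->name);
		return 0;
	}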
@@ -465,8 +484,8 @@ static void plx_pci_del_card(struct pci_dev *pdev)
* Probe PLX90xx based device for the SJA1000 chips and register each
* available CAN channel to SJA1000 Socket-CAN subsystem.
*/
-static int __devinit plx_pci_add_card(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int plx_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct net_device *dev;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 25011dbe1b96..83ee11eca0e2 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -188,11 +188,6 @@ static void sja1000_start(struct net_device *dev)
static int sja1000_set_mode(struct net_device *dev, enum can_mode mode)
{
- struct sja1000_priv *priv = netdev_priv(dev);
-
- if (!priv->open_time)
- return -EINVAL;
-
switch (mode) {
case CAN_MODE_START:
sja1000_start(dev);
@@ -579,7 +574,6 @@ static int sja1000_open(struct net_device *dev)
/* init and start chip */
sja1000_start(dev);
- priv->open_time = jiffies;
netif_start_queue(dev);
@@ -598,8 +592,6 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
- priv->open_time = 0;
-
return 0;
}
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 23fff06875f5..afa99847a510 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -152,7 +152,6 @@
*/
struct sja1000_priv {
struct can_priv can; /* must be the first member */
- int open_time;
struct sk_buff *echo_skb;
/* the lower-layer is responsible for appropriate locking */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 90c5c2dfd2fd..5c8da4661489 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -42,11 +42,11 @@ MODULE_LICENSE("GPL v2");
static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
-static int __devinitdata irq[MAXDEV];
-static int __devinitdata clk[MAXDEV];
-static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
-static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
-static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static int irq[MAXDEV];
+static int clk[MAXDEV];
+static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -117,7 +117,7 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
outb(val, base + 1);
}
-static int __devinit sja1000_isa_probe(struct platform_device *pdev)
+static int sja1000_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sja1000_priv *priv;
@@ -223,7 +223,7 @@ static int __devinit sja1000_isa_probe(struct platform_device *pdev)
return err;
}
-static int __devexit sja1000_isa_remove(struct platform_device *pdev)
+static int sja1000_isa_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
@@ -248,7 +248,7 @@ static int __devexit sja1000_isa_remove(struct platform_device *pdev)
static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
- .remove = __devexit_p(sja1000_isa_remove),
+ .remove = sja1000_isa_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index f2683eb6a3d5..6433b81256cd 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -42,6 +42,8 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/prom.h>
#include "sja1000.h"
@@ -59,16 +61,16 @@ MODULE_LICENSE("GPL v2");
static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
{
- return in_8(priv->reg_base + reg);
+ return ioread8(priv->reg_base + reg);
}
static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
int reg, u8 val)
{
- out_8(priv->reg_base + reg, val);
+ iowrite8(val, priv->reg_base + reg);
}
-static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
+static int sja1000_ofp_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
@@ -88,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
return 0;
}
-static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+static int sja1000_ofp_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
@@ -119,7 +121,7 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
}
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (irq == 0) {
dev_err(&ofdev->dev, "no irq found\n");
err = -ENODEV;
goto exit_unmap_mem;
@@ -204,7 +206,7 @@ exit_release_mem:
return err;
}
-static struct of_device_id __devinitdata sja1000_ofp_table[] = {
+static struct of_device_id sja1000_ofp_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
@@ -217,7 +219,7 @@ static struct platform_driver sja1000_ofp_driver = {
.of_match_table = sja1000_ofp_table,
},
.probe = sja1000_ofp_probe,
- .remove = __devexit_p(sja1000_ofp_remove),
+ .remove = sja1000_ofp_remove,
};
module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 662c5f7eb0c5..21619bb5b869 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -34,6 +34,7 @@
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 9756099a883a..76513dd780c7 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -71,7 +71,7 @@ MODULE_LICENSE("GPL");
#define TSCAN1_SJA1000_XTAL 16000000
/* SJA1000 IO base addresses */
-static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+static const unsigned short tscan1_sja1000_addresses[] = {
0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
};
@@ -88,7 +88,7 @@ static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
}
/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
-static int __devinit tscan1_probe(struct device *dev, unsigned id)
+static int tscan1_probe(struct device *dev, unsigned id)
{
struct net_device *netdev;
struct sja1000_priv *priv;
@@ -171,7 +171,7 @@ static int __devinit tscan1_probe(struct device *dev, unsigned id)
return -ENXIO;
}
-static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+static int tscan1_remove(struct device *dev, unsigned id /*unused*/)
{
struct net_device *netdev;
struct sja1000_priv *priv;
@@ -197,7 +197,7 @@ static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
static struct isa_driver tscan1_isa_driver = {
.probe = tscan1_probe,
- .remove = __devexit_p(tscan1_remove),
+ .remove = tscan1_remove,
.driver = {
.name = "tscan1",
},
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index c0e1b1eb87a9..c2c0a5bb0b21 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -159,7 +159,7 @@ MODULE_FIRMWARE(fw_dir "bcard2.bin");
MODULE_FIRMWARE(fw_dir "ldcard2.bin");
MODULE_FIRMWARE(fw_dir "cancrd2.bin");
-static __devinit const struct softing_platform_data
+static const struct softing_platform_data
*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
{
const struct softing_platform_data *lp;
@@ -193,8 +193,7 @@ static int softingcs_enable_irq(struct platform_device *pdev, int v)
/*
* pcmcia check
*/
-static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
- void *priv_data)
+static int softingcs_probe_config(struct pcmcia_device *pcmcia, void *priv_data)
{
struct softing_platform_data *pdat = priv_data;
struct resource *pres;
@@ -215,7 +214,7 @@ static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
return pcmcia_request_window(pcmcia, pres, memspeed);
}
-static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+static void softingcs_remove(struct pcmcia_device *pcmcia)
{
struct platform_device *pdev = pcmcia->priv;
@@ -235,7 +234,7 @@ static void softingcs_pdev_release(struct device *dev)
kfree(pdev);
}
-static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+static int softingcs_probe(struct pcmcia_device *pcmcia)
{
int ret;
struct platform_device *pdev;
@@ -338,7 +337,7 @@ static struct pcmcia_driver softingcs_driver = {
.name = "softingcs",
.id_table = softingcs_ids,
.probe = softingcs_probe,
- .remove = __devexit_p(softingcs_remove),
+ .remove = softingcs_remove,
};
static int __init softingcs_start(void)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index f2a221e7b968..3a2b45601ec2 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -478,7 +478,7 @@ static void softing_card_shutdown(struct softing *card)
mutex_unlock(&card->fw.lock);
}
-static __devinit int softing_card_boot(struct softing *card)
+static int softing_card_boot(struct softing *card)
{
int ret, j;
static const uint8_t stream[] = {
@@ -645,8 +645,8 @@ static const struct can_bittiming_const softing_btr_const = {
};
-static __devinit struct net_device *softing_netdev_create(struct softing *card,
- uint16_t chip_id)
+static struct net_device *softing_netdev_create(struct softing *card,
+ uint16_t chip_id)
{
struct net_device *netdev;
struct softing_priv *priv;
@@ -676,7 +676,7 @@ static __devinit struct net_device *softing_netdev_create(struct softing *card,
return netdev;
}
-static __devinit int softing_netdev_register(struct net_device *netdev)
+static int softing_netdev_register(struct net_device *netdev)
{
int ret;
@@ -745,7 +745,7 @@ static const struct attribute_group softing_pdev_group = {
/*
* platform driver
*/
-static __devexit int softing_pdev_remove(struct platform_device *pdev)
+static int softing_pdev_remove(struct platform_device *pdev)
{
struct softing *card = platform_get_drvdata(pdev);
int j;
@@ -766,7 +766,7 @@ static __devexit int softing_pdev_remove(struct platform_device *pdev)
return 0;
}
-static __devinit int softing_pdev_probe(struct platform_device *pdev)
+static int softing_pdev_probe(struct platform_device *pdev)
{
const struct softing_platform_data *pdat = pdev->dev.platform_data;
struct softing *card;
@@ -871,7 +871,7 @@ static struct platform_driver softing_driver = {
.owner = THIS_MODULE,
},
.probe = softing_pdev_probe,
- .remove = __devexit_p(softing_pdev_remove),
+ .remove = softing_pdev_remove,
};
module_platform_driver(softing_driver);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9ded21e79db5..300581b24ff3 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
if (err_status & HECC_CANES_CRCE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
}
if (err_status & HECC_CANES_ACKE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL;
}
}
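The two hunks above move the CRC and ACK location codes from data[2] to data[3]: in a Socket-CAN error frame, data[2] carries the CAN_ERR_PROT_* type bits while data[3] carries the CAN_ERR_PROT_LOC_* location bits. A short sketch of filling both fields correctly (illustrative values, not ti_hecc code):

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_FORM;           /* what kind of error */
	cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;    /* where it was seen  */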
@@ -978,7 +978,7 @@ probe_exit:
return err;
}
-static int __devexit ti_hecc_remove(struct platform_device *pdev)
+static int ti_hecc_remove(struct platform_device *pdev)
{
struct resource *res;
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1045,7 +1045,7 @@ static struct platform_driver ti_hecc_driver = {
.owner = THIS_MODULE,
},
.probe = ti_hecc_probe,
- .remove = __devexit_p(ti_hecc_remove),
+ .remove = ti_hecc_remove,
.suspend = ti_hecc_suspend,
.resume = ti_hecc_resume,
};
@@ -1055,3 +1055,4 @@ module_platform_driver(ti_hecc_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 0a6876841c20..a4e4bee35710 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,6 +13,35 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_KVASER_USB
+ tristate "Kvaser CAN/USB interface"
+ ---help---
+ This driver adds support for Kvaser CAN/USB devices like Kvaser
+ Leaf Light.
+
+ The driver gives support for the following devices:
+ - Kvaser Leaf Light
+ - Kvaser Leaf Professional HS
+ - Kvaser Leaf SemiPro HS
+ - Kvaser Leaf Professional LS
+ - Kvaser Leaf Professional SWC
+ - Kvaser Leaf Professional LIN
+ - Kvaser Leaf SemiPro LS
+ - Kvaser Leaf SemiPro SWC
+ - Kvaser Memorator II HS/HS
+ - Kvaser USBcan Professional HS/HS
+ - Kvaser Leaf Light GI
+ - Kvaser Leaf Professional HS (OBD-II connector)
+ - Kvaser Memorator Professional HS/LS
+ - Kvaser Leaf Light "China"
+ - Kvaser BlackBird SemiPro
+ - Kvaser USBcan R
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called kvaser_usb.
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces"
---help---
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index da6d1d3b2969..80a2ee41fd61 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
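With the Kconfig entry and the Makefile rule above in place, the new driver builds once CAN_KVASER_USB is selected. A minimal .config fragment, assuming the usual CAN core options are also built as modules:

	CONFIG_CAN=m
	CONFIG_CAN_RAW=m
	CONFIG_CAN_DEV=m
	CONFIG_CAN_KVASER_USB=m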
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 086fa321677a..c69f0b72b352 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -245,7 +245,6 @@ struct ems_tx_urb_context {
struct ems_usb {
struct can_priv can; /* must be the first member */
- int open_time;
struct sk_buff *echo_skb[MAX_TX_URBS];
@@ -728,7 +727,6 @@ static int ems_usb_open(struct net_device *netdev)
return err;
}
- dev->open_time = jiffies;
netif_start_queue(netdev);
@@ -878,8 +876,6 @@ static int ems_usb_close(struct net_device *netdev)
close_candev(netdev);
- dev->open_time = 0;
-
return 0;
}
@@ -905,9 +901,6 @@ static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
struct ems_usb *dev = netdev_priv(netdev);
- if (!dev->open_time)
- return -EINVAL;
-
switch (mode) {
case CAN_MODE_START:
if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index bd36e5517173..9b74d1e3ad44 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1,7 +1,7 @@
/*
- * CAN driver for esd CAN-USB/2
+ * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
@@ -28,14 +28,16 @@
#include <linux/can/error.h>
MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
+MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
/* Define these values to match your devices */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
+#define USB_CANUSBM_PRODUCT_ID 0x0011
#define ESD_USB2_CAN_CLOCK 60000000
+#define ESD_USBM_CAN_CLOCK 36000000
#define ESD_USB2_MAX_NETS 2
/* USB2 commands */
@@ -69,6 +71,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_USB2_TSEG2_SHIFT 20
#define ESD_USB2_SJW_MAX 4
#define ESD_USB2_SJW_SHIFT 14
+#define ESD_USBM_SJW_SHIFT 24
#define ESD_USB2_BRP_MIN 1
#define ESD_USB2_BRP_MAX 1024
#define ESD_USB2_BRP_INC 1
@@ -183,6 +186,7 @@ struct __attribute__ ((packed)) esd_usb2_msg {
static struct usb_device_id esd_usb2_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
+ {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
MODULE_DEVICE_TABLE(usb, esd_usb2_table);
@@ -213,7 +217,6 @@ struct esd_usb2_net_priv {
struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
- int open_time;
struct esd_usb2 *usb2;
struct net_device *netdev;
int index;
@@ -691,8 +694,6 @@ static int esd_usb2_open(struct net_device *netdev)
return err;
}
- priv->open_time = jiffies;
-
netif_start_queue(netdev);
return 0;
@@ -860,8 +861,6 @@ static int esd_usb2_close(struct net_device *netdev)
close_candev(netdev);
- priv->open_time = 0;
-
return 0;
}
@@ -889,11 +888,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
struct can_bittiming *bt = &priv->can.bittiming;
struct esd_usb2_msg msg;
u32 canbtr;
+ int sjw_shift;
canbtr = ESD_USB2_UBR;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ canbtr |= ESD_USB2_LOM;
+
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
+
+ if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ USB_CANUSBM_PRODUCT_ID)
+ sjw_shift = ESD_USBM_SJW_SHIFT;
+ else
+ sjw_shift = ESD_USB2_SJW_SHIFT;
+
canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
- << ESD_USB2_SJW_SHIFT;
+ << sjw_shift;
canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
& (ESD_USB2_TSEG1_MAX - 1))
<< ESD_USB2_TSEG1_SHIFT;
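The only per-product difference in the bit-timing word is where SJW is placed: bit 14 on the CAN-USB/2, bit 24 on the CAN-USB/Micro, which is why the shift is selected from the USB product ID. A worked example, assuming sjw = 2:

	/* sjw = 2  ->  field value (sjw - 1) = 1
	 * CAN-USB/2:     1 << ESD_USB2_SJW_SHIFT (14)  ->  canbtr bit 14 set
	 * CAN-USB/Micro: 1 << ESD_USBM_SJW_SHIFT (24)  ->  canbtr bit 24 set */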
@@ -926,11 +936,6 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
-
- if (!priv->open_time)
- return -EINVAL;
-
switch (mode) {
case CAN_MODE_START:
netif_wake_queue(netdev);
@@ -971,12 +976,20 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->index = index;
priv->can.state = CAN_STATE_STOPPED;
- priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
+ USB_CANUSBM_PRODUCT_ID)
+ priv->can.clock.freq = ESD_USBM_CAN_CLOCK;
+ else {
+ priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ }
+
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
priv->can.do_set_mode = esd_usb2_set_mode;
priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
new file mode 100644
index 000000000000..5b58a4d87397
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -0,0 +1,1627 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * Parts of this driver are based on the following:
+ * - Kvaser linux leaf driver (version 4.78)
+ * - CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+ */
+
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_TX_URBS 16
+#define MAX_RX_URBS 4
+#define START_TIMEOUT 1000 /* msecs */
+#define STOP_TIMEOUT 1000 /* msecs */
+#define USB_SEND_TIMEOUT 1000 /* msecs */
+#define USB_RECV_TIMEOUT 1000 /* msecs */
+#define RX_BUFFER_SIZE 3072
+#define CAN_USB_CLOCK 8000000
+#define MAX_NET_DEVICES 3
+
+/* Kvaser USB devices */
+#define KVASER_VENDOR_ID 0x0bfd
+#define USB_LEAF_DEVEL_PRODUCT_ID 10
+#define USB_LEAF_LITE_PRODUCT_ID 11
+#define USB_LEAF_PRO_PRODUCT_ID 12
+#define USB_LEAF_SPRO_PRODUCT_ID 14
+#define USB_LEAF_PRO_LS_PRODUCT_ID 15
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
+#define USB_MEMO2_DEVEL_PRODUCT_ID 22
+#define USB_MEMO2_HSHS_PRODUCT_ID 23
+#define USB_UPRO_HSHS_PRODUCT_ID 24
+#define USB_LEAF_LITE_GI_PRODUCT_ID 25
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
+#define USB_MEMO2_HSLS_PRODUCT_ID 27
+#define USB_LEAF_LITE_CH_PRODUCT_ID 28
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
+#define USB_OEM_MERCURY_PRODUCT_ID 34
+#define USB_OEM_LEAF_PRODUCT_ID 35
+#define USB_CAN_R_PRODUCT_ID 39
+
+/* USB devices features */
+#define KVASER_HAS_SILENT_MODE BIT(0)
+#define KVASER_HAS_TXRX_ERRORS BIT(1)
+
+/* Message header size */
+#define MSG_HEADER_LEN 2
+
+/* Can message flags */
+#define MSG_FLAG_ERROR_FRAME BIT(0)
+#define MSG_FLAG_OVERRUN BIT(1)
+#define MSG_FLAG_NERR BIT(2)
+#define MSG_FLAG_WAKEUP BIT(3)
+#define MSG_FLAG_REMOTE_FRAME BIT(4)
+#define MSG_FLAG_RESERVED BIT(5)
+#define MSG_FLAG_TX_ACK BIT(6)
+#define MSG_FLAG_TX_REQUEST BIT(7)
+
+/* Can states */
+#define M16C_STATE_BUS_RESET BIT(0)
+#define M16C_STATE_BUS_ERROR BIT(4)
+#define M16C_STATE_BUS_PASSIVE BIT(5)
+#define M16C_STATE_BUS_OFF BIT(6)
+
+/* Can msg ids */
+#define CMD_RX_STD_MESSAGE 12
+#define CMD_TX_STD_MESSAGE 13
+#define CMD_RX_EXT_MESSAGE 14
+#define CMD_TX_EXT_MESSAGE 15
+#define CMD_SET_BUS_PARAMS 16
+#define CMD_GET_BUS_PARAMS 17
+#define CMD_GET_BUS_PARAMS_REPLY 18
+#define CMD_GET_CHIP_STATE 19
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_CTRL_MODE 21
+#define CMD_GET_CTRL_MODE 22
+#define CMD_GET_CTRL_MODE_REPLY 23
+#define CMD_RESET_CHIP 24
+#define CMD_RESET_CARD 25
+#define CMD_START_CHIP 26
+#define CMD_START_CHIP_REPLY 27
+#define CMD_STOP_CHIP 28
+#define CMD_STOP_CHIP_REPLY 29
+#define CMD_GET_CARD_INFO2 32
+#define CMD_GET_CARD_INFO 34
+#define CMD_GET_CARD_INFO_REPLY 35
+#define CMD_GET_SOFTWARE_INFO 38
+#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_ERROR_EVENT 45
+#define CMD_FLUSH_QUEUE 48
+#define CMD_RESET_ERROR_COUNTER 49
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_CAN_ERROR_EVENT 51
+#define CMD_USB_THROTTLE 77
+#define CMD_LOG_MESSAGE 106
+
+/* error factors */
+#define M16C_EF_ACKE BIT(0)
+#define M16C_EF_CRCE BIT(1)
+#define M16C_EF_FORME BIT(2)
+#define M16C_EF_STFE BIT(3)
+#define M16C_EF_BITE0 BIT(4)
+#define M16C_EF_BITE1 BIT(5)
+#define M16C_EF_RCVE BIT(6)
+#define M16C_EF_TRE BIT(7)
+
+/* bittiming parameters */
+#define KVASER_USB_TSEG1_MIN 1
+#define KVASER_USB_TSEG1_MAX 16
+#define KVASER_USB_TSEG2_MIN 1
+#define KVASER_USB_TSEG2_MAX 8
+#define KVASER_USB_SJW_MAX 4
+#define KVASER_USB_BRP_MIN 1
+#define KVASER_USB_BRP_MAX 64
+#define KVASER_USB_BRP_INC 1
+
+/* ctrl modes */
+#define KVASER_CTRL_MODE_NORMAL 1
+#define KVASER_CTRL_MODE_SILENT 2
+#define KVASER_CTRL_MODE_SELFRECEPTION 3
+#define KVASER_CTRL_MODE_OFF 4
+
+struct kvaser_msg_simple {
+ u8 tid;
+ u8 channel;
+} __packed;
+
+struct kvaser_msg_cardinfo {
+ u8 tid;
+ u8 nchannels;
+ __le32 serial_number;
+ __le32 padding;
+ __le32 clock_resolution;
+ __le32 mfgdate;
+ u8 ean[8];
+ u8 hw_revision;
+ u8 usb_hs_mode;
+ __le16 padding2;
+} __packed;
+
+struct kvaser_msg_cardinfo2 {
+ u8 tid;
+ u8 channel;
+ u8 pcb_id[24];
+ __le32 oem_unlock_code;
+} __packed;
+
+struct kvaser_msg_softinfo {
+ u8 tid;
+ u8 channel;
+ __le32 sw_options;
+ __le32 fw_version;
+ __le16 max_outstanding_tx;
+ __le16 padding[9];
+} __packed;
+
+struct kvaser_msg_busparams {
+ u8 tid;
+ u8 channel;
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 no_samp;
+} __packed;
+
+struct kvaser_msg_tx_can {
+ u8 channel;
+ u8 tid;
+ u8 msg[14];
+ u8 padding;
+ u8 flags;
+} __packed;
+
+struct kvaser_msg_rx_can {
+ u8 channel;
+ u8 flag;
+ __le16 time[3];
+ u8 msg[14];
+} __packed;
+
+struct kvaser_msg_chip_state_event {
+ u8 tid;
+ u8 channel;
+ __le16 time[3];
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ u8 status;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_msg_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 flags;
+ u8 time_offset;
+} __packed;
+
+struct kvaser_msg_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+ u8 channel;
+ u8 padding;
+ u8 tx_errors_count;
+ u8 rx_errors_count;
+ u8 status;
+ u8 error_factor;
+} __packed;
+
+struct kvaser_msg_ctrl_mode {
+ u8 tid;
+ u8 channel;
+ u8 ctrl_mode;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_msg_flush_queue {
+ u8 tid;
+ u8 channel;
+ u8 flags;
+ u8 padding[3];
+} __packed;
+
+struct kvaser_msg_log_message {
+ u8 channel;
+ u8 flags;
+ __le16 time[3];
+ u8 dlc;
+ u8 time_offset;
+ __le32 id;
+ u8 data[8];
+} __packed;
+
+struct kvaser_msg {
+ u8 len;
+ u8 id;
+ union {
+ struct kvaser_msg_simple simple;
+ struct kvaser_msg_cardinfo cardinfo;
+ struct kvaser_msg_cardinfo2 cardinfo2;
+ struct kvaser_msg_softinfo softinfo;
+ struct kvaser_msg_busparams busparams;
+ struct kvaser_msg_tx_can tx_can;
+ struct kvaser_msg_rx_can rx_can;
+ struct kvaser_msg_chip_state_event chip_state_event;
+ struct kvaser_msg_tx_acknowledge tx_acknowledge;
+ struct kvaser_msg_error_event error_event;
+ struct kvaser_msg_ctrl_mode ctrl_mode;
+ struct kvaser_msg_flush_queue flush_queue;
+ struct kvaser_msg_log_message log_message;
+ } u;
+} __packed;
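Every command shares the same two-byte header (len, id) followed by one of the payloads in the union, and the length is always computed as MSG_HEADER_LEN plus the payload size. A minimal sketch, assuming a caller wanted to request the bus parameters of channel 0 with the generic simple payload (purely illustrative; the driver builds its own requests the same way in kvaser_usb_send_simple_msg() below):

	struct kvaser_msg m = {
		.id  = CMD_GET_BUS_PARAMS,
		.len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple),
		.u.simple = { .tid = 0xff, .channel = 0 },
	};
	/* m could then be handed to kvaser_usb_send_msg() defined below */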
+
+struct kvaser_usb_tx_urb_context {
+ struct kvaser_usb_net_priv *priv;
+ u32 echo_index;
+ int dlc;
+};
+
+struct kvaser_usb {
+ struct usb_device *udev;
+ struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
+
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_anchor rx_submitted;
+
+ u32 fw_version;
+ unsigned int nchannels;
+
+ bool rxinitdone;
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+ struct can_priv can;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+ struct completion start_comp, stop_comp;
+
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+ struct can_berr_counter bec;
+};
+
+static const struct usb_device_id kvaser_usb_table[] = {
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS |
+ KVASER_HAS_SILENT_MODE },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
+ .driver_info = KVASER_HAS_TXRX_ERRORS },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+
+static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
+ struct kvaser_msg *msg)
+{
+ int actual_len;
+
+ return usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ msg, msg->len, &actual_len,
+ USB_SEND_TIMEOUT);
+}
+
+static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
+ struct kvaser_msg *msg)
+{
+ struct kvaser_msg *tmp;
+ void *buf;
+ int actual_len;
+ int err;
+ int pos = 0;
+
+ buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE, &actual_len,
+ USB_RECV_TIMEOUT);
+ if (err < 0)
+ goto end;
+
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
+
+ if (!tmp->len)
+ break;
+
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent, "Format error\n");
+ break;
+ }
+
+ if (tmp->id == id) {
+ memcpy(msg, tmp, tmp->len);
+ goto end;
+ }
+
+ pos += tmp->len;
+ }
+
+ err = -EINVAL;
+
+end:
+ kfree(buf);
+
+ return err;
+}
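Both this synchronous helper and the bulk-in completion callback later in the file walk a receive buffer holding a sequence of variable-length messages, each self-describing via its leading len byte. A standalone sketch of that framing invariant (not driver code):

	static int kvaser_example_count_msgs(const u8 *buf, int len)
	{
		int pos = 0, n = 0;

		while (pos <= len - MSG_HEADER_LEN) {
			const struct kvaser_msg *m = (const struct kvaser_msg *)(buf + pos);

			if (!m->len || pos + m->len > len)
				break;          /* zero length or truncated message */
			n++;
			pos += m->len;
		}
		return n;
	}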
+
+static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
+ u8 msg_id, int channel)
+{
+ struct kvaser_msg *msg;
+ int rc;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = msg_id;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+ msg->u.simple.channel = channel;
+ msg->u.simple.tid = 0xff;
+
+ rc = kvaser_usb_send_msg(dev, msg);
+
+ kfree(msg);
+ return rc;
+}
+
+static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
+{
+ struct kvaser_msg msg;
+ int err;
+
+ err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
+ if (err)
+ return err;
+
+ dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version);
+
+ return 0;
+}
+
+static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
+{
+ struct kvaser_msg msg;
+ int err;
+
+ err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
+ if (err)
+ return err;
+
+ err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
+ if (err)
+ return err;
+
+ dev->nchannels = msg.u.cardinfo.nchannels;
+
+ return 0;
+}
+
+static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct net_device_stats *stats;
+ struct kvaser_usb_tx_urb_context *context;
+ struct kvaser_usb_net_priv *priv;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u8 channel = msg->u.tx_acknowledge.channel;
+ u8 tid = msg->u.tx_acknowledge.tid;
+
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ stats = &priv->netdev->stats;
+
+ context = &priv->tx_contexts[tid % MAX_TX_URBS];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+ if (priv->can.restart_ms &&
+ (priv->can.state >= CAN_STATE_BUS_OFF)) {
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ } else {
+ netdev_err(priv->netdev,
+ "No memory left for err_skb\n");
+ }
+
+ priv->can.can_stats.restarts++;
+ netif_carrier_on(priv->netdev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += context->dlc;
+ can_get_echo_skb(priv->netdev, context->echo_index);
+
+ context->echo_index = MAX_TX_URBS;
+ atomic_dec(&priv->active_tx_urbs);
+
+ netif_wake_queue(priv->netdev);
+}
+
+static void kvaser_usb_simple_msg_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+
+ kfree(urb->transfer_buffer);
+
+ if (urb->status)
+ netdev_warn(netdev, "urb status received: %d\n",
+ urb->status);
+}
+
+static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+ u8 msg_id)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device *netdev = priv->netdev;
+ struct kvaser_msg *msg;
+ struct urb *urb;
+ void *buf;
+ int err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ return -ENOMEM;
+ }
+
+ buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ msg = (struct kvaser_msg *)buf;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
+ msg->id = msg_id;
+ msg->u.simple.channel = priv->channel;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ buf, msg->len,
+ kvaser_usb_simple_msg_callback, priv);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ netdev_err(netdev, "Error transmitting URB\n");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ kfree(buf);
+ return err;
+ }
+
+ usb_free_urb(urb);
+
+ return 0;
+}
+
+static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
+ unsigned int new_state;
+ u8 channel, status, txerr, rxerr, error_factor;
+
+ switch (msg->id) {
+ case CMD_CAN_ERROR_EVENT:
+ channel = msg->u.error_event.channel;
+ status = msg->u.error_event.status;
+ txerr = msg->u.error_event.tx_errors_count;
+ rxerr = msg->u.error_event.rx_errors_count;
+ error_factor = msg->u.error_event.error_factor;
+ break;
+ case CMD_LOG_MESSAGE:
+ channel = msg->u.log_message.channel;
+ status = msg->u.log_message.data[0];
+ txerr = msg->u.log_message.data[2];
+ rxerr = msg->u.log_message.data[3];
+ error_factor = msg->u.log_message.data[1];
+ break;
+ case CMD_CHIP_STATE_EVENT:
+ channel = msg->u.chip_state_event.channel;
+ status = msg->u.chip_state_event.status;
+ txerr = msg->u.chip_state_event.tx_errors_count;
+ rxerr = msg->u.chip_state_event.rx_errors_count;
+ error_factor = 0;
+ break;
+ default:
+ dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
+ msg->id);
+ return;
+ }
+
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ stats = &priv->netdev->stats;
+
+ if (status & M16C_STATE_BUS_RESET) {
+ kvaser_usb_unlink_tx_urbs(priv);
+ return;
+ }
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ new_state = priv->can.state;
+
+ netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+
+ if (status & M16C_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ priv->can.can_stats.bus_off++;
+ if (!priv->can.restart_ms)
+ kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
+
+ netif_carrier_off(priv->netdev);
+
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (status & M16C_STATE_BUS_PASSIVE) {
+ if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_CRTL;
+
+ if (txerr || rxerr)
+ cf->data[1] = (txerr > rxerr)
+ ? CAN_ERR_CRTL_TX_PASSIVE
+ : CAN_ERR_CRTL_RX_PASSIVE;
+ else
+ cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
+ CAN_ERR_CRTL_RX_PASSIVE;
+
+ priv->can.can_stats.error_passive++;
+ }
+
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+ if (status == M16C_STATE_BUS_ERROR) {
+ if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+ ((txerr >= 96) || (rxerr >= 96))) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (txerr > rxerr)
+ ? CAN_ERR_CRTL_TX_WARNING
+ : CAN_ERR_CRTL_RX_WARNING;
+
+ priv->can.can_stats.error_warning++;
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ if (!status) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ if (priv->can.restart_ms &&
+ (priv->can.state >= CAN_STATE_BUS_OFF) &&
+ (new_state < CAN_STATE_BUS_OFF)) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+
+ priv->can.can_stats.restarts++;
+ }
+
+ if (error_factor) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ if (error_factor & M16C_EF_ACKE)
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+ if (error_factor & M16C_EF_CRCE)
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ if (error_factor & M16C_EF_FORME)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (error_factor & M16C_EF_STFE)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (error_factor & M16C_EF_BITE0)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (error_factor & M16C_EF_BITE1)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (error_factor & M16C_EF_TRE)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ priv->bec.txerr = txerr;
+ priv->bec.rxerr = rxerr;
+
+ priv->can.state = new_state;
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+ MSG_FLAG_NERR)) {
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
+ msg->u.rx_can.flag);
+
+ stats->rx_errors++;
+ return;
+ }
+
+ if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ }
+}
+
+static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ u8 channel = msg->u.rx_can.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ stats = &priv->netdev->stats;
+
+ if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
+ MSG_FLAG_OVERRUN)) {
+ kvaser_usb_rx_can_err(priv, msg);
+ return;
+ } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
+ netdev_warn(priv->netdev,
+ "Unhandled frame (flags: 0x%02x)",
+ msg->u.rx_can.flag);
+ return;
+ }
+
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->tx_dropped++;
+ return;
+ }
+
+ cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+ (msg->u.rx_can.msg[1] & 0x3f);
+ cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+ if (msg->id == CMD_RX_EXT_MESSAGE) {
+ cf->can_id <<= 18;
+ cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+ ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+ (msg->u.rx_can.msg[4] & 0x3f);
+ cf->can_id |= CAN_EFF_FLAG;
+ }
+
+ if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
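The Leaf firmware packs CAN identifiers into msg[0..4] in 5- and 6-bit groups; the receive path above reassembles them and the transmit path further down splits them the same way. A worked round-trip for a standard-frame identifier (illustrative helper, not part of the driver):

	static u16 kvaser_example_sff_roundtrip(u16 id)
	{
		u8 b0 = (id >> 6) & 0x1f;       /* top 5 bits of the 11-bit ID */
		u8 b1 = id & 0x3f;              /* low 6 bits                  */

		/* e.g. 0x123 -> b0 = 0x04, b1 = 0x23 -> 0x123 again */
		return ((b0 & 0x1f) << 6) | (b1 & 0x3f);
	}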
+
+static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = msg->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ if (completion_done(&priv->start_comp) &&
+ netif_queue_stopped(priv->netdev)) {
+ netif_wake_queue(priv->netdev);
+ } else {
+ netif_start_queue(priv->netdev);
+ complete(&priv->start_comp);
+ }
+}
+
+static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = msg->u.simple.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(dev->udev->dev.parent,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+
+ complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
+ const struct kvaser_msg *msg)
+{
+ switch (msg->id) {
+ case CMD_START_CHIP_REPLY:
+ kvaser_usb_start_chip_reply(dev, msg);
+ break;
+
+ case CMD_STOP_CHIP_REPLY:
+ kvaser_usb_stop_chip_reply(dev, msg);
+ break;
+
+ case CMD_RX_STD_MESSAGE:
+ case CMD_RX_EXT_MESSAGE:
+ kvaser_usb_rx_can_msg(dev, msg);
+ break;
+
+ case CMD_CHIP_STATE_EVENT:
+ case CMD_CAN_ERROR_EVENT:
+ kvaser_usb_rx_error(dev, msg);
+ break;
+
+ case CMD_LOG_MESSAGE:
+ if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
+ kvaser_usb_rx_error(dev, msg);
+ break;
+
+ case CMD_TX_ACKNOWLEDGE:
+ kvaser_usb_tx_acknowledge(dev, msg);
+ break;
+
+ default:
+ dev_warn(dev->udev->dev.parent,
+ "Unhandled message (%d)\n", msg->id);
+ break;
+ }
+}
+
+static void kvaser_usb_read_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb *dev = urb->context;
+ struct kvaser_msg *msg;
+ int pos = 0;
+ int err, i;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
+ urb->status);
+ goto resubmit_urb;
+ }
+
+ while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+ msg = urb->transfer_buffer + pos;
+
+ if (!msg->len)
+ break;
+
+ if (pos + msg->len > urb->actual_length) {
+ dev_err(dev->udev->dev.parent, "Format error\n");
+ break;
+ }
+
+ kvaser_usb_handle_message(dev, msg);
+
+ pos += msg->len;
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback, dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+ } else if (err) {
+ dev_err(dev->udev->dev.parent,
+ "Failed resubmitting read bulk urb: %d\n", err);
+ }
+
+ return;
+}
+
+static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+ int i, err = 0;
+
+ if (dev->rxinitdone)
+ return 0;
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+ dma_addr_t buf_dma;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_warn(dev->udev->dev.parent,
+ "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
+ GFP_KERNEL, &buf_dma);
+ if (!buf) {
+ dev_warn(dev->udev->dev.parent,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE,
+ kvaser_usb_read_bulk_callback,
+ dev);
+ urb->transfer_dma = buf_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+ buf_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
+ usb_free_urb(urb);
+ }
+
+ if (i == 0) {
+ dev_warn(dev->udev->dev.parent,
+ "Cannot setup read URBs, error %d\n", err);
+ return err;
+ } else if (i < MAX_RX_URBS) {
+ dev_warn(dev->udev->dev.parent,
+ "RX performances may be slow\n");
+ }
+
+ dev->rxinitdone = true;
+
+ return 0;
+}
+
+static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_msg *msg;
+ int rc;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = CMD_SET_CTRL_MODE;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
+ msg->u.ctrl_mode.tid = 0xff;
+ msg->u.ctrl_mode.channel = priv->channel;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
+ else
+ msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
+
+ rc = kvaser_usb_send_msg(priv->dev, msg);
+
+ kfree(msg);
+ return rc;
+}
+
+static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->start_comp);
+
+ err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->start_comp,
+ msecs_to_jiffies(START_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(dev);
+ if (err)
+ goto error;
+
+ err = kvaser_usb_set_opt_mode(priv);
+ if (err)
+ goto error;
+
+ err = kvaser_usb_start_chip(priv);
+ if (err) {
+ netdev_warn(netdev, "Cannot start device, error %d\n", err);
+ goto error;
+ }
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+error:
+ close_candev(netdev);
+ return err;
+}
+
+static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i],
+ dev->rxbuf_dma[i]);
+
+ for (i = 0; i < MAX_NET_DEVICES; i++) {
+ struct kvaser_usb_net_priv *priv = dev->nets[i];
+
+ if (priv)
+ kvaser_usb_unlink_tx_urbs(priv);
+ }
+}
+
+static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ init_completion(&priv->stop_comp);
+
+ err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->stop_comp,
+ msecs_to_jiffies(STOP_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_msg *msg;
+ int rc;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = CMD_FLUSH_QUEUE;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
+ msg->u.flush_queue.channel = priv->channel;
+ msg->u.flush_queue.flags = 0x00;
+
+ rc = kvaser_usb_send_msg(priv->dev, msg);
+
+ kfree(msg);
+ return rc;
+}
+
+static int kvaser_usb_close(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ int err;
+
+ netif_stop_queue(netdev);
+
+ err = kvaser_usb_flush_queue(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
+ netdev_warn(netdev, "Cannot reset card, error %d\n", err);
+
+ err = kvaser_usb_stop_chip(priv);
+ if (err)
+ netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+
+ priv->can.state = CAN_STATE_STOPPED;
+ close_candev(priv->netdev);
+
+ return 0;
+}
+
+static void kvaser_usb_write_bulk_callback(struct urb *urb)
+{
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+ struct kvaser_usb_net_priv *priv;
+ struct net_device *netdev;
+
+ if (WARN_ON(!context))
+ return;
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ kfree(urb->transfer_buffer);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+}
+
+static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct kvaser_usb_tx_urb_context *context = NULL;
+ struct urb *urb;
+ void *buf;
+ struct kvaser_msg *msg;
+ int i, err;
+ int ret = NETDEV_TX_OK;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ stats->tx_dropped++;
+ goto nourbmem;
+ }
+
+ buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ stats->tx_dropped++;
+ goto nobufmem;
+ }
+
+ msg = buf;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
+ msg->u.tx_can.flags = 0;
+ msg->u.tx_can.channel = priv->channel;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ msg->id = CMD_TX_EXT_MESSAGE;
+ msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
+ msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
+ msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
+ msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
+ msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
+ } else {
+ msg->id = CMD_TX_STD_MESSAGE;
+ msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
+ msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
+ }
+
+ msg->u.tx_can.msg[5] = cf->can_dlc;
+ memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;
+
+ for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
+ if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ context = &priv->tx_contexts[i];
+ break;
+ }
+ }
+
+ if (!context) {
+ netdev_warn(netdev, "cannot find free context\n");
+ ret = NETDEV_TX_BUSY;
+ goto releasebuf;
+ }
+
+ context->priv = priv;
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ msg->u.tx_can.tid = context->echo_index;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev,
+ dev->bulk_out->bEndpointAddress),
+ buf, msg->len,
+ kvaser_usb_write_bulk_callback, context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&priv->active_tx_urbs);
+
+ if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ netif_stop_queue(netdev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index);
+
+ skb = NULL; /* set to NULL to avoid double free in
+ * dev_kfree_skb(skb) */
+
+ atomic_dec(&priv->active_tx_urbs);
+ usb_unanchor_urb(urb);
+
+ stats->tx_dropped++;
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "Failed tx_urb %d\n", err);
+
+ goto releasebuf;
+ }
+
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+releasebuf:
+ kfree(buf);
+nobufmem:
+ usb_free_urb(urb);
+nourbmem:
+ dev_kfree_skb(skb);
+ return ret;
+}
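The transmit path keeps at most MAX_TX_URBS frames in flight: a context is free when its echo_index holds the sentinel MAX_TX_URBS, which is set at init time and in kvaser_usb_unlink_tx_urbs() and restored by kvaser_usb_tx_acknowledge(). A one-line sketch of that invariant (illustrative, not driver code):

	static bool kvaser_example_ctx_is_free(const struct kvaser_usb_tx_urb_context *c)
	{
		return c->echo_index == MAX_TX_URBS;    /* sentinel: no frame outstanding */
	}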
+
+static const struct net_device_ops kvaser_usb_netdev_ops = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+};
+
+static const struct can_bittiming_const kvaser_usb_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
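With the 8 MHz CAN_USB_CLOCK declared earlier, these constraints let the CAN core derive timings for the common bitrates. A worked example for 500 kbit/s (illustrative; the core computes this automatically):

	/* brp = 1          ->  tq = 1 / 8 MHz = 125 ns
	 * bit time = 2 us  ->  16 tq = sync(1) + tseg1(13) + tseg2(2)
	 * sample point     ->  (1 + 13) / 16 = 87.5 % */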
+
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_msg *msg;
+ int rc;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->id = CMD_SET_BUS_PARAMS;
+ msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
+ msg->u.busparams.channel = priv->channel;
+ msg->u.busparams.tid = 0xff;
+ msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+ msg->u.busparams.sjw = bt->sjw;
+ msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+ msg->u.busparams.tseg2 = bt->phase_seg2;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ msg->u.busparams.no_samp = 3;
+ else
+ msg->u.busparams.no_samp = 1;
+
+ rc = kvaser_usb_send_msg(dev, msg);
+
+ kfree(msg);
+ return rc;
+}
+
+static int kvaser_usb_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ unregister_netdev(dev->nets[i]->netdev);
+ }
+
+ kvaser_usb_unlink_all_urbs(dev);
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (!dev->nets[i])
+ continue;
+
+ free_candev(dev->nets[i]->netdev);
+ }
+}
+
+static int kvaser_usb_init_one(struct usb_interface *intf,
+ const struct usb_device_id *id, int channel)
+{
+ struct kvaser_usb *dev = usb_get_intfdata(intf);
+ struct net_device *netdev;
+ struct kvaser_usb_net_priv *priv;
+ int i, err;
+
+ netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Cannot alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+
+ init_usb_anchor(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+ priv->dev = dev;
+ priv->netdev = netdev;
+ priv->channel = channel;
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = CAN_USB_CLOCK;
+ priv->can.bittiming_const = &kvaser_usb_bittiming_const;
+ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ priv->can.do_set_mode = kvaser_usb_set_mode;
+ if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
+ priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ if (id->driver_info & KVASER_HAS_SILENT_MODE)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ netdev->flags |= IFF_ECHO;
+
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ dev->nets[channel] = priv;
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&intf->dev, "Failed to register can device\n");
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
+}
+
+static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
+ struct usb_endpoint_descriptor **in,
+ struct usb_endpoint_descriptor **out)
+{
+ const struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int i;
+
+ iface_desc = &intf->altsetting[0];
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(endpoint))
+ *in = endpoint;
+
+ if (usb_endpoint_is_bulk_out(endpoint))
+ *out = endpoint;
+ }
+}
+
+static int kvaser_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct kvaser_usb *dev;
+ int err = -ENOMEM;
+ int i;
+
+ dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
+ if (!dev->bulk_in || !dev->bulk_out) {
+ dev_err(&intf->dev, "Cannot get usb endpoint(s)");
+ return err;
+ }
+
+ dev->udev = interface_to_usbdev(intf);
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+
+ for (i = 0; i < MAX_NET_DEVICES; i++)
+ kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+
+ err = kvaser_usb_get_software_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get software infos, error %d\n", err);
+ return err;
+ }
+
+ err = kvaser_usb_get_card_info(dev);
+ if (err) {
+ dev_err(&intf->dev,
+ "Cannot get card infos, error %d\n", err);
+ return err;
+ }
+
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+ for (i = 0; i < dev->nchannels; i++) {
+ err = kvaser_usb_init_one(intf, id, i);
+ if (err) {
+ kvaser_usb_remove_interfaces(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void kvaser_usb_disconnect(struct usb_interface *intf)
+{
+ struct kvaser_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev)
+ return;
+
+ kvaser_usb_remove_interfaces(dev);
+}
+
+static struct usb_driver kvaser_usb_driver = {
+ .name = "kvaser_usb",
+ .probe = kvaser_usb_probe,
+ .disconnect = kvaser_usb_disconnect,
+ .id_table = kvaser_usb_table,
+};
+
+module_usb_driver(kvaser_usb_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c4643c400d46..d9290ea788e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -520,7 +520,6 @@ static int peak_usb_ndo_open(struct net_device *netdev)
return err;
}
- dev->open_time = jiffies;
netif_start_queue(netdev);
return 0;
@@ -576,7 +575,6 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
close_candev(netdev);
- dev->open_time = 0;
dev->can.state = CAN_STATE_STOPPED;
/* can set bus off now */
@@ -661,9 +659,6 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
struct peak_usb_device *dev = netdev_priv(netdev);
int err = 0;
- if (!dev->open_time)
- return -EINVAL;
-
switch (mode) {
case CAN_MODE_START:
err = peak_usb_restart(dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c8e5e91d7cb5..073b47ff8eee 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -104,7 +104,6 @@ struct peak_usb_device {
struct can_priv can;
struct peak_usb_adapter *adapter;
unsigned int ctrl_idx;
- int open_time;
u32 state;
struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS];
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index dd151d53d506..b8fe808b7957 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,5 +1,5 @@
menu "Distributed Switch Architecture drivers"
- depends on NET_DSA
+ depends on HAVE_NET_DSA
config NET_DSA_MV88E6XXX
tristate
@@ -7,6 +7,7 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
@@ -18,6 +19,7 @@ config NET_DSA_MV88E6XXX_NEED_PPU
config NET_DSA_MV88E6131
tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+ select NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_MV88E6XXX_NEED_PPU
select NET_DSA_TAG_DSA
@@ -27,6 +29,7 @@ config NET_DSA_MV88E6131
config NET_DSA_MV88E6123_61_65
tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+ select NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 1a8eef2c3d58..633c709b9d99 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -92,7 +92,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
+static char version[] = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
#ifdef EL3_DEBUG
static int el3_debug = EL3_DEBUG;
@@ -184,7 +184,7 @@ static int max_interrupt_work = 10;
static int nopnp;
#endif
-static int __devinit el3_common_init(struct net_device *dev);
+static int el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
@@ -270,9 +270,8 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
}
-static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
- int ioaddr, int irq, int if_port,
- enum el3_cardtype type)
+static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
+ int irq, int if_port, enum el3_cardtype type)
{
struct el3_private *lp = netdev_priv(dev);
@@ -283,8 +282,7 @@ static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
lp->type = type;
}
-static int __devinit el3_isa_match(struct device *pdev,
- unsigned int ndev)
+static int el3_isa_match(struct device *pdev, unsigned int ndev)
{
struct net_device *dev;
int ioaddr, isa_irq, if_port, err;
@@ -341,7 +339,7 @@ static int __devinit el3_isa_match(struct device *pdev,
return 1;
}
-static int __devexit el3_isa_remove(struct device *pdev,
+static int el3_isa_remove(struct device *pdev,
unsigned int ndev)
{
el3_device_remove(pdev);
@@ -382,7 +380,7 @@ static int el3_isa_resume(struct device *dev, unsigned int n)
static struct isa_driver el3_isa_driver = {
.match = el3_isa_match,
- .remove = __devexit_p(el3_isa_remove),
+ .remove = el3_isa_remove,
#ifdef CONFIG_PM
.suspend = el3_isa_suspend,
.resume = el3_isa_resume,
@@ -406,8 +404,7 @@ static struct pnp_device_id el3_pnp_ids[] = {
};
MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
-static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
- const struct pnp_device_id *id)
+static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
short i;
int ioaddr, irq, if_port;
@@ -445,7 +442,7 @@ static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
return 0;
}
-static void __devexit el3_pnp_remove(struct pnp_dev *pdev)
+static void el3_pnp_remove(struct pnp_dev *pdev)
{
el3_common_remove(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
@@ -467,7 +464,7 @@ static struct pnp_driver el3_pnp_driver = {
.name = "3c509",
.id_table = el3_pnp_ids,
.probe = el3_pnp_probe,
- .remove = __devexit_p(el3_pnp_remove),
+ .remove = el3_pnp_remove,
#ifdef CONFIG_PM
.suspend = el3_pnp_suspend,
.resume = el3_pnp_resume,
@@ -496,7 +493,7 @@ static struct eisa_driver el3_eisa_driver = {
.driver = {
.name = "3c579",
.probe = el3_eisa_probe,
- .remove = __devexit_p (el3_device_remove),
+ .remove = el3_device_remove,
.suspend = el3_suspend,
.resume = el3_resume,
}
@@ -519,7 +516,7 @@ static const struct net_device_ops netdev_ops = {
#endif
};
-static int __devinit el3_common_init(struct net_device *dev)
+static int el3_common_init(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int err;
@@ -618,7 +615,7 @@ static int __init el3_eisa_probe (struct device *device)
/* This remove works for all device types.
*
* The net dev must be stored in the driver data field */
-static int __devexit el3_device_remove (struct device *device)
+static int el3_device_remove(struct device *device)
{
struct net_device *dev;
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 66df93638085..ffd8de28a76a 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)
netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
cardname, dev->base_addr, dev->irq, dev->dev_addr);
netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
- 8 << config & Ram_size,
+ 8 << (config & Ram_size),
ram_split[(config & Ram_split) >> Ram_split_shift],
config & Autoselect ? "autoselect " : "");
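
The 3c574_cs change above is a pure precedence fix: in C the shift operators bind tighter than bitwise AND, so 8 << config & Ram_size was evaluated as (8 << config) & Ram_size rather than the intended 8 << (config & Ram_size). A standalone illustration, using a made-up mask and register value rather than the 3c574 definitions:

/*
 * Demonstrates the precedence of << over & that the hunk above corrects.
 * Ram_size and config are invented values for illustration only.
 */
#include <stdio.h>

#define Ram_size 7

int main(void)
{
        int config = 0x0B;      /* pretend register value with bits above the mask set */

        printf("8 << config & Ram_size   = %d\n", 8 << config & Ram_size);
        printf("8 << (config & Ram_size) = %d\n", 8 << (config & Ram_size));
        return 0;
}
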
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index e463d1036829..ed0feb3cc6fa 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -102,7 +102,7 @@ static int vortex_debug = 1;
#include <linux/delay.h>
-static const char version[] __devinitconst =
+static const char version[] =
DRV_NAME ": Donald Becker and others.\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -277,7 +277,7 @@ static struct vortex_chip_info {
int flags;
int drv_flags;
int io_size;
-} vortex_info_tbl[] __devinitdata = {
+} vortex_info_tbl[] = {
{"3c590 Vortex 10Mbps",
PCI_USES_MASTER, IS_VORTEX, 32, },
{"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
@@ -931,7 +931,7 @@ static int __init vortex_eisa_probe(struct device *device)
return 0;
}
-static int __devexit vortex_eisa_remove(struct device *device)
+static int vortex_eisa_remove(struct device *device)
{
struct eisa_device *edev;
struct net_device *dev;
@@ -962,7 +962,7 @@ static struct eisa_driver vortex_eisa_driver = {
.driver = {
.name = "3c59x",
.probe = vortex_eisa_probe,
- .remove = __devexit_p(vortex_eisa_remove)
+ .remove = vortex_eisa_remove
}
};
@@ -1000,8 +1000,8 @@ static int __init vortex_eisa_init(void)
}
/* returns count (>= 0), or negative on error */
-static int __devinit vortex_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int vortex_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int rc, unit, pci_bar;
struct vortex_chip_info *vci;
@@ -1088,9 +1088,8 @@ static const struct net_device_ops vortex_netdev_ops = {
*
* NOTE: pdev can be NULL, for the case of a Compaq device
*/
-static int __devinit vortex_probe1(struct device *gendev,
- void __iomem *ioaddr, int irq,
- int chip_idx, int card_idx)
+static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ int chip_idx, int card_idx)
{
struct vortex_private *vp;
int option;
@@ -3222,7 +3221,7 @@ static void acpi_set_WOL(struct net_device *dev)
}
-static void __devexit vortex_remove_one(struct pci_dev *pdev)
+static void vortex_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp;
@@ -3265,7 +3264,7 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
static struct pci_driver vortex_driver = {
.name = "3c59x",
.probe = vortex_init_one,
- .remove = __devexit_p(vortex_remove_one),
+ .remove = vortex_remove_one,
.id_table = vortex_pci_tbl,
.driver.pm = VORTEX_PM_OPS,
};
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index bad4fa6815c5..eb56174469a7 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -80,7 +80,7 @@ config PCMCIA_3C589
config VORTEX
tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
- depends on (PCI || EISA)
+ depends on (PCI || EISA) && HAS_IOPORT
select NET_CORE
select MII
---help---
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index bb9670f29b59..27aaaf99e73e 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -168,7 +168,7 @@ enum typhoon_cards {
};
/* directly indexed by enum typhoon_cards, above */
-static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
+static struct typhoon_card_info typhoon_card_info[] = {
{ "3Com Typhoon (3C990-TX)",
TYPHOON_CRYPTO_NONE},
{ "3Com Typhoon (3CR990-TX-95)",
@@ -2200,7 +2200,7 @@ need_resume:
}
#endif
-static int __devinit
+static int
typhoon_test_mmio(struct pci_dev *pdev)
{
void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
@@ -2258,7 +2258,7 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit
+static int
typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -2509,7 +2509,7 @@ error_out:
return err;
}
-static void __devexit
+static void
typhoon_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -2533,7 +2533,7 @@ static struct pci_driver typhoon_driver = {
.name = KBUILD_MODNAME,
.id_table = typhoon_pci_tbl,
.probe = typhoon_init_one,
- .remove = __devexit_p(typhoon_remove_one),
+ .remove = typhoon_remove_one,
#ifdef CONFIG_PM
.suspend = typhoon_suspend,
.resume = typhoon_resume,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 203ff9dccadb..70dba5d01ad3 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -109,7 +109,7 @@ static inline struct ax_device *to_ax_dev(struct net_device *dev)
/*
* ax_initial_check
*
- * do an initial probe for the card to check wether it exists
+ * do an initial probe for the card to check whether it exists
* and is functional
*/
static int ax_initial_check(struct net_device *dev)
@@ -191,11 +191,11 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_local->word16)
- readsw(nic_base + NE_DATAPORT, hdr,
- sizeof(struct e8390_pkt_hdr) >> 1);
+ ioread16_rep(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
else
- readsb(nic_base + NE_DATAPORT, hdr,
- sizeof(struct e8390_pkt_hdr));
+ ioread8_rep(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_local->dmaing &= ~0x01;
@@ -237,12 +237,12 @@ static void ax_block_input(struct net_device *dev, int count,
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_local->word16) {
- readsw(nic_base + NE_DATAPORT, buf, count >> 1);
+ ioread16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
if (count & 0x01)
buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
} else {
- readsb(nic_base + NE_DATAPORT, buf, count);
+ ioread8_rep(nic_base + NE_DATAPORT, buf, count);
}
ei_local->dmaing &= ~1;
@@ -286,9 +286,9 @@ static void ax_block_output(struct net_device *dev, int count,
ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
if (ei_local->word16)
- writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+ iowrite16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
else
- writesb(nic_base + NE_DATAPORT, buf, count);
+ iowrite8_rep(nic_base + NE_DATAPORT, buf, count);
dma_start = jiffies;
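
The ax88796 hunks above replace readsw/readsb and writesw/writesb with ioread16_rep/ioread8_rep and their write counterparts, presumably so the same accessors cover both memory-mapped and ioport-mapped cookies; the transfer logic itself is unchanged: move count bytes as 16-bit words, then fetch an odd trailing byte on its own. A userspace sketch of that pattern, with the data port simulated by an in-memory buffer:

/*
 * Word-wise copy with odd-byte handling, mirroring ax_block_input();
 * the "port" here is just a memory buffer standing in for NE_DATAPORT.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void block_input(uint8_t *buf, const uint8_t *port, size_t count)
{
        size_t i, words = count >> 1;

        for (i = 0; i < words; i++)     /* ioread16_rep(port, buf, count >> 1) */
                memcpy(&buf[2 * i], &port[2 * i], 2);

        if (count & 0x01)               /* odd last byte read separately */
                buf[count - 1] = port[count - 1];
}

int main(void)
{
        const uint8_t port[] = "an odd-length packet"; /* 20 chars + NUL = 21 bytes */
        uint8_t buf[sizeof(port)];

        block_input(buf, port, sizeof(port));
        printf("%s\n", (char *)buf);
        return 0;
}
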
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 8322c54972f3..78c6fb4b1143 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -463,12 +463,6 @@ etherh_open(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
- dev->name);
- return -EINVAL;
- }
-
if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
return -EAGAIN;
@@ -527,7 +521,7 @@ static void __init etherh_banner(void)
* Read the ethernet address string from the on board rom.
* This is an ascii string...
*/
-static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
+static int etherh_addr(char *addr, struct expansion_card *ec)
{
struct in_chunk_dir cd;
char *s;
@@ -657,7 +651,7 @@ static const struct net_device_ops etherh_netdev_ops = {
static u32 etherh_regoffsets[16];
static u32 etherm_regoffsets[16];
-static int __devinit
+static int
etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
{
const struct etherh_data *data = id->data;
@@ -775,7 +769,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit etherh_remove(struct expansion_card *ec)
+static void etherh_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
@@ -839,7 +833,7 @@ static const struct ecard_id etherh_ids[] = {
static struct ecard_driver etherh_driver = {
.probe = etherh_probe,
- .remove = __devexit_p(etherh_remove),
+ .remove = etherh_remove,
.id_table = etherh_ids,
.drv = {
.name = DRV_NAME,
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 5370c884620b..fb3dd4399cf3 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -53,9 +53,9 @@ static const char version[] =
#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
-static int __devinit hydra_init_one(struct zorro_dev *z,
+static int hydra_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent);
-static int __devinit hydra_init(struct zorro_dev *z);
+static int hydra_init(struct zorro_dev *z);
static int hydra_open(struct net_device *dev);
static int hydra_close(struct net_device *dev);
static void hydra_reset_8390(struct net_device *dev);
@@ -65,9 +65,9 @@ static void hydra_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-static void __devexit hydra_remove_one(struct zorro_dev *z);
+static void hydra_remove_one(struct zorro_dev *z);
-static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
{ 0 }
};
@@ -77,11 +77,11 @@ static struct zorro_driver hydra_driver = {
.name = "hydra",
.id_table = hydra_zorro_tbl,
.probe = hydra_init_one,
- .remove = __devexit_p(hydra_remove_one),
+ .remove = hydra_remove_one,
};
-static int __devinit hydra_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int hydra_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
int err;
@@ -110,7 +110,7 @@ static const struct net_device_ops hydra_netdev_ops = {
#endif
};
-static int __devinit hydra_init(struct zorro_dev *z)
+static int hydra_init(struct zorro_dev *z)
{
struct net_device *dev;
unsigned long board = ZTWO_VADDR(z->resource.start);
@@ -247,7 +247,7 @@ static void hydra_block_output(struct net_device *dev, int count,
z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
}
-static void __devexit hydra_remove_one(struct zorro_dev *z)
+static void hydra_remove_one(struct zorro_dev *z)
{
struct net_device *dev = zorro_get_drvdata(z);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 5e8845febfb8..c0c127913dec 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -61,7 +61,7 @@ static int options[MAX_UNITS];
#include "8390.h"
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
" D. Becker/P. Gortmaker\n";
@@ -119,7 +119,7 @@ enum ne2k_pci_chipsets {
static struct {
char *name;
int flags;
-} pci_clone_list[] __devinitdata = {
+} pci_clone_list[] = {
{"RealTek RTL-8029", REALTEK_FDX},
{"Winbond 89C940", 0},
{"Compex RL2000", 0},
@@ -215,8 +215,8 @@ static const struct net_device_ops ne2k_netdev_ops = {
#endif
};
-static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ne2k_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev;
int i;
@@ -647,7 +647,7 @@ static const struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
};
-static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
+static void ne2k_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -696,7 +696,7 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
static struct pci_driver ne2k_driver = {
.name = DRV_NAME,
.probe = ne2k_pci_init_one,
- .remove = __devexit_p(ne2k_pci_remove_one),
+ .remove = ne2k_pci_remove_one,
.id_table = ne2k_pci_tbl,
#ifdef CONFIG_PM
.suspend = ne2k_pci_suspend,
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index e3f57427d5c5..ebcdb52ec739 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -222,7 +222,7 @@ static int __init ne3210_eisa_probe (struct device *device)
return retval;
}
-static int __devexit ne3210_eisa_remove (struct device *device)
+static int ne3210_eisa_remove(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
unsigned long ioaddr = to_eisa_device (device)->base_addr;
@@ -324,7 +324,7 @@ static struct eisa_driver ne3210_eisa_driver = {
.driver = {
.name = "ne3210",
.probe = ne3210_eisa_probe,
- .remove = __devexit_p (ne3210_eisa_remove),
+ .remove = ne3210_eisa_remove,
},
};
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 7818e6397e91..85ec4c2d2645 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -75,7 +75,7 @@ static struct card_info {
zorro_id id;
const char *name;
unsigned int offset;
-} cards[] __devinitdata = {
+} cards[] = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
};
@@ -254,7 +254,7 @@ static int zorro8390_close(struct net_device *dev)
return 0;
}
-static void __devexit zorro8390_remove_one(struct zorro_dev *z)
+static void zorro8390_remove_one(struct zorro_dev *z)
{
struct net_device *dev = zorro_get_drvdata(z);
@@ -264,7 +264,7 @@ static void __devexit zorro8390_remove_one(struct zorro_dev *z)
free_netdev(dev);
}
-static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id zorro8390_zorro_tbl[] = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
{ 0 }
@@ -286,9 +286,8 @@ static const struct net_device_ops zorro8390_netdev_ops = {
#endif
};
-static int __devinit zorro8390_init(struct net_device *dev,
- unsigned long board, const char *name,
- unsigned long ioaddr)
+static int zorro8390_init(struct net_device *dev, unsigned long board,
+ const char *name, unsigned long ioaddr)
{
int i;
int err;
@@ -396,8 +395,8 @@ static int __devinit zorro8390_init(struct net_device *dev,
return 0;
}
-static int __devinit zorro8390_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int zorro8390_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct net_device *dev;
unsigned long board, ioaddr;
@@ -432,7 +431,7 @@ static struct zorro_driver zorro8390_driver = {
.name = "zorro8390",
.id_table = zorro8390_zorro_tbl,
.probe = zorro8390_init_one,
- .remove = __devexit_p(zorro8390_remove_one),
+ .remove = zorro8390_remove_one,
};
static int __init zorro8390_init_module(void)
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 5b65992c2a0a..549b77500579 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -166,7 +166,7 @@ static int rx_copybreak /* = 0 */;
#define FIRMWARE_TX "adaptec/starfire_tx.bin"
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
@@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
static const struct chip_info {
const char *name;
int drv_flags;
-} netdrv_tbl[] __devinitconst = {
+} netdrv_tbl[] = {
{ "Adaptec Starfire 6915", CanHaveMII },
};
@@ -641,8 +641,8 @@ static const struct net_device_ops netdev_ops = {
#endif
};
-static int __devinit starfire_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int starfire_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct device *d = &pdev->dev;
struct netdev_private *np;
@@ -1990,7 +1990,7 @@ static int starfire_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static void __devexit starfire_remove_one (struct pci_dev *pdev)
+static void starfire_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct netdev_private *np = netdev_priv(dev);
@@ -2018,7 +2018,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
static struct pci_driver starfire_driver = {
.name = DRV_NAME,
.probe = starfire_init_one,
- .remove = __devexit_p(starfire_remove_one),
+ .remove = starfire_remove_one,
#ifdef CONFIG_PM
.suspend = starfire_suspend,
.resume = starfire_resume,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d37ae4a..a9481606bbcd 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -62,6 +62,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on BFIN_MAC && BF518
+ select PTP_1588_CLOCK
default y
---help---
To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426e1085..c1fdb8be8bee 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,14 +548,17 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_SYS_HARDWARE;
- info->phc_index = -1;
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = lp->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0;
}
+#endif
static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
.get_wol = bfin_mac_ethtool_getwol,
.set_wol = bfin_mac_ethtool_setwol,
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
.get_ts_info = bfin_mac_ethtool_get_ts_info,
+#endif
};
/**************************************************************************/
@@ -649,6 +655,20 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
+static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
+{
+ u32 ipn = 1000000000UL / input_clk;
+ u32 ppn = 1;
+ unsigned int shift = 0;
+
+ while (ppn <= ipn) {
+ ppn <<= 1;
+ shift++;
+ }
+ *shift_result = shift;
+ return 1000000000UL / ppn;
+}
+
static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -798,19 +818,7 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
bfin_read_EMAC_PTP_TXSNAPLO();
bfin_read_EMAC_PTP_TXSNAPHI();
- /*
- * Set registers so that rollover occurs soon to test this.
- */
- bfin_write_EMAC_PTP_TIMELO(0x00000000);
- bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
-
SSYNC();
-
- lp->compare.last_update = 0;
- timecounter_init(&lp->clock,
- &lp->cycles,
- ktime_to_ns(ktime_get_real()));
- timecompare_update(&lp->compare, 0);
}
lp->stamp_cfg = config;
@@ -818,15 +826,6 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
-EFAULT : 0;
}
-static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
-{
- ktime_t sys = ktime_get_real();
-
- pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
- __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
- sys.tv.nsec, cmp->offset, cmp->skew);
-}
-
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
regval = bfin_read_EMAC_PTP_TXSNAPLO();
regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ns = timecounter_cyc2time(&lp->clock,
- regval);
- timecompare_update(&lp->compare, ns);
+ ns = regval << lp->shift;
shhwtstamps.hwtstamp = ns_to_ktime(ns);
- shhwtstamps.syststamp =
- timecompare_transform(&lp->compare, ns);
skb_tstamp_tx(skb, &shhwtstamps);
-
- bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
}
}
}
@@ -888,55 +881,184 @@ static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
regval = bfin_read_EMAC_PTP_RXSNAPLO();
regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
- ns = timecounter_cyc2time(&lp->clock, regval);
- timecompare_update(&lp->compare, ns);
+ ns = regval << lp->shift;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(ns);
- shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+}
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 addend, ppb;
+ u32 input_clk, phc_clk;
+
+ /* Initialize hardware timer */
+ input_clk = get_sclk();
+ phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
+ addend = phc_clk * (1ULL << 32);
+ do_div(addend, input_clk);
+ bfin_write_EMAC_PTP_ADDEND((u32)addend);
+
+ lp->addend = addend;
+ ppb = 1000000000ULL * input_clk;
+ do_div(ppb, phc_clk);
+ lp->max_ppb = ppb - 1000000000ULL - 1ULL;
- bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
-/*
- * bfin_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
{
- u64 stamp;
+ u64 ns;
+ u32 lo, hi;
+
+ lo = bfin_read_EMAC_PTP_TIMELO();
+ hi = bfin_read_EMAC_PTP_TIMEHI();
- stamp = bfin_read_EMAC_PTP_TIMELO();
- stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= lp->shift;
- return stamp;
+ return ns;
}
-#define PTP_CLK 25000000
+static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
+{
+ u32 hi, lo;
-static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+ ns >>= lp->shift;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+
+ bfin_write_EMAC_PTP_TIMELO(lo);
+ bfin_write_EMAC_PTP_TIMEHI(hi);
+}
+
+/* PTP Hardware Clock operations */
+
+static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, addend;
+ int neg_adj = 0;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ addend = lp->addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ bfin_write_EMAC_PTP_ADDEND(addend);
+
+ return 0;
+}
+
+static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ now = bfin_ptp_time_read(lp);
+ now += delta;
+ bfin_ptp_time_write(lp, now);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ return 0;
+}
+
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ ns = bfin_ptp_time_read(lp);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int bfin_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct bfin_mac_local *lp =
+ container_of(ptp, struct bfin_mac_local, caps);
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&lp->phc_lock, flags);
+
+ bfin_ptp_time_write(lp, ns);
+
+ spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+ return 0;
+}
+
+static int bfin_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info bfin_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "BF518 clock",
+ .max_adj = 0,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = bfin_ptp_adjfreq,
+ .adjtime = bfin_ptp_adjtime,
+ .gettime = bfin_ptp_gettime,
+ .settime = bfin_ptp_settime,
+ .enable = bfin_ptp_enable,
+};
+
+static int bfin_phc_init(struct net_device *netdev, struct device *dev)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
- u64 append;
- /* Initialize hardware timer */
- append = PTP_CLK * (1ULL << 32);
- do_div(append, get_sclk());
- bfin_write_EMAC_PTP_ADDEND((u32)append);
-
- memset(&lp->cycles, 0, sizeof(lp->cycles));
- lp->cycles.read = bfin_read_clock;
- lp->cycles.mask = CLOCKSOURCE_MASK(64);
- lp->cycles.mult = 1000000000 / PTP_CLK;
- lp->cycles.shift = 0;
-
- /* Synchronize our NIC clock against system wall clock */
- memset(&lp->compare, 0, sizeof(lp->compare));
- lp->compare.source = &lp->clock;
- lp->compare.target = ktime_get_real;
- lp->compare.num_samples = 10;
+ lp->caps = bfin_ptp_caps;
+ lp->caps.max_adj = lp->max_ppb;
+ lp->clock = ptp_clock_register(&lp->caps, dev);
+ if (IS_ERR(lp->clock))
+ return PTR_ERR(lp->clock);
- /* Initialize hwstamp config */
- lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+ lp->phc_index = ptp_clock_index(lp->clock);
+ spin_lock_init(&lp->phc_lock);
+
+ return 0;
+}
+
+static void bfin_phc_release(struct bfin_mac_local *lp)
+{
+ ptp_clock_unregister(lp->clock);
}
#else
@@ -945,6 +1067,8 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
+# define bfin_phc_init(netdev, dev) 0
+# define bfin_phc_release(lp)
#endif
static inline void _tx_reclaim_skb(void)
@@ -1479,7 +1603,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
#endif
};
-static int __devinit bfin_mac_probe(struct platform_device *pdev)
+static int bfin_mac_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct bfin_mac_local *lp;
@@ -1579,12 +1703,17 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
}
bfin_mac_hwtstamp_init(ndev);
+ if (bfin_phc_init(ndev, &pdev->dev)) {
+ dev_err(&pdev->dev, "Cannot register PHC device!\n");
+ goto out_err_phc;
+ }
/* now, print out the card info, in a short format.. */
netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
return 0;
+out_err_phc:
out_err_reg_ndev:
free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
@@ -1598,11 +1727,13 @@ out_err_probe_mac:
return rc;
}
-static int __devexit bfin_mac_remove(struct platform_device *pdev)
+static int bfin_mac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct bfin_mac_local *lp = netdev_priv(ndev);
+ bfin_phc_release(lp);
+
platform_set_drvdata(pdev, NULL);
lp->mii_bus->priv = NULL;
@@ -1655,7 +1786,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
#define bfin_mac_resume NULL
#endif /* CONFIG_PM */
-static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
+static int bfin_mii_bus_probe(struct platform_device *pdev)
{
struct mii_bus *miibus;
struct bfin_mii_bus_platform_data *mii_bus_pd;
@@ -1733,7 +1864,7 @@ out_err_alloc:
return rc;
}
-static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
+static int bfin_mii_bus_remove(struct platform_device *pdev)
{
struct mii_bus *miibus = platform_get_drvdata(pdev);
struct bfin_mii_bus_platform_data *mii_bus_pd =
@@ -1750,7 +1881,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
static struct platform_driver bfin_mii_bus_driver = {
.probe = bfin_mii_bus_probe,
- .remove = __devexit_p(bfin_mii_bus_remove),
+ .remove = bfin_mii_bus_remove,
.driver = {
.name = "bfin_mii_bus",
.owner = THIS_MODULE,
@@ -1759,7 +1890,7 @@ static struct platform_driver bfin_mii_bus_driver = {
static struct platform_driver bfin_mac_driver = {
.probe = bfin_mac_probe,
- .remove = __devexit_p(bfin_mac_remove),
+ .remove = bfin_mac_remove,
.resume = bfin_mac_resume,
.suspend = bfin_mac_suspend,
.driver = {
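
bfin_select_phc_clock() above picks a PTP hardware-clock rate whose tick period is a power of two in nanoseconds, which is why bfin_ptp_time_read()/bfin_ptp_time_write() can convert between counter values and nanoseconds with a plain shift, and bfin_mac_hwtstamp_init() derives the EMAC_PTP_ADDEND value and the adjustable range from it. A standalone sketch of that arithmetic; the 80 MHz input clock is an assumed example (the driver reads the real rate with get_sclk()):

/*
 * Reproduces the bfin_select_phc_clock()/bfin_mac_hwtstamp_init() math
 * for an assumed 80 MHz system clock.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t input_clk = 80000000;                  /* assumed sclk */
        uint32_t ipn = 1000000000UL / input_clk;        /* ns per input tick: 12 */
        uint32_t ppn = 1;
        unsigned int shift = 0;

        /* first power-of-two period (in ns) strictly longer than one input tick */
        while (ppn <= ipn) {
                ppn <<= 1;
                shift++;
        }
        uint32_t phc_clk = 1000000000UL / ppn;          /* 62.5 MHz for ppn = 16 */

        /* phc_clk/input_clk ratio scaled by 2^32, as written to EMAC_PTP_ADDEND */
        uint64_t addend = ((uint64_t)phc_clk << 32) / input_clk;

        /* largest frequency offset (in ppb) the addend can still absorb */
        uint64_t ppb = 1000000000ULL * input_clk / phc_clk;
        int64_t max_ppb = (int64_t)(ppb - 1000000000ULL - 1ULL);

        printf("shift=%u phc_clk=%u Hz addend=0x%08llx max_ppb=%lld\n",
               shift, (unsigned)phc_clk, (unsigned long long)addend,
               (long long)max_ppb);
        /* timestamps then become nanoseconds with ns = counter << shift */
        return 0;
}
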
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c08223..7a07ee07906b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -11,8 +11,7 @@
#define _BFIN_MAC_H_
#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
#include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@ struct bfin_mac_local {
struct mii_bus *mii_bus;
#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
- struct cyclecounter cycles;
- struct timecounter clock;
- struct timecompare compare;
+ u32 addend;
+ unsigned int shift;
+ s32 max_ppb;
struct hwtstamp_config stamp_cfg;
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ int phc_index;
+ spinlock_t phc_lock; /* protects time lo/hi registers */
#endif
};
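
bfin_ptp_adjfreq() above scales the programmed addend by the requested offset in parts per billion: diff = addend * ppb / 10^9, added for a positive request and subtracted for a negative one. A standalone sketch, reusing the addend from the 80 MHz example (again an assumption, not a value from the driver):

/*
 * Mirrors the adjustment math in bfin_ptp_adjfreq() for an assumed
 * base addend of 0xC8000000 (the 80 MHz example above).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t adjust_addend(uint32_t addend, int32_t ppb)
{
        int neg = ppb < 0;
        uint64_t adj = addend;

        if (neg)
                ppb = -ppb;
        adj *= (uint32_t)ppb;

        uint32_t diff = (uint32_t)(adj / 1000000000ULL);

        return neg ? addend - diff : addend + diff;
}

int main(void)
{
        uint32_t addend = 0xC8000000;   /* assumed base value */

        /* slow the clock by 100 ppm, then speed it up by 50 ppb */
        printf("-100000 ppb -> 0x%08x\n", (unsigned)adjust_addend(addend, -100000));
        printf("    +50 ppb -> 0x%08x\n", (unsigned)adjust_addend(addend, 50));
        return 0;
}
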
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 9c77c736f171..aa53115bb38b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1376,7 +1376,7 @@ error:
}
/* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct platform_device *ofdev)
+static int greth_of_probe(struct platform_device *ofdev)
{
struct net_device *dev;
struct greth_private *greth;
@@ -1576,7 +1576,7 @@ error1:
return err;
}
-static int __devexit greth_of_remove(struct platform_device *of_dev)
+static int greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
struct greth_private *greth = netdev_priv(ndev);
@@ -1619,7 +1619,7 @@ static struct platform_driver greth_of_driver = {
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
- .remove = __devexit_p(greth_of_remove),
+ .remove = greth_of_remove,
};
module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 7219123fa0a4..c0bc41a784ca 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -426,7 +426,7 @@ MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descript
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
-static const char version[] __devinitconst =
+static const char version[] =
"acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
@@ -454,8 +454,8 @@ static const struct net_device_ops ace_netdev_ops = {
.ndo_change_mtu = ace_change_mtu,
};
-static int __devinit acenic_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int acenic_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct net_device *dev;
struct ace_private *ap;
@@ -603,7 +603,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
return -ENODEV;
}
-static void __devexit acenic_remove_one(struct pci_dev *pdev)
+static void acenic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ace_private *ap = netdev_priv(dev);
@@ -699,7 +699,7 @@ static struct pci_driver acenic_pci_driver = {
.name = "acenic",
.id_table = acenic_pci_tbl,
.probe = acenic_probe_one,
- .remove = __devexit_p(acenic_remove_one),
+ .remove = acenic_remove_one,
};
static int __init acenic_init(void)
@@ -871,7 +871,7 @@ static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
}
-static int __devinit ace_init(struct net_device *dev)
+static int ace_init(struct net_device *dev)
{
struct ace_private *ap;
struct ace_regs __iomem *regs;
@@ -2824,8 +2824,8 @@ static struct net_device_stats *ace_get_stats(struct net_device *dev)
}
-static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
- u32 dest, int size)
+static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
+ u32 dest, int size)
{
void __iomem *tdest;
short tsize, i;
@@ -2851,7 +2851,7 @@ static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
}
-static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
+static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
void __iomem *tdest;
short tsize = 0, i;
@@ -2882,7 +2882,7 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
* This operation requires the NIC to be halted and is performed with
* interrupts disabled and with the spinlock hold.
*/
-static int __devinit ace_load_firmware(struct net_device *dev)
+static int ace_load_firmware(struct net_device *dev)
{
const struct firmware *fw;
const char *fw_name = "acenic/tg2.bin";
@@ -2962,7 +2962,7 @@ static int __devinit ace_load_firmware(struct net_device *dev)
* Thanks to Stevarino Webinski for helping tracking down the bugs in the
* code i2c readout code by beta testing all my hacks.
*/
-static void __devinit eeprom_start(struct ace_regs __iomem *regs)
+static void eeprom_start(struct ace_regs __iomem *regs)
{
u32 local;
@@ -2991,7 +2991,7 @@ static void __devinit eeprom_start(struct ace_regs __iomem *regs)
}
-static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
+static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
short i;
u32 local;
@@ -3028,7 +3028,7 @@ static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
}
-static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
+static int eeprom_check_ack(struct ace_regs __iomem *regs)
{
int state;
u32 local;
@@ -3056,7 +3056,7 @@ static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
}
-static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
+static void eeprom_stop(struct ace_regs __iomem *regs)
{
u32 local;
@@ -3091,8 +3091,7 @@ static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
/*
* Read a whole byte from the EEPROM.
*/
-static int __devinit read_eeprom_byte(struct net_device *dev,
- unsigned long offset)
+static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
{
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 689dfcafc6d4..3789affbc0e5 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -639,12 +639,12 @@ static void lance_set_multicast(struct net_device *dev)
netif_wake_queue(dev);
}
-static int __devinit a2065_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent);
-static void __devexit a2065_remove_one(struct zorro_dev *z);
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void a2065_remove_one(struct zorro_dev *z);
-static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id a2065_zorro_tbl[] = {
{ ZORRO_PROD_CBM_A2065_1 },
{ ZORRO_PROD_CBM_A2065_2 },
{ ZORRO_PROD_AMERISTAR_A2065 },
@@ -656,7 +656,7 @@ static struct zorro_driver a2065_driver = {
.name = "a2065",
.id_table = a2065_zorro_tbl,
.probe = a2065_init_one,
- .remove = __devexit_p(a2065_remove_one),
+ .remove = a2065_remove_one,
};
static const struct net_device_ops lance_netdev_ops = {
@@ -670,8 +670,8 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit a2065_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct net_device *dev;
struct lance_private *priv;
@@ -754,7 +754,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
}
-static void __devexit a2065_remove_one(struct zorro_dev *z)
+static void a2065_remove_one(struct zorro_dev *z)
{
struct net_device *dev = zorro_get_drvdata(z);
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index e10ffad525a7..60e2b701afe7 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -671,7 +671,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
#endif
};
-static int __devinit am79c961_probe(struct platform_device *pdev)
+static int am79c961_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 3491d4312fc9..42d4e6ad58a5 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1702,7 +1702,7 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
}
-static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
+static void amd8111e_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
@@ -1774,7 +1774,7 @@ static void amd8111e_config_ipg(struct net_device* dev)
}
-static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
+static void amd8111e_probe_ext_phy(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
int i;
@@ -1810,7 +1810,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
#endif
};
-static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
+static int amd8111e_probe_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err,i,pm_cap;
@@ -1976,7 +1976,7 @@ static struct pci_driver amd8111e_driver = {
.name = MODULE_NAME,
.id_table = amd8111e_pci_tbl,
.probe = amd8111e_probe_one,
- .remove = __devexit_p(amd8111e_remove_one),
+ .remove = amd8111e_remove_one,
.suspend = amd8111e_suspend,
.resume = amd8111e_resume
};
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index f2958df9a1e4..98f4522fd17b 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -682,7 +682,7 @@ static void set_multicast_list(struct net_device *dev)
}
-static void __devexit ariadne_remove_one(struct zorro_dev *z)
+static void ariadne_remove_one(struct zorro_dev *z)
{
struct net_device *dev = zorro_get_drvdata(z);
@@ -692,7 +692,7 @@ static void __devexit ariadne_remove_one(struct zorro_dev *z)
free_netdev(dev);
}
-static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id ariadne_zorro_tbl[] = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
{ 0 }
};
@@ -710,8 +710,8 @@ static const struct net_device_ops ariadne_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit ariadne_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
unsigned long board = z->resource.start;
unsigned long base_addr = board + ARIADNE_LANCE;
@@ -774,7 +774,7 @@ static struct zorro_driver ariadne_driver = {
.name = "ariadne",
.id_table = ariadne_zorro_tbl,
.probe = ariadne_init_one,
- .remove = __devexit_p(ariadne_remove_one),
+ .remove = ariadne_remove_one,
};
static int __init ariadne_init_module(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index f195acfa2df7..2ea221ed4777 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1016,7 +1016,7 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit au1000_probe(struct platform_device *pdev)
+static int au1000_probe(struct platform_device *pdev)
{
static unsigned version_printed;
struct au1000_private *aup = NULL;
@@ -1295,7 +1295,7 @@ out:
return err;
}
-static int __devexit au1000_remove(struct platform_device *pdev)
+static int au1000_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1000_private *aup = netdev_priv(dev);
@@ -1340,7 +1340,7 @@ static int __devexit au1000_remove(struct platform_device *pdev)
static struct platform_driver au1000_eth_driver = {
.probe = au1000_probe,
- .remove = __devexit_p(au1000_remove),
+ .remove = au1000_remove,
.driver = {
.name = "au1000-eth",
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 7203b522f234..baca0bd1b393 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -72,7 +72,7 @@
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
-static char version[] __devinitdata =
+static char version[] =
"declance.c: v0.011 by Linux MIPS DECstation task force\n";
MODULE_AUTHOR("Linux MIPS DECstation task force");
@@ -1020,7 +1020,7 @@ static const struct net_device_ops lance_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit dec_lance_probe(struct device *bdev, const int type)
+static int dec_lance_probe(struct device *bdev, const int type)
{
static unsigned version_printed;
static const char fmt[] = "declance%d";
@@ -1322,7 +1322,7 @@ static void __exit dec_lance_platform_remove(void)
}
#ifdef CONFIG_TC
-static int __devinit dec_lance_tc_probe(struct device *dev);
+static int dec_lance_tc_probe(struct device *dev);
static int __exit dec_lance_tc_remove(struct device *dev);
static const struct tc_device_id dec_lance_tc_table[] = {
@@ -1341,7 +1341,7 @@ static struct tc_driver dec_lance_tc_driver = {
},
};
-static int __devinit dec_lance_tc_probe(struct device *dev)
+static int dec_lance_tc_probe(struct device *dev)
{
int status = dec_lance_probe(dev, PMAD_LANCE);
if (!status)
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
index c771de71612a..34a485363d5b 100644
--- a/drivers/net/ethernet/amd/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -338,21 +338,21 @@ static struct eisa_driver depca_eisa_driver = {
.driver = {
.name = depca_string,
.probe = depca_eisa_probe,
- .remove = __devexit_p (depca_device_remove)
+ .remove = depca_device_remove
}
};
#endif
static int depca_isa_probe (struct platform_device *);
-static int __devexit depca_isa_remove(struct platform_device *pdev)
+static int depca_isa_remove(struct platform_device *pdev)
{
return depca_device_remove(&pdev->dev);
}
static struct platform_driver depca_isa_driver = {
.probe = depca_isa_probe,
- .remove = __devexit_p(depca_isa_remove),
+ .remove = depca_isa_remove,
.driver = {
.name = depca_string,
},
@@ -1320,7 +1320,7 @@ static enum depca_type __init depca_shmem_probe (ulong *mem_start)
return adapter;
}
-static int __devinit depca_isa_probe (struct platform_device *device)
+static int depca_isa_probe(struct platform_device *device)
{
struct net_device *dev;
struct depca_private *lp;
@@ -1412,7 +1412,7 @@ static int __init depca_eisa_probe (struct device *device)
}
#endif
-static int __devexit depca_device_remove (struct device *device)
+static int depca_device_remove(struct device *device)
{
struct net_device *dev;
struct depca_private *lp;
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 8baff4e5d964..0c61fd50d882 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -46,11 +46,9 @@ struct hplance_private {
* plus board-specific init, open and close actions.
* Oh, and we need to tell the generic code how to read and write LANCE registers...
*/
-static int __devinit hplance_init_one(struct dio_dev *d,
- const struct dio_device_id *ent);
-static void __devinit hplance_init(struct net_device *dev,
- struct dio_dev *d);
-static void __devexit hplance_remove_one(struct dio_dev *d);
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent);
+static void hplance_init(struct net_device *dev, struct dio_dev *d);
+static void hplance_remove_one(struct dio_dev *d);
static void hplance_writerap(void *priv, unsigned short value);
static void hplance_writerdp(void *priv, unsigned short value);
static unsigned short hplance_readrdp(void *priv);
@@ -66,7 +64,7 @@ static struct dio_driver hplance_driver = {
.name = "hplance",
.id_table = hplance_dio_tbl,
.probe = hplance_init_one,
- .remove = __devexit_p(hplance_remove_one),
+ .remove = hplance_remove_one,
};
static const struct net_device_ops hplance_netdev_ops = {
@@ -83,8 +81,7 @@ static const struct net_device_ops hplance_netdev_ops = {
};
/* Find all the HP Lance boards and initialise them... */
-static int __devinit hplance_init_one(struct dio_dev *d,
- const struct dio_device_id *ent)
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent)
{
struct net_device *dev;
int err = -ENOMEM;
@@ -118,7 +115,7 @@ static int __devinit hplance_init_one(struct dio_dev *d,
return err;
}
-static void __devexit hplance_remove_one(struct dio_dev *d)
+static void hplance_remove_one(struct dio_dev *d)
{
struct net_device *dev = dio_get_drvdata(d);
@@ -128,7 +125,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
}
/* Initialise a single lance board at the given DIO device */
-static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
+static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 86b6d8e4e6cd..a227ccdcb9b5 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1443,7 +1443,7 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
/* only probes for non-PCI devices, the rest are handled by
* pci_register_driver via pcnet32_probe_pci */
-static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
+static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
{
unsigned int *port, ioaddr;
@@ -1462,7 +1462,7 @@ static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
}
}
-static int __devinit
+static int
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned long ioaddr;
@@ -1521,7 +1521,7 @@ static const struct net_device_ops pcnet32_netdev_ops = {
* Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
* pdev will be NULL when called from pcnet32_probe_vlbus.
*/
-static int __devinit
+static int
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
struct pcnet32_private *lp;
@@ -2823,7 +2823,7 @@ static int pcnet32_pm_resume(struct pci_dev *pdev)
return 0;
}
-static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
+static void pcnet32_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -2844,7 +2844,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
static struct pci_driver pcnet32_driver = {
.name = DRV_NAME,
.probe = pcnet32_probe_pci,
- .remove = __devexit_p(pcnet32_remove_one),
+ .remove = pcnet32_remove_one,
.id_table = pcnet32_pci_tbl,
.suspend = pcnet32_pm_suspend,
.resume = pcnet32_pm_resume,
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index d7a3533d990b..c2d696c88e46 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1304,9 +1304,9 @@ static const struct net_device_ops sparc_lance_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit sparc_lance_probe_one(struct platform_device *op,
- struct platform_device *ledma,
- struct platform_device *lebuffer)
+static int sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1488,7 +1488,7 @@ fail:
return -ENODEV;
}
-static int __devinit sunlance_sbus_probe(struct platform_device *op)
+static int sunlance_sbus_probe(struct platform_device *op)
{
struct platform_device *parent = to_platform_device(op->dev.parent);
struct device_node *parent_dp = parent->dev.of_node;
@@ -1504,7 +1504,7 @@ static int __devinit sunlance_sbus_probe(struct platform_device *op)
return err;
}
-static int __devexit sunlance_sbus_remove(struct platform_device *op)
+static int sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = lp->dev;
@@ -1536,7 +1536,7 @@ static struct platform_driver sunlance_sbus_driver = {
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
- .remove = __devexit_p(sunlance_sbus_remove),
+ .remove = sunlance_sbus_remove,
};
module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 855bdafb1a87..f36bbd6d5085 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1244,7 +1244,7 @@ static const struct net_device_ops bmac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
+static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
int j, rev, ret;
struct bmac_data *bp;
@@ -1602,7 +1602,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
}
#endif
-static int __devexit bmac_remove(struct macio_dev *mdev)
+static int bmac_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e1df4b76c885..842fe7684904 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -106,7 +106,7 @@ static const struct net_device_ops mace_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
+static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
struct device_node *mace = macio_get_of_node(mdev);
struct net_device *dev;
@@ -271,7 +271,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
return rc;
}
-static int __devexit mace_remove(struct macio_dev *mdev)
+static int mace_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct mace_data *mp;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a92ddee7f665..a206779c68cf 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -195,7 +195,7 @@ static const struct net_device_ops mace_netdev_ops = {
* model of Macintrash has a MACE (AV macintoshes)
*/
-static int __devinit mace_probe(struct platform_device *pdev)
+static int mace_probe(struct platform_device *pdev)
{
int j;
struct mace_data *mp;
@@ -746,7 +746,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");
-static int __devexit mac_mace_device_remove (struct platform_device *pdev)
+static int mac_mace_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mace_data *mp = netdev_priv(dev);
@@ -768,7 +768,7 @@ static int __devexit mac_mace_device_remove (struct platform_device *pdev)
static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
- .remove = __devexit_p(mac_mace_device_remove),
+ .remove = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d19f82f7597a..56d3f697e0c7 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -643,7 +643,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @adapter: board private structure to initialize
*
*/
-static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter)
+static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
{
return 0;
}
@@ -702,7 +702,7 @@ struct atl1c_platform_patch {
u32 patch_flag;
#define ATL1C_LINK_PATCH 0x1
};
-static const struct atl1c_platform_patch plats[] __devinitconst = {
+static const struct atl1c_platform_patch plats[] = {
{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
@@ -725,7 +725,7 @@ static const struct atl1c_platform_patch plats[] __devinitconst = {
{0},
};
-static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
+static void atl1c_patch_assign(struct atl1c_hw *hw)
{
struct pci_dev *pdev = hw->adapter->pdev;
u32 misc_ctrl;
@@ -764,7 +764,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
-static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
+static int atl1c_sw_init(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -2442,8 +2442,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
-static int __devinit atl1c_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1c_adapter *adapter;
@@ -2587,7 +2586,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
-static void __devexit atl1c_remove(struct pci_dev *pdev)
+static void atl1c_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
@@ -2697,7 +2696,7 @@ static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
- .remove = __devexit_p(atl1c_remove),
+ .remove = atl1c_remove,
.shutdown = atl1c_shutdown,
.err_handler = &atl1c_err_handler,
.driver.pm = &atl1c_pm_ops,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e213da29e73d..e4466a36d106 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -534,7 +534,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
* @adapter: board private structure to initialize
*
*/
-static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
+static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
return 0;
}
@@ -547,7 +547,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
-static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
+static int atl1e_sw_init(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -2235,8 +2235,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
-static int __devinit atl1e_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1e_adapter *adapter = NULL;
@@ -2387,7 +2386,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
-static void __devexit atl1e_remove(struct pci_dev *pdev)
+static void atl1e_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1e_adapter *adapter = netdev_priv(netdev);
@@ -2499,7 +2498,7 @@ static struct pci_driver atl1e_driver = {
.name = atl1e_driver_name,
.id_table = atl1e_pci_tbl,
.probe = atl1e_probe,
- .remove = __devexit_p(atl1e_remove),
+ .remove = atl1e_remove,
/* Power Management Hooks */
#ifdef CONFIG_PM
.suspend = atl1e_suspend,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index b5086f1e637f..fa314282c9ad 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -40,7 +40,7 @@
#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
#define ATL1E_PARAM(x, desc) \
- static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
+ static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
static unsigned int num_##x; \
module_param_array_named(x, x, int, &num_##x, 0); \
MODULE_PARM_DESC(x, desc);
@@ -116,7 +116,8 @@ struct atl1e_option {
} arg;
};
-static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
+static int atl1e_validate_option(int *value, struct atl1e_option *opt,
+ struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -177,7 +178,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
-void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
+void atl1e_check_options(struct atl1e_adapter *adapter)
{
int bd = adapter->bd_number;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7bae2ad7a7c0..71b3d7daa21d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -113,7 +113,7 @@ static const struct ethtool_ops atl1_ethtool_ops;
*
* Default Value: 100 (200us)
*/
-static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static unsigned int num_int_mod_timer;
module_param_array_named(int_mod_timer, int_mod_timer, int,
&num_int_mod_timer, 0);
@@ -143,8 +143,8 @@ struct atl1_option {
} arg;
};
-static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
- struct pci_dev *pdev)
+static int atl1_validate_option(int *value, struct atl1_option *opt,
+ struct pci_dev *pdev)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -204,7 +204,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
-static void __devinit atl1_check_options(struct atl1_adapter *adapter)
+static void atl1_check_options(struct atl1_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
@@ -945,7 +945,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
-static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
+static int atl1_sw_init(struct atl1_adapter *adapter)
{
struct atl1_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
@@ -2934,8 +2934,7 @@ static const struct net_device_ops atl1_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
-static int __devinit atl1_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1_adapter *adapter;
@@ -3113,7 +3112,7 @@ err_request_regions:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
-static void __devexit atl1_remove(struct pci_dev *pdev)
+static void atl1_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter;
@@ -3146,7 +3145,7 @@ static struct pci_driver atl1_driver = {
.name = ATLX_DRIVER_NAME,
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
- .remove = __devexit_p(atl1_remove),
+ .remove = atl1_remove,
.shutdown = atl1_shutdown,
.driver.pm = ATL1_PM_OPS,
};
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 623dd8635c46..aab83a2d4e07 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -83,7 +83,7 @@ static void atl2_check_options(struct atl2_adapter *adapter);
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
-static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
+static int atl2_sw_init(struct atl2_adapter *adapter)
{
struct atl2_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -1338,8 +1338,7 @@ static const struct net_device_ops atl2_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
-static int __devinit atl2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
@@ -1498,7 +1497,7 @@ err_dma:
*/
/* FIXME: write the original MAC address back in case it was changed from a
* BIOS-set value, as in atl1 -- CHS */
-static void __devexit atl2_remove(struct pci_dev *pdev)
+static void atl2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1705,7 +1704,7 @@ static struct pci_driver atl2_driver = {
.name = atl2_driver_name,
.id_table = atl2_pci_tbl,
.probe = atl2_probe,
- .remove = __devexit_p(atl2_remove),
+ .remove = atl2_remove,
/* Power Management Hooks */
.suspend = atl2_suspend,
#ifdef CONFIG_PM
@@ -2845,12 +2844,12 @@ static void atl2_force_ps(struct atl2_hw *hw)
*/
#define ATL2_PARAM(X, desc) \
- static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+ static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
MODULE_PARM_DESC(X, desc);
#else
#define ATL2_PARAM(X, desc) \
- static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
+ static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
@@ -2934,7 +2933,7 @@ struct atl2_option {
} arg;
};
-static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
+static int atl2_validate_option(int *value, struct atl2_option *opt)
{
int i;
struct atl2_opt_list *ent;
@@ -2992,7 +2991,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
-static void __devinit atl2_check_options(struct atl2_adapter *adapter)
+static void atl2_check_options(struct atl2_adapter *adapter)
{
int val;
struct atl2_option opt;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 4bd416b72e65..f55267363f35 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -102,6 +102,7 @@ config TIGON3
depends on PCI
select PHYLIB
select HWMON
+ select PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9786c0e9890e..219f6226fcb1 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2083,7 +2083,7 @@ out:
return err;
}
-static int __devinit b44_get_invariants(struct b44 *bp)
+static int b44_get_invariants(struct b44 *bp)
{
struct ssb_device *sdev = bp->sdev;
int err = 0;
@@ -2141,8 +2141,8 @@ static const struct net_device_ops b44_netdev_ops = {
#endif
};
-static int __devinit b44_init_one(struct ssb_device *sdev,
- const struct ssb_device_id *ent)
+static int b44_init_one(struct ssb_device *sdev,
+ const struct ssb_device_id *ent)
{
struct net_device *dev;
struct b44 *bp;
@@ -2249,7 +2249,7 @@ out:
return err;
}
-static void __devexit b44_remove_one(struct ssb_device *sdev)
+static void b44_remove_one(struct ssb_device *sdev)
{
struct net_device *dev = ssb_get_drvdata(sdev);
@@ -2340,7 +2340,7 @@ static struct ssb_driver b44_ssb_driver = {
.name = DRV_MODULE_NAME,
.id_table = b44_ssb_tbl,
.probe = b44_init_one,
- .remove = __devexit_p(b44_remove_one),
+ .remove = b44_remove_one,
.suspend = b44_suspend,
.resume = b44_resume,
};
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index c7ca7ec065ee..39387d67b722 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1612,7 +1612,7 @@ static const struct net_device_ops bcm_enet_ops = {
/*
* allocate netdevice, request register memory and register device.
*/
-static int __devinit bcm_enet_probe(struct platform_device *pdev)
+static int bcm_enet_probe(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -1830,7 +1830,7 @@ out:
/*
* exit func, stops hardware and unregisters netdevice
*/
-static int __devexit bcm_enet_remove(struct platform_device *pdev)
+static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -1877,7 +1877,7 @@ static int __devexit bcm_enet_remove(struct platform_device *pdev)
struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove = __devexit_p(bcm_enet_remove),
+ .remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
.owner = THIS_MODULE,
@@ -1887,7 +1887,7 @@ struct platform_driver bcm63xx_enet_driver = {
/*
* reserve & remap memory space shared between all macs
*/
-static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
+static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int iomem_size;
@@ -1908,7 +1908,7 @@ static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
+static int bcm_enet_shared_remove(struct platform_device *pdev)
{
struct resource *res;
@@ -1924,7 +1924,7 @@ static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
*/
struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
- .remove = __devexit_p(bcm_enet_shared_remove),
+ .remove = bcm_enet_shared_remove,
.driver = {
.name = "bcm63xx_enet_shared",
.owner = THIS_MODULE,
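The bnx2.c changes below are dominated by a second mechanical rename: the driver-private helpers gain a BNX2_ prefix so they no longer look like generic names. REG_RD/REG_WR/REG_WR16 become BNX2_RD/BNX2_WR/BNX2_WR16, CHIP_NUM()/CHIP_ID() become BNX2_CHIP()/BNX2_CHIP_ID(), the ring bookkeeping macros and the sw_bd/sw_pg/rx_bd structures are prefixed the same way, and BCM_PAGE_SIZE becomes BNX2_PAGE_SIZE. The definitions live in bnx2.h and are not part of this excerpt; purely as an illustration, such accessors are typically thin wrappers over the mapped register BAR, roughly:

/* Illustrative sketch only, assuming the usual MMIO-wrapper shape; the
 * real macros are defined in drivers/net/ethernet/broadcom/bnx2.h. */
#define BNX2_RD(bp, offset)		readl((bp)->regview + (offset))
#define BNX2_WR(bp, offset, val)	writel((val), (bp)->regview + (offset))
#define BNX2_WR16(bp, offset, val)	writew((val), (bp)->regview + (offset))

/* Chip identification helpers: mask the revision bits off chip_id. */
#define BNX2_CHIP(bp)		((bp)->chip_id & 0xffff0000)
#define BNX2_CHIP_ID(bp)	((bp)->chip_id & 0xfffffff0)
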
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d4310700c7a7..a1adfaf87f49 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -71,7 +71,7 @@
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
@@ -106,7 +106,7 @@ typedef enum {
/* indexed by board_t, above */
static struct {
char *name;
-} board_info[] __devinitdata = {
+} board_info[] = {
{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
{ "HP NC370T Multifunction Gigabit Server Adapter" },
{ "HP NC370i Multifunction Gigabit Server Adapter" },
@@ -260,10 +260,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
* needs to be skipped.
*/
diff = txr->tx_prod - txr->tx_cons;
- if (unlikely(diff >= TX_DESC_CNT)) {
+ if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
diff &= 0xffff;
- if (diff == TX_DESC_CNT)
- diff = MAX_TX_DESC_CNT;
+ if (diff == BNX2_TX_DESC_CNT)
+ diff = BNX2_MAX_TX_DESC_CNT;
}
return bp->tx_ring_size - diff;
}
@@ -274,8 +274,8 @@ bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
u32 val;
spin_lock_bh(&bp->indirect_lock);
- REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
- val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
+ BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+ val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
spin_unlock_bh(&bp->indirect_lock);
return val;
}
@@ -284,8 +284,8 @@ static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
spin_lock_bh(&bp->indirect_lock);
- REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
- REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
+ BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+ BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
spin_unlock_bh(&bp->indirect_lock);
}
@@ -306,21 +306,21 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
offset += cid_addr;
spin_lock_bh(&bp->indirect_lock);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
- REG_WR(bp, BNX2_CTX_CTX_DATA, val);
- REG_WR(bp, BNX2_CTX_CTX_CTRL,
- offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+ BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
+ BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
+ offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
for (i = 0; i < 5; i++) {
- val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+ val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
break;
udelay(5);
}
} else {
- REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
- REG_WR(bp, BNX2_CTX_DATA, val);
+ BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
+ BNX2_WR(bp, BNX2_CTX_DATA, val);
}
spin_unlock_bh(&bp->indirect_lock);
}
@@ -434,7 +434,6 @@ struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
return cp;
}
-EXPORT_SYMBOL(bnx2_cnic_probe);
static void
bnx2_cnic_stop(struct bnx2 *bp)
@@ -494,11 +493,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
int i, ret;
if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
- REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
- REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
udelay(40);
}
@@ -506,16 +505,16 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
val1 = (bp->phy_addr << 21) | (reg << 16) |
BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
BNX2_EMAC_MDIO_COMM_START_BUSY;
- REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
for (i = 0; i < 50; i++) {
udelay(10);
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
val1 &= BNX2_EMAC_MDIO_COMM_DATA;
break;
@@ -532,11 +531,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
}
if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
- REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
- REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
udelay(40);
}
@@ -551,11 +550,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
int i, ret;
if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
- REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
- REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
udelay(40);
}
@@ -563,12 +562,12 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
val1 = (bp->phy_addr << 21) | (reg << 16) | val |
BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
- REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
for (i = 0; i < 50; i++) {
udelay(10);
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -581,11 +580,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
ret = 0;
if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
- val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
- REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
- REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
udelay(40);
}
@@ -601,10 +600,10 @@ bnx2_disable_int(struct bnx2 *bp)
for (i = 0; i < bp->irq_nvecs; i++) {
bnapi = &bp->bnx2_napi[i];
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
}
- REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+ BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
@@ -616,16 +615,16 @@ bnx2_enable_int(struct bnx2 *bp)
for (i = 0; i < bp->irq_nvecs; i++) {
bnapi = &bp->bnx2_napi[i];
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
- bnapi->last_status_idx);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+ bnapi->last_status_idx);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
}
- REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+ BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
@@ -824,7 +823,7 @@ bnx2_free_mem(struct bnx2 *bp)
for (i = 0; i < bp->ctx_pages; i++) {
if (bp->ctx_blk[i]) {
- dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+ dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
bp->ctx_blk[i],
bp->ctx_blk_mapping[i]);
bp->ctx_blk[i] = NULL;
@@ -887,13 +886,13 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
if (bp->ctx_pages == 0)
bp->ctx_pages = 1;
for (i = 0; i < bp->ctx_pages; i++) {
bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
- BCM_PAGE_SIZE,
+ BNX2_PAGE_SIZE,
&bp->ctx_blk_mapping[i],
GFP_KERNEL);
if (bp->ctx_blk[i] == NULL)
@@ -1034,7 +1033,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp)
}
if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+ (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
u32 val;
bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
@@ -1294,14 +1293,14 @@ bnx2_set_mac_link(struct bnx2 *bp)
{
u32 val;
- REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
+ BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
if (bp->link_up && (bp->line_speed == SPEED_1000) &&
(bp->duplex == DUPLEX_HALF)) {
- REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
+ BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
}
/* Configure the EMAC mode register. */
- val = REG_RD(bp, BNX2_EMAC_MODE);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
@@ -1310,7 +1309,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
if (bp->link_up) {
switch (bp->line_speed) {
case SPEED_10:
- if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+ if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
val |= BNX2_EMAC_MODE_PORT_MII_10M;
break;
}
@@ -1333,25 +1332,25 @@ bnx2_set_mac_link(struct bnx2 *bp)
/* Set the MAC to operate in the appropriate duplex mode. */
if (bp->duplex == DUPLEX_HALF)
val |= BNX2_EMAC_MODE_HALF_DUPLEX;
- REG_WR(bp, BNX2_EMAC_MODE, val);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
/* Enable/disable rx PAUSE. */
bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
if (bp->flow_ctrl & FLOW_CTRL_RX)
bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
- REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
/* Enable/disable tx PAUSE. */
- val = REG_RD(bp, BNX2_EMAC_TX_MODE);
+ val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
if (bp->flow_ctrl & FLOW_CTRL_TX)
val |= BNX2_EMAC_TX_MODE_FLOW_EN;
- REG_WR(bp, BNX2_EMAC_TX_MODE, val);
+ BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
/* Acknowledge the interrupt. */
- REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
+ BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
bnx2_init_all_rx_contexts(bp);
}
@@ -1360,7 +1359,7 @@ static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (CHIP_NUM(bp) == CHIP_NUM_5709))
+ (BNX2_CHIP(bp) == BNX2_CHIP_5709))
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_GP_STATUS);
}
@@ -1369,7 +1368,7 @@ static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (CHIP_NUM(bp) == CHIP_NUM_5709))
+ (BNX2_CHIP(bp) == BNX2_CHIP_5709))
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
@@ -1386,7 +1385,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp)
if (bp->autoneg & AUTONEG_SPEED)
bp->advertising |= ADVERTISED_2500baseX_Full;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
bnx2_read_phy(bp, bp->mii_up1, &up1);
@@ -1396,7 +1395,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp)
ret = 0;
}
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
@@ -1412,7 +1411,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
return 0;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
bnx2_read_phy(bp, bp->mii_up1, &up1);
@@ -1422,7 +1421,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
ret = 1;
}
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
@@ -1438,7 +1437,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
return;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
u32 val;
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
@@ -1454,7 +1453,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
- } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (!err)
bmcr |= BCM5708S_BMCR_FORCE_2500;
@@ -1482,7 +1481,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
return;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
u32 val;
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
@@ -1496,7 +1495,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
- } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
if (!err)
bmcr &= ~BCM5708S_BMCR_FORCE_2500;
@@ -1547,14 +1546,14 @@ bnx2_set_link(struct bnx2 *bp)
bnx2_disable_bmsr1(bp);
if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
- (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+ (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
u32 val, an_dbg;
if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
bnx2_5706s_force_link_dn(bp, 0);
bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
}
- val = REG_RD(bp, BNX2_EMAC_STATUS);
+ val = BNX2_RD(bp, BNX2_EMAC_STATUS);
bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
@@ -1571,11 +1570,11 @@ bnx2_set_link(struct bnx2 *bp)
bp->link_up = 1;
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
bnx2_5706s_linkup(bp);
- else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
bnx2_5708s_linkup(bp);
- else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_5709s_linkup(bp);
}
else {
@@ -1757,7 +1756,7 @@ __acquires(&bp->phy_lock)
new_bmcr = bmcr & ~BMCR_ANENABLE;
new_bmcr |= BMCR_SPEED1000;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
if (bp->req_line_speed == SPEED_2500)
bnx2_enable_forced_2g5(bp);
else if (bp->req_line_speed == SPEED_1000) {
@@ -1765,7 +1764,7 @@ __acquires(&bp->phy_lock)
new_bmcr &= ~0x2000;
}
- } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
if (bp->req_line_speed == SPEED_2500)
new_bmcr |= BCM5708S_BMCR_FORCE_2500;
else
@@ -1942,8 +1941,8 @@ bnx2_send_heart_beat(struct bnx2 *bp)
spin_lock(&bp->indirect_lock);
msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
- REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
- REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+ BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+ BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
spin_unlock(&bp->indirect_lock);
}
@@ -2230,9 +2229,9 @@ bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
bnx2_write_phy(bp, BCM5708S_UP1, val);
}
- if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
- (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
/* increase tx signal amplitude */
bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
BCM5708S_BLK_ADDR_TX_MISC);
@@ -2268,8 +2267,8 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
- if (CHIP_NUM(bp) == CHIP_NUM_5706)
- REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
+ BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
if (bp->dev->mtu > 1500) {
u32 val;
@@ -2368,7 +2367,7 @@ __acquires(&bp->phy_lock)
bp->mii_adv = MII_ADVERTISE;
bp->mii_lpa = MII_LPA;
- REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
goto setup_phy;
@@ -2379,11 +2378,11 @@ __acquires(&bp->phy_lock)
bp->phy_id |= val & 0xffff;
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
rc = bnx2_init_5706s_phy(bp, reset_phy);
- else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
rc = bnx2_init_5708s_phy(bp, reset_phy);
- else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
rc = bnx2_init_5709s_phy(bp, reset_phy);
}
else {
@@ -2402,10 +2401,10 @@ bnx2_set_mac_loopback(struct bnx2 *bp)
{
u32 mac_mode;
- mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+ mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
mac_mode &= ~BNX2_EMAC_MODE_PORT;
mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
- REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+ BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
bp->link_up = 1;
return 0;
}
@@ -2431,13 +2430,13 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
msleep(100);
}
- mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+ mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
BNX2_EMAC_MODE_25G_MODE);
mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
- REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+ BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
bp->link_up = 1;
return 0;
}
@@ -2449,7 +2448,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
u32 mcp_p0, mcp_p1;
netdev_err(dev, "<--- start MCP states dump --->\n");
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
mcp_p0 = BNX2_MCP_STATE_P0;
mcp_p1 = BNX2_MCP_STATE_P1;
} else {
@@ -2538,10 +2537,10 @@ bnx2_init_5709_context(struct bnx2 *bp)
u32 val;
val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
- val |= (BCM_PAGE_BITS - 8) << 16;
- REG_WR(bp, BNX2_CTX_COMMAND, val);
+ val |= (BNX2_PAGE_BITS - 8) << 16;
+ BNX2_WR(bp, BNX2_CTX_COMMAND, val);
for (i = 0; i < 10; i++) {
- val = REG_RD(bp, BNX2_CTX_COMMAND);
+ val = BNX2_RD(bp, BNX2_CTX_COMMAND);
if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
break;
udelay(2);
@@ -2553,20 +2552,20 @@ bnx2_init_5709_context(struct bnx2 *bp)
int j;
if (bp->ctx_blk[i])
- memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
+ memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
else
return -ENOMEM;
- REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
- (bp->ctx_blk_mapping[i] & 0xffffffff) |
- BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
- REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
- (u64) bp->ctx_blk_mapping[i] >> 32);
- REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
- BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+ BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+ (bp->ctx_blk_mapping[i] & 0xffffffff) |
+ BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+ BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+ (u64) bp->ctx_blk_mapping[i] >> 32);
+ BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+ BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
for (j = 0; j < 10; j++) {
- val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+ val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
break;
udelay(5);
@@ -2591,7 +2590,7 @@ bnx2_init_context(struct bnx2 *bp)
vcid--;
- if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
u32 new_vcid;
vcid_addr = GET_PCID_ADDR(vcid);
@@ -2612,8 +2611,8 @@ bnx2_init_context(struct bnx2 *bp)
vcid_addr += (i << PHY_CTX_SHIFT);
pcid_addr += (i << PHY_CTX_SHIFT);
- REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
- REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+ BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+ BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
/* Zero out the context. */
for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
@@ -2633,7 +2632,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
if (good_mbuf == NULL)
return -ENOMEM;
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
good_mbuf_cnt = 0;
@@ -2678,21 +2677,21 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
val = (mac_addr[0] << 8) | mac_addr[1];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
+ BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
+ BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
dma_addr_t mapping;
- struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
- struct rx_bd *rxbd =
- &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+ struct bnx2_rx_bd *rxbd =
+ &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
struct page *page = alloc_page(gfp);
if (!page)
@@ -2714,7 +2713,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
- struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+ struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
struct page *page = rx_pg->page;
if (!page)
@@ -2731,9 +2730,10 @@ static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
u8 *data;
- struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
+ struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
- struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct bnx2_rx_bd *rxbd =
+ &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
data = kmalloc(bp->rx_buf_size, gfp);
if (!data)
@@ -2770,9 +2770,9 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
old_link_state = sblk->status_attn_bits_ack & event;
if (new_link_state != old_link_state) {
if (new_link_state)
- REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
+ BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
else
- REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
+ BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
} else
is_set = 0;
@@ -2802,7 +2802,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
barrier();
cons = *bnapi->hw_tx_cons_ptr;
barrier();
- if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+ if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
cons++;
return cons;
}
@@ -2823,11 +2823,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = txr->tx_cons;
while (sw_cons != hw_cons) {
- struct sw_tx_bd *tx_buf;
+ struct bnx2_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int i, last;
- sw_ring_cons = TX_RING_IDX(sw_cons);
+ sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
tx_buf = &txr->tx_buf_ring[sw_ring_cons];
skb = tx_buf->skb;
@@ -2841,7 +2841,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
last_idx = sw_cons + tx_buf->nr_frags + 1;
last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
- if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
+ if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
last_idx++;
}
if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
@@ -2856,17 +2856,18 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
last = tx_buf->nr_frags;
for (i = 0; i < last; i++) {
- sw_cons = NEXT_TX_BD(sw_cons);
+ struct bnx2_sw_tx_bd *tx_buf;
+ sw_cons = BNX2_NEXT_TX_BD(sw_cons);
+
+ tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(
- &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
- mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
- sw_cons = NEXT_TX_BD(sw_cons);
+ sw_cons = BNX2_NEXT_TX_BD(sw_cons);
tx_bytes += skb->len;
dev_kfree_skb(skb);
@@ -2905,8 +2906,8 @@ static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
struct sk_buff *skb, int count)
{
- struct sw_pg *cons_rx_pg, *prod_rx_pg;
- struct rx_bd *cons_bd, *prod_bd;
+ struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
+ struct bnx2_rx_bd *cons_bd, *prod_bd;
int i;
u16 hw_prod, prod;
u16 cons = rxr->rx_pg_cons;
@@ -2933,12 +2934,14 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
hw_prod = rxr->rx_pg_prod;
for (i = 0; i < count; i++) {
- prod = RX_PG_RING_IDX(hw_prod);
+ prod = BNX2_RX_PG_RING_IDX(hw_prod);
prod_rx_pg = &rxr->rx_pg_ring[prod];
cons_rx_pg = &rxr->rx_pg_ring[cons];
- cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
+ [BNX2_RX_IDX(cons)];
+ prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
+ [BNX2_RX_IDX(prod)];
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
@@ -2950,8 +2953,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
- cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
- hw_prod = NEXT_RX_BD(hw_prod);
+ cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
+ hw_prod = BNX2_NEXT_RX_BD(hw_prod);
}
rxr->rx_pg_prod = hw_prod;
rxr->rx_pg_cons = cons;
@@ -2961,8 +2964,8 @@ static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
u8 *data, u16 cons, u16 prod)
{
- struct sw_bd *cons_rx_buf, *prod_rx_buf;
- struct rx_bd *cons_bd, *prod_bd;
+ struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnx2_rx_bd *cons_bd, *prod_bd;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
@@ -2981,8 +2984,8 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
- cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
@@ -3022,7 +3025,7 @@ error:
return skb;
} else {
unsigned int i, frag_len, frag_size, pages;
- struct sw_pg *rx_pg;
+ struct bnx2_sw_pg *rx_pg;
u16 pg_cons = rxr->rx_pg_cons;
u16 pg_prod = rxr->rx_pg_prod;
@@ -3065,7 +3068,7 @@ error:
rx_pg->page = NULL;
err = bnx2_alloc_rx_page(bp, rxr,
- RX_PG_RING_IDX(pg_prod),
+ BNX2_RX_PG_RING_IDX(pg_prod),
GFP_ATOMIC);
if (unlikely(err)) {
rxr->rx_pg_cons = pg_cons;
@@ -3083,8 +3086,8 @@ error:
skb->truesize += PAGE_SIZE;
skb->len += frag_len;
- pg_prod = NEXT_RX_BD(pg_prod);
- pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+ pg_prod = BNX2_NEXT_RX_BD(pg_prod);
+ pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
}
rxr->rx_pg_prod = pg_prod;
rxr->rx_pg_cons = pg_cons;
@@ -3101,7 +3104,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
barrier();
cons = *bnapi->hw_rx_cons_ptr;
barrier();
- if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
+ if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
cons++;
return cons;
}
@@ -3125,13 +3128,14 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
while (sw_cons != hw_cons) {
unsigned int len, hdr_len;
u32 status;
- struct sw_bd *rx_buf, *next_rx_buf;
+ struct bnx2_sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
u8 *data;
+ u16 next_ring_idx;
- sw_ring_cons = RX_RING_IDX(sw_cons);
- sw_ring_prod = RX_RING_IDX(sw_prod);
+ sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
+ sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
data = rx_buf->data;
@@ -3146,8 +3150,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- next_rx_buf =
- &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
+ next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
prefetch(get_l2_fhdr(next_rx_buf->data));
len = rx_hdr->l2_fhdr_pkt_len;
@@ -3239,8 +3243,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_pkt++;
next_rx:
- sw_cons = NEXT_RX_BD(sw_cons);
- sw_prod = NEXT_RX_BD(sw_prod);
+ sw_cons = BNX2_NEXT_RX_BD(sw_cons);
+ sw_prod = BNX2_NEXT_RX_BD(sw_prod);
if ((rx_pkt == budget))
break;
@@ -3255,11 +3259,11 @@ next_rx:
rxr->rx_prod = sw_prod;
if (pg_ring_used)
- REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+ BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
- REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
+ BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
- REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+ BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
mmiowb();
@@ -3277,7 +3281,7 @@ bnx2_msi(int irq, void *dev_instance)
struct bnx2 *bp = bnapi->bp;
prefetch(bnapi->status_blk.msi);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -3321,18 +3325,18 @@ bnx2_interrupt(int irq, void *dev_instance)
* the status block write.
*/
if ((sblk->status_idx == bnapi->last_status_idx) &&
- (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
+ (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
return IRQ_NONE;
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
/* Read back to deassert IRQ immediately to avoid too many
* spurious interrupts.
*/
- REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+ BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -3388,14 +3392,14 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
u32 msi_ctrl;
if (bnx2_has_work(bnapi)) {
- msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+ msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
return;
if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
- REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
- ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
- REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+ BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+ ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+ BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
bnx2_msi(bp->irq_tbl[0].vector, bnapi);
}
}
@@ -3434,9 +3438,9 @@ static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
/* This is needed to take care of transient status
* during link changes.
*/
- REG_WR(bp, BNX2_HC_COMMAND,
- bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
- REG_RD(bp, BNX2_HC_COMMAND);
+ BNX2_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ BNX2_RD(bp, BNX2_HC_COMMAND);
}
}
@@ -3473,9 +3477,9 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
if (likely(!bnx2_has_fast_work(bnapi))) {
napi_complete(napi);
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
break;
}
}
@@ -3511,19 +3515,19 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
if (likely(!bnx2_has_work(bnapi))) {
napi_complete(napi);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
break;
}
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
- bnapi->last_status_idx);
-
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
- BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
- bnapi->last_status_idx);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+ bnapi->last_status_idx);
+
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
break;
}
}
@@ -3561,8 +3565,8 @@ bnx2_set_rx_mode(struct net_device *dev)
}
else if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- 0xffffffff);
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
}
sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
}
@@ -3584,8 +3588,8 @@ bnx2_set_rx_mode(struct net_device *dev)
}
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- mc_filter[i]);
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ mc_filter[i]);
}
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
@@ -3610,12 +3614,12 @@ bnx2_set_rx_mode(struct net_device *dev)
if (rx_mode != bp->rx_mode) {
bp->rx_mode = rx_mode;
- REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
}
- REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
- REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
- REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
spin_unlock_bh(&bp->phy_lock);
}
@@ -3663,10 +3667,10 @@ static int bnx2_request_uncached_firmware(struct bnx2 *bp)
const struct bnx2_rv2p_fw_file *rv2p_fw;
int rc;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
mips_fw_file = FW_MIPS_FILE_09;
- if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5709_A1))
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
rv2p_fw_file = FW_RV2P_FILE_09_Ax;
else
rv2p_fw_file = FW_RV2P_FILE_09;
@@ -3756,13 +3760,13 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
}
for (i = 0; i < rv2p_code_len; i += 8) {
- REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
+ BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
rv2p_code++;
- REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
+ BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
rv2p_code++;
val = (i / 8) | cmd;
- REG_WR(bp, addr, val);
+ BNX2_WR(bp, addr, val);
}
rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
@@ -3772,22 +3776,22 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
loc = be32_to_cpu(fw_entry->fixup[i]);
if (loc && ((loc * 4) < rv2p_code_len)) {
code = be32_to_cpu(*(rv2p_code + loc - 1));
- REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
+ BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
code = be32_to_cpu(*(rv2p_code + loc));
code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
- REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
+ BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
val = (loc / 2) | cmd;
- REG_WR(bp, addr, val);
+ BNX2_WR(bp, addr, val);
}
}
/* Reset the processor, un-stall is done later. */
if (rv2p_proc == RV2P_PROC1) {
- REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
+ BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
}
else {
- REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
+ BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
}
return 0;
@@ -3924,14 +3928,14 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
/* delay required during transition out of D3hot */
msleep(20);
- val = REG_RD(bp, BNX2_EMAC_MODE);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
val &= ~BNX2_EMAC_MODE_MPKT;
- REG_WR(bp, BNX2_EMAC_MODE, val);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- val = REG_RD(bp, BNX2_RPM_CONFIG);
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- REG_WR(bp, BNX2_RPM_CONFIG, val);
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
break;
}
case PCI_D3hot: {
@@ -3963,7 +3967,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
- val = REG_RD(bp, BNX2_EMAC_MODE);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
/* Enable port mode. */
val &= ~BNX2_EMAC_MODE_PORT;
@@ -3978,32 +3982,32 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
val |= BNX2_EMAC_MODE_25G_MODE;
}
- REG_WR(bp, BNX2_EMAC_MODE, val);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
/* receive all multicast */
for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- 0xffffffff);
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
}
- REG_WR(bp, BNX2_EMAC_RX_MODE,
- BNX2_EMAC_RX_MODE_SORT_MODE);
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE,
+ BNX2_EMAC_RX_MODE_SORT_MODE);
val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
BNX2_RPM_SORT_USER0_MC_EN;
- REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
- REG_WR(bp, BNX2_RPM_SORT_USER0, val);
- REG_WR(bp, BNX2_RPM_SORT_USER0, val |
- BNX2_RPM_SORT_USER0_ENA);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
+ BNX2_RPM_SORT_USER0_ENA);
/* Need to enable EMAC and RPM for WOL. */
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
- BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
- val = REG_RD(bp, BNX2_RPM_CONFIG);
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- REG_WR(bp, BNX2_RPM_CONFIG, val);
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
}
@@ -4016,8 +4020,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
1, 0);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
if (bp->wol)
pmcsr |= 3;
@@ -4050,9 +4054,9 @@ bnx2_acquire_nvram_lock(struct bnx2 *bp)
int j;
/* Request access to the flash interface. */
- REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+ BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
- val = REG_RD(bp, BNX2_NVM_SW_ARB);
+ val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
break;
@@ -4072,10 +4076,10 @@ bnx2_release_nvram_lock(struct bnx2 *bp)
u32 val;
/* Relinquish nvram interface. */
- REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+ BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
- val = REG_RD(bp, BNX2_NVM_SW_ARB);
+ val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
break;
@@ -4094,20 +4098,20 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
{
u32 val;
- val = REG_RD(bp, BNX2_MISC_CFG);
- REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
+ val = BNX2_RD(bp, BNX2_MISC_CFG);
+ BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
if (bp->flash_info->flags & BNX2_NV_WREN) {
int j;
- REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
- REG_WR(bp, BNX2_NVM_COMMAND,
- BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+ BNX2_WR(bp, BNX2_NVM_COMMAND,
+ BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
udelay(5);
- val = REG_RD(bp, BNX2_NVM_COMMAND);
+ val = BNX2_RD(bp, BNX2_NVM_COMMAND);
if (val & BNX2_NVM_COMMAND_DONE)
break;
}
@@ -4123,8 +4127,8 @@ bnx2_disable_nvram_write(struct bnx2 *bp)
{
u32 val;
- val = REG_RD(bp, BNX2_MISC_CFG);
- REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
+ val = BNX2_RD(bp, BNX2_MISC_CFG);
+ BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
@@ -4133,10 +4137,10 @@ bnx2_enable_nvram_access(struct bnx2 *bp)
{
u32 val;
- val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+ val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
/* Enable both bits, even on read. */
- REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
- val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
+ BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
static void
@@ -4144,9 +4148,9 @@ bnx2_disable_nvram_access(struct bnx2 *bp)
{
u32 val;
- val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+ val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
/* Disable both bits, even after read. */
- REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
@@ -4166,13 +4170,13 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
BNX2_NVM_COMMAND_DOIT;
/* Need to clear DONE bit separately. */
- REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
/* Address of the NVRAM to read from. */
- REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+ BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
/* Issue an erase command. */
- REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
/* Wait for completion. */
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4180,7 +4184,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
udelay(5);
- val = REG_RD(bp, BNX2_NVM_COMMAND);
+ val = BNX2_RD(bp, BNX2_NVM_COMMAND);
if (val & BNX2_NVM_COMMAND_DONE)
break;
}
@@ -4208,13 +4212,13 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
}
/* Need to clear DONE bit separately. */
- REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
/* Address of the NVRAM to read from. */
- REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+ BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
/* Issue a read command. */
- REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
/* Wait for completion. */
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4222,9 +4226,9 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
udelay(5);
- val = REG_RD(bp, BNX2_NVM_COMMAND);
+ val = BNX2_RD(bp, BNX2_NVM_COMMAND);
if (val & BNX2_NVM_COMMAND_DONE) {
- __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
+ __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
memcpy(ret_val, &v, 4);
break;
}
@@ -4254,24 +4258,24 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
}
/* Need to clear DONE bit separately. */
- REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
memcpy(&val32, val, 4);
/* Write the data. */
- REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
+ BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
/* Address of the NVRAM to write to. */
- REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+ BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
/* Issue the write command. */
- REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+ BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
/* Wait for completion. */
for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
udelay(5);
- if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
+ if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
break;
}
if (j >= NVRAM_TIMEOUT_COUNT)
@@ -4287,13 +4291,13 @@ bnx2_init_nvram(struct bnx2 *bp)
int j, entry_count, rc = 0;
const struct flash_spec *flash;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
bp->flash_info = &flash_5709;
goto get_flash_size;
}
/* Determine the selected interface. */
- val = REG_RD(bp, BNX2_NVM_CFG1);
+ val = BNX2_RD(bp, BNX2_NVM_CFG1);
entry_count = ARRAY_SIZE(flash_table);
@@ -4332,10 +4336,10 @@ bnx2_init_nvram(struct bnx2 *bp)
bnx2_enable_nvram_access(bp);
/* Reconfigure the flash interface */
- REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
- REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
- REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
- REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
+ BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
+ BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
+ BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
+ BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
/* Disable access to flash interface */
bnx2_disable_nvram_access(bp);
@@ -4696,10 +4700,10 @@ bnx2_init_fw_cap(struct bnx2 *bp)
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
- REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+ BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
- REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
- REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+ BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+ BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
static int
@@ -4711,24 +4715,24 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Wait for the current PCI transaction to complete before
* issuing a reset. */
- if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
- (CHIP_NUM(bp) == CHIP_NUM_5708)) {
- REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
- BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
- val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+ if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
+ (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
+ BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+ BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+ val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
udelay(5);
} else { /* 5709 */
- val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
- REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
- val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
for (i = 0; i < 100; i++) {
msleep(1);
- val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+ val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
break;
}
@@ -4744,17 +4748,17 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Do a dummy read to force the chip to complete all current transactions
* before we issue a reset. */
- val = REG_RD(bp, BNX2_MISC_ID);
+ val = BNX2_RD(bp, BNX2_MISC_ID);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
- REG_RD(bp, BNX2_MISC_COMMAND);
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+ BNX2_RD(bp, BNX2_MISC_COMMAND);
udelay(5);
val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
- REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+ BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
} else {
val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -4762,19 +4766,19 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
/* Chip reset. */
- REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+ BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
/* Reading back any register after chip reset will hang the
* bus on 5706 A0 and A1. The msleep below provides plenty
* of margin for write posting.
*/
- if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A1))
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
msleep(20);
/* Reset takes approximately 30 usec */
for (i = 0; i < 10; i++) {
- val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+ val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
break;
@@ -4789,7 +4793,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
}
/* Make sure byte swapping is properly configured. */
- val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
+ val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
if (val != 0x01020304) {
pr_err("Chip not in correct endian mode\n");
return -ENODEV;
@@ -4808,10 +4812,10 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
bnx2_set_default_remote_link(bp);
spin_unlock_bh(&bp->phy_lock);
- if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
/* Adjust the voltage regulator to two steps lower. The default
* of this register is 0x0000000e. */
- REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
+ BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
/* Remove bad rbuf memory from the free pool. */
rc = bnx2_alloc_bad_rbuf(bp);
@@ -4820,7 +4824,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
if (bp->flags & BNX2_FLAG_USING_MSIX) {
bnx2_setup_msix_tbl(bp);
/* Prevent MSIX table reads and writes from timing out */
- REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
}
@@ -4834,7 +4838,7 @@ bnx2_init_chip(struct bnx2 *bp)
int rc, i;
/* Make sure the interrupt is not active. */
- REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
BNX2_DMA_CONFIG_DATA_WORD_SWAP |
@@ -4850,16 +4854,17 @@ bnx2_init_chip(struct bnx2 *bp)
if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
val |= (1 << 23);
- if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
- (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
+ if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
+ (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
+ !(bp->flags & BNX2_FLAG_PCIX))
val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
- REG_WR(bp, BNX2_DMA_CONFIG, val);
+ BNX2_WR(bp, BNX2_DMA_CONFIG, val);
- if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
- val = REG_RD(bp, BNX2_TDMA_CONFIG);
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+ val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
val |= BNX2_TDMA_CONFIG_ONE_DMA;
- REG_WR(bp, BNX2_TDMA_CONFIG, val);
+ BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
}
if (bp->flags & BNX2_FLAG_PCIX) {
@@ -4871,14 +4876,14 @@ bnx2_init_chip(struct bnx2 *bp)
val16 & ~PCI_X_CMD_ERO);
}
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
- BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
- BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
- BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
+ BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
+ BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
/* Initialize context mapping and zero out the quick contexts. The
* context block must have already been enabled. */
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
rc = bnx2_init_5709_context(bp);
if (rc)
return rc;
@@ -4892,29 +4897,29 @@ bnx2_init_chip(struct bnx2 *bp)
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
- val = REG_RD(bp, BNX2_MQ_CONFIG);
+ val = BNX2_RD(bp, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
- if (CHIP_REV(bp) == CHIP_REV_Ax)
+ if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
val |= BNX2_MQ_CONFIG_HALT_DIS;
}
- REG_WR(bp, BNX2_MQ_CONFIG, val);
+ BNX2_WR(bp, BNX2_MQ_CONFIG, val);
val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
- REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
- REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
+ BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
+ BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
- val = (BCM_PAGE_BITS - 8) << 24;
- REG_WR(bp, BNX2_RV2P_CONFIG, val);
+ val = (BNX2_PAGE_BITS - 8) << 24;
+ BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
/* Configure page size. */
- val = REG_RD(bp, BNX2_TBDR_CONFIG);
+ val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
- val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
- REG_WR(bp, BNX2_TBDR_CONFIG, val);
+ val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
+ BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
val = bp->mac_addr[0] +
(bp->mac_addr[1] << 8) +
@@ -4922,14 +4927,14 @@ bnx2_init_chip(struct bnx2 *bp)
bp->mac_addr[3] +
(bp->mac_addr[4] << 8) +
(bp->mac_addr[5] << 16);
- REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
+ BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
/* Program the MTU. Also include 4 bytes for CRC32. */
mtu = bp->dev->mtu;
val = mtu + ETH_HLEN + ETH_FCS_LEN;
if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
- REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
+ BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
if (mtu < 1500)
mtu = 1500;
@@ -4947,43 +4952,43 @@ bnx2_init_chip(struct bnx2 *bp)
bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
/* Set up how to generate a link change interrupt. */
- REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
- REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
- (u64) bp->status_blk_mapping & 0xffffffff);
- REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
+ BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
+ (u64) bp->status_blk_mapping & 0xffffffff);
+ BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
- REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
- (u64) bp->stats_blk_mapping & 0xffffffff);
- REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
- (u64) bp->stats_blk_mapping >> 32);
+ BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
+ (u64) bp->stats_blk_mapping & 0xffffffff);
+ BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
+ (u64) bp->stats_blk_mapping >> 32);
- REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
- (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
+ BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+ (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
- REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
- (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
+ BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
+ (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
- REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
- (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
+ BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
+ (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
- REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
+ BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
- REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
+ BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
- REG_WR(bp, BNX2_HC_COM_TICKS,
- (bp->com_ticks_int << 16) | bp->com_ticks);
+ BNX2_WR(bp, BNX2_HC_COM_TICKS,
+ (bp->com_ticks_int << 16) | bp->com_ticks);
- REG_WR(bp, BNX2_HC_CMD_TICKS,
- (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
+ BNX2_WR(bp, BNX2_HC_CMD_TICKS,
+ (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
if (bp->flags & BNX2_FLAG_BROKEN_STATS)
- REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
+ BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
else
- REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
- REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
+ BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
+ BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
- if (CHIP_ID(bp) == CHIP_ID_5706_A1)
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
val = BNX2_HC_CONFIG_COLLECT_STATS;
else {
val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
@@ -4991,8 +4996,8 @@ bnx2_init_chip(struct bnx2 *bp)
}
if (bp->flags & BNX2_FLAG_USING_MSIX) {
- REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
- BNX2_HC_MSIX_BIT_VECTOR_VAL);
+ BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+ BNX2_HC_MSIX_BIT_VECTOR_VAL);
val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
}
@@ -5000,7 +5005,7 @@ bnx2_init_chip(struct bnx2 *bp)
if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
- REG_WR(bp, BNX2_HC_CONFIG, val);
+ BNX2_WR(bp, BNX2_HC_CONFIG, val);
if (bp->rx_ticks < 25)
bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
@@ -5011,48 +5016,48 @@ bnx2_init_chip(struct bnx2 *bp)
u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
BNX2_HC_SB_CONFIG_1;
- REG_WR(bp, base,
+ BNX2_WR(bp, base,
BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
BNX2_HC_SB_CONFIG_1_ONE_SHOT);
- REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
+ BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
(bp->tx_quick_cons_trip_int << 16) |
bp->tx_quick_cons_trip);
- REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
+ BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
(bp->tx_ticks_int << 16) | bp->tx_ticks);
- REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
- (bp->rx_quick_cons_trip_int << 16) |
+ BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+ (bp->rx_quick_cons_trip_int << 16) |
bp->rx_quick_cons_trip);
- REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+ BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
(bp->rx_ticks_int << 16) | bp->rx_ticks);
}
/* Clear internal stats counters. */
- REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
+ BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
- REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
+ BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
/* Initialize the receive filter. */
bnx2_set_rx_mode(bp->dev);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
- REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
}
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
1, 0);
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
- REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
+ BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
udelay(20);
- bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
+ bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
return rc;
}
@@ -5086,7 +5091,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
u32 val, offset0, offset1, offset2, offset3;
u32 cid_addr = GET_CID_ADDR(cid);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
offset0 = BNX2_L2CTX_TYPE_XI;
offset1 = BNX2_L2CTX_CMD_TYPE_XI;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
@@ -5113,7 +5118,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
- struct tx_bd *txbd;
+ struct bnx2_tx_bd *txbd;
u32 cid = TX_CID;
struct bnx2_napi *bnapi;
struct bnx2_tx_ring_info *txr;
@@ -5128,7 +5133,7 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
bp->tx_wake_thresh = bp->tx_ring_size / 2;
- txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
+ txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
@@ -5143,17 +5148,17 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
}
static void
-bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
- int num_rings)
+bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
+ u32 buf_size, int num_rings)
{
int i;
- struct rx_bd *rxbd;
+ struct bnx2_rx_bd *rxbd;
for (i = 0; i < num_rings; i++) {
int j;
rxbd = &rx_ring[i][0];
- for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
+ for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
rxbd->rx_bd_len = buf_size;
rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
}
@@ -5187,9 +5192,9 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
bnx2_init_rx_context(bp, cid);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
- REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
+ BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
}
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
@@ -5208,8 +5213,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
+ BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
}
val = (u64) rxr->rx_desc_mapping[0] >> 32;
@@ -5225,8 +5230,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_num, i, bp->rx_pg_ring_size);
break;
}
- prod = NEXT_RX_BD(prod);
- ring_prod = RX_PG_RING_IDX(prod);
+ prod = BNX2_NEXT_RX_BD(prod);
+ ring_prod = BNX2_RX_PG_RING_IDX(prod);
}
rxr->rx_pg_prod = prod;
@@ -5237,8 +5242,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_num, i, bp->rx_ring_size);
break;
}
- prod = NEXT_RX_BD(prod);
- ring_prod = RX_RING_IDX(prod);
+ prod = BNX2_NEXT_RX_BD(prod);
+ ring_prod = BNX2_RX_RING_IDX(prod);
}
rxr->rx_prod = prod;
@@ -5246,10 +5251,10 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
- REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
- REG_WR16(bp, rxr->rx_bidx_addr, prod);
+ BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+ BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
- REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+ BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
static void
@@ -5260,15 +5265,15 @@ bnx2_init_all_rings(struct bnx2 *bp)
bnx2_clear_ring_states(bp);
- REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+ BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
for (i = 0; i < bp->num_tx_rings; i++)
bnx2_init_tx_ring(bp, i);
if (bp->num_tx_rings > 1)
- REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
- (TX_TSS_CID << 7));
+ BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+ (TX_TSS_CID << 7));
- REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+ BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
for (i = 0; i < bp->num_rx_rings; i++)
@@ -5282,8 +5287,8 @@ bnx2_init_all_rings(struct bnx2 *bp)
tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
if ((i % 8) == 7) {
- REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
- REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
+ BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
+ BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
BNX2_RLUP_RSS_COMMAND_WRITE |
BNX2_RLUP_RSS_COMMAND_HASH_MASK);
@@ -5294,7 +5299,7 @@ bnx2_init_all_rings(struct bnx2 *bp)
val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
- REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+ BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
}
}
@@ -5303,8 +5308,8 @@ static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
u32 max, num_rings = 1;
- while (ring_size > MAX_RX_DESC_CNT) {
- ring_size -= MAX_RX_DESC_CNT;
+ while (ring_size > BNX2_MAX_RX_DESC_CNT) {
+ ring_size -= BNX2_MAX_RX_DESC_CNT;
num_rings++;
}
/* round to next power of 2 */
@@ -5337,13 +5342,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
jumbo_size = size * pages;
- if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
- jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+ if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
+ jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
bp->rx_pg_ring_size = jumbo_size;
bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
- MAX_RX_PG_RINGS);
- bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+ BNX2_MAX_RX_PG_RINGS);
+ bp->rx_max_pg_ring_idx =
+ (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
bp->rx_copy_thresh = 0;
}
@@ -5354,8 +5360,8 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
- bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
- bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
+ bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
+ bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
static void
@@ -5371,13 +5377,13 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
if (txr->tx_buf_ring == NULL)
continue;
- for (j = 0; j < TX_DESC_CNT; ) {
- struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ for (j = 0; j < BNX2_TX_DESC_CNT; ) {
+ struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb = tx_buf->skb;
int k, last;
if (skb == NULL) {
- j = NEXT_TX_BD(j);
+ j = BNX2_NEXT_TX_BD(j);
continue;
}
@@ -5389,9 +5395,9 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j = NEXT_TX_BD(j);
- for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
- tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+ j = BNX2_NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
+ tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[k]),
@@ -5417,7 +5423,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
return;
for (j = 0; j < bp->rx_max_ring_idx; j++) {
- struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+ struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
u8 *data = rx_buf->data;
if (data == NULL)
@@ -5615,7 +5621,7 @@ bnx2_test_registers(struct bnx2 *bp)
ret = 0;
is_5709 = 0;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
is_5709 = 1;
for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
@@ -5714,7 +5720,7 @@ bnx2_test_memory(struct bnx2 *bp)
};
struct mem_entry *mem_tbl;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
mem_tbl = mem_tbl_5709;
else
mem_tbl = mem_tbl_5706;
@@ -5741,8 +5747,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
unsigned char *packet;
u16 rx_start_idx, rx_idx;
dma_addr_t map;
- struct tx_bd *txbd;
- struct sw_bd *rx_buf;
+ struct bnx2_tx_bd *txbd;
+ struct bnx2_sw_bd *rx_buf;
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
@@ -5784,17 +5790,17 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
return -EIO;
}
- REG_WR(bp, BNX2_HC_COMMAND,
- bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ BNX2_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
- REG_RD(bp, BNX2_HC_COMMAND);
+ BNX2_RD(bp, BNX2_HC_COMMAND);
udelay(5);
rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
num_pkts = 0;
- txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
+ txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
txbd->tx_bd_haddr_hi = (u64) map >> 32;
txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5802,18 +5808,18 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
num_pkts++;
- txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+ txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
txr->tx_prod_bseq += pkt_size;
- REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
- REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+ BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+ BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
udelay(100);
- REG_WR(bp, BNX2_HC_COMMAND,
- bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ BNX2_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
- REG_RD(bp, BNX2_HC_COMMAND);
+ BNX2_RD(bp, BNX2_HC_COMMAND);
udelay(5);
@@ -5962,14 +5968,14 @@ bnx2_test_intr(struct bnx2 *bp)
if (!netif_running(bp->dev))
return -ENODEV;
- status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
+ status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
/* This register is not touched during run-time. */
- REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
- REG_RD(bp, BNX2_HC_COMMAND);
+ BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+ BNX2_RD(bp, BNX2_HC_COMMAND);
for (i = 0; i < 10; i++) {
- if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
+ if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
status_idx) {
break;
@@ -6132,11 +6138,11 @@ bnx2_timer(unsigned long data)
/* workaround occasional corrupted counters */
if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
- REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
- BNX2_HC_COMMAND_STATS_NOW);
+ BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
+ BNX2_HC_COMMAND_STATS_NOW);
if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
- if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
bnx2_5706_serdes_timer(bp);
else
bnx2_5708_serdes_timer(bp);
@@ -6205,13 +6211,13 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
const int len = sizeof(bp->irq_tbl[0].name);
bnx2_setup_msix_tbl(bp);
- REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
- REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
- REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
+ BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
+ BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
/* Need to flush the previous three writes to ensure MSI-X
* is set up properly */
- REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+ BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
@@ -6274,7 +6280,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
!(bp->flags & BNX2_FLAG_USING_MSIX)) {
if (pci_enable_msi(bp->pdev) == 0) {
bp->flags |= BNX2_FLAG_USING_MSI;
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
bp->irq_tbl[0].handler = bnx2_msi_1shot;
} else
@@ -6464,22 +6470,22 @@ bnx2_dump_ftq(struct bnx2 *bp)
netdev_err(dev, "<--- end FTQ dump --->\n");
netdev_err(dev, "<--- start TBDC dump --->\n");
netdev_err(dev, "TBDC free cnt: %ld\n",
- REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
for (i = 0; i < 0x20; i++) {
int j = 0;
- REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
- REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
- BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
- REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
- while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
j++;
- cid = REG_RD(bp, BNX2_TBDC_CID);
- bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
- valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ cid = BNX2_RD(bp, BNX2_TBDC_CID);
+ bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
+ valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
bdidx >> 24, (valid >> 8) & 0x0ff);
@@ -6500,15 +6506,15 @@ bnx2_dump_state(struct bnx2 *bp)
pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
- REG_RD(bp, BNX2_EMAC_TX_STATUS),
- REG_RD(bp, BNX2_EMAC_RX_STATUS));
+ BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
+ BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
- REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
- REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+ BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
netdev_err(dev, "DEBUG: PBA[%08x]\n",
- REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+ BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
static void
@@ -6533,8 +6539,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
dma_addr_t mapping;
- struct tx_bd *txbd;
- struct sw_tx_bd *tx_buf;
+ struct bnx2_tx_bd *txbd;
+ struct bnx2_sw_tx_bd *tx_buf;
u32 len, vlan_tag_flags, last_frag, mss;
u16 prod, ring_prod;
int i;
@@ -6557,7 +6563,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
len = skb_headlen(skb);
prod = txr->tx_prod;
- ring_prod = TX_RING_IDX(prod);
+ ring_prod = BNX2_TX_RING_IDX(prod);
vlan_tag_flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -6627,8 +6633,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- prod = NEXT_TX_BD(prod);
- ring_prod = TX_RING_IDX(prod);
+ prod = BNX2_NEXT_TX_BD(prod);
+ ring_prod = BNX2_TX_RING_IDX(prod);
txbd = &txr->tx_desc_ring[ring_prod];
len = skb_frag_size(frag);
@@ -6652,11 +6658,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, skb->len);
- prod = NEXT_TX_BD(prod);
+ prod = BNX2_NEXT_TX_BD(prod);
txr->tx_prod_bseq += skb->len;
- REG_WR16(bp, txr->tx_bidx_addr, prod);
- REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+ BNX2_WR16(bp, txr->tx_bidx_addr, prod);
+ BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
mmiowb();
@@ -6682,7 +6688,7 @@ dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
- ring_prod = TX_RING_IDX(prod);
+ ring_prod = BNX2_TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
@@ -6690,8 +6696,8 @@ dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
- prod = NEXT_TX_BD(prod);
- ring_prod = TX_RING_IDX(prod);
+ prod = BNX2_NEXT_TX_BD(prod);
+ ring_prod = BNX2_TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
@@ -6810,8 +6816,8 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
- if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
- (CHIP_ID(bp) == CHIP_ID_5708_A0))
+ if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
net_stats->tx_carrier_errors = 0;
else {
net_stats->tx_carrier_errors =
@@ -7030,7 +7036,7 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
offset = reg_boundaries[0];
p += offset;
while (offset < BNX2_REGDUMP_LEN) {
- *p++ = REG_RD(bp, offset);
+ *p++ = BNX2_RD(bp, offset);
offset += 4;
if (offset == reg_boundaries[i + 1]) {
offset = reg_boundaries[i + 2];
@@ -7254,13 +7260,13 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct bnx2 *bp = netdev_priv(dev);
- ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
- ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
+ ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_pg_ring_size;
- ering->tx_max_pending = MAX_TX_DESC_CNT;
+ ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
ering->tx_pending = bp->tx_ring_size;
}
@@ -7326,8 +7332,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
- (ering->tx_pending > MAX_TX_DESC_CNT) ||
+ if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
+ (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
(ering->tx_pending <= MAX_SKB_FRAGS)) {
return -EINVAL;
@@ -7614,10 +7620,10 @@ bnx2_get_ethtool_stats(struct net_device *dev,
return;
}
- if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
- (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
- (CHIP_ID(bp) == CHIP_ID_5708_A0))
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
stats_len_arr = bnx2_5706_stats_len_arr;
else
stats_len_arr = bnx2_5708_stats_len_arr;
@@ -7655,26 +7661,26 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
case ETHTOOL_ID_ACTIVE:
bnx2_set_power_state(bp, PCI_D0);
- bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
- REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+ bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
+ BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
return 1; /* cycle on/off once per second */
case ETHTOOL_ID_ON:
- REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
- BNX2_EMAC_LED_1000MB_OVERRIDE |
- BNX2_EMAC_LED_100MB_OVERRIDE |
- BNX2_EMAC_LED_10MB_OVERRIDE |
- BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
- BNX2_EMAC_LED_TRAFFIC);
+ BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
+ BNX2_EMAC_LED_1000MB_OVERRIDE |
+ BNX2_EMAC_LED_100MB_OVERRIDE |
+ BNX2_EMAC_LED_10MB_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC);
break;
case ETHTOOL_ID_OFF:
- REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
+ BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
break;
case ETHTOOL_ID_INACTIVE:
- REG_WR(bp, BNX2_EMAC_LED, 0);
- REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
+ BNX2_WR(bp, BNX2_EMAC_LED, 0);
+ BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
if (!netif_running(dev))
bnx2_set_power_state(bp, PCI_D3hot);
@@ -7896,10 +7902,10 @@ poll_bnx2(struct net_device *dev)
}
#endif
-static void __devinit
+static void
bnx2_get_5709_media(struct bnx2 *bp)
{
- u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
+ u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
u32 strap;
@@ -7934,18 +7940,18 @@ bnx2_get_5709_media(struct bnx2 *bp)
}
}
-static void __devinit
+static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
u32 reg;
- reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
+ reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
u32 clkreg;
bp->flags |= BNX2_FLAG_PCIX;
- clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
+ clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
switch (clkreg) {
@@ -7986,7 +7992,7 @@ bnx2_get_pci_speed(struct bnx2 *bp)
}
-static void __devinit
+static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
int rc, i, j;
@@ -8054,7 +8060,7 @@ vpd_done:
kfree(data);
}
-static int __devinit
+static int
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
struct bnx2 *bp;
@@ -8131,20 +8137,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
*/
- REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
- BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
- BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
+ BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
- bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
+ bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
if (!pci_is_pcie(pdev)) {
dev_err(&pdev->dev, "Not PCIE, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
bp->flags |= BNX2_FLAG_PCIE;
- if (CHIP_REV(bp) == CHIP_REV_Ax)
+ if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
/* AER (Advanced Error Reporting) hooks */
@@ -8163,18 +8169,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_BROKEN_STATS;
}
- if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
+ BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
bp->flags |= BNX2_FLAG_MSIX_CAP;
}
- if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
+ if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
+ BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
bp->flags |= BNX2_FLAG_MSI_CAP;
}
/* 5708 cannot support DMA addresses > 40-bit. */
- if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
@@ -8197,12 +8205,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bnx2_get_pci_speed(bp);
/* 5706A0 may falsely detect SERR and PERR. */
- if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
- reg = REG_RD(bp, PCI_COMMAND);
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
+ reg = BNX2_RD(bp, PCI_COMMAND);
reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
- REG_WR(bp, PCI_COMMAND, reg);
- }
- else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
+ BNX2_WR(bp, PCI_COMMAND, reg);
+ } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
dev_err(&pdev->dev,
@@ -8299,7 +8306,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[4] = (u8) (reg >> 8);
bp->mac_addr[5] = (u8) reg;
- bp->tx_ring_size = MAX_TX_DESC_CNT;
+ bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
bnx2_set_rx_ring_size(bp, 255);
bp->tx_quick_cons_trip_int = 2;
@@ -8319,9 +8326,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->phy_addr = 1;
/* Disable WOL support if we are running on a SERDES chip. */
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_get_5709_media(bp);
- else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
+ else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bp->phy_port = PORT_TP;
@@ -8332,7 +8339,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_NO_WOL;
bp->wol = 0;
}
- if (CHIP_NUM(bp) == CHIP_NUM_5706) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
/* Don't do parallel detect on this board because of
* some board problems. The link will not go down
* if we do parallel detect.
@@ -8345,25 +8352,25 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
}
- } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
- CHIP_NUM(bp) == CHIP_NUM_5708)
+ } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
+ BNX2_CHIP(bp) == BNX2_CHIP_5708)
bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
- else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
- (CHIP_REV(bp) == CHIP_REV_Ax ||
- CHIP_REV(bp) == CHIP_REV_Bx))
+ else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
+ (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
+ BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
bnx2_init_fw_cap(bp);
- if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
- (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
- (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
- !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
+ if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
+ (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
+ !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
bp->flags |= BNX2_FLAG_NO_WOL;
bp->wol = 0;
}
- if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int =
bp->tx_quick_cons_trip;
bp->tx_ticks_int = bp->tx_ticks;
@@ -8385,7 +8392,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
* AMD believes this incompatibility is unique to the 5706, and
* prefers to locally disable MSI rather than globally disabling it.
*/
- if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
struct pci_dev *amd_8132 = NULL;
while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
@@ -8414,6 +8421,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->cnic_eth_dev.max_iscsi_conn =
(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+ bp->cnic_probe = bnx2_cnic_probe;
#endif
pci_save_state(pdev);
@@ -8439,7 +8447,7 @@ err_out:
return rc;
}
-static char * __devinit
+static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
char *s = str;
@@ -8505,7 +8513,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
-static int __devinit
+static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed = 0;
@@ -8541,7 +8549,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_RXHASH | NETIF_F_RXCSUM;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->vlan_features = dev->hw_features;
@@ -8556,8 +8564,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
"node addr %pM\n", board_info[ent->driver_data].name,
- ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
- ((CHIP_ID(bp) & 0x0ff0) >> 4),
+ ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+ ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
pdev->irq, dev->dev_addr);
@@ -8573,7 +8581,7 @@ err_free:
return rc;
}
-static void __devexit
+static void
bnx2_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -8752,7 +8760,7 @@ static struct pci_driver bnx2_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2_pci_tbl,
.probe = bnx2_init_one,
- .remove = __devexit_p(bnx2_remove_one),
+ .remove = bnx2_remove_one,
.suspend = bnx2_suspend,
.resume = bnx2_resume,
.err_handler = &bnx2_err_handler,
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index af6451dec295..172efbecfea2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -20,7 +20,7 @@
/*
* tx_bd definition
*/
-struct tx_bd {
+struct bnx2_tx_bd {
u32 tx_bd_haddr_hi;
u32 tx_bd_haddr_lo;
u32 tx_bd_mss_nbytes;
@@ -48,7 +48,7 @@ struct tx_bd {
/*
* rx_bd definition
*/
-struct rx_bd {
+struct bnx2_rx_bd {
u32 rx_bd_haddr_hi;
u32 rx_bd_haddr_lo;
u32 rx_bd_len;
@@ -6538,37 +6538,38 @@ struct l2_fhdr {
/* Use CPU native page size up to 16K for the ring sizes. */
#if (PAGE_SHIFT > 14)
-#define BCM_PAGE_BITS 14
+#define BNX2_PAGE_BITS 14
#else
-#define BCM_PAGE_BITS PAGE_SHIFT
+#define BNX2_PAGE_BITS PAGE_SHIFT
#endif
-#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS)
+#define BNX2_PAGE_SIZE (1 << BNX2_PAGE_BITS)
-#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define BNX2_TX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_tx_bd))
+#define BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1)
-#define MAX_RX_RINGS 8
-#define MAX_RX_PG_RINGS 32
-#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
-#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
-#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS)
+#define BNX2_MAX_RX_RINGS 8
+#define BNX2_MAX_RX_PG_RINGS 32
+#define BNX2_RX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_rx_bd))
+#define BNX2_MAX_RX_DESC_CNT (BNX2_RX_DESC_CNT - 1)
+#define BNX2_MAX_TOTAL_RX_DESC_CNT (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_RINGS)
+#define BNX2_MAX_TOTAL_RX_PG_DESC_CNT \
+ (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_PG_RINGS)
-#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \
- (MAX_TX_DESC_CNT - 1)) ? \
+#define BNX2_NEXT_TX_BD(x) (((x) & (BNX2_MAX_TX_DESC_CNT - 1)) == \
+ (BNX2_MAX_TX_DESC_CNT - 1)) ? \
(x) + 2 : (x) + 1
-#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT)
+#define BNX2_TX_RING_IDX(x) ((x) & BNX2_MAX_TX_DESC_CNT)
-#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) == \
- (MAX_RX_DESC_CNT - 1)) ? \
+#define BNX2_NEXT_RX_BD(x) (((x) & (BNX2_MAX_RX_DESC_CNT - 1)) == \
+ (BNX2_MAX_RX_DESC_CNT - 1)) ? \
(x) + 2 : (x) + 1
-#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
-#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
+#define BNX2_RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define BNX2_RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
-#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4))
-#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
+#define BNX2_RX_RING(x) (((x) & ~BNX2_MAX_RX_DESC_CNT) >> (BNX2_PAGE_BITS - 4))
+#define BNX2_RX_IDX(x) ((x) & BNX2_MAX_RX_DESC_CNT)
/* Context size. */
#define CTX_SHIFT 7
@@ -6609,7 +6610,7 @@ struct l2_fhdr {
* RX ring buffer contains pointer to kmalloc() data only,
* skbs are built only after the hardware has filled the frame.
*/
-struct sw_bd {
+struct bnx2_sw_bd {
u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -6623,23 +6624,23 @@ static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
}
-struct sw_pg {
+struct bnx2_sw_pg {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
-struct sw_tx_bd {
+struct bnx2_sw_tx_bd {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned short is_gso;
unsigned short nr_frags;
};
-#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
-#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT)
-#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
-#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT)
-#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define SW_RXBD_RING_SIZE (sizeof(struct bnx2_sw_bd) * BNX2_RX_DESC_CNT)
+#define SW_RXPG_RING_SIZE (sizeof(struct bnx2_sw_pg) * BNX2_RX_DESC_CNT)
+#define RXBD_RING_SIZE (sizeof(struct bnx2_rx_bd) * BNX2_RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnx2_sw_tx_bd) * BNX2_TX_DESC_CNT)
+#define TXBD_RING_SIZE (sizeof(struct bnx2_tx_bd) * BNX2_TX_DESC_CNT)
/* Buffered flash (Atmel: AT45DB011B) specific information */
#define SEEPROM_PAGE_BITS 2
@@ -6720,8 +6721,8 @@ struct bnx2_tx_ring_info {
u32 tx_bidx_addr;
u32 tx_bseq_addr;
- struct tx_bd *tx_desc_ring;
- struct sw_tx_bd *tx_buf_ring;
+ struct bnx2_tx_bd *tx_desc_ring;
+ struct bnx2_sw_tx_bd *tx_buf_ring;
u16 tx_cons;
u16 hw_tx_cons;
@@ -6741,13 +6742,13 @@ struct bnx2_rx_ring_info {
u16 rx_pg_prod;
u16 rx_pg_cons;
- struct sw_bd *rx_buf_ring;
- struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
- struct sw_pg *rx_pg_ring;
- struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS];
+ struct bnx2_sw_bd *rx_buf_ring;
+ struct bnx2_rx_bd *rx_desc_ring[BNX2_MAX_RX_RINGS];
+ struct bnx2_sw_pg *rx_pg_ring;
+ struct bnx2_rx_bd *rx_pg_desc_ring[BNX2_MAX_RX_PG_RINGS];
- dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
- dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS];
+ dma_addr_t rx_desc_mapping[BNX2_MAX_RX_RINGS];
+ dma_addr_t rx_pg_desc_mapping[BNX2_MAX_RX_PG_RINGS];
};
struct bnx2_napi {
@@ -6853,33 +6854,31 @@ struct bnx2 {
u32 chip_id;
/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
-#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000)
-#define CHIP_NUM_5706 0x57060000
-#define CHIP_NUM_5708 0x57080000
-#define CHIP_NUM_5709 0x57090000
-
-#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
-#define CHIP_REV_Ax 0x00000000
-#define CHIP_REV_Bx 0x00001000
-#define CHIP_REV_Cx 0x00002000
-
-#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0)
-#define CHIP_BONDING(bp) (((bp)->chip_id) & 0x0000000f)
-
-#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
-#define CHIP_ID_5706_A0 0x57060000
-#define CHIP_ID_5706_A1 0x57060010
-#define CHIP_ID_5706_A2 0x57060020
-#define CHIP_ID_5708_A0 0x57080000
-#define CHIP_ID_5708_B0 0x57081000
-#define CHIP_ID_5708_B1 0x57081010
-#define CHIP_ID_5709_A0 0x57090000
-#define CHIP_ID_5709_A1 0x57090010
-
-#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf)
+#define BNX2_CHIP(bp) (((bp)->chip_id) & 0xffff0000)
+#define BNX2_CHIP_5706 0x57060000
+#define BNX2_CHIP_5708 0x57080000
+#define BNX2_CHIP_5709 0x57090000
+
+#define BNX2_CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
+#define BNX2_CHIP_REV_Ax 0x00000000
+#define BNX2_CHIP_REV_Bx 0x00001000
+#define BNX2_CHIP_REV_Cx 0x00002000
+
+#define BNX2_CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0)
+#define BNX2_CHIP_BOND(bp) (((bp)->chip_id) & 0x0000000f)
+
+#define BNX2_CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
+#define BNX2_CHIP_ID_5706_A0 0x57060000
+#define BNX2_CHIP_ID_5706_A1 0x57060010
+#define BNX2_CHIP_ID_5706_A2 0x57060020
+#define BNX2_CHIP_ID_5708_A0 0x57080000
+#define BNX2_CHIP_ID_5708_B0 0x57081000
+#define BNX2_CHIP_ID_5708_B1 0x57081010
+#define BNX2_CHIP_ID_5709_A0 0x57090000
+#define BNX2_CHIP_ID_5709_A1 0x57090010
/* A serdes chip will have the first bit of the bond id set. */
-#define CHIP_BOND_ID_SERDES_BIT 0x01
+#define BNX2_CHIP_BOND_SERDES_BIT 0x01
u32 phy_addr;
u32 phy_id;
@@ -6985,19 +6984,20 @@ struct bnx2 {
#ifdef BCM_CNIC
struct mutex cnic_lock;
struct cnic_eth_dev cnic_eth_dev;
+ struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
#endif
const struct firmware *mips_firmware;
const struct firmware *rv2p_firmware;
};
-#define REG_RD(bp, offset) \
+#define BNX2_RD(bp, offset) \
readl(bp->regview + offset)
-#define REG_WR(bp, offset, val) \
+#define BNX2_WR(bp, offset, val) \
writel(val, bp->regview + offset)
-#define REG_WR16(bp, offset, val) \
+#define BNX2_WR16(bp, offset, val) \
writew(val, bp->regview + offset)
struct cpu_reg {
@@ -7052,7 +7052,7 @@ struct bnx2_rv2p_fw_file {
#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0
#define RV2P_BD_PAGE_SIZE_MSK 0xffff
-#define RV2P_BD_PAGE_SIZE ((BCM_PAGE_SIZE / 16) - 1)
+#define RV2P_BD_PAGE_SIZE ((BNX2_PAGE_SIZE / 16) - 1)
#define RV2P_PROC1 0
#define RV2P_PROC2 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c47b8c8..e8d4db10c8f3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,25 +34,16 @@
#include "bnx2x_hsi.h"
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
-#define BCM_CNIC 1
#include "../cnic_if.h"
-#endif
-#ifdef BCM_CNIC
-#define BNX2X_MIN_MSIX_VEC_CNT 3
-#define BNX2X_MSIX_VEC_FP_START 2
-#else
-#define BNX2X_MIN_MSIX_VEC_CNT 2
-#define BNX2X_MSIX_VEC_FP_START 1
-#endif
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
#include <linux/mdio.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_mfw_req.h"
-#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
@@ -256,15 +247,10 @@ enum {
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
-/** Additional rings budgeting */
-#ifdef BCM_CNIC
-#define CNIC_PRESENT 1
-#define FCOE_PRESENT 1
-#else
-#define CNIC_PRESENT 0
-#define FCOE_PRESENT 0
-#endif /* BCM_CNIC */
-#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
+#define CNIC_SUPPORT(bp) ((bp)->cnic_support)
+#define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
+#define CNIC_LOADED(bp) ((bp)->cnic_loaded)
+#define FCOE_INIT(bp) ((bp)->fcoe_init)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +283,7 @@ enum {
OOO_TXQ_IDX_OFFSET,
};
#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
-#ifdef BCM_CNIC
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
-#endif
/* fast path */
/*
@@ -505,7 +489,7 @@ struct bnx2x_fastpath {
u32 ustorm_rx_prods_offset;
u32 rx_buf_size;
-
+ u32 rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
dma_addr_t status_blk_mapping;
enum bnx2x_tpa_mode_t mode;
@@ -585,15 +569,9 @@ struct bnx2x_fastpath {
->var)
-#define IS_ETH_FP(fp) (fp->index < \
- BNX2X_NUM_ETH_QUEUES(fp->bp))
-#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
-#else
-#define IS_FCOE_FP(fp) false
-#define IS_FCOE_IDX(idx) false
-#endif
+#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
/* MC hsi */
@@ -886,6 +864,18 @@ struct bnx2x_common {
(CHIP_REV(bp) == CHIP_REV_Bx))
#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
(CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure Parser / Searcher
+ * to nic-only mode or to offload mode. Offload mode is configured if either the
+ * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
+ * registered for this port (which means that the user wants storage services).
+ * 2. During cnic-related load, to know if offload mode is already configured in
+ * the HW or needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only in ports on which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
int flash_size;
#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -925,6 +915,7 @@ struct bnx2x_common {
#define BNX2X_IGU_STAS_MSG_VF_CNT 64
#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+#define MAX_IGU_ATTN_ACK_TO 100
/* end of common */
/* port */
@@ -946,7 +937,6 @@ struct bnx2x_port {
/* used to synchronize phy accesses */
struct mutex phy_mutex;
- int need_hw_lock;
u32 port_stx;
@@ -1003,18 +993,15 @@ union cdu_context {
#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
-#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
#define CNIC_FCOE_CID_MAX 2048
#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
-#endif
#define QM_ILT_PAGE_SZ_HW 0
#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
#define QM_CID_ROUND 1024
-#ifdef BCM_CNIC
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1019,6 @@ union cdu_context {
#define SRC_T2_SZ SRC_ILT_SZ
#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
-#endif
-
#define MAX_DMAE_C 8
/* DMA memory not used in fastpath */
@@ -1201,6 +1186,7 @@ struct bnx2x_prev_path_list {
u8 slot;
u8 path;
struct list_head list;
+ u8 undi;
};
struct bnx2x_sp_objs {
@@ -1227,7 +1213,6 @@ struct bnx2x {
struct bnx2x_sp_objs *sp_objs;
struct bnx2x_fp_stats *fp_stats;
struct bnx2x_fp_txdata *bnx2x_txq;
- int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1350,6 +1335,16 @@ struct bnx2x {
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+ u8 cnic_support;
+ bool cnic_enabled;
+ bool cnic_loaded;
+ struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
+
+ /* Flag that indicates that we can start looking for FCoE L2 queue
+ * completions in the default status block.
+ */
+ bool fcoe_init;
+
int pm_cap;
int mrrs;
@@ -1420,6 +1415,8 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ uint num_ethernet_queues;
+ uint num_cnic_queues;
int num_napi_queues;
int disable_tpa;
@@ -1433,6 +1430,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+ u8 min_msix_vec_cnt;
dma_addr_t def_status_blk_mapping;
@@ -1478,26 +1476,23 @@ struct bnx2x {
* Maximum supported number of RSS queues: number of IGU SBs minus one that goes
* to CNIC.
*/
-#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
/*
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+ + 2 * CNIC_SUPPORT(bp))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+ + 2 * CNIC_SUPPORT(bp))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
int qm_cid_count;
- int dropless_fc;
+ bool dropless_fc;
-#ifdef BCM_CNIC
- u32 cnic_flags;
-#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
struct cnic_ops __rcu *cnic_ops;
@@ -1518,7 +1513,6 @@ struct bnx2x {
/* Start index of the "special" (CNIC related) L2 clients */
u8 cnic_base_cl_id;
-#endif
int dmae_ready;
/* used to synchronize dmae accesses */
@@ -1647,9 +1641,9 @@ struct bnx2x {
/* Tx queues may be less or equal to Rx queues */
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
- NON_ETH_CONTEXT_USE)
+ (bp)->num_cnic_queues)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1683,13 @@ struct bnx2x_func_init_params {
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
+#define for_each_cnic_queue(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_eth_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
@@ -1702,6 +1703,22 @@ struct bnx2x_func_init_params {
else
/* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_rx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_rx_queue(bp, var)) \
@@ -1709,6 +1726,22 @@ struct bnx2x_func_init_params {
else
/* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var) \
+ for ((var) = 0; \
+ (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+ BNX2X_NUM_ETH_QUEUES(bp)); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue_cnic(bp, var) \
+ for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+ (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_tx_queue(bp, var)) \
@@ -2179,7 +2212,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
-#ifdef BCM_CNIC
#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
@@ -2196,9 +2228,12 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
-#else
-#define IS_MF_FCOE_AFEX(bp) false
-#endif
+enum {
+ SWITCH_UPDATE,
+ AFEX_UPDATE,
+};
+
+#define NUM_MACS 8
#endif /* bnx2x.h */
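
The queue-iteration macros added above (for_each_cnic_queue, for_each_rx_queue_cnic, for_each_valid_tx_queue, ...) all rely on the same for / if-continue / else idiom, which keeps the skip test in the loop header while the caller's single statement or block binds to the trailing else. A minimal stand-alone sketch of that idiom, with an invented skip rule:

#include <stdio.h>

#define NUM_QUEUES 6

static int skip_queue(int i)
{
	return i == 2;		/* pretend queue 2 has no work */
}

/* Same shape as for_each_cnic_queue()/for_each_rx_queue_cnic(): skipped
 * indices hit "continue", everything else falls through to the user body. */
#define for_each_active_queue(var)			\
	for ((var) = 0; (var) < NUM_QUEUES; (var)++)	\
		if (skip_queue((var)))			\
			continue;			\
		else

int main(void)
{
	int i;

	for_each_active_queue(i)
		printf("servicing queue %d\n", i);
	return 0;
}
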
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..f771ddfba646 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -80,12 +80,37 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
}
- memcpy(&bp->bnx2x_txq[old_txdata_index],
- &bp->bnx2x_txq[new_txdata_index],
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
sizeof(struct bnx2x_fp_txdata));
to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
+ * backward along the array could cause memory to be overridden
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
+
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
@@ -552,6 +577,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
+static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+{
+ if (fp->rx_frag_size)
+ put_page(virt_to_head_page(data));
+ else
+ kfree(data);
+}
+
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+{
+ if (fp->rx_frag_size)
+ return netdev_alloc_frag(fp->rx_frag_size);
+
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+}
+
+
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
u16 pages,
@@ -574,15 +616,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-
+ new_data = bnx2x_frag_alloc(fp);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_data))
- skb = build_skb(data, 0);
+ skb = build_skb(data, fp->rx_frag_size);
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
@@ -619,7 +660,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
- kfree(new_data);
+ bnx2x_frag_free(fp, new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
@@ -635,7 +676,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ data = bnx2x_frag_alloc(fp);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -643,7 +684,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- kfree(data);
+ bnx2x_frag_free(fp, data);
BNX2X_ERR("Can't map rx data\n");
return -ENOMEM;
}
@@ -845,9 +886,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, 0);
+ skb = build_skb(data, fp->rx_frag_size);
if (unlikely(!skb)) {
- kfree(data);
+ bnx2x_frag_free(fp, data);
bnx2x_fp_qstats(bp, fp)->
rx_skb_alloc_failed++;
goto next_rx;
@@ -948,14 +989,12 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
mutex_lock(&bp->port.phy_mutex);
- if (bp->port.need_hw_lock)
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
void bnx2x_release_phy_lock(struct bnx2x *bp)
{
- if (bp->port.need_hw_lock)
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
mutex_unlock(&bp->port.phy_mutex);
}
@@ -1147,11 +1186,30 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- kfree(data);
+ bnx2x_frag_free(fp, data);
first_buf->data = NULL;
}
}
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+ }
+}
+
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -1159,7 +1217,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
int i, j;
/* Allocate TPA resources */
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
DP(NETIF_MSG_IFUP,
@@ -1173,8 +1231,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
- GFP_ATOMIC);
+ first_buf->data = bnx2x_frag_alloc(fp);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1217,7 +1274,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@@ -1244,29 +1301,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
- int i;
u8 cos;
+ struct bnx2x *bp = fp->bp;
- for_each_tx_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
- unsigned pkts_compl = 0, bytes_compl = 0;
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 sw_prod = txdata->tx_pkt_prod;
- u16 sw_cons = txdata->tx_pkt_cons;
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
- while (sw_cons != sw_prod) {
- bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
- &pkts_compl, &bytes_compl);
- sw_cons++;
- }
- netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev,
- txdata->txq_index));
+ while (sw_cons != sw_prod) {
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
+ sw_cons++;
}
+
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
+ }
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+ }
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i) {
+ bnx2x_free_tx_skbs_queue(&bp->fp[i]);
}
}
@@ -1290,7 +1363,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
fp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->data = NULL;
- kfree(data);
+ bnx2x_frag_free(fp, data);
+ }
+}
+
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue_cnic(bp, j) {
+ bnx2x_free_rx_bds(&bp->fp[j]);
}
}
@@ -1298,7 +1380,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int j;
- for_each_rx_queue(bp, j) {
+ for_each_eth_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
bnx2x_free_rx_bds(fp);
@@ -1308,6 +1390,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
}
}
+void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs_cnic(bp);
+ bnx2x_free_rx_skbs_cnic(bp);
+}
+
void bnx2x_free_skbs(struct bnx2x *bp)
{
bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1435,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
bp->msix_table[offset].vector);
offset++;
-#ifdef BCM_CNIC
- if (nvecs == offset)
- return;
- offset++;
-#endif
+
+ if (CNIC_SUPPORT(bp)) {
+ if (nvecs == offset)
+ return;
+ offset++;
+ }
for_each_eth_queue(bp, i) {
if (nvecs == offset)
@@ -1368,7 +1457,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
if (bp->flags & USING_MSIX_FLAG &&
!(bp->flags & USING_SINGLE_MSIX_FLAG))
bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_PRESENT + 1);
+ CNIC_SUPPORT(bp) + 1);
else
free_irq(bp->dev->irq, bp->dev);
}
@@ -1382,12 +1471,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[0].entry);
msix_vec++;
-#ifdef BCM_CNIC
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
- bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
- msix_vec++;
-#endif
+ /* CNIC requires an MSI-X vector for itself */
+ if (CNIC_SUPPORT(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+ msix_vec, bp->msix_table[msix_vec].entry);
+ msix_vec++;
+ }
+
/* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1487,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1404,7 +1495,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
/* how less vectors we will have? */
int diff = req_cnt - rc;
@@ -1419,7 +1510,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
/*
* decrease number of queues by number of unallocated entries
*/
- bp->num_queues -= diff;
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
@@ -1435,6 +1527,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
BNX2X_DEV_INFO("Using single MSI-X vector\n");
bp->flags |= USING_SINGLE_MSIX_FLAG;
+ BNX2X_DEV_INFO("set number of queues to 1\n");
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
@@ -1464,9 +1559,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
return -EBUSY;
}
-#ifdef BCM_CNIC
- offset++;
-#endif
+ if (CNIC_SUPPORT(bp))
+ offset++;
+
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1580,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_PRESENT;
+ offset = 1 + CNIC_SUPPORT(bp);
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
bp->msix_table[0].vector,
0, bp->msix_table[offset].vector,
@@ -1556,19 +1651,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
return 0;
}
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@@ -1576,6 +1687,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
{
if (netif_running(bp->dev)) {
bnx2x_napi_enable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_enable_cnic(bp);
bnx2x_int_enable(bp);
if (bp->state == BNX2X_STATE_OPEN)
netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1699,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
bnx2x_int_disable_sync(bp, disable_hw);
bnx2x_napi_disable(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct bnx2x *bp = netdev_priv(dev);
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
+ if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
@@ -1609,7 +1723,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
return bnx2x_fcoe_tx(bp, txq_index);
}
-#endif
+
/* select a non-FCoE queue */
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
@@ -1618,15 +1732,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
- bp->num_queues = bnx2x_calc_num_queues(bp);
+ bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
-#ifdef BCM_CNIC
/* override in STORAGE SD modes */
if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
- bp->num_queues = 1;
-#endif
+ bp->num_ethernet_queues = 1;
+
/* Add special queues */
- bp->num_queues += NON_ETH_CONTEXT_USE;
+ bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
@@ -1653,20 +1767,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
* bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
* will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
*/
-static int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
int rc, tx, rx;
tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
- rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
/* account for fcoe queue */
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
- rx += FCOE_PRESENT;
- tx += FCOE_PRESENT;
+ if (include_cnic && !NO_FCOE(bp)) {
+ rx++;
+ tx++;
}
-#endif
rc = netif_set_real_num_tx_queues(bp->dev, tx);
if (rc) {
@@ -1710,6 +1822,10 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
mtu +
BNX2X_FW_RX_ALIGN_END;
/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
+ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+ fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+ else
+ fp->rx_frag_size = 0;
}
}
@@ -1741,7 +1857,6 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
- int i;
/* Although RSS is meaningless when there is a single HW queue we
* still need it enabled in order to have HW Rx hash generated.
@@ -1773,9 +1888,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- for (i = 0; i < sizeof(params.rss_key) / 4; i++)
- params.rss_key[i] = random32();
-
+ prandom_bytes(params.rss_key, sizeof(params.rss_key));
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
@@ -1859,14 +1972,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
(bp)->state = BNX2X_STATE_ERROR; \
goto label; \
} while (0)
-#else
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ goto label; \
+ } while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
do { \
(bp)->state = BNX2X_STATE_ERROR; \
(bp)->panic = 1; \
return -EBUSY; \
} while (0)
-#endif
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+ do { \
+ bp->cnic_loaded = false; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
{
@@ -1959,10 +2084,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->max_cos = 1;
/* Init txdata pointers */
-#ifdef BCM_CNIC
if (IS_FCOE_FP(fp))
fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
-#endif
if (IS_ETH_FP(fp))
for_each_cos_in_tx_queue(fp, cos)
fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2103,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
else if (bp->flags & GRO_ENABLE_FLAG)
fp->mode = TPA_MODE_GRO;
-#ifdef BCM_CNIC
/* We don't want TPA on an FCoE L2 ring */
if (IS_FCOE_FP(fp))
fp->disable_tpa = 1;
-#endif
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+ int i, rc, port = BP_PORT(bp);
+
+ DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+ mutex_init(&bp->cnic_mutex);
+
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ rc = bnx2x_alloc_fp_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Update the number of queues with the cnic queues */
+ rc = bnx2x_set_real_num_queues(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
+
+ /* Add all CNIC NAPI objects */
+ bnx2x_add_all_napi_cnic(bp);
+ DP(NETIF_MSG_IFUP, "cnic napi added\n");
+ bnx2x_napi_enable_cnic(bp);
+
+ rc = bnx2x_init_hw_func_cnic(bp);
+ if (rc)
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+ bnx2x_nic_init_cnic(bp);
+
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
+ }
+
+ /* Initialize Rx filter. */
+ netif_addr_lock_bh(bp->dev);
+ bnx2x_set_rx_mode(bp->dev);
+ netif_addr_unlock_bh(bp->dev);
+
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
+ bp->cnic_loaded = true;
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+
+ DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
+
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+ bnx2x_napi_disable_cnic(bp);
+ /* Update the number of queues without the cnic queues */
+ rc = bnx2x_set_real_num_queues(bp, 0);
+ if (rc)
+ BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+ BNX2X_ERR("CNIC-related load failed\n");
+ bnx2x_free_fp_mem_cnic(bp);
+ bnx2x_free_mem_cnic(bp);
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
}
@@ -1995,6 +2202,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
u32 load_code;
int i, rc;
+ DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+ DP(NETIF_MSG_IFUP,
+ "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic)) {
BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2233,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
- memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
- sizeof(struct bnx2x_fp_txdata));
+ memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+ bp->num_cnic_queues) *
+ sizeof(struct bnx2x_fp_txdata));
+ bp->fcoe_init = false;
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2247,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
- * come after it.
+ * come after it. At this stage cnic queues are not counted.
*/
- rc = bnx2x_set_real_num_queues(bp);
+ rc = bnx2x_set_real_num_queues(bp, 0);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2263,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
+ DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */
@@ -2073,7 +2287,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DRV_PULSE_SEQ_MASK);
BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
@@ -2191,23 +2406,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error3);
}
-#ifdef BCM_CNIC
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
-#endif
-
- for_each_nondefault_queue(bp, i) {
+ for_each_nondefault_eth_queue(bp, i) {
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
if (rc) {
BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
}
rc = bnx2x_init_rss_pf(bp);
if (rc) {
BNX2X_ERR("PF RSS init failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now when Clients are configured we are ready to work */
@@ -2217,7 +2427,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_set_eth_mac(bp, true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
- LOAD_ERROR_EXIT(bp, load_error4);
+ LOAD_ERROR_EXIT(bp, load_error3);
}
if (bp->pending_max) {
@@ -2227,6 +2437,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
/* Start fast path */
@@ -2257,21 +2468,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
if (bp->port.pmf)
- bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
+ bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
else
bnx2x__link_status_update(bp);
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
-#ifdef BCM_CNIC
- /* re-read iscsi info */
- bnx2x_get_iscsi_info(bp);
- bnx2x_setup_cnic_irq_info(bp);
- bnx2x_setup_cnic_info(bp);
- if (bp->state == BNX2X_STATE_OPEN)
- bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
+ if (CNIC_ENABLED(bp))
+ bnx2x_load_cnic(bp);
/* mark driver is loaded in shmem2 */
if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2498,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
bnx2x_dcbx_init(bp, false);
+ DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
-load_error4:
-#ifdef BCM_CNIC
- /* Disable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-#endif
load_error3:
bnx2x_int_disable_sync(bp, 1);
@@ -2338,6 +2540,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
int i;
bool global = false;
+ DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
/* mark driver is unloaded in shmem2 */
if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
@@ -2373,14 +2577,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
smp_mb();
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
/* Stop Tx */
bnx2x_tx_disable(bp);
netdev_reset_tc(bp->dev);
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-
bp->rx_mode = BNX2X_RX_MODE_NONE;
del_timer_sync(&bp->timer);
@@ -2414,7 +2617,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
-
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -2435,12 +2639,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_skbs_cnic(bp);
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem_cnic(bp);
+ bnx2x_free_mem_cnic(bp);
+ }
bnx2x_free_mem(bp);
bp->state = BNX2X_STATE_CLOSED;
+ bp->cnic_loaded = false;
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
@@ -2460,6 +2671,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
+ DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
return 0;
}
@@ -2550,7 +2763,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-#ifdef BCM_CNIC
+
/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
* has been updated when NAPI was scheduled.
@@ -2559,8 +2772,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
break;
}
-#endif
-
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@@ -2940,7 +3151,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq_index = skb_get_queue_mapping(skb);
txq = netdev_get_tx_queue(dev, txq_index);
- BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
txdata = &bp->bnx2x_txq[txq_index];
@@ -2958,11 +3169,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BDS_PER_TX_PKT +
NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
/* Handle special storage cases separately */
- if (txdata->tx_ring_size != 0) {
- BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+ if (txdata->tx_ring_size == 0) {
+ struct bnx2x_eth_q_stats *q_stats =
+ bnx2x_fp_qstats(bp, txdata->parent_fp);
+ q_stats->driver_filtered_tx_pkt++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
- }
+ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -3339,13 +3555,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
-#ifdef BCM_CNIC
if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
!is_zero_ether_addr(addr->sa_data)) {
BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL;
}
-#endif
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3583,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
u8 cos;
/* Common */
-#ifdef BCM_CNIC
+
if (IS_FCOE_IDX(fp_index)) {
memset(sb, 0, sizeof(union host_hc_status_block));
fp->status_blk_mapping = 0;
-
} else {
-#endif
/* status blocks */
if (!CHIP_IS_E1x(bp))
BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3599,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
bnx2x_fp(bp, fp_index,
status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
}
-#endif
+
/* Rx */
if (!skip_rx_queue(bp, fp_index)) {
bnx2x_free_rx_bds(fp);
@@ -3431,10 +3642,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* end of fastpath */
}
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+ int i;
+ for_each_cnic_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_free_fp_mem_at(bp, i);
}
@@ -3519,14 +3737,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
-#ifdef BCM_CNIC
if (!bp->rx_ring_size &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
- } else
-#endif
- if (!bp->rx_ring_size) {
+ } else if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3765,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
-#ifdef BCM_CNIC
+
if (!IS_FCOE_IDX(index)) {
-#endif
/* status blocks */
if (!CHIP_IS_E1x(bp))
BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3776,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
BNX2X_PCI_ALLOC(sb->e1x_sb,
&bnx2x_fp(bp, index, status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
}
-#endif
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
* set shortcuts for it.
@@ -3641,31 +3853,31 @@ alloc_mem_err:
return 0;
}
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
+ /* we will fail load process instead of marking
+ * NO_FCOE_FLAG
+ */
+ return -ENOMEM;
+
+ return 0;
+}
+
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
int i;
- /**
- * 1. Allocate FP for leading - fatal if error
- * 2. {CNIC} Allocate FCoE FP - fatal if error
- * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
- * 4. Allocate RSS - fix number of queues if error
+ /* 1. Allocate FP for leading - fatal if error
+ * 2. Allocate RSS - fix number of queues if error
*/
/* leading */
if (bnx2x_alloc_fp_mem_at(bp, 0))
return -ENOMEM;
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp))
- /* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
- /* we will fail load process instead of mark
- * NO_FCOE_FLAG
- */
- return -ENOMEM;
-#endif
-
/* RSS */
for_each_nondefault_eth_queue(bp, i)
if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3888,18 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
-#ifdef BCM_CNIC
- /**
- * move non eth FPs next to last eth FP
- * must be done in that order
- * FCOE_IDX < FWD_IDX < OOO_IDX
- */
+ bnx2x_shrink_eth_fp(bp, delta);
+ if (CNIC_SUPPORT(bp))
+ /* move non eth FPs next to last eth FP
+ * must be done in that order
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
- /* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
-#endif
- bp->num_queues -= delta;
+ /* move FCoE fp even if NO_FCOE_FLAG is on */
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+ bp->num_ethernet_queues -= delta;
+ bp->num_queues = bp->num_ethernet_queues +
+ bp->num_cnic_queues;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
bp->num_queues + delta, bp->num_queues);
}
@@ -3705,13 +3918,13 @@ void bnx2x_free_mem_bp(struct bnx2x *bp)
kfree(bp->ilt);
}
-int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
+int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
struct bnx2x_fastpath *fp;
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
- int fp_array_size;
+ int fp_array_size, txq_array_size;
int i;
/*
@@ -3721,7 +3934,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3963,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
goto alloc_err;
/* Allocate memory for the transmission queues array */
- bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
-#ifdef BCM_CNIC
- bp->bnx2x_txq_size++;
-#endif
- bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
- sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ txq_array_size =
+ BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+ BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+ bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+ GFP_KERNEL);
if (!bp->bnx2x_txq)
goto alloc_err;
@@ -3838,7 +4051,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
return LINK_CONFIG_IDX(sel_phy_idx);
}
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
struct bnx2x *bp = netdev_priv(dev);
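
The rx_frag_size plumbing above ties three places together: bnx2x_frag_alloc() picks netdev_alloc_frag() when rx_buf_size + NET_SKB_PAD fits in a page and kmalloc() otherwise, build_skb() is given the same rx_frag_size so the skb layer frees the memory the matching way, and bnx2x_frag_free() mirrors the test on the error path. A condensed sketch of that contract, assuming the driver's structures and the helpers added in this patch (the function name is invented):

static struct sk_buff *rx_build_one(struct bnx2x_fastpath *fp)
{
	/* frag allocator or kmalloc(), decided by fp->rx_frag_size */
	void *data = bnx2x_frag_alloc(fp);
	struct sk_buff *skb;

	if (!data)
		return NULL;

	/* second argument 0 means "kmalloc'ed buffer" to the skb layer */
	skb = build_skb(data, fp->rx_frag_size);
	if (!skb)
		bnx2x_frag_free(fp, data);	/* put_page() or kfree(), matching the alloc */

	return skb;
}
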
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c5b4c7..0991534f61da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -144,7 +144,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
* @bp: driver handle
* @load_mode: current mode
*/
-u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
/**
* bnx2x_link_set - configure hw according to link parameters structure.
@@ -238,7 +238,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
* @dev_instance: private instance
*/
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
-#ifdef BCM_CNIC
/**
* bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
*/
void bnx2x_setup_cnic_info(struct bnx2x *bp);
-#endif
-
/**
* bnx2x_int_enable - enable HW interrupts.
*
@@ -283,7 +280,7 @@ void bnx2x_int_enable(struct bnx2x *bp);
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_nic_init_cnic - init driver internals for cnic.
*
* @bp: driver handle
* @load_code: COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
* - status blocks
* - etc.
*/
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ *
+ * Initializes:
+ * - rings
+ * - status blocks
+ * - etc.
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+/**
+ * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
+/**
* bnx2x_alloc_mem - allocate driver's memory.
*
* @bp: driver handle
@@ -303,6 +317,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
int bnx2x_alloc_mem(struct bnx2x *bp);
/**
+ * bnx2x_free_mem_cnic - release driver's memory for cnic.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem_cnic(struct bnx2x *bp);
+/**
* bnx2x_free_mem - release driver's memory.
*
* @bp: driver handle
@@ -407,6 +427,7 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
/**
* bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
+ * bnx2x_ilt_set_cnic_info - prepare ILT configurations for SRC
+ * and TM.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
+
+/**
* bnx2x_dcbx_init - initialize dcbx protocol.
*
* @bp: driver handle
@@ -491,12 +520,17 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
+void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
+int bnx2x_load_cnic(struct bnx2x *bp);
/**
* bnx2x_enable_msix - set msix configuration.
@@ -529,7 +563,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget);
*
* @bp: driver handle
*/
-int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
+int bnx2x_alloc_mem_bp(struct bnx2x *bp);
/**
* bnx2x_free_mem_bp - release memories outside main driver structure
@@ -547,7 +581,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
*/
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
/**
* bnx2x_fcoe_get_wwn - return the requested WWN value for this port
*
@@ -793,23 +827,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0;
}
-static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
int i;
- bp->num_napi_queues = bp->num_queues;
+ /* Add NAPI objects */
+ for_each_rx_queue_cnic(bp, i)
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
+static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue_cnic(bp, i)
+ netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_eth_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@@ -979,11 +1029,9 @@ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
if (!CHIP_IS_E1x(bp)) {
-#ifdef BCM_CNIC
/* there are special statistics counters for FCoE 136..140 */
if (IS_FCOE_FP(fp))
return bp->cnic_base_cl_id + (bp->pf_num >> 1);
-#endif
return fp->cl_id;
}
return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
txdata->cid, txdata->txq_index);
}
-#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
}
-#endif
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
*/
return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
-#ifdef BCM_CNIC
+
/**
* bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
*
@@ -1288,7 +1334,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
*
*/
void bnx2x_get_iscsi_info(struct bnx2x *bp);
-#endif
/**
* bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
- if (is_valid_ether_addr(addr))
+ if (is_valid_ether_addr(addr) ||
+ (is_zero_ether_addr(addr) &&
+ (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
return true;
-#ifdef BCM_CNIC
- if (is_zero_ether_addr(addr) &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
- return true;
-#endif
+
return false;
}
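
Across these headers the compile-time #ifdef BCM_CNIC guards become per-device run-time flags (cnic_support / cnic_enabled / cnic_loaded), so one binary handles both cases and the extra cost is a predictable branch. A simplified illustration of the resulting accounting, mirroring the MSI-X request count of one slow-path vector, an optional CNIC vector and one vector per ETH queue (names are illustrative, not the driver's):

struct nic_caps {
	unsigned char cnic_support;	/* probed once at init time */
};

static int msix_vectors_needed(const struct nic_caps *caps, int eth_queues)
{
	/* 1 slow-path vector + optional CNIC vector + one per ETH queue */
	return 1 + (caps->cnic_support ? 1 : 0) + eth_queues;
}
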
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c3895409..10bc093d2ca4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -413,8 +413,11 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
{
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
if (bp->dcbx_port_params.pfc.enabled &&
- !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
/*
* 1. Fills up common PFC structures if required
* 2. Configure NIG, MAC and BRB via the elink
@@ -552,10 +555,13 @@ static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
{
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
if (!bp->dcbx_port_params.ets.enabled ||
- (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured))
return;
if (CHIP_IS_E3B0(bp))
@@ -1802,11 +1808,14 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
u8 cos = 0, pri = 0;
struct priority_cos *tt2cos;
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags),
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
/* to disable DCB - the structure must be zeroed */
- if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+ if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)
return;
/*shortcut*/
@@ -1895,6 +1904,11 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+ if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
+ (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
+ DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
+ return 1;
+ }
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
}
@@ -1908,10 +1922,10 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
/* first the HW mac address */
memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
-#ifdef BCM_CNIC
- /* second SAN address */
- memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
-#endif
+ if (CNIC_LOADED(bp))
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
+ netdev->addr_len);
}
static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
@@ -2038,10 +2052,13 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
- bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
- if (setting)
+ if (setting) {
+ bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ } else {
+ bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio);
+ }
}
static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
@@ -2073,8 +2090,12 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev))
+ if (netif_running(bp->dev)) {
+ bnx2x_update_drv_flags(bp,
+ 1 << DRV_FLAGS_DCB_MFW_CONFIGURED,
+ 1);
bnx2x_dcbx_init(bp, true);
+ }
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
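
The set_pfc_cfg change above also fixes a sticky-bit problem: the old code could only OR the priority bit into admin_pfc_bitmap, so disabling PFC for a priority never cleared it. The set/clear pair now used is the standard idiom; a tiny stand-alone example:

#include <stdio.h>

int main(void)
{
	unsigned int bitmap = 0;
	int prio = 3;

	bitmap |= (1u << prio);			/* enable priority 3 */
	printf("after set  : 0x%x\n", bitmap);	/* prints 0x8 */

	bitmap &= ~(1u << prio);		/* disable priority 3 */
	printf("after clear: 0x%x\n", bitmap);	/* prints 0x0 */
	return 0;
}
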
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 6e5bdd1a31d9..a427b49a886c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -62,7 +62,9 @@ static const struct {
8, "[%s]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ { Q_STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, "[%s]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -177,6 +179,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
{ STATS_OFFSET32(eee_tx_lpi),
4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
@@ -227,18 +231,14 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
}
- if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
- if (!(bp->flags & MF_FUNC_DIS)) {
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
+ !(bp->flags & MF_FUNC_DIS)) {
cmd->duplex = bp->link_vars.duplex;
- } else {
- ethtool_cmd_speed_set(
- cmd, bp->link_params.req_line_speed[cfg_idx]);
- cmd->duplex = bp->link_params.req_duplex[cfg_idx];
- }
if (IS_MF(bp) && !BP_NOMCP(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ else
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
} else {
cmd->duplex = DUPLEX_UNKNOWN;
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
@@ -2660,20 +2660,25 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 1; /* cycle on/off once per second */
case ETHTOOL_ID_ON:
+ bnx2x_acquire_phy_lock(bp);
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_ON, SPEED_1000);
+ bnx2x_release_phy_lock(bp);
break;
case ETHTOOL_ID_OFF:
+ bnx2x_acquire_phy_lock(bp);
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_FRONT_PANEL_OFF, 0);
-
+ bnx2x_release_phy_lock(bp);
break;
case ETHTOOL_ID_INACTIVE:
+ bnx2x_acquire_phy_lock(bp);
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_OPER,
bp->link_vars.line_speed);
+ bnx2x_release_phy_lock(bp);
}
return 0;
@@ -2772,10 +2777,10 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
} else {
return 0;
}
@@ -2901,7 +2906,9 @@ static void bnx2x_get_channels(struct net_device *dev,
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
bnx2x_disable_msi(bp);
- BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bp->num_ethernet_queues = num_rss;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
bnx2x_set_int_mode(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 620fe939ecfd..60a83ad10370 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -23,6 +23,11 @@
(IRO[159].base + ((funcId) * IRO[159].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
+ (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
+ (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
+ * IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[316].base + ((pfId) * IRO[316].m1))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 18704929e642..3369a50ac6b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -500,7 +500,15 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 e3_cmn_pin_cfg1; /* 0x170 */
#define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
#define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
- u32 reserved0[7]; /* 0x174 */
+
+ /* pause on host ring */
+ u32 generic_features; /* 0x174 */
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
+ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
+
+ u32 reserved0[6]; /* 0x178 */
u32 aeu_int_mask; /* 0x190 */
@@ -695,6 +703,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
@@ -751,6 +760,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -1246,6 +1256,7 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
#define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
@@ -1515,12 +1526,13 @@ enum mf_cfg_afex_vlan_mode {
/* This structure is not applicable and should not be accessed on 57711 */
struct func_ext_cfg {
u32 func_cfg;
- #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F
#define MACP_FUNC_CFG_FLAGS_SHIFT 0
#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+ #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080
u32 iscsi_mac_addr_upper;
u32 iscsi_mac_addr_lower;
@@ -2085,8 +2097,13 @@ struct shmem2_region {
/* generic flags controlled by the driver */
u32 drv_flags;
- #define DRV_FLAGS_DCB_CONFIGURED 0x1
+ #define DRV_FLAGS_DCB_CONFIGURED 0x0
+ #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
+ #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2
+ #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
+ (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
+ (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
/* pointer to extended dev_info shared data copied from nvm image */
u32 extended_dev_info_shared_addr;
u32 ncsi_oem_data_addr;
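Note the convention: DRV_FLAGS_DCB_CONFIGURED and the two new names are bit positions, not masks, so callers shift before setting or testing, and DRV_FLAGS_PORT_MASK collects the per-port DCB bits. A standalone sketch of that usage, with the values copied from the defines above:

#include <stdio.h>
#include <stdint.h>

#define DRV_FLAGS_DCB_CONFIGURED            0x0
#define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
#define DRV_FLAGS_DCB_MFW_CONFIGURED        0x2
#define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
			     (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
			     (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))

int main(void)
{
	uint32_t drv_flags = 0;

	drv_flags |= 1 << DRV_FLAGS_DCB_CONFIGURED;	/* set by bit position, not mask */
	printf("configured: %d\n", !!(drv_flags & (1 << DRV_FLAGS_DCB_CONFIGURED)));
	drv_flags &= ~DRV_FLAGS_PORT_MASK;		/* clear all per-port DCB state */
	printf("flags now: 0x%x\n", (unsigned)drv_flags);
	return 0;
}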
@@ -2159,6 +2176,16 @@ struct shmem2_region {
#define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
u32 sizeof_port_stats;
+
+ /* Link Flap Avoidance */
+ u32 lfa_host_addr[PORT_MAX];
+ u32 reserved1;
+
+ u32 reserved2; /* Offset 0x148 */
+ u32 reserved3; /* Offset 0x14C */
+ u32 reserved4; /* Offset 0x150 */
+ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
+ #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
};
@@ -4845,9 +4872,17 @@ struct vif_list_event_data {
__le32 reserved2;
};
-/*
- * union for all event ring message types
- */
+/* function update event data */
+struct function_update_event_data {
+ u8 echo;
+ u8 reserved;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+
+/* union for all event ring message types */
union event_data {
struct vf_pf_event_data vf_pf_event;
struct eth_event_data eth_event;
@@ -4855,6 +4890,7 @@ union event_data {
struct vf_flr_event_data vf_flr_event;
struct malicious_vf_event_data malicious_vf_event;
struct vif_list_event_data vif_list_event;
+ struct function_update_event_data function_update_event;
};
@@ -4984,8 +5020,10 @@ struct function_update_data {
u8 allowed_priorities;
u8 network_cos_mode;
u8 lb_mode_en;
- u8 reserved0;
- __le32 reserved1;
+ u8 tx_switch_suspend_change_flg;
+ u8 tx_switch_suspend;
+ u8 echo;
+ __le16 reserved1;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d902dc62..d755acfe7a40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
return rc;
}
+static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
+{
+ int rc = 0;
+
+ if (CONFIGURE_NIC_MODE(bp))
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
- if (!rc)
+ if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
- if (!rc)
- rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
return rc;
}
@@ -781,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
+static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
+{
+ if (CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
- bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
- bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+ if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
}
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
/****************************************************************************
* SRC initializations
****************************************************************************/
-#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-#endif
#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index f6cfdc6cf20f..09096b43a6e9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -121,6 +121,7 @@
#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -253,6 +254,12 @@ static int bnx2x_check_lfa(struct link_params *params)
if (!(link_status & LINK_STATUS_LINK_UP))
return LFA_LINK_DOWN;
+ /* if loaded after BOOT from SAN, don't flap the link in any case and
+ * rely on link set by preboot driver
+ */
+ if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
+ return 0;
+
/* Verify that loopback mode is not set */
if (params->loopback_mode)
return LFA_LOOPBACK_ENABLED;
@@ -1440,30 +1447,47 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
-static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
+ u32 emac_base)
{
- u32 mode, emac_base;
+ u32 new_mode, cur_mode;
+ u32 clc_cnt;
/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
+ cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
- if (CHIP_IS_E2(bp))
- emac_base = GRCBASE_EMAC0;
- else
- emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
- mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT);
if (USES_WARPCORE(bp))
- mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+ clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
else
- mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+ clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
- mode |= (EMAC_MDIO_MODE_CLAUSE_45);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+ if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
+ (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
+ return;
+ new_mode = cur_mode &
+ ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
+ new_mode |= clc_cnt;
+ new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+
+ DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
+ cur_mode, new_mode);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
udelay(40);
}
+
+static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
+ struct link_params *params)
+{
+ u8 phy_index;
+ /* Set mdio clock per phy */
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++)
+ bnx2x_set_mdio_clk(bp, params->chip_id,
+ params->phy[phy_index].mdio_ctrl);
+}
+
static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
{
u32 port4mode_ovwr_val;
@@ -1508,7 +1532,8 @@ static void bnx2x_emac_init(struct link_params *params,
}
timeout--;
} while (val & EMAC_MODE_RESET);
- bnx2x_set_mdio_clk(bp, params->chip_id, port);
+
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Set mac address */
val = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
@@ -1664,7 +1689,10 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
* ports of the path
*/
- if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
+ if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
+ is_port4mode &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
@@ -1760,6 +1788,18 @@ static int bnx2x_xmac_enable(struct link_params *params,
*/
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+ /* When XMAC is in XLGMII mode, disable sending idles for fault
+ * detection.
+ */
+ if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
+ REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
+ (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
+ XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
+ REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ }
/* Set Max packet size */
REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
@@ -1780,6 +1820,12 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+ /* Set MAC in XLGMII mode for dual-mode */
+ if ((vars->line_speed == SPEED_20000) &&
+ (params->phy[INT_PHY].supported &
+ SUPPORTED_20000baseKR2_Full))
+ val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
+
/* Check loopback mode */
if (lb)
val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
@@ -2096,6 +2142,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (SHMEM2_HAS(bp, link_attr_sync))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port]), link_attr);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2126,7 +2182,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
if (CHIP_IS_E3(bp))
ppp_enable = 0;
else
- ppp_enable = 1;
+ ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm_out_en = 0;
@@ -2247,7 +2303,6 @@ int bnx2x_update_pfc(struct link_params *params,
return bnx2x_status;
}
-
static int bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
@@ -2651,6 +2706,13 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 val;
u16 i;
int rc = 0;
+ u32 chip_id;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+ bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+ }
+
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
@@ -2719,6 +2781,13 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 tmp;
u8 i;
int rc = 0;
+ u32 chip_id;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+ bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
+ }
+
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
@@ -3147,6 +3216,15 @@ static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
}
+static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 and_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
+}
+
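bnx2x_cl45_read_and_write complements the existing read_or_write helper: OR sets the bits passed in, while AND keeps only the bits whose mask positions are 1, i.e. it clears the zero bits. That is why call sites below pass masks such as 0xFFFA or ~(3<<13). A tiny standalone sketch of the effect on a register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reg = 0xffff;		/* pretend this is what cl45_read returned */

	reg &= 0xFFFA;			/* read_and_write(..., 0xFFFA): clears bits 0 and 2 */
	printf("0x%04x\n", reg);	/* 0xfffa */

	reg &= (uint16_t)~(3 << 13);	/* read_and_write(..., ~(3<<13)): clears bits 13-14 */
	printf("0x%04x\n", reg);	/* 0x9ffa */
	return 0;
}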
int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
@@ -3551,6 +3629,44 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
+ /* Step 2 - Configure the NP registers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
+ };
+ DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+
+ /* Start KR2 work-around timer which handles BCM8073 link-partner */
+ vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+}
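The sequence above uses the table-driven pattern that recurs through this patch: a static array of {devad, reg, val} triples walked with sizeof(array)/sizeof(element). A standalone sketch of the idiom with hypothetical register numbers (the real code issues them through bnx2x_cl45_write):

#include <stdio.h>

struct reg_val { unsigned devad, reg, val; };

int main(void)
{
	static const struct reg_val seq[] = {
		{5, 0x0010, 0xa157},	/* hypothetical device/register/value triples */
		{5, 0x0011, 0xcbe2},
	};
	unsigned i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		printf("devad %u reg 0x%04x <- 0x%04x\n",
		       seq[i].devad, seq[i].reg, seq[i].val);
	return 0;
}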
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
@@ -3564,6 +3680,21 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
}
+static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* Restart autoneg on the leading lane only */
+ struct bnx2x *bp = params->bp;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+
+ /* Restore AER */
+ bnx2x_set_aer_mmd(params, phy);
+}
+
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
@@ -3576,7 +3707,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
/* Disable Autoneg: re-enable it after adv is done. */
- {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overridden by 10G force */
@@ -3585,11 +3718,11 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
reg_set[i].val);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
- cl72_ctrl &= 0xf8ff;
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+ cl72_ctrl &= 0x08ff;
cl72_ctrl |= 0x3800;
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3624,6 +3757,16 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
(0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
(0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ /* Configure the next lane if dual mode */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
+ ((0x02 <<
+ MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x06 <<
+ MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x09 <<
+ MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3670,10 +3813,26 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
- /* Enable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == SPEED_20000)) {
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
+ (1<<11));
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
+ bnx2x_set_aer_mmd(params, phy);
+
+ bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ }
+
+ /* Enable Autoneg: only on the main lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
}
static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
@@ -3692,9 +3851,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
/* Leave cl72 training enable, needed for KR */
- {MDIO_PMA_DEVAD,
- MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2}
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
@@ -3764,27 +3921,21 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
/* Disable 100FX Enable and Auto-Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
/* Disable 100FX Idle detect */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
/* Turn off auto-detect & fiber mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
- (val & 0xFFEE));
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0xFFEE);
/* Set filter_force_link, disable_false_link and parallel_detect */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3846,22 +3997,65 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
/* Release tx_fifo_reset */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0xFFFE);
+ /* Release rxSeqStart */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
+}
+
+static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+ /* Set global registers, so set AER lane to 0 */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ /* Disable sequencer */
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
+
+ bnx2x_set_aer_mmd(params, phy);
+
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CTRL, 0);
+ /* Turn off CL73 */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ MDIO_WC_REG_CL73_USERB0_CTRL, &val);
+ val &= ~(1<<5);
+ val |= (1<<6);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+ MDIO_WC_REG_CL73_USERB0_CTRL, val);
+
+ /* Set 20G KR2 force speed */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
- /* Release rxSeqStart */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
+ val &= ~(3<<14);
+ val |= (1<<15);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
-}
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
-static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
- struct bnx2x_phy *phy)
-{
- DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n");
+ /* Enable sequencer (over lane 0) */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
+
+ bnx2x_set_aer_mmd(params, phy);
}
static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
@@ -3931,20 +4125,16 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
u16 val16, digctrl_kx1, digctrl_kx2;
/* Clear XFI clock comp in non-10G single lane mode. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
bnx2x_warpcore_set_lpi_passthrough(phy, params);
if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
- val16 | 0x1000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x1000);
DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4086,7 +4276,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
(cfg_pin > PIN_CFG_GPIO3_P1)) {
DP(NETIF_MSG_LINK,
- "ERROR: Invalid cfg pin %x for module detect indication\n",
+ "No cfg pin %x for module detect indication\n",
cfg_pin);
return -EINVAL;
}
@@ -4097,7 +4287,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
*gpio_num = MISC_REGISTERS_GPIO_3;
*gpio_port = port;
}
- DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+
return 0;
}
@@ -4120,7 +4310,7 @@ static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
return 0;
}
static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
- struct link_params *params)
+ struct link_params *params)
{
u16 gp2_status_reg0, lane;
struct bnx2x *bp = params->bp;
@@ -4134,8 +4324,8 @@ static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
}
static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
@@ -4163,7 +4353,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_KR:
/* Do we get link yet? */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
- &gp_status1);
+ &gp_status1);
lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
/*10G KR*/
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
@@ -4215,6 +4405,27 @@ static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
}
}
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+
+ cfg_pin = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_TX_LASER_MASK;
+ /* Set the !tx_en since this pin is DISABLE_TX_LASER */
+ DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+
+ /* For 20G, the expected pin to be used is 3 pins after the current */
+ bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
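Since the configured pin carries DISABLE_TX_LASER, the helper writes the inverse of tx_en, and on 20G-capable ports the second laser sits three pins further along. A trivial standalone sketch of that inversion, with a hypothetical pin number and a stand-in for bnx2x_set_cfg_pin:

#include <stdio.h>

static void set_cfg_pin(unsigned pin, unsigned val)	/* stand-in for bnx2x_set_cfg_pin */
{
	printf("pin %u <- %u\n", pin, val);
}

int main(void)
{
	unsigned cfg_pin = 4, tx_en = 1;	/* hypothetical pin number */

	set_cfg_pin(cfg_pin, tx_en ^ 1);	/* pin means DISABLE_TX_LASER, so invert */
	set_cfg_pin(cfg_pin + 3, tx_en ^ 1);	/* 20G: the second laser is 3 pins later */
	return 0;
}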
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4275,9 +4486,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
- /* Issue Module detection */
+ /* Issue Module detection if module is plugged, or
+ * enable the transmitter to avoid current leakage in case
+ * no module is connected
+ */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params, phy, 1);
bnx2x_warpcore_config_sfi(phy, params);
break;
@@ -4293,16 +4509,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
break;
-
case PORT_HW_CFG_NET_SERDES_IF_KR2:
- if (vars->line_speed != SPEED_20000) {
- DP(NETIF_MSG_LINK, "Speed not supported yet\n");
- return;
+ if (!params->loopback_mode) {
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
+ bnx2x_warpcore_set_20G_force_KR2(phy, params);
}
- DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
- bnx2x_warpcore_set_20G_KR2(bp, phy);
break;
-
default:
DP(NETIF_MSG_LINK,
"Unsupported Serdes Net Interface 0x%x\n",
@@ -4316,68 +4530,35 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Exit config init\n");
}
-static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 tx_en)
-{
- struct bnx2x *bp = params->bp;
- u32 cfg_pin;
- u8 port = params->port;
-
- cfg_pin = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].e3_sfp_ctrl)) &
- PORT_HW_CFG_TX_LASER_MASK;
- /* Set the !tx_en since this pin is DISABLE_TX_LASER */
- DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
- /* For 20G, the expected pin to be used is 3 pins after the current */
-
- bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
- if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
- bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
-}
-
static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val16, lane;
bnx2x_sfp_e3_set_transmitter(params, phy, 0);
- bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_mdio_emac_per_phy(bp, params);
bnx2x_set_aer_mmd(params, phy);
/* Global register */
bnx2x_warpcore_reset_lane(bp, phy, 1);
/* Clear loopback settings (if any) */
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
- 0xBFFF);
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
/* Update those 1-copy registers */
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 & ~0x10);
-
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL2,
- val16 & 0xff00);
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ ~0x10);
+ bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
lane = bnx2x_get_warpcore_lane(phy, params);
/* Disable CL36 PCS Tx */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4413,8 +4594,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
params->loopback_mode, phy->req_line_speed);
- if (phy->req_line_speed < SPEED_10000) {
- /* 10/100/1000 */
+ if (phy->req_line_speed < SPEED_10000 ||
+ phy->supported & SUPPORTED_20000baseKR2_Full) {
+ /* 10/100/1000/20G-KR2 */
/* Update those 1-copy registers */
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
@@ -4427,18 +4609,20 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ val16 |= (1<<lane);
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ val16 |= (2<<lane);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL2,
- val16 | (1<<lane));
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16);
/* Switch back to 4-copy registers */
bnx2x_set_aer_mmd(params, phy);
} else {
- /* 10G & 20G */
+ /* 10G / 20G-DXGXS */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
0x4000);
-
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
@@ -4603,6 +4787,10 @@ void bnx2x_link_status_update(struct link_params *params,
params->feature_config_flags &=
~FEATURE_CONFIG_PFC_ENABLED;
+ if (SHMEM2_HAS(bp, link_attr_sync))
+ vars->link_attr_sync = SHMEM2_RD(bp,
+ link_attr_sync[params->port]);
+
DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
@@ -5332,6 +5520,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
vars->link_status |= LINK_10GTFD;
break;
case GP_STATUS_20G_DXGXS:
+ case GP_STATUS_20G_KR2:
vars->line_speed = SPEED_20000;
vars->link_status |= LINK_20GTFD;
break;
@@ -5439,7 +5628,15 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
int rc = 0;
lane = bnx2x_get_warpcore_lane(phy, params);
/* Read gp_status */
- if (phy->req_line_speed > SPEED_10000) {
+ if ((params->loopback_mode) &&
+ (phy->flags & FLAGS_WC_DUAL_MODE)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
+ link_up &= 0x1;
+ } else if ((phy->req_line_speed > SPEED_10000) &&
+ (phy->supported & SUPPORTED_20000baseMLD2_Full)) {
u16 temp_link_up;
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
1, &temp_link_up);
@@ -5452,12 +5649,22 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_resolve_fc(phy, params, vars);
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status1);
DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
- /* Check for either KR or generic link up. */
- gp_status1 = ((gp_status1 >> 8) & 0xf) |
- ((gp_status1 >> 12) & 0xf);
- link_up = gp_status1 & (1 << lane);
+ /* Check for either KR, 1G, or AN up. */
+ link_up = ((gp_status1 >> 8) |
+ (gp_status1 >> 12) |
+ (gp_status1)) &
+ (1 << lane);
+ if (phy->supported & SUPPORTED_20000baseKR2_Full) {
+ u16 an_link;
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &an_link);
+ link_up |= (an_link & (1<<2));
+ }
if (link_up && SINGLE_MEDIA_DIRECT(params)) {
u16 pd, gp_status4;
if (phy->req_line_speed == SPEED_AUTO_NEG) {
@@ -5522,7 +5729,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
if ((lane & 1) == 0)
gp_speed <<= 8;
gp_speed &= 0x3f00;
-
+ link_up = !!link_up;
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
@@ -6683,7 +6890,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
}
@@ -6753,7 +6960,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6894,7 +7101,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7604,13 +7811,12 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
static void bnx2x_warpcore_power_module(struct link_params *params,
- struct bnx2x_phy *phy,
u8 power)
{
u32 pin_cfg;
@@ -7652,10 +7858,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
addr32 = addr & (~0x3);
do {
if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
- bnx2x_warpcore_power_module(params, phy, 0);
+ bnx2x_warpcore_power_module(params, 0);
/* Note that 100us are not enough here */
usleep_range(1000, 2000);
- bnx2x_warpcore_power_module(params, phy, 1);
+ bnx2x_warpcore_power_module(params, 1);
}
rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
data_array);
@@ -7715,7 +7921,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7751,7 +7957,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7786,9 +7992,8 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val[2], check_limiting_mode = 0;
+ u8 gport, val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
-
phy->media_type = ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
@@ -7843,8 +8048,15 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp))
+ gport = BP_PATH(bp) + (params->port << 1);
+ netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
+ " Current SFP module in port %d is not"
+ " compliant with 10G Ethernet\n",
+ gport);
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8241,7 +8453,7 @@ static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- bnx2x_warpcore_power_module(params, phy, 0);
+ bnx2x_warpcore_power_module(params, 0);
/* Put Warpcore in low power mode */
REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
@@ -8264,7 +8476,7 @@ static void bnx2x_power_sfp_module(struct link_params *params,
bnx2x_8727_power_module(params->bp, phy, power);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- bnx2x_warpcore_power_module(params, phy, power);
+ bnx2x_warpcore_power_module(params, power);
break;
default:
break;
@@ -8337,7 +8549,8 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].config));
-
+ /* Enable transmitter by default */
+ bnx2x_sfp_set_transmitter(params, phy, 1);
DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
params->port);
/* Power up module */
@@ -8370,14 +8583,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
*/
bnx2x_set_limiting_mode(params, phy, edc_mode);
- /* Enable transmit for this module if the module is approved, or
- * if unapproved modules should also enable the Tx laser
+ /* Disable transmit for this module if the module is not approved, and
+ * the laser needs to be disabled.
*/
- if (rc == 0 ||
- (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(params, phy, 1);
- else
+ if ((rc) &&
+ ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
bnx2x_sfp_set_transmitter(params, phy, 0);
return rc;
@@ -8389,11 +8600,13 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
struct bnx2x_phy *phy;
u32 gpio_val;
u8 gpio_num, gpio_port;
- if (CHIP_IS_E3(bp))
+ if (CHIP_IS_E3(bp)) {
phy = &params->phy[INT_PHY];
- else
+ /* Always enable TX laser, will be disabled in case of fault */
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ } else {
phy = &params->phy[EXT_PHY1];
-
+ }
if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
params->port, &gpio_num, &gpio_port) ==
-EINVAL) {
@@ -8409,7 +8622,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
- bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_mdio_emac_per_phy(bp, params);
bnx2x_set_aer_mmd(params, phy);
bnx2x_power_sfp_module(params, phy, 1);
@@ -8438,10 +8651,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
}
} else {
- u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
gpio_port);
@@ -8449,10 +8658,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
* Disable transmit for this module
*/
phy->media_type = ETH_PHY_NOT_PRESENT;
- if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
- CHIP_IS_E3(bp))
- bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -9192,6 +9397,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ bnx2x_8727_power_module(params->bp, phy, 0);
return 0;
}
} /* Over current check */
@@ -9296,20 +9502,28 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct bnx2x *bp,
u8 port)
{
- u16 val, fw_ver1, fw_ver2, cnt;
+ u16 val, fw_ver2, cnt, i;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, 0xA819, 0x0014},
+ {MDIO_PMA_DEVAD, 0xA81A, 0xc200},
+ {MDIO_PMA_DEVAD, 0xA81B, 0x0000},
+ {MDIO_PMA_DEVAD, 0xA81C, 0x0300},
+ {MDIO_PMA_DEVAD, 0xA817, 0x0009}
+ };
+ u16 fw_ver1;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
phy->ver_addr);
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+ i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad,
+ reg_set[i].reg, reg_set[i].val);
for (cnt = 0; cnt < 100; cnt++) {
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
@@ -9357,8 +9571,16 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val, offset;
-
+ u16 val, offset, i;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
+ {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
+ {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
+ };
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -9370,49 +9592,20 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x80);
-
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x18);
-
- /* Select activity source by Tx and Rx, as suggested by PHY AE */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0006);
-
- /* Select the closest activity blink rate to that in 10/100/1000 */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK,
- 0);
-
- /* Configure the blink rate to ~15.9 Hz */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
- MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, offset, &val);
- val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, offset, val);
-
- /* 'Interrupt Mask' */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- 0xFFFB, 0xFFFD);
+ /* stretch_en for LED3 */
+ bnx2x_cl45_read_or_write(bp, phy,
+ MDIO_PMA_DEVAD, offset,
+ MDIO_PMA_REG_84823_LED3_STRETCH_EN);
}
static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9422,7 +9615,8 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
- if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9443,7 +9637,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
bnx2x_848xx_specific_func(phy, params, PHY_INIT);
bnx2x_cl45_write(bp, phy,
@@ -9542,11 +9736,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
autoneg_val |= (1<<8);
- /* Always write this if this is not 84833.
- * For 84833, write it only when it's a forced speed.
+ /* Always write this if this is not 84833/4.
+ * For 84833/4, write it only when it's a forced speed.
*/
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- ((autoneg_val & (1<<12)) == 0))
+ if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+ ((autoneg_val & (1<<12)) == 0))
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9558,14 +9753,11 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 10G\n");
/* Restart autoneg for 10G*/
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
- &an_10g_val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
- an_10g_val | 0x1000);
+ bnx2x_cl45_read_or_write(
+ bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 0x1000);
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
0x3200);
@@ -9598,9 +9790,8 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
#define PHY84833_CMDHDLR_WAIT 300
#define PHY84833_CMDHDLR_MAX_ARGS 5
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
- struct link_params *params,
- u16 fw_cmd,
- u16 cmd_args[], int argc)
+ struct link_params *params, u16 fw_cmd,
+ u16 cmd_args[], int argc)
{
int idx;
u16 val;
@@ -9614,7 +9805,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9635,7 +9826,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9654,7 +9845,6 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return 0;
}
-
static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -9802,11 +9992,11 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
u16 val;
- u32 actual_phy_selection, cms_enable;
+ u32 actual_phy_selection;
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
@@ -9828,7 +10018,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to come out of reset */
msleep(50);
- if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
/* BCM84823 requires that XGXS links up first @ 10G for normal
* behavior.
*/
@@ -9884,7 +10075,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
bnx2x_84833_pair_swap_cfg(phy, params, vars);
/* Keep AutogrEEEn disabled. */
@@ -9904,7 +10096,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
/* 84833 PHY has a better feature and doesn't need to support this. */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
- cms_enable = REG_RD(bp, params->shmem_base +
+ u32 cms_enable = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_CMS_MASK;
@@ -9933,7 +10125,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
return rc;
}
- if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ if ((phy->req_duplex == DUPLEX_FULL) &&
(params->eee_mode & EEE_MODE_ADV_LPI) &&
(bnx2x_eee_calc_timer(params) ||
!(params->eee_mode & EEE_MODE_ENABLE_LPI)))
@@ -9948,15 +10140,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
/* Bring PHY out of super isolate mode as the final step. */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val &= ~MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ bnx2x_cl45_read_and_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1,
+ (u16)~MDIO_84833_SUPER_ISOLATE);
}
return rc;
}
@@ -10090,7 +10280,6 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
return link_up;
}
-
static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
int status = 0;
@@ -10962,7 +11151,7 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
/* STATIC PHY DECLARATION */
/******************************************************************/
-static struct bnx2x_phy phy_null = {
+static const struct bnx2x_phy phy_null = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
.addr = 0,
.def_md_devad = 0,
@@ -10988,7 +11177,7 @@ static struct bnx2x_phy phy_null = {
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_serdes = {
+static const struct bnx2x_phy phy_serdes = {
.type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
@@ -11023,7 +11212,7 @@ static struct bnx2x_phy phy_serdes = {
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_xgxs = {
+static const struct bnx2x_phy phy_xgxs = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
@@ -11058,12 +11247,11 @@ static struct bnx2x_phy phy_xgxs = {
.set_link_led = (set_link_led_t)NULL,
.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
};
-static struct bnx2x_phy phy_warpcore = {
+static const struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_TX_ERROR_CHECK,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11097,7 +11285,7 @@ static struct bnx2x_phy phy_warpcore = {
};
-static struct bnx2x_phy phy_7101 = {
+static const struct bnx2x_phy phy_7101 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
.addr = 0xff,
.def_md_devad = 0,
@@ -11126,11 +11314,11 @@ static struct bnx2x_phy phy_7101 = {
.set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_8073 = {
+static const struct bnx2x_phy phy_8073 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_HW_LOCK_REQUIRED,
+ .flags = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11157,7 +11345,7 @@ static struct bnx2x_phy phy_8073 = {
.set_link_led = (set_link_led_t)NULL,
.phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
};
-static struct bnx2x_phy phy_8705 = {
+static const struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
.addr = 0xff,
.def_md_devad = 0,
@@ -11185,7 +11373,7 @@ static struct bnx2x_phy phy_8705 = {
.set_link_led = (set_link_led_t)NULL,
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_8706 = {
+static const struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
@@ -11215,12 +11403,11 @@ static struct bnx2x_phy phy_8706 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_8726 = {
+static const struct bnx2x_phy phy_8726 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST |
+ .flags = (FLAGS_INIT_XGXS_FIRST |
FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
@@ -11248,7 +11435,7 @@ static struct bnx2x_phy phy_8726 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_8727 = {
+static const struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
@@ -11278,7 +11465,7 @@ static struct bnx2x_phy phy_8727 = {
.set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
.phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
};
-static struct bnx2x_phy phy_8481 = {
+static const struct bnx2x_phy phy_8481 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
.addr = 0xff,
.def_md_devad = 0,
@@ -11314,7 +11501,7 @@ static struct bnx2x_phy phy_8481 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
-static struct bnx2x_phy phy_84823 = {
+static const struct bnx2x_phy phy_84823 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
.addr = 0xff,
.def_md_devad = 0,
@@ -11351,7 +11538,7 @@ static struct bnx2x_phy phy_84823 = {
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
-static struct bnx2x_phy phy_84833 = {
+static const struct bnx2x_phy phy_84833 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
.addr = 0xff,
.def_md_devad = 0,
@@ -11386,7 +11573,41 @@ static struct bnx2x_phy phy_84833 = {
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
-static struct bnx2x_phy phy_54618se = {
+static const struct bnx2x_phy phy_84834 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
+static const struct bnx2x_phy phy_54618se = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
.addr = 0xff,
.def_md_devad = 0,
@@ -11564,9 +11785,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
+ phy->flags &= ~FLAGS_TX_ERROR_CHECK;
break;
default:
DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
@@ -11665,6 +11888,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
*phy = phy_84833;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ *phy = phy_84834;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
@@ -11721,9 +11947,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
(phy->ver_addr)) {
- /* Remove 100Mb link supported for BCM84833 when phy fw
+ /* Remove 100Mb link supported for BCM84833/4 when phy fw
* version lower than or equal to 1.39
*/
u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11733,12 +11960,6 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
SUPPORTED_100baseT_Full);
}
- /* In case mdc/mdio_access of the external phy is different than the
- * mdc/mdio access of the XGXS, a HW lock must be taken in each access
- * to prevent one port interfere with another port's CL45 operations.
- */
- if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
- phy->flags |= FLAGS_HW_LOCK_REQUIRED;
DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
phy_type, port, phy_index);
DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
@@ -11863,7 +12084,6 @@ u32 bnx2x_phy_selection(struct link_params *params)
return return_cfg;
}
-
int bnx2x_phy_probe(struct link_params *params)
{
u8 phy_index, actual_phy_idx;
@@ -11907,6 +12127,10 @@ int bnx2x_phy_probe(struct link_params *params)
FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_MT_SUPPORT))
+ phy->flags |= FLAGS_MDC_MDIO_WA_G;
+
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[params->port].media_type);
@@ -11934,8 +12158,8 @@ int bnx2x_phy_probe(struct link_params *params)
return 0;
}
-void bnx2x_init_bmac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11954,8 +12178,8 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_emac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11973,8 +12197,8 @@ void bnx2x_init_emac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_xmac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -11999,8 +12223,8 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_umac_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
vars->link_up = 1;
@@ -12014,17 +12238,21 @@ void bnx2x_init_umac_loopback(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
-void bnx2x_init_xgxs_loopback(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->duplex = DUPLEX_FULL;
+ struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+ vars->link_up = 1;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
if (params->req_line_speed[0] == SPEED_1000)
- vars->line_speed = SPEED_1000;
+ vars->line_speed = SPEED_1000;
+ else if ((params->req_line_speed[0] == SPEED_20000) ||
+ (int_phy->flags & FLAGS_WC_DUAL_MODE))
+ vars->line_speed = SPEED_20000;
else
- vars->line_speed = SPEED_10000;
+ vars->line_speed = SPEED_10000;
if (!USES_WARPCORE(bp))
bnx2x_xgxs_deassert(params);
@@ -12044,34 +12272,30 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
bnx2x_bmac_enable(params, vars, 0, 1);
}
- if (params->loopback_mode == LOOPBACK_XGXS) {
- /* set 10G XGXS loopback */
- params->phy[INT_PHY].config_loopback(
- &params->phy[INT_PHY],
- params);
-
- } else {
- /* set external phy loopback */
- u8 phy_index;
- for (phy_index = EXT_PHY1;
- phy_index < params->num_phys; phy_index++) {
- if (params->phy[phy_index].config_loopback)
- params->phy[phy_index].config_loopback(
- &params->phy[phy_index],
- params);
- }
- }
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ if (params->loopback_mode == LOOPBACK_XGXS) {
+ /* Set 10G XGXS loopback */
+ int_phy->config_loopback(int_phy, params);
+ } else {
+ /* Set external phy loopback */
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++)
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
+ }
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
}
-static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+void bnx2x_set_rx_filter(struct link_params *params, u8 en)
{
struct bnx2x *bp = params->bp;
u8 val = en * 0x1F;
- /* Open the gate between the NIG to the BRB */
+ /* Open / close the gate between the NIG and the BRB */
if (!CHIP_IS_E1x(bp))
val |= en * 0x20;
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
@@ -12345,7 +12569,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
* Hold it as vars low
*/
/* Clear link led */
- bnx2x_set_mdio_clk(bp, params->chip_id, port);
+ bnx2x_set_mdio_emac_per_phy(bp, params);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
if (reset_ext_phy) {
@@ -12696,7 +12920,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
@@ -12784,7 +13008,8 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
}
static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
- struct bnx2x_phy *phy)
+ struct bnx2x_phy *phy,
+ u8 port)
{
u16 val, cnt;
/* Wait for FW completing its initialization. */
@@ -12794,7 +13019,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12811,26 +13036,28 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
/* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
+ bnx2x_save_848xx_spirom_version(phy, bp, port);
return 0;
}
int bnx2x_pre_init_phy(struct bnx2x *bp,
u32 shmem_base,
u32 shmem2_base,
- u32 chip_id)
+ u32 chip_id,
+ u8 port)
{
int rc = 0;
struct bnx2x_phy phy;
- bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
- PORT_0, &phy)) {
+ port, &phy) != 0) {
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
+ bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
switch (phy.type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
- rc = bnx2x_84833_pre_init_phy(bp, &phy);
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
break;
default:
break;
@@ -12867,6 +13094,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
/* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
@@ -12898,8 +13126,9 @@ int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 phy_ver, val;
u8 phy_index = 0;
u32 ext_phy_type, ext_phy_config;
- bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
- bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+
+ bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
+ bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
DP(NETIF_MSG_LINK, "Begin common phy init\n");
if (CHIP_IS_E3(bp)) {
/* Enable EPIO */
@@ -12960,6 +13189,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
" error.\n",
params->port);
vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ bnx2x_warpcore_power_module(params, 0);
}
} else
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
@@ -13139,6 +13369,108 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
}
}
}
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_kr2_recovery(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "KR2 recovery\n");
+ bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ bnx2x_warpcore_restart_AN_KR(phy, params);
+}
+
+static void bnx2x_check_kr2_wa(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ u16 base_page, next_page, not_kr2_device, lane;
+ int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ bnx2x_kr2_recovery(params, vars, phy);
+ return;
+ }
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, lane);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &base_page);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
+ bnx2x_set_aer_mmd(params, phy);
+
+ /* CL73 has not begun yet */
+ if (base_page == 0) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ bnx2x_kr2_recovery(params, vars, phy);
+ return;
+ }
+
+ /* In case NP bit is not set in the BasePage, or it is set,
+ * but only KX is advertised, declare this link partner as non-KR2
+ * device.
+ */
+ not_kr2_device = (((base_page & 0x8000) == 0) ||
+ (((base_page & 0x8000) &&
+ ((next_page & 0xe0) == 0x2))));
+
+ /* In case KR2 is already disabled, check if we need to re-enable it */
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!not_kr2_device) {
+ DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+ next_page);
+ bnx2x_kr2_recovery(params, vars, phy);
+ }
+ return;
+ }
+ /* KR2 is enabled, but not KR2 device */
+ if (not_kr2_device) {
+ /* Disable KR2 on both lanes */
+ DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+ bnx2x_disable_kr2(params, vars, phy);
+ return;
+ }
+}
+
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -13156,6 +13488,9 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
if (CHIP_IS_E3(bp)) {
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
+ if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
+ (phy->speed_cap_mask & SPEED_20000))
+ bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
bnx2x_warpcore_config_runtime(phy, params, vars);
@@ -13176,27 +13511,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
bnx2x_update_mng(params, vars->link_status);
}
}
-
- }
-
-}
-
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
-{
- u8 phy_index;
- struct bnx2x_phy phy;
- for (phy_index = INT_PHY; phy_index < MAX_PHYS;
- phy_index++) {
- if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
- 0, &phy) != 0) {
- DP(NETIF_MSG_LINK, "populate phy failed\n");
- return 0;
- }
-
- if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
- return 1;
}
- return 0;
}
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89a4b19..ee6e7ec85457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -139,8 +139,6 @@ struct bnx2x_phy {
u8 addr;
u8 def_md_devad;
u16 flags;
- /* Require HW lock */
-#define FLAGS_HW_LOCK_REQUIRED (1<<0)
/* No Over-Current detection */
#define FLAGS_NOC (1<<1)
/* Fan failure detection required */
@@ -156,6 +154,7 @@ struct bnx2x_phy {
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
#define FLAGS_EEE (1<<13)
+#define FLAGS_MDC_MDIO_WA_G (1<<15)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -267,6 +266,9 @@ struct link_params {
#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
+#define FEATURE_CONFIG_MT_SUPPORT (1<<13)
+#define FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
+
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -347,6 +349,8 @@ struct link_vars {
u8 rx_tx_asic_rst;
u8 turn_to_run_wc_rt;
u16 rsrv2;
+ /* The same definitions as the shmem2 parameter */
+ u32 link_attr_sync;
};
/***********************************************************/
@@ -418,10 +422,6 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
void bnx2x_hw_reset_phy(struct link_params *params);
-/* Checks if HW lock is required for this phy/board type */
-u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
- u32 shmem2_base);
-
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
@@ -432,7 +432,8 @@ int bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
-
+/* Open / close the gate between the NIG and the BRB */
+void bnx2x_set_rx_filter(struct link_params *params, u8 en);
/* DCBX structs */
@@ -459,9 +460,6 @@ struct bnx2x_nig_brb_pfc_port_params {
u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
u32 llfc_high_priority_classes;
u32 llfc_low_priority_classes;
- /* BRB */
- u32 cos0_pauseable;
- u32 cos1_pauseable;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 01611b33a93d..5523da3afcdc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -79,7 +79,7 @@
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -127,6 +127,17 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr;
+ u32 umac_val;
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711,
@@ -149,7 +160,7 @@ enum bnx2x_board_type {
/* indexed by board_type, above */
static struct {
char *name;
-} board_info[] __devinitdata = {
+} board_info[] = {
{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
@@ -791,10 +802,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* host sb data */
-#ifdef BCM_CNIC
if (IS_FCOE_FP(fp))
continue;
-#endif
+
BNX2X_ERR(" run indexes (");
for (j = 0; j < HC_SB_MAX_SM; j++)
pr_cont("0x%x%s",
@@ -859,7 +869,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
- for_each_rx_queue(bp, i) {
+ for_each_valid_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +903,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
/* Tx */
- for_each_tx_queue(bp, i) {
+ for_each_valid_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1483,7 +1493,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -1504,9 +1514,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
if (msix) {
synchronize_irq(bp->msix_table[0].vector);
offset = 1;
-#ifdef BCM_CNIC
- offset++;
-#endif
+ if (CNIC_SUPPORT(bp))
+ offset++;
for_each_eth_queue(bp, i)
synchronize_irq(bp->msix_table[offset++].vector);
} else
@@ -1588,9 +1597,8 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
-#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
-#endif
+
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1720,7 +1728,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- mask = 0x2 << (fp->index + CNIC_PRESENT);
+ mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
if (status & mask) {
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
@@ -1732,22 +1740,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
}
}
-#ifdef BCM_CNIC
- mask = 0x2;
- if (status & (mask | 0x1)) {
- struct cnic_ops *c_ops = NULL;
+ if (CNIC_SUPPORT(bp)) {
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data, NULL);
- rcu_read_unlock();
- }
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data,
+ NULL);
+ rcu_read_unlock();
+ }
- status &= ~mask;
+ status &= ~mask;
+ }
}
-#endif
if (unlikely(status & 0x1)) {
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -2034,40 +2043,39 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
return 0;
}
-static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
- u32 spio_mask = (1 << spio_num);
u32 spio_reg;
- if ((spio_num < MISC_REGISTERS_SPIO_4) ||
- (spio_num > MISC_REGISTERS_SPIO_7)) {
- BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+ /* Only 2 SPIOs are configurable */
+ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
+ BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
return -EINVAL;
}
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* read SPIO and mask except the float bits */
- spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
switch (mode) {
- case MISC_REGISTERS_SPIO_OUTPUT_LOW:
- DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
+ case MISC_SPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
/* clear FLOAT and set CLR */
- spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_CLR_POS);
break;
- case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
- DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
+ case MISC_SPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
/* clear FLOAT and set SET */
- spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_SET_POS);
break;
- case MISC_REGISTERS_SPIO_INPUT_HI_Z:
- DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num);
+ case MISC_SPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
/* set FLOAT */
- spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
break;
default:
@@ -2106,22 +2114,25 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
}
}
-u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+static void bnx2x_set_requested_fc(struct bnx2x *bp)
{
- if (!BP_NOMCP(bp)) {
- u8 rc;
- int cfx_idx = bnx2x_get_link_cfg_idx(bp);
- u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
- /*
- * Initialize link parameters structure variables
- * It is recommended to turn off RX FC for jumbo frames
- * for better performance
- */
- if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
- bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
- else
- bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+ /* Initialize link parameters structure variables
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+}
+
+int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+ int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+ if (!BP_NOMCP(bp)) {
+ bnx2x_set_requested_fc(bp);
bnx2x_acquire_phy_lock(bp);
if (load_mode == LOAD_DIAG) {
@@ -2150,11 +2161,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_calc_fc_adv(bp);
- if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+ if (bp->link_vars.link_up) {
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
bnx2x_link_report(bp);
- } else
- queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ }
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
return rc;
}
@@ -3075,11 +3086,13 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
+ if (!CNIC_LOADED(bp))
+ return;
+
memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
bp->fip_mac, ETH_ALEN);
@@ -3162,16 +3175,17 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* ask L5 driver to add data to the struct */
bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
-#endif
}
static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
+ if (!CNIC_LOADED(bp))
+ return;
+
memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
@@ -3180,7 +3194,6 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
/* ask L5 driver to add data to the struct */
bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
-#endif
}
/* called due to MCP event (on pmf):
@@ -3589,6 +3602,21 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
/* now set back the mask */
if (asserted & ATTN_NIG_FOR_FUNC) {
+ /* Verify that IGU ack through BAR was written before restoring
+ * NIG mask. This loop should exit after 2-3 iterations max.
+ */
+ if (bp->common.int_block != INT_BLOCK_HC) {
+ u32 cnt = 0, igu_acked;
+ do {
+ igu_acked = REG_RD(bp,
+ IGU_REG_ATTENTION_ACK_BITS);
+ } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
+ (++cnt < MAX_IGU_ATTN_ACK_TO));
+ if (!igu_acked)
+ DP(NETIF_MSG_HW,
+ "Failed to verify IGU ack on time\n");
+ barrier();
+ }
REG_WR(bp, nig_int_mask_addr, nig_mask);
bnx2x_release_phy_lock(bp);
}
@@ -4572,7 +4600,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
mmiowb(); /* keep prod updates ordered */
}
-#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
union event_ring_elem *elem)
{
@@ -4594,7 +4621,6 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
}
-#endif
static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
@@ -4635,11 +4661,9 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
-#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID(bp))
+ if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
-#endif
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
@@ -4665,9 +4689,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
}
-#ifdef BCM_CNIC
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
-#endif
static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
@@ -4678,14 +4700,12 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
/* Send rx_mode command again if was requested */
if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
bnx2x_set_storm_rx_mode(bp);
-#ifdef BCM_CNIC
else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
&bp->sp_state))
bnx2x_set_iscsi_eth_rx_mode(bp, true);
else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
&bp->sp_state))
bnx2x_set_iscsi_eth_rx_mode(bp, false);
-#endif
netif_addr_unlock_bh(bp->dev);
}
@@ -4747,7 +4767,6 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
-#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4789,16 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
bnx2x_link_report(bp);
bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
}
-#else
- /* If no FCoE ring - ACK MCP now */
- bnx2x_link_report(bp);
- bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
-#endif /* BCM_CNIC */
}
static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
struct bnx2x *bp, u32 cid)
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
-#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID(bp))
+
+ if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
-#endif
return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
@@ -4793,6 +4806,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
{
u16 hw_cons, sw_cons, sw_prod;
union event_ring_elem *elem;
+ u8 echo;
u32 cid;
u8 opcode;
int spqe_cnt = 0;
@@ -4847,10 +4861,11 @@ static void bnx2x_eq_int(struct bnx2x *bp)
*/
DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);
-#ifdef BCM_CNIC
- if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+
+ if (CNIC_LOADED(bp) &&
+ !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;
-#endif
+
q_obj = bnx2x_cid_to_q_obj(bp, cid);
if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4890,34 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
+
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
- DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
- "AFEX: ramrod completed FUNCTION_UPDATE\n");
- f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+ echo = elem->message.data.function_update_event.echo;
+ if (echo == SWITCH_UPDATE) {
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_SWITCH_UPDATE ramrod\n");
+ if (f_obj->complete_cmd(
+ bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
+ break;
- /* We will perform the Queues update from sp_rtnl task
- * as all Queue SP operations should run under
- * rtnl_lock.
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
+ } else {
+ DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+ "AFEX: ramrod completed FUNCTION_UPDATE\n");
+ f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_AFEX_UPDATE);
+
+ /* We will perform the Queues update from
+ * sp_rtnl task as all Queue SP operations
+ * should run under rtnl_lock.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
goto next_spqe;
case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
@@ -4999,11 +5027,10 @@ static void bnx2x_sp_task(struct work_struct *work)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
-#ifdef BCM_CNIC
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
- if ((!NO_FCOE(bp)) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ if (FCOE_INIT(bp) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
/*
* Prevent local bottom-halves from running as
* we are going to change the local NAPI list.
@@ -5012,7 +5039,7 @@ static void bnx2x_sp_task(struct work_struct *work)
napi_schedule(&bnx2x_fcoe(bp, napi));
local_bh_enable();
}
-#endif
+
/* Handle EQ completions */
bnx2x_eq_int(bp);
@@ -5050,8 +5077,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
-#ifdef BCM_CNIC
- {
+ if (CNIC_LOADED(bp)) {
struct cnic_ops *c_ops;
rcu_read_lock();
@@ -5060,7 +5086,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
c_ops->cnic_handler(bp->cnic_data, NULL);
rcu_read_unlock();
}
-#endif
+
queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
return IRQ_HANDLED;
@@ -5498,12 +5524,10 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
unsigned long rx_mode_flags = 0, ramrod_flags = 0;
unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* Configure rx_mode of FCoE Queue */
__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-#endif
switch (bp->rx_mode) {
case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5648,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
- return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+ return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}
static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
- return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}
static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,23 +5744,25 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
txdata->tx_pkt = 0;
}
+static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue_cnic(bp, i)
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
+}
static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i;
u8 cos;
- for_each_tx_queue(bp, i)
+ for_each_eth_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
- int i;
-
- for_each_eth_queue(bp, i)
- bnx2x_init_eth_fp(bp, i);
-#ifdef BCM_CNIC
if (!NO_FCOE(bp))
bnx2x_init_fcoe_fp(bp);
@@ -5744,8 +5770,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
BNX2X_VF_ID_INVALID, false,
bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
-#endif
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings_cnic(bp);
+ bnx2x_init_tx_rings_cnic(bp);
+
+ /* flush all */
+ mb();
+ mmiowb();
+}
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6071,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
-#ifndef BCM_CNIC
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
+ if (!CNIC_SUPPORT(bp))
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
/* Enable inputs of parser neighbor blocks */
REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6049,6 +6088,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
+ u32 val;
+
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
if (!CHIP_IS_E1x(bp))
REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
@@ -6082,17 +6123,14 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
- if (CHIP_REV_IS_FPGA(bp))
- REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
- else if (!CHIP_IS_E1x(bp))
- REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
- (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
- | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
- | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
- | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
- | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
- else
- REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+ val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
+ if (!CHIP_IS_E1x(bp))
+ val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
+ PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
+
REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
@@ -6185,18 +6223,16 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
return;
/* Fan failure is indicated by SPIO 5 */
- bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
- MISC_REGISTERS_SPIO_INPUT_HI_Z);
+ bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
/* set to active low mode */
val = REG_RD(bp, MISC_REG_SPIO_INT);
- val |= ((1 << MISC_REGISTERS_SPIO_5) <<
- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
REG_WR(bp, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
- val |= (1 << MISC_REGISTERS_SPIO_5);
+ val |= MISC_SPIO_SPIO5;
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
@@ -6256,6 +6292,10 @@ void bnx2x_pf_disable(struct bnx2x *bp)
static void bnx2x__common_init_phy(struct bnx2x *bp)
{
u32 shmem_base[2], shmem2_base[2];
+ /* Avoid common init in case MFW supports LFA */
+ if (SHMEM2_RD(bp, size) >
+ (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+ return;
shmem_base[0] = bp->common.shmem_base;
shmem2_base[0] = bp->common.shmem2_base;
if (!CHIP_IS_E1x(bp)) {
@@ -6522,9 +6562,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, QM_REG_SOFT_RESET, 1);
REG_WR(bp, QM_REG_SOFT_RESET, 0);
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
-#endif
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6650,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
-#ifdef BCM_CNIC
- REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
- REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
- REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
- REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
- REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
- REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
- REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
- REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
- REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
- REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
+ if (CNIC_SUPPORT(bp)) {
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+ }
REG_WR(bp, SRC_REG_SOFT_RST, 0);
if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6825,11 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
/* QM cid (connection) count */
bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_TM, init_phase);
- REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
- REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
+ if (CNIC_SUPPORT(bp)) {
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+ }
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
@@ -6877,9 +6916,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
}
-#ifdef BCM_CNIC
- bnx2x_init_block(bp, BLOCK_SRC, init_phase);
-#endif
+ if (CNIC_SUPPORT(bp))
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
bnx2x_init_block(bp, BLOCK_CDU, init_phase);
bnx2x_init_block(bp, BLOCK_CFC, init_phase);
@@ -6955,7 +6994,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
- if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+ if (val & MISC_SPIO_SPIO5) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
val = REG_RD(bp, reg_addr);
@@ -7040,6 +7079,130 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
bnx2x_ilt_wr(bp, i, 0);
}
+
+static void bnx2x_init_searcher(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+}
+
+static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
+{
+ int rc;
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ switch_update_params->suspend = suspend;
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+
+ return rc;
+}
+
+static int bnx2x_reset_nic_mode(struct bnx2x *bp)
+{
+ int rc, i, port = BP_PORT(bp);
+ int vlan_en = 0, mac_en[NUM_MACS];
+
+
+ /* Close input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ } else {
+ vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN);
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, 0);
+ for (i = 0; i < NUM_MACS; i++) {
+ mac_en[i] = REG_RD(bp, port ?
+ (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE +
+ 4 * i));
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
+ }
+ }
+
+ /* Close BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
+
+ /* Suspend Tx switching to the PF. Completion of this ramrod
+ * further guarantees that all the packets of that PF / child
+ * VFs in BRB were processed by the Parser, so it is safe to
+ * change the NIC_MODE register.
+ */
+ rc = bnx2x_func_switch_update(bp, 1);
+ if (rc) {
+ BNX2X_ERR("Can't suspend tx-switching!\n");
+ return rc;
+ }
+
+ /* Change NIC_MODE register */
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+
+ /* Open input from network */
+ if (bp->mf_mode == SINGLE_FUNCTION) {
+ bnx2x_set_rx_filter(&bp->link_params, 1);
+ } else {
+ REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
+ NIG_REG_LLH0_FUNC_EN, vlan_en);
+ for (i = 0; i < NUM_MACS; i++) {
+ REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
+ 4 * i) :
+ (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
+ mac_en[i]);
+ }
+ }
+
+ /* Enable BMC to host */
+ REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
+ NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
+
+ /* Resume Tx switching to the PF */
+ rc = bnx2x_func_switch_update(bp, 0);
+ if (rc) {
+ BNX2X_ERR("Can't resume tx-switching!\n");
+ return rc;
+ }
+
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ return 0;
+}
+
+int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
+{
+ int rc;
+
+ bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
+
+ if (CONFIGURE_NIC_MODE(bp)) {
+ /* Configure searcher as part of function hw init */
+ bnx2x_init_searcher(bp);
+
+ /* Reset NIC mode */
+ rc = bnx2x_reset_nic_mode(bp);
+ if (rc)
+ BNX2X_ERR("Can't change NIC mode!\n");
+ return rc;
+ }
+
+ return 0;
+}
+
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -7082,17 +7245,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
}
bnx2x_ilt_init_op(bp, INITOP_SET);
-#ifdef BCM_CNIC
- bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
-
- /* T1 hash bits value determines the T1 number of entries */
- REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
-#endif
+ if (!CONFIGURE_NIC_MODE(bp)) {
+ bnx2x_init_searcher(bp);
+ REG_WR(bp, PRS_REG_NIC_MODE, 0);
+ DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+ } else {
+ /* Set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+ DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
-#ifndef BCM_CNIC
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif /* BCM_CNIC */
+ }
if (!CHIP_IS_E1x(bp)) {
u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7343,6 +7505,20 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
}
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+ bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
@@ -7367,17 +7543,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_FREE(bp->ilt->lines);
-#ifdef BCM_CNIC
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e1x));
-
- BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
-#endif
-
BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7445,24 +7610,44 @@ alloc_mem_err:
return -ENOMEM;
}
-
-int bnx2x_alloc_mem(struct bnx2x *bp)
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- int i, allocated, context_size;
-
-#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
sizeof(struct host_hc_status_block_e2));
else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e1x));
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
+ &bp->cnic_sb_mapping,
+ sizeof(struct
+ host_hc_status_block_e1x));
- /* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
-#endif
+ if (CONFIGURE_NIC_MODE(bp))
+ /* allocate searcher T2 table, as it wasn't allocated before */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+
+ /* write address to which L5 should insert its values */
+ bp->cnic_eth_dev.addr_drv_info_to_mcp =
+ &bp->slowpath->drv_info_to_mcp;
+
+ if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ return 0;
+alloc_mem_err:
+ bnx2x_free_mem_cnic(bp);
+ BNX2X_ERR("Can't allocate memory\n");
+ return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+ int i, allocated, context_size;
+
+ if (!CONFIGURE_NIC_MODE(bp))
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7470,11 +7655,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
-#ifdef BCM_CNIC
- /* write address to which L5 should insert its values */
- bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
-#endif
-
/* Allocated memory for FW statistics */
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
@@ -7596,14 +7776,12 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
-#ifdef BCM_CNIC
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
"Ignoring Zero MAC for STORAGE SD mode\n");
return 0;
}
-#endif
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
@@ -7632,7 +7810,8 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
bnx2x_enable_msi(bp);
/* falling through... */
case INT_MODE_INTx:
- bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ bp->num_ethernet_queues = 1;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
@@ -7644,9 +7823,10 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
bp->flags & USING_SINGLE_MSIX_FLAG) {
/* failed to enable multiple MSI-X */
BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
- bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ bp->num_queues = 1 + bp->num_cnic_queues;
/* Try to enable MSI */
if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@ -7679,9 +7859,9 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->flags = ILT_CLIENT_SKIP_MEM;
ilt_client->start = line;
line += bnx2x_cid_ilt_lines(bp);
-#ifdef BCM_CNIC
- line += CNIC_ILT_LINES;
-#endif
+
+ if (CNIC_SUPPORT(bp))
+ line += CNIC_ILT_LINES;
ilt_client->end = line - 1;
DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7714,49 +7894,43 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilog2(ilt_client->page_size >> 12));
}
- /* SRC */
- ilt_client = &ilt->clients[ILT_CLIENT_SRC];
-#ifdef BCM_CNIC
- ilt_client->client_num = ILT_CLIENT_SRC;
- ilt_client->page_size = SRC_ILT_PAGE_SZ;
- ilt_client->flags = 0;
- ilt_client->start = line;
- line += SRC_ILT_LINES;
- ilt_client->end = line - 1;
- DP(NETIF_MSG_IFUP,
- "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
- ilt_client->start,
- ilt_client->end,
- ilt_client->page_size,
- ilt_client->flags,
- ilog2(ilt_client->page_size >> 12));
+ if (CNIC_SUPPORT(bp)) {
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
-#else
- ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
+ DP(NETIF_MSG_IFUP,
+ "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
- /* TM */
- ilt_client = &ilt->clients[ILT_CLIENT_TM];
-#ifdef BCM_CNIC
- ilt_client->client_num = ILT_CLIENT_TM;
- ilt_client->page_size = TM_ILT_PAGE_SZ;
- ilt_client->flags = 0;
- ilt_client->start = line;
- line += TM_ILT_LINES;
- ilt_client->end = line - 1;
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
- DP(NETIF_MSG_IFUP,
- "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
- ilt_client->start,
- ilt_client->end,
- ilt_client->page_size,
- ilt_client->flags,
- ilog2(ilt_client->page_size >> 12));
+ DP(NETIF_MSG_IFUP,
+ "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+ }
-#else
- ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
BUG_ON(line > ILT_MAX_LINES);
}
@@ -7823,7 +7997,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
}
}
-int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_queue_state_params *q_params,
struct bnx2x_queue_setup_tx_only_params *tx_only_params,
int tx_index, bool leading)
@@ -7924,6 +8098,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Set the command */
q_params.cmd = BNX2X_Q_CMD_SETUP;
+ if (IS_FCOE_FP(fp))
+ bp->fcoe_init = true;
+
/* Change the state to SETUP */
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
@@ -8037,12 +8214,12 @@ static void bnx2x_reset_func(struct bnx2x *bp)
SB_DISABLED);
}
-#ifdef BCM_CNIC
- /* CNIC SB */
- REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
- SB_DISABLED);
-#endif
+ if (CNIC_LOADED(bp))
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+ (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8061,19 +8238,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
}
-#ifdef BCM_CNIC
- /* Disable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
- /*
- * Wait for at least 10ms and up to 2 second for the timers scan to
- * complete
- */
- for (i = 0; i < 200; i++) {
- msleep(10);
- if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
- break;
+ if (CNIC_LOADED(bp)) {
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+ * Wait for at least 10ms and up to 2 seconds for the timers
+ * scan to complete
+ */
+ for (i = 0; i < 200; i++) {
+ msleep(10);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
}
-#endif
/* Clear ILT */
bnx2x_clear_func_ilt(bp, func);
@@ -8409,13 +8586,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Close multi and leading connections
* Completions for ramrods are collected in a synchronous way
*/
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
if (bnx2x_stop_queue(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
return;
#else
goto unload_error;
#endif
+
+ if (CNIC_LOADED(bp)) {
+ for_each_cnic_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+ }
+
/* If SP settings didn't get completed so far - something
* very wrong has happen.
*/
@@ -8437,6 +8625,8 @@ unload_error:
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -8558,7 +8748,8 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
/* Get shmem offset */
shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
- validity_offset = offsetof(struct shmem_region, validity_map[0]);
+ validity_offset =
+ offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
/* Clear validity map flags */
if (shmem > 0)
@@ -8651,7 +8842,11 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
- /* Don't reset the following blocks */
+ /* Don't reset the following blocks.
+ * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
+ * reset, as in 4 port device they might still be owned
+ * by the MCP (there is only one leader per path).
+ */
not_reset_mask1 =
MISC_REGISTERS_RESET_REG_1_RST_HC |
MISC_REGISTERS_RESET_REG_1_RST_PXPV |
@@ -8667,19 +8862,19 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
MISC_REGISTERS_RESET_REG_2_RST_ATC |
- MISC_REGISTERS_RESET_REG_2_PGLC;
+ MISC_REGISTERS_RESET_REG_2_PGLC |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1;
/*
* Keep the following blocks in reset:
* - all xxMACs are handled by the bnx2x_link code.
*/
stay_reset2 =
- MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
- MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
- MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
- MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
- MISC_REGISTERS_RESET_REG_2_UMAC0 |
- MISC_REGISTERS_RESET_REG_2_UMAC1 |
MISC_REGISTERS_RESET_REG_2_XMAC |
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
@@ -8769,6 +8964,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
@@ -8778,10 +8974,14 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if (CHIP_IS_E3(bp))
+ tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
+
if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
((port_is_idle_0 & 0x1) == 0x1) &&
((port_is_idle_1 & 0x1) == 0x1) &&
- (pgl_exp_rom2 == 0xffffffff))
+ (pgl_exp_rom2 == 0xffffffff) &&
+ (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
usleep_range(1000, 1000);
} while (cnt-- > 0);
@@ -8838,9 +9038,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* TBD: Add resetting the NO_MCP mode DB here */
- /* PXP */
- bnx2x_pxp_prep(bp);
-
/* Open the gates #2, #3 and #4 */
bnx2x_set_234_gates(bp, false);
@@ -8850,7 +9047,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
return 0;
}
-int bnx2x_leader_reset(struct bnx2x *bp)
+static int bnx2x_leader_reset(struct bnx2x *bp)
{
int rc = 0;
bool global = bnx2x_reset_is_global(bp);
@@ -9234,12 +9431,19 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
bnx2x_undi_int_disable_e1h(bp);
}
-static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
{
u32 val, base_addr, offset, mask, reset_reg;
bool mac_stopped = false;
u8 port = BP_PORT(bp);
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
if (!CHIP_IS_E3(bp)) {
@@ -9261,14 +9465,18 @@ static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
*/
wb_data[0] = REG_RD(bp, base_addr + offset);
wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR(bp, base_addr + offset, wb_data[0]);
- REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
}
BNX2X_DEV_INFO("Disable emac Rx\n");
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
mac_stopped = true;
} else {
if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9279,14 +9487,18 @@ static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
val & ~(1 << 1));
REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
val | (1 << 1));
- REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(bp, vals->umac_addr);
+ REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
}
@@ -9301,8 +9513,7 @@ static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
-static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
- u8 inc)
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
{
u16 rcq, bd;
u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
@@ -9317,7 +9528,7 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
port, bd, rcq);
}
-static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
+static int bnx2x_prev_mcp_done(struct bnx2x *bp)
{
u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
@@ -9329,7 +9540,21 @@ static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
return 0;
}
-static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
+static struct bnx2x_prev_path_list *
+ bnx2x_prev_path_get_entry(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+
+ list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
+ if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
+ bp->pdev->bus->number == tmp_list->bus &&
+ BP_PATH(bp) == tmp_list->path)
+ return tmp_list;
+
+ return NULL;
+}
+
+static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
int rc = false;
@@ -9353,7 +9578,7 @@ static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
return rc;
}
-static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
+static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
{
struct bnx2x_prev_path_list *tmp_list;
int rc;
@@ -9367,6 +9592,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
if (rc) {
@@ -9382,7 +9608,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
return rc;
}
-static int __devinit bnx2x_do_flr(struct bnx2x *bp)
+static int bnx2x_do_flr(struct bnx2x *bp)
{
int i;
u16 status;
@@ -9422,7 +9648,7 @@ clear:
return 0;
}
-static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
+static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
{
int rc;
@@ -9460,15 +9686,20 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
return rc;
}
-static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
+static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
u32 reset_reg, tmp_reg = 0, rc;
+ bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
/* It is possible a previous function received 'common' answer,
* but hasn't loaded yet, therefore creating a scenario of
* multiple functions receiving 'common' on the same path.
*/
BNX2X_DEV_INFO("Common unload Flow\n");
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
@@ -9477,10 +9708,12 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
- bool prev_undi = false;
/* Close the MAC Rx to prevent BRB from filling up */
- bnx2x_prev_unload_close_mac(bp);
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+ /* close LLH filters towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
@@ -9527,7 +9760,18 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
/* No packets are in the pipeline, path is ready for reset */
bnx2x_reset_common(bp);
- rc = bnx2x_prev_mark_path(bp);
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr)
+ REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
+ rc = bnx2x_prev_mark_path(bp, prev_undi);
if (rc) {
bnx2x_prev_mcp_done(bp);
return rc;
@@ -9543,7 +9787,7 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
* to clear the interrupt which detected this from the pglueb and the was done
* bit
*/
-static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
@@ -9555,10 +9799,11 @@ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
}
}
-static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
+static int bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
u32 rc, fw, hw_lock_reg, hw_lock_val;
+ struct bnx2x_prev_path_list *prev_list;
BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
/* clear hw from errors which may have resulted from an interrupted
@@ -9617,12 +9862,18 @@ static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
rc = -EBUSY;
}
+ /* Mark function if its port was used to boot from SAN */
+ prev_list = bnx2x_prev_path_get_entry(bp);
+ if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_BOOT_FROM_SAN;
+
BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
return rc;
}
-static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
u32 val, val2, val3, val4, id, boot_mode;
u16 pmc;
@@ -9701,6 +9952,14 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.shmem_base = bp->common.shmem_base;
bp->link_params.shmem2_base = bp->common.shmem2_base;
+ if (SHMEM2_RD(bp, size) >
+ (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
+ bp->link_params.lfa_base =
+ REG_RD(bp, bp->common.shmem2_base +
+ (u32)offsetof(struct shmem2_region,
+ lfa_host_addr[BP_PORT(bp)]));
+ else
+ bp->link_params.lfa_base = 0;
BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
bp->common.shmem_base, bp->common.shmem2_base);
@@ -9748,6 +10007,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
+ FEATURE_CONFIG_MT_SUPPORT : 0;
+
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
@@ -9792,7 +10056,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
-static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
int pfid = BP_FUNC(bp);
int igu_sb_id;
@@ -9809,7 +10073,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
- return;
+ return 0;
}
/* IGU in normal mode - read CAM */
@@ -9843,12 +10107,15 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
#endif
- if (igu_sb_cnt == 0)
+ if (igu_sb_cnt == 0) {
BNX2X_ERR("CAM configuration error\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
-static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
- u32 switch_cfg)
+static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
{
int cfg_size = 0, idx, port = BP_PORT(bp);
@@ -9946,7 +10213,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
bp->port.supported[1]);
}
-static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+static void bnx2x_link_settings_requested(struct bnx2x *bp)
{
u32 link_config, idx, cfg_size = 0;
bp->port.advertising[0] = 0;
@@ -10115,11 +10382,13 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->link_params.req_flow_ctrl[idx] = (link_config &
PORT_FEATURE_FLOW_CONTROL_MASK);
- if ((bp->link_params.req_flow_ctrl[idx] ==
- BNX2X_FLOW_CTRL_AUTO) &&
- !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
- bp->link_params.req_flow_ctrl[idx] =
- BNX2X_FLOW_CTRL_NONE;
+ if (bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) {
+ if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ else
+ bnx2x_set_requested_fc(bp);
}
BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
@@ -10130,7 +10399,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
}
}
-static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
mac_hi = cpu_to_be16(mac_hi);
mac_lo = cpu_to_be32(mac_lo);
@@ -10138,7 +10407,7 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
-static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
@@ -10199,17 +10468,6 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
- /*
- * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
- * In MF mode, it is set to cover self test cases
- */
- if (IS_MF(bp))
- bp->port.need_hw_lock = 1;
- else
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
-
/* Configure link feature according to nvram value */
eee_mode = (((SHMEM_RD(bp, dev_info.
port_feature_config[port].eee_power_mode)) &
@@ -10227,12 +10485,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
u32 no_flags = NO_ISCSI_FLAG;
-#ifdef BCM_CNIC
int port = BP_PORT(bp);
-
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= no_flags;
+ return;
+ }
+
/* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10247,13 +10508,10 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
bp->flags |= no_flags;
-#else
- bp->flags |= no_flags;
-#endif
+
}
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
{
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
@@ -10267,16 +10525,18 @@ static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
}
-#endif
-static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+static void bnx2x_get_fcoe_info(struct bnx2x *bp)
{
-#ifdef BCM_CNIC
int port = BP_PORT(bp);
int func = BP_ABS_FUNC(bp);
-
u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_fcoe_conn);
+ if (!CNIC_SUPPORT(bp)) {
+ bp->flags |= NO_FCOE_FLAG;
+ return;
+ }
+
/* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10311,8 +10571,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
bnx2x_get_ext_wwn_info(bp, func);
- } else if (IS_MF_FCOE_SD(bp))
+ } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
bnx2x_get_ext_wwn_info(bp, func);
+ }
BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
@@ -10322,12 +10583,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
*/
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-#else
- bp->flags |= NO_FCOE_FLAG;
-#endif
}
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+static void bnx2x_get_cnic_info(struct bnx2x *bp)
{
/*
* iSCSI may be dynamically disabled but reading
@@ -10338,143 +10596,162 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
bnx2x_get_fcoe_info(bp);
}
-static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
-#ifdef BCM_CNIC
u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
u8 *fip_mac = bp->fip_mac;
-#endif
-
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
- if (BP_NOMCP(bp)) {
- BNX2X_ERROR("warning: random MAC workaround active\n");
- eth_hw_addr_random(bp->dev);
- } else if (IS_MF(bp)) {
- val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
- val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
- if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
- /*
- * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ if (IS_MF(bp)) {
+ /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
* FCoE MAC then the appropriate feature should be disabled.
- *
- * In non SD mode features configuration comes from
- * struct func_ext_config.
+ * In non SD mode features configuration comes from struct
+ * func_ext_config.
*/
- if (!IS_MF_SD(bp)) {
+ if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
- iscsi_mac_addr_upper);
+ iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
- iscsi_mac_addr_lower);
+ iscsi_mac_addr_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
- } else
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else {
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+ }
if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
- fcoe_mac_addr_upper);
+ fcoe_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
- fcoe_mac_addr_lower);
+ fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
- fip_mac);
-
- } else
+ BNX2X_DEV_INFO
+ ("Read FCoE L2 MAC: %pM\n", fip_mac);
+ } else {
bp->flags |= NO_FCOE_FLAG;
+ }
bp->mf_ext_config = cfg;
} else { /* SD MODE */
- if (IS_MF_STORAGE_SD(bp)) {
- if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
- /* use primary mac as iscsi mac */
- memcpy(iscsi_mac, bp->dev->dev_addr,
- ETH_ALEN);
-
- BNX2X_DEV_INFO("SD ISCSI MODE\n");
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
- } else { /* FCoE */
- memcpy(fip_mac, bp->dev->dev_addr,
- ETH_ALEN);
- BNX2X_DEV_INFO("SD FCoE MODE\n");
- BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
- fip_mac);
- }
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
+ if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO
+ ("Read iSCSI MAC: %pM\n", iscsi_mac);
+ } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+ /* use primary mac as fip mac */
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ BNX2X_DEV_INFO("SD FCoE MODE\n");
+ BNX2X_DEV_INFO
+ ("Read FIP MAC: %pM\n", fip_mac);
}
}
+ if (IS_MF_STORAGE_SD(bp))
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
if (IS_MF_FCOE_AFEX(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-#endif
} else {
- /* in SF read MACs from port configuration */
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
- iscsi_mac_upper);
+ iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
- iscsi_mac_lower);
+ iscsi_mac_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
- fcoe_fip_mac_upper);
+ fcoe_fip_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
- fcoe_fip_mac_lower);
+ fcoe_fip_mac_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
-#endif
}
- memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-
-#ifdef BCM_CNIC
- /* Disable iSCSI if MAC configuration is
- * invalid.
- */
+ /* Disable iSCSI OOO if MAC configuration is invalid. */
if (!is_valid_ether_addr(iscsi_mac)) {
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
memset(iscsi_mac, 0, ETH_ALEN);
}
- /* Disable FCoE if MAC configuration is
- * invalid.
- */
+ /* Disable FCoE if MAC configuration is invalid. */
if (!is_valid_ether_addr(fip_mac)) {
bp->flags |= NO_FCOE_FLAG;
memset(bp->fip_mac, 0, ETH_ALEN);
}
-#endif
+}
+
+static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ eth_hw_addr_random(bp->dev);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+ if (CNIC_SUPPORT(bp))
+ bnx2x_get_cnic_mac_hwinfo(bp);
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: %pM\n"
"change it manually before bringing up the appropriate network interface\n",
bp->dev->dev_addr);
+}
+static bool bnx2x_get_dropless_info(struct bnx2x *bp)
+{
+ int tmp;
+ u32 cfg;
+ if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
+ /* Take function: tmp = func */
+ tmp = BP_ABS_FUNC(bp);
+ cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
+ cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
+ } else {
+ /* Take port: tmp = port */
+ tmp = BP_PORT(bp);
+ cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[tmp].generic_features);
+ cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
+ }
+ return cfg;
}
-static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
int vn;
@@ -10516,6 +10793,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
dev_err(&bp->pdev->dev,
"FORCING Normal Mode failed!!!\n");
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESET);
return -EPERM;
}
}
@@ -10526,9 +10805,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("IGU Normal Mode\n");
- bnx2x_get_igu_cam_info(bp);
-
+ rc = bnx2x_get_igu_cam_info(bp);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ if (rc)
+ return rc;
}
/*
@@ -10697,7 +10977,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
return rc;
}
-static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
int cnt, i, block_end, rodi;
char vpd_start[BNX2X_VPD_LEN+1];
@@ -10782,7 +11062,7 @@ out_not_found:
return;
}
-static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
{
u32 flags = 0;
@@ -10832,7 +11112,7 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
INIT_MODE_FLAGS(bp) = flags;
}
-static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+static int bnx2x_init_bp(struct bnx2x *bp)
{
int func;
int rc;
@@ -10840,9 +11120,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
-#ifdef BCM_CNIC
- mutex_init(&bp->cnic_mutex);
-#endif
+
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10880,10 +11158,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->disable_tpa = disable_tpa;
-
-#ifdef BCM_CNIC
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
-#endif
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -10897,7 +11172,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E1(bp))
bp->dropless_fc = 0;
else
- bp->dropless_fc = dropless_fc;
+ bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
bp->mrrs = mrrs;
@@ -10914,15 +11189,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->timer.data = (unsigned long) bp;
bp->timer.function = bnx2x_timer;
- bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
- bnx2x_dcbx_init_params(bp);
+ if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
+ SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
+ SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
+ SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+ } else {
+ bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
+ }
-#ifdef BCM_CNIC
if (CHIP_IS_E1x(bp))
bp->cnic_base_cl_id = FP_SB_MAX_E1x;
else
bp->cnic_base_cl_id = FP_SB_MAX_E2;
-#endif
/* multiple tx priority */
if (CHIP_IS_E1x(bp))
@@ -10932,6 +11212,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ /* We need at least one default status block for slow-path events,
+ * second status block for the L2 queue, and a third status block for
+	 * CNIC if supported.
+ */
+ if (CNIC_SUPPORT(bp))
+ bp->min_msix_vec_cnt = 3;
+ else
+ bp->min_msix_vec_cnt = 2;
+ BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
+
return rc;
}
@@ -11168,11 +11458,9 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
-#ifdef BCM_CNIC
/* handle ISCSI SD mode */
if (IS_MF_ISCSI_SD(bp))
bp->rx_mode = BNX2X_RX_MODE_NONE;
-#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11284,7 +11572,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
};
@@ -11307,9 +11595,8 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
- struct net_device *dev,
- unsigned long board_type)
+static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
+ unsigned long board_type)
{
struct bnx2x *bp;
int rc;
@@ -11346,6 +11633,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_disable;
}
+ pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
+ if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
+ PCICFG_REVESION_ID_ERROR_VAL) {
+ pr_err("PCI device error, probably due to fan failure, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
@@ -11481,8 +11776,7 @@ err_out:
return rc;
}
-static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
- int *width, int *speed)
+static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
@@ -11750,9 +12044,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
-#ifdef BCM_CNIC
- cid_count += CNIC_CID_MAX;
-#endif
+ if (CNIC_SUPPORT(bp))
+ cid_count += CNIC_CID_MAX;
return roundup(cid_count, QM_CID_ROUND);
}
@@ -11762,7 +12055,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
+ int cnic_cnt)
{
int pos;
u16 control;
@@ -11774,7 +12068,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
* one fast path queue: one FP queue + SB for CNIC
*/
if (!pos)
- return 1 + CNIC_PRESENT;
+ return 1 + cnic_cnt;
/*
* The value in the PCI configuration space is the index of the last
@@ -11786,14 +12080,16 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
return control & PCI_MSIX_FLAGS_QSIZE;
}
-static int __devinit bnx2x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+
+static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
+ int cnic_cnt;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11837,21 +12133,22 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+ cnic_cnt = 1;
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - CNIC_PRESENT;
+ rss_count = max_non_def_sbs - cnic_cnt;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
- rx_count = rss_count + FCOE_PRESENT;
+ rx_count = rss_count + cnic_cnt;
/*
* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = rss_count * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + cnic_cnt;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11862,6 +12159,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
+ bp->cnic_support = cnic_cnt;
+ bp->cnic_probe = bnx2x_cnic_probe;
+
pci_set_drvdata(pdev, dev);
rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11870,6 +12170,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return rc;
}
+ BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11902,10 +12203,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
-#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x */
+ /* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
+
/* disable FCOE for 57840 device, until FW supports it */
switch (ent->driver_data) {
case BCM57840_O:
@@ -11915,8 +12216,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57840_MF:
bp->flags |= NO_FCOE_FLAG;
}
-#endif
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -11932,14 +12231,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
-#ifdef BCM_CNIC
+
if (!NO_FCOE(bp)) {
/* Add storage MAC address */
rtnl_lock();
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-#endif
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
@@ -11973,7 +12271,7 @@ init_one_exit:
return rc;
}
-static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+static void bnx2x_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp;
@@ -11984,14 +12282,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
bp = netdev_priv(dev);
-#ifdef BCM_CNIC
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
-#endif
#ifdef BCM_DCBNL
/* Delete app tlvs from dcbnl */
@@ -12039,15 +12335,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
+ if (CNIC_LOADED(bp))
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
/* Stop Tx */
bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
del_timer_sync(&bp->timer);
@@ -12188,7 +12486,7 @@ static struct pci_driver bnx2x_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2x_pci_tbl,
.probe = bnx2x_init_one,
- .remove = __devexit_p(bnx2x_remove_one),
+ .remove = bnx2x_remove_one,
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
@@ -12238,7 +12536,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp)
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
-#ifdef BCM_CNIC
/**
* bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
*
@@ -12691,12 +12988,31 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
{
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ int rc;
+
+ DP(NETIF_MSG_IFUP, "Register_cnic called\n");
if (ops == NULL) {
BNX2X_ERR("NULL ops received\n");
return -EINVAL;
}
+ if (!CNIC_SUPPORT(bp)) {
+ BNX2X_ERR("Can't register CNIC when not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!CNIC_LOADED(bp)) {
+ rc = bnx2x_load_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("CNIC-related load failed\n");
+ return rc;
+ }
+
+ }
+
+ bp->cnic_enabled = true;
+
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!bp->cnic_kwq)
return -ENOMEM;
@@ -12786,7 +13102,5 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->starting_cid);
return cp;
}
-EXPORT_SYMBOL(bnx2x_cnic_probe);
-#endif /* BCM_CNIC */
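The bnx2x_main.c hunks above replace every compile-time BCM_CNIC guard with a run-time check, so one driver binary covers both cases and CNIC resources are only brought up once a CNIC client registers. A minimal sketch of the resulting pattern, using the CNIC_SUPPORT()/CNIC_LOADED() macros and bnx2x_load_cnic() referenced in this diff (the wrapper name example_cnic_setup is hypothetical, not a function in the driver):

/* Illustrative sketch only - run-time CNIC gating instead of #ifdef BCM_CNIC */
static int example_cnic_setup(struct bnx2x *bp)
{
	if (!CNIC_SUPPORT(bp))		/* replaces the old #ifdef BCM_CNIC guard */
		return -EOPNOTSUPP;

	if (CNIC_LOADED(bp))		/* CNIC queues/IRQs already brought up */
		return 0;

	return bnx2x_load_cnic(bp);	/* allocate CNIC resources on demand */
}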
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d34c71..bc2f65b32649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
#define NIG_REG_LLH1_ERROR_MASK 0x10090
/* [RW 8] event id for llh1 */
#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_EN 0x16104
#define NIG_REG_LLH1_FUNC_MEM 0x161c0
#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
@@ -2302,6 +2303,15 @@
* set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
* accommodate the 9 input clients to ETS arbiter. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
+ * for BRB LB interface is bypassed and PBF LB traffic is always selected to
+ * send to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
#define NIG_REG_P1_MAC_IN_EN 0x185c0
/* [RW 1] Output enable for TX MAC interface */
@@ -2418,6 +2428,12 @@
#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
/* [R 1] TX FIFO for transmitting data to MAC is empty. */
#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to BRB LB interface to forward the packet to the host. All
+ * packets from MCP are forwarded to the network when this bit is cleared -
+ * regardless of the configured destination in tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
forwarded to the host. */
#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
@@ -5482,6 +5498,7 @@
#define XMAC_CTRL_REG_RX_EN (0x1<<1)
#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1<<7)
#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1)
@@ -5502,11 +5519,14 @@
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_CTRL 0x50
#define XMAC_REG_RX_LSS_STATUS 0x58
/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
* CRC in strip mode */
#define XMAC_REG_RX_MAX_SIZE 0x40
#define XMAC_REG_TX_CTRL 0x20
+#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1<<0)
+#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1<<1)
/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
header pointer. */
@@ -5922,6 +5942,16 @@
#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
+#define MISC_SPIO_CLR_POS 16
+#define MISC_SPIO_FLOAT (0xffL<<24)
+#define MISC_SPIO_FLOAT_POS 24
+#define MISC_SPIO_INPUT_HI_Z 2
+#define MISC_SPIO_INT_OLD_SET_POS 16
+#define MISC_SPIO_OUTPUT_HIGH 1
+#define MISC_SPIO_OUTPUT_LOW 0
+#define MISC_SPIO_SET_POS 8
+#define MISC_SPIO_SPIO4 0x10
+#define MISC_SPIO_SPIO5 0x20
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
@@ -6130,7 +6160,9 @@
#define PCICFG_COMMAND_INT_DISABLE (1<<10)
#define PCICFG_COMMAND_RESERVED (0x1f<<11)
#define PCICFG_STATUS_OFFSET 0x06
-#define PCICFG_REVESION_ID_OFFSET 0x08
+#define PCICFG_REVISION_ID_OFFSET 0x08
+#define PCICFG_REVESION_ID_MASK 0xff
+#define PCICFG_REVESION_ID_ERROR_VAL 0xff
#define PCICFG_CACHE_LINE_SIZE 0x0c
#define PCICFG_LATENCY_TIMER 0x0d
#define PCICFG_BAR_1_LOW 0x10
@@ -6672,6 +6704,7 @@
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
@@ -7046,7 +7079,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
-#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
+#define MDIO_WC_REG_PCS_STATUS2 0x0021
+#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
@@ -7078,6 +7112,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
@@ -7112,6 +7147,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
@@ -7129,9 +7165,16 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
+#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
+#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
+#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
+#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
+#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
@@ -7145,7 +7188,17 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
#define MDIO_WC_REG_FX100_CTRL1 0x8400
#define MDIO_WC_REG_FX100_CTRL3 0x8402
-
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
+#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
+#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
+#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
+#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
+#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
+#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
+#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c02264..09b625e0fdaa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4318,7 +4318,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
if (o->next_tx_only >= o->max_cos)
 		/* >= because tx only must always be smaller than cos since the
- * primary connection suports COS 0
+ * primary connection supports COS 0
*/
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
@@ -5350,12 +5350,24 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_STARTED;
+
+ /* Switch_update ramrod can be sent in either started or
+ * tx_stopped state, and it doesn't change the state.
+ */
+ else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
else if (cmd == BNX2X_F_CMD_TX_STOP)
next_state = BNX2X_F_STATE_TX_STOPPED;
break;
case BNX2X_F_STATE_TX_STOPPED:
- if (cmd == BNX2X_F_CMD_TX_START)
+ if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ else if (cmd == BNX2X_F_CMD_TX_START)
next_state = BNX2X_F_STATE_STARTED;
break;
@@ -5637,6 +5649,28 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_update_data *rdata =
+ (struct function_update_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_switch_update_params *switch_update_params =
+ &params->params.switch_update;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->tx_switch_suspend_change_flg = 1;
+ rdata->tx_switch_suspend = switch_update_params->suspend;
+ rdata->echo = SWITCH_UPDATE;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
@@ -5657,6 +5691,7 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
cpu_to_le16(afex_update_params->afex_default_vlan);
rdata->allowed_priorities_change_flg = 1;
rdata->allowed_priorities = afex_update_params->allowed_priorities;
+ rdata->echo = AFEX_UPDATE;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
return bnx2x_func_send_tx_stop(bp, params);
case BNX2X_F_CMD_TX_START:
return bnx2x_func_send_tx_start(bp, params);
+ case BNX2X_F_CMD_SWITCH_UPDATE:
+ return bnx2x_func_send_switch_update(bp, params);
default:
BNX2X_ERR("Unknown command: %d\n", params->cmd);
return -EINVAL;
@@ -5818,16 +5855,30 @@ int bnx2x_func_state_change(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
struct bnx2x_func_sp_obj *o = params->f_obj;
- int rc;
+ int rc, cnt = 300;
enum bnx2x_func_cmd cmd = params->cmd;
unsigned long *pending = &o->pending;
mutex_lock(&o->one_pending_mutex);
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params)) {
+ rc = o->check_transition(bp, o, params);
+ if ((rc == -EBUSY) &&
+ (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+ while ((rc == -EBUSY) && (--cnt > 0)) {
+ mutex_unlock(&o->one_pending_mutex);
+ msleep(10);
+ mutex_lock(&o->one_pending_mutex);
+ rc = o->check_transition(bp, o, params);
+ }
+ if (rc == -EBUSY) {
+ mutex_unlock(&o->one_pending_mutex);
+ BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+ return rc;
+ }
+ } else if (rc) {
mutex_unlock(&o->one_pending_mutex);
- return -EINVAL;
+ return rc;
}
/* Set "pending" bit */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4ca608..adbd91b1bdfc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@ enum {
* pending commands list.
*/
RAMROD_CONT,
+ /* If there is another pending ramrod, wait until it finishes and
+ * re-try to submit this one. This flag can be set only in sleepable
+ * context, and should not be set from the context that completes the
+ * ramrods as deadlock will occur.
+ */
+ RAMROD_RETRY,
};
typedef enum {
@@ -1061,6 +1067,7 @@ enum bnx2x_func_cmd {
BNX2X_F_CMD_AFEX_VIFLISTS,
BNX2X_F_CMD_TX_STOP,
BNX2X_F_CMD_TX_START,
+ BNX2X_F_CMD_SWITCH_UPDATE,
BNX2X_F_CMD_MAX,
};
@@ -1103,6 +1110,10 @@ struct bnx2x_func_start_params {
u8 network_cos_mode;
};
+struct bnx2x_func_switch_update_params {
+ u8 suspend;
+};
+
struct bnx2x_func_afex_update_params {
u16 vif_id;
u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@ struct bnx2x_func_state_params {
struct bnx2x_func_hw_init_params hw_init;
struct bnx2x_func_hw_reset_params hw_reset;
struct bnx2x_func_start_params start;
+ struct bnx2x_func_switch_update_params switch_update;
struct bnx2x_func_afex_update_params afex_update;
struct bnx2x_func_afex_viflists_params afex_viflists;
struct bnx2x_func_tx_start_params tx_start;
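The new BNX2X_F_CMD_SWITCH_UPDATE command and the RAMROD_RETRY flag are meant to be used together: RAMROD_RETRY makes bnx2x_func_state_change() sleep and re-check (see the retry loop added in bnx2x_sp.c above) instead of returning -EBUSY while a previous ramrod is still pending. A hedged sketch of a caller, assuming only the structures introduced in this diff (the wrapper name example_tx_switching_suspend is hypothetical):

static int example_tx_switching_suspend(struct bnx2x *bp, u8 suspend)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
	func_params.params.switch_update.suspend = suspend;

	/* Sleepable context only - RAMROD_RETRY makes the state machine
	 * msleep() and retry while another ramrod is pending, rather than
	 * failing the transition check with -EBUSY.
	 */
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}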
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 348ed02d3c69..89ec0667140a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1149,6 +1149,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
UPDATE_ESTAT_QSTAT(hw_csum_err);
+ UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 24b8e505b60c..b4d7b26c7fe7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,7 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+ u32 driver_filtered_tx_pkt;
/* src: Clear-on-Read register; Will not survive PMF Migration */
u32 eee_tx_lpi;
};
@@ -264,6 +265,7 @@ struct bnx2x_eth_q_stats {
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
+ u32 driver_filtered_tx_pkt;
};
struct bnx2x_eth_stats_old {
@@ -315,6 +317,7 @@ struct bnx2x_eth_q_stats_old {
u32 rx_err_discard_pkt_old;
u32 rx_skb_alloc_failed_old;
u32 hw_csum_err_old;
+ u32 driver_filtered_tx_pkt_old;
};
struct bnx2x_net_stats_old {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cc8434fd606e..df8c30d1a52c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -40,8 +40,10 @@
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>
+#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
+#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
@@ -51,10 +53,10 @@
#include "cnic.h"
#include "cnic_defs.h"
-#define DRV_MODULE_NAME "cnic"
+#define CNIC_MODULE_NAME "cnic"
-static char version[] __devinitdata =
- "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+static char version[] =
+ "Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
"Chen (zongxi@broadcom.com");
@@ -724,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
- dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
+ dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@@ -783,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
- BCM_PAGE_SIZE,
+ BNX2_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@@ -792,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
if (!use_pg_tbl)
return 0;
- dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
- ~(BCM_PAGE_SIZE - 1);
+ dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
+ ~(BNX2_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@@ -895,11 +897,11 @@ static int cnic_alloc_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
- cp->ctx_blk_size = BCM_PAGE_SIZE;
- cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+ cp->ctx_blk_size = BNX2_PAGE_SIZE;
+ cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -931,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
- BCM_PAGE_SIZE,
+ BNX2_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@@ -1011,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
if (udev->l2_ring)
return 0;
- udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+ udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@@ -1234,8 +1236,6 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
- cp->iro_arr = ethdev->iro_arr;
-
cp->max_cid_space = MAX_ISCSI_TBL_SZ;
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
@@ -1430,6 +1430,7 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
u32 pfid = cp->pfid;
@@ -1512,6 +1513,7 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 pfid = cp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -2048,6 +2050,7 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 pfid = cp->pfid;
u8 *mac = dev->mac_addr;
@@ -2084,6 +2087,7 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
u16 tstorm_flags = 0;
@@ -2103,6 +2107,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct l4_kwq_connect_req1 *kwqe1 =
(struct l4_kwq_connect_req1 *) wqes[0];
struct l4_kwq_connect_req3 *kwqe3;
@@ -2898,7 +2903,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (udev->l2_ring + (2 * BCM_PAGE_SIZE));
+ (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -3853,12 +3858,17 @@ static int cnic_cm_abort(struct cnic_sock *csk)
return cnic_cm_abort_req(csk);
/* Getting here means that we haven't started connect, or
- * connect was not successful.
+ * connect was not successful, or it has been reset by the target.
*/
cp->close_conn(csk, opcode);
- if (csk->state != opcode)
+ if (csk->state != opcode) {
+ /* Wait for remote reset sequence to complete */
+ while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ msleep(1);
+
return -EALREADY;
+ }
return 0;
}
@@ -3872,6 +3882,10 @@ static int cnic_cm_close(struct cnic_sock *csk)
csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
return cnic_cm_close_req(csk);
} else {
+ /* Wait for remote reset sequence to complete */
+ while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ msleep(1);
+
return -EALREADY;
}
return 0;
@@ -4200,6 +4214,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 pfid = cp->pfid;
u32 port = CNIC_PORT(cp);
@@ -4349,7 +4364,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
int ret = 0, i;
u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
- if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
return 0;
for (i = 0; i < cp->ctx_blks; i++) {
@@ -4357,7 +4372,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
- memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+ memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4499,7 +4514,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
u32 cid_addr, tx_cid, sb_id;
u32 val, offset0, offset1, offset2, offset3;
int i;
- struct tx_bd *txbd;
+ struct bnx2_tx_bd *txbd;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct status_block *s_blk = cp->status_blk.gen;
@@ -4517,7 +4532,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
cp->tx_cons = *cp->tx_cons_ptr;
cid_addr = GET_CID_ADDR(tx_cid);
- if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
for (i = 0; i < PHY_CTX_SIZE; i += 4)
@@ -4545,7 +4560,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
txbd = udev->l2_ring;
buf_map = udev->l2_buf_map;
- for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+ for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
@@ -4565,7 +4580,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
struct cnic_uio_dev *udev = cp->udev;
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
- struct rx_bd *rxbd;
+ struct bnx2_rx_bd *rxbd;
struct status_block *s_blk = cp->status_blk.gen;
dma_addr_t ring_map = udev->l2_ring_map;
@@ -4601,8 +4616,8 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = udev->l2_ring + BCM_PAGE_SIZE;
- for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+ rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+ for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4612,11 +4627,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -4662,7 +4677,7 @@ static void cnic_set_bnx2_mac(struct cnic_dev *dev)
CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
- if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
@@ -4682,10 +4697,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
- if (BCM_PAGE_BITS > 12)
+ if (BNX2_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
- val |= (BCM_PAGE_BITS - 8) << 4;
+ val |= (BNX2_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@@ -4708,20 +4723,20 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kwq_con_idx = 0;
set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
- if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+ if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
else
cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4741,13 +4756,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4843,6 +4858,7 @@ static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
u16 sb_id, u8 sb_index,
u8 disable)
{
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
@@ -4860,6 +4876,7 @@ static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u8 sb_id = cp->status_blk_num;
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
@@ -4886,10 +4903,10 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
- memset(txbd, 0, BCM_PAGE_SIZE);
+ memset(txbd, 0, BNX2_PAGE_SIZE);
buf_map = udev->l2_buf_map;
- for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+ for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x =
&((txbd + 1)->parse_bd_e1x);
@@ -4908,9 +4925,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
pbd_e2->parsing_data = (UNICAST_ADDRESS <<
- ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
else
- pbd_e1x->global_data = (UNICAST_ADDRESS <<
+ pbd_e1x->global_data = (UNICAST_ADDRESS <<
ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
}
@@ -4945,9 +4962,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
- BCM_PAGE_SIZE);
+ BNX2_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (udev->l2_ring + (2 * BCM_PAGE_SIZE));
+ (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -4971,20 +4988,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@@ -5009,6 +5026,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 pfid = cp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
@@ -5047,37 +5065,17 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func = CNIC_FUNC(cp), ret;
+ int func, ret;
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
- cp->port_mode = CHIP_PORT_MODE_NONE;
-
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val;
+ cp->port_mode = bp->common.chip_port_mode;
+ cp->pfid = bp->pfid;
+ cp->func = bp->pf_num;
- pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
- cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
- func = CNIC_FUNC(cp);
-
- val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
- if (!(val & 1))
- val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
- else
- val = (val >> 1) & 1;
-
- if (val) {
- cp->port_mode = CHIP_4_PORT_MODE;
- cp->pfid = func >> 1;
- } else {
- cp->port_mode = CHIP_2_PORT_MODE;
- cp->pfid = func & 0x6;
- }
- } else {
- cp->pfid = func;
- }
+ func = CNIC_FUNC(cp);
pfid = cp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
@@ -5144,6 +5142,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
static void cnic_init_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
@@ -5249,8 +5248,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
- memset(rx_ring, 0, BCM_PAGE_SIZE);
+ rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
+ memset(rx_ring, 0, BNX2_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
@@ -5344,8 +5343,28 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
+ u32 sb_id = cp->status_blk_num;
+ u32 idx_off, syn_off;
cnic_free_irq(dev);
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ idx_off = offsetof(struct hc_status_block_e2, index_values) +
+ (hc_index * sizeof(u16));
+
+ syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
+ } else {
+ idx_off = offsetof(struct hc_status_block_e1x, index_values) +
+ (hc_index * sizeof(u16));
+
+ syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
+ }
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
+ idx_off, 0);
+
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
@@ -5431,14 +5450,12 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
+ struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *ethdev = NULL;
- struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
- probe = symbol_get(bnx2_cnic_probe);
- if (probe) {
- ethdev = (*probe)(dev);
- symbol_put(bnx2_cnic_probe);
- }
+ if (bp->cnic_probe)
+ ethdev = (bp->cnic_probe)(dev);
+
if (!ethdev)
return NULL;
@@ -5493,14 +5510,12 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
struct pci_dev *pdev;
struct cnic_dev *cdev;
struct cnic_local *cp;
+ struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *ethdev = NULL;
- struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
- probe = symbol_get(bnx2x_cnic_probe);
- if (probe) {
- ethdev = (*probe)(dev);
- symbol_put(bnx2x_cnic_probe);
- }
+ if (bp->cnic_probe)
+ ethdev = bp->cnic_probe(dev);
+
if (!ethdev)
return NULL;
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 148604c3fa0c..62c670619ae6 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -80,18 +80,18 @@
#define CNIC_LOCAL_PORT_MAX 61024
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
-#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
-#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe))
#define MAX_KWQE_CNT (KWQE_CNT - 1)
#define MAX_KCQE_CNT (KCQE_CNT - 1)
#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
-#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5))
#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
-#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5))
#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
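The KWQ_PG()/KCQ_PG() and KWQ_IDX()/KCQ_IDX() macros above split a ring index into a page number and an offset within that page. A minimal worked example, assuming the usual 4 KiB page (BNX2_PAGE_BITS = 12) and the 32-byte element size implied by the "- 5" shift, so KWQE_CNT = 128 and MAX_KWQE_CNT = 127:

	u16 idx  = 300;
	u16 page = (idx & ~127) >> (12 - 5);	/* KWQ_PG(300)  == 2,  i.e. 300 / 128 */
	u16 off  = idx & 127;			/* KWQ_IDX(300) == 44, i.e. 300 % 128 */

The sizes here are assumptions for illustration only; the real values come from BNX2_PAGE_SIZE and sizeof(struct kwqe) at build time.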
#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
@@ -186,14 +186,6 @@ struct kcq_info {
u16 (*hw_idx)(u16);
};
-struct iro {
- u32 base;
- u16 m1;
- u16 m2;
- u16 m3;
- u16 size;
-};
-
struct cnic_uio_dev {
struct uio_info cnic_uinfo;
u32 uio_dev;
@@ -241,9 +233,6 @@ struct cnic_local {
u16 rx_cons;
u16 tx_cons;
- const struct iro *iro_arr;
-#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
-
struct cnic_dma kwq_info;
struct kwqe **kwq;
@@ -316,9 +305,6 @@ struct cnic_local {
int func;
u32 pfid;
u8 port_mode;
-#define CHIP_4_PORT_MODE 0
-#define CHIP_2_PORT_MODE 1
-#define CHIP_PORT_MODE_NONE 2
u32 shmem_base;
@@ -420,11 +406,11 @@ struct bnx2x_bd_chain_next {
BNX2X_CHIP_IS_57840(x))
#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
-#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
-
-#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
+ sizeof(struct eth_rx_bd))
#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
-#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define BNX2X_RCQ_DESC_CNT (BNX2_PAGE_SIZE / \
+ sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 865095aad1f6..2a35436f9095 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.14"
-#define CNIC_MODULE_RELDATE "Sep 30, 2012"
+#define CNIC_MODULE_VERSION "2.5.16"
+#define CNIC_MODULE_RELDATE "Dec 05, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -353,7 +353,4 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
extern int cnic_unregister_driver(int ulp_type);
-extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
-extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
-
#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 49e7a258da8a..3a1c8a3cf7c9 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2586,7 +2586,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
}
-static int __devinit sbmac_probe(struct platform_device *pldev)
+static int sbmac_probe(struct platform_device *pldev)
{
struct net_device *dev;
struct sbmac_softc *sc;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a8800ac10df9..bdb086934cd9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,6 +54,9 @@
#include <asm/byteorder.h>
#include <linux/uaccess.h>
+#include <uapi/linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
@@ -90,10 +93,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 125
+#define TG3_MIN_NUM 128
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "September 26, 2012"
+#define DRV_MODULE_RELDATE "December 03, 2012"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -211,7 +214,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
@@ -226,6 +229,9 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
+#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
+
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
@@ -245,20 +251,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
+ TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
@@ -266,8 +280,13 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
+ PCI_VENDOR_ID_LENOVO,
+ TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -286,18 +305,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
+ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
- {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
+ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
@@ -398,19 +427,27 @@ static const struct {
};
#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+#define TG3_NVRAM_TEST 0
+#define TG3_LINK_TEST 1
+#define TG3_REGISTER_TEST 2
+#define TG3_MEMORY_TEST 3
+#define TG3_MAC_LOOPB_TEST 4
+#define TG3_PHY_LOOPB_TEST 5
+#define TG3_EXT_LOOPB_TEST 6
+#define TG3_INTERRUPT_TEST 7
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
- { "nvram test (online) " },
- { "link test (online) " },
- { "register test (offline)" },
- { "memory test (offline)" },
- { "mac loopback test (offline)" },
- { "phy loopback test (offline)" },
- { "ext loopback test (offline)" },
- { "interrupt test (offline)" },
+ [TG3_NVRAM_TEST] = { "nvram test (online) " },
+ [TG3_LINK_TEST] = { "link test (online) " },
+ [TG3_REGISTER_TEST] = { "register test (offline)" },
+ [TG3_MEMORY_TEST] = { "memory test (offline)" },
+ [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
+ [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
+ [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
+ [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
};
#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
@@ -1246,14 +1283,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
- MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+ u32 val;
+ int err;
+
+ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+ if (err)
+ return err;
+ if (enable)
-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_TX_6DB);
+ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+ else
+ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+ return err;
+}
static int tg3_bmcr_reset(struct tg3 *tp)
{
@@ -2186,7 +2235,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
otp = tp->phy_otp;
- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
return;
phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2211,7 +2260,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2247,9 +2296,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
if (!tp->setlpicnt) {
if (current_link_up == 1 &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2265,11 +2314,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2413,7 +2462,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000,
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (err)
return err;
@@ -2434,7 +2483,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
@@ -2447,6 +2496,18 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
+static void tg3_carrier_on(struct tg3 *tp)
+{
+ netif_carrier_on(tp->dev);
+ tp->link_up = true;
+}
+
+static void tg3_carrier_off(struct tg3 *tp)
+{
+ netif_carrier_off(tp->dev);
+ tp->link_up = false;
+}
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -2465,8 +2526,8 @@ static int tg3_phy_reset(struct tg3 *tp)
if (err != 0)
return -EBUSY;
- if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
- netif_carrier_off(tp->dev);
+ if (netif_running(tp->dev) && tp->link_up) {
+ tg3_carrier_off(tp);
tg3_link_report(tp);
}
@@ -2523,10 +2584,10 @@ static int tg3_phy_reset(struct tg3 *tp)
out:
if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2535,14 +2596,14 @@ out:
}
if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x000a, 0x310b);
tg3_phydsp_write(tp, 0x201f, 0x9506);
tg3_phydsp_write(tp, 0x401f, 0x14e2);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2551,7 +2612,7 @@ out:
} else
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
}
@@ -3960,7 +4021,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (!err) {
u32 err2;
@@ -3993,7 +4054,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
MII_TG3_DSP_CH34TP2_HIBW01);
}
- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
if (!err)
err = err2;
}
@@ -4160,6 +4221,24 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
return true;
}
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
+{
+ if (curr_link_up != tp->link_up) {
+ if (curr_link_up) {
+ tg3_carrier_on(tp);
+ } else {
+ tg3_carrier_off(tp);
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+
+ tg3_link_report(tp);
+ return true;
+ }
+
+ return false;
+}
+
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
int current_link_up;
@@ -4192,7 +4271,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
- netif_carrier_ok(tp->dev)) {
+ tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
!(bmsr & BMSR_LSTATUS))
@@ -4434,13 +4513,7 @@ relink:
PCI_EXP_LNKCTL_CLKREQ_EN);
}
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else
- netif_carrier_off(tp->dev);
- tg3_link_report(tp);
- }
+ tg3_test_and_report_link_chg(tp, current_link_up);
return 0;
}
@@ -5080,7 +5153,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
orig_active_duplex = tp->link_config.active_duplex;
if (!tg3_flag(tp, HW_AUTONEG) &&
- netif_carrier_ok(tp->dev) &&
+ tp->link_up &&
tg3_flag(tp, INIT_COMPLETE)) {
mac_status = tr32(MAC_STATUS);
mac_status &= (MAC_STATUS_PCS_SYNCED |
@@ -5158,13 +5231,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
LED_CTRL_TRAFFIC_OVERRIDE));
}
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else
- netif_carrier_off(tp->dev);
- tg3_link_report(tp);
- } else {
+ if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
u32 now_pause_cfg = tp->link_config.active_flowctrl;
if (orig_pause_cfg != now_pause_cfg ||
orig_active_speed != tp->link_config.active_speed ||
@@ -5257,7 +5324,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
new_bmcr |= BMCR_SPEED1000;
/* Force a linkdown */
- if (netif_carrier_ok(tp->dev)) {
+ if (tp->link_up) {
u32 adv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
@@ -5269,7 +5336,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
BMCR_ANRESTART |
BMCR_ANENABLE);
udelay(10);
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
}
tg3_writephy(tp, MII_BMCR, new_bmcr);
bmcr = new_bmcr;
@@ -5335,15 +5402,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
tp->link_config.active_speed = current_speed;
tp->link_config.active_duplex = current_duplex;
- if (current_link_up != netif_carrier_ok(tp->dev)) {
- if (current_link_up)
- netif_carrier_on(tp->dev);
- else {
- netif_carrier_off(tp->dev);
- tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
- }
- tg3_link_report(tp);
- }
+ tg3_test_and_report_link_chg(tp, current_link_up);
return err;
}
@@ -5355,7 +5414,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
return;
}
- if (!netif_carrier_ok(tp->dev) &&
+ if (!tp->link_up &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -5385,7 +5444,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
}
}
- } else if (netif_carrier_ok(tp->dev) &&
+ } else if (tp->link_up &&
(tp->link_config.autoneg == AUTONEG_ENABLE) &&
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
u32 phy2;
@@ -5451,7 +5510,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
(32 << TX_LENGTHS_SLOT_TIME_SHIFT));
if (!tg3_flag(tp, 5705_PLUS)) {
- if (netif_carrier_ok(tp->dev)) {
+ if (tp->link_up) {
tw32(HOSTCC_STAT_COAL_TICKS,
tp->coal.stats_block_coalesce_usecs);
} else {
@@ -5461,7 +5520,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
if (tg3_flag(tp, ASPM_WORKAROUND)) {
val = tr32(PCIE_PWR_MGMT_THRESH);
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
tp->pwrmgmt_thresh;
else
@@ -5472,6 +5531,190 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
return err;
}
+/* tp->lock must be held */
+static u64 tg3_refclk_read(struct tg3 *tp)
+{
+ u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+ return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+}
+
+/* tp->lock must be held */
+static void tg3_refclk_write(struct tg3 *tp, u64 newval)
+{
+ tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+ tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
+ tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
+ tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+}
+
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
+static inline void tg3_full_unlock(struct tg3 *tp);
+static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (tp->ptp_clock)
+ info->phc_index = ptp_clock_index(tp->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+ return 0;
+}
+
+static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ bool neg_adj = false;
+ u32 correction = 0;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* Frequency adjustment is performed using hardware with a 24 bit
+ * accumulator and a programmable correction value. On each clk, the
+ * correction value gets added to the accumulator and when it
+ * overflows, the time counter is incremented/decremented.
+ *
+ * So conversion from ppb to correction value is
+ * ppb * (1 << 24) / 1000000000
+ */
+ correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
+ TG3_EAV_REF_CLK_CORRECT_MASK;
+
+ tg3_full_lock(tp, 0);
+
+ if (correction)
+ tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
+ TG3_EAV_REF_CLK_CORRECT_EN |
+ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+ else
+ tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
+
+ tg3_full_unlock(tp);
+
+ return 0;
+}
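A quick check of the conversion described in the comment above, using the max_adj of 250000000 ppb advertised in tg3_ptp_caps below:

	correction = 250000000 * (1 << 24) / 1000000000
	           = 4194304
	           = 0x400000

which is well below 1 << 24, so even the largest permitted adjustment fits in the 24-bit correction value the comment describes.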
+
+static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+ tg3_full_lock(tp, 0);
+ tp->ptp_adjust += delta;
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
+static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+ tg3_full_lock(tp, 0);
+ ns = tg3_refclk_read(tp);
+ ns += tp->ptp_adjust;
+ tg3_full_unlock(tp);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int tg3_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+
+ ns = timespec_to_ns(ts);
+
+ tg3_full_lock(tp, 0);
+ tg3_refclk_write(tp, ns);
+ tp->ptp_adjust = 0;
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
+static int tg3_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info tg3_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "tg3 clock",
+ .max_adj = 250000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = tg3_ptp_adjfreq,
+ .adjtime = tg3_ptp_adjtime,
+ .gettime = tg3_ptp_gettime,
+ .settime = tg3_ptp_settime,
+ .enable = tg3_ptp_enable,
+};
+
+static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
+ struct skb_shared_hwtstamps *timestamp)
+{
+ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
+ timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
+ tp->ptp_adjust);
+}
+
+/* tp->lock must be held */
+static void tg3_ptp_init(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return;
+
+ /* Initialize the hardware clock to the system time. */
+ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
+ tp->ptp_adjust = 0;
+ tp->ptp_info = tg3_ptp_caps;
+}
+
+/* tp->lock must be held */
+static void tg3_ptp_resume(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return;
+
+ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
+ tp->ptp_adjust = 0;
+}
+
+static void tg3_ptp_fini(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
+ return;
+
+ ptp_clock_unregister(tp->ptp_clock);
+ tp->ptp_clock = NULL;
+ tp->ptp_adjust = 0;
+}
+
static inline int tg3_irq_sync(struct tg3 *tp)
{
return tp->irq_sync;
@@ -5652,6 +5895,16 @@ static void tg3_tx(struct tg3_napi *tnapi)
return;
}
+ if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
+ struct skb_shared_hwtstamps timestamp;
+ u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
+ hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+
+ skb_tstamp_tx(skb, &timestamp);
+ }
+
pci_unmap_single(tp->pdev,
dma_unmap_addr(ri, mapping),
skb_headlen(skb),
@@ -5919,6 +6172,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
u8 *data;
+ u64 tstamp = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -5953,6 +6207,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
+ if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
+ RXD_FLAG_PTPSTAT_PTPV1 ||
+ (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
+ RXD_FLAG_PTPSTAT_PTPV2) {
+ tstamp = tr32(TG3_RX_TSTAMP_LSB);
+ tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
+ }
+
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
unsigned int frag_size;
@@ -5996,6 +6258,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
}
skb_put(skb, len);
+ if (tstamp)
+ tg3_hwclock_to_timestamp(tp, tstamp,
+ skb_hwtstamps(skb));
+
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -6477,17 +6743,24 @@ static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
tg3_napi_disable(tp);
+ netif_carrier_off(tp->dev);
netif_tx_disable(tp->dev);
}
+/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
+ tg3_ptp_resume(tp);
+
/* NOTE: unconditional netif_tx_wake_all_queues is only
* appropriate so long as all callers are assured to
* have free tx slots (such as after tg3_init_hw)
*/
netif_tx_wake_all_queues(tp->dev);
+ if (tp->link_up)
+ netif_carrier_on(tp->dev);
+
tg3_napi_enable(tp);
tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
@@ -6689,6 +6962,9 @@ static void tg3_poll_controller(struct net_device *dev)
int i;
struct tg3 *tp = netdev_priv(dev);
+ if (tg3_irq_sync(tp))
+ return;
+
for (i = 0; i < tp->irq_cnt; i++)
tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
@@ -7046,6 +7322,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan = vlan_tx_tag_get(skb);
}
+ if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
+ tg3_flag(tp, TX_TSTAMP_EN)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ base_flags |= TXD_FLAG_HWTSTAMP;
+ }
+
len = skb_headlen(skb);
mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -8386,7 +8668,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
val = 0;
tw32(HOSTCC_STAT_COAL_TICKS, val);
@@ -8662,14 +8944,14 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
if (!tg3_flag(tp, SUPPORT_MSIX))
return;
- if (tp->irq_cnt <= 2) {
+ if (tp->rxq_cnt == 1) {
memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
return;
}
/* Validate table against current IRQ count */
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
- if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
break;
}
@@ -8914,9 +9196,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
*/
tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
- tw32(GRC_MODE,
- tp->grc_mode |
- (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+ val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
+ if (tp->rxptpctl)
+ tw32(TG3_RX_PTP_CTL,
+ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+
+ if (tg3_flag(tp, PTP_CAPABLE))
+ val |= GRC_MODE_TIME_SYNC_ENABLE;
+
+ tw32(GRC_MODE, tp->grc_mode | val);
/* Setup the timer prescalar register. Clock is always 66Mhz. */
val = tr32(GRC_MISC_CFG);
@@ -9679,7 +9967,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
struct tg3_hw_stats *sp = tp->hw_stats;
- if (!netif_carrier_ok(tp->dev))
+ if (!tp->link_up)
return;
TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
@@ -9823,11 +10111,11 @@ static void tg3_timer(unsigned long __opaque)
u32 mac_stat = tr32(MAC_STATUS);
int need_setup = 0;
- if (netif_carrier_ok(tp->dev) &&
+ if (tp->link_up &&
(mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
need_setup = 1;
}
- if (!netif_carrier_ok(tp->dev) &&
+ if (!tp->link_up &&
(mac_stat & (MAC_STATUS_PCS_SYNCED |
MAC_STATUS_SIGNAL_DET))) {
need_setup = 1;
@@ -9890,7 +10178,7 @@ restart_timer:
add_timer(&tp->timer);
}
-static void __devinit tg3_timer_init(struct tg3 *tp)
+static void tg3_timer_init(struct tg3 *tp)
{
if (tg3_flag(tp, TAGGED_STATUS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
@@ -10316,7 +10604,8 @@ static void tg3_ints_fini(struct tg3 *tp)
tg3_flag_clear(tp, ENABLE_TSS);
}
-static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
+ bool init)
{
struct net_device *dev = tp->dev;
int i, err;
@@ -10395,6 +10684,12 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
tg3_flag_set(tp, INIT_COMPLETE);
tg3_enable_ints(tp);
+ if (init)
+ tg3_ptp_init(tp);
+ else
+ tg3_ptp_resume(tp);
+
+
tg3_full_unlock(tp);
netif_tx_start_all_queues(dev);
@@ -10429,10 +10724,8 @@ static void tg3_stop(struct tg3 *tp)
{
int i;
- tg3_napi_disable(tp);
tg3_reset_task_cancel(tp);
-
- netif_tx_disable(tp->dev);
+ tg3_netif_stop(tp);
tg3_timer_stop(tp);
@@ -10481,7 +10774,7 @@ static int tg3_open(struct net_device *dev)
}
}
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
err = tg3_power_up(tp);
if (err)
@@ -10494,11 +10787,19 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
- err = tg3_start(tp, true, true);
+ err = tg3_start(tp, true, true, true);
if (err) {
tg3_frob_aux_power(tp, false);
pci_set_power_state(tp->pdev, PCI_D3hot);
}
+
+ if (tg3_flag(tp, PTP_CAPABLE)) {
+ tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
+ &tp->pdev->dev);
+ if (IS_ERR(tp->ptp_clock))
+ tp->ptp_clock = NULL;
+ }
+
return err;
}
@@ -10506,6 +10807,8 @@ static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+ tg3_ptp_fini(tp);
+
tg3_stop(tp);
/* Clear stats across close / open calls */
@@ -10514,7 +10817,7 @@ static int tg3_close(struct net_device *dev)
tg3_power_down(tp);
- netif_carrier_off(tp->dev);
+ tg3_carrier_off(tp);
return 0;
}
@@ -10888,7 +11191,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev) && netif_carrier_ok(dev)) {
+ if (netif_running(dev) && tp->link_up) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
cmd->lp_advertising = tp->link_config.rmt_adv;
@@ -11406,9 +11709,9 @@ static int tg3_set_channels(struct net_device *dev,
tg3_stop(tp);
- netif_carrier_off(dev);
+ tg3_carrier_off(tp);
- tg3_start(tp, true, false);
+ tg3_start(tp, true, false, false);
return 0;
}
@@ -11755,7 +12058,7 @@ static int tg3_test_link(struct tg3 *tp)
max = TG3_COPPER_TIMEOUT_SEC;
for (i = 0; i < max; i++) {
- if (netif_carrier_ok(tp->dev))
+ if (tp->link_up)
return 0;
if (msleep_interruptible(1000))
@@ -12326,19 +12629,19 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
if (!netif_running(tp->dev)) {
- data[0] = TG3_LOOPBACK_FAILED;
- data[1] = TG3_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
if (do_extlpbk)
- data[2] = TG3_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
goto done;
}
err = tg3_reset_hw(tp, 1);
if (err) {
- data[0] = TG3_LOOPBACK_FAILED;
- data[1] = TG3_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
if (do_extlpbk)
- data[2] = TG3_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
goto done;
}
@@ -12361,11 +12664,11 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_mac_loopback(tp, true);
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[0] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[0] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
tg3_mac_loopback(tp, false);
}
@@ -12384,13 +12687,13 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
}
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[1] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, TSO_CAPABLE) &&
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
- data[1] |= TG3_TSO_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[1] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
if (do_extlpbk) {
tg3_phy_lpbk_set(tp, 0, true);
@@ -12402,13 +12705,16 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
mdelay(40);
if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
- data[2] |= TG3_STD_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_STD_LOOPBACK_FAILED;
if (tg3_flag(tp, TSO_CAPABLE) &&
tg3_run_loopback(tp, ETH_FRAME_LEN, true))
- data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_TSO_LOOPBACK_FAILED;
if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
- data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ data[TG3_EXT_LOOPB_TEST] |=
+ TG3_JMB_LOOPBACK_FAILED;
}
/* Re-enable gphy autopowerdown. */
@@ -12416,7 +12722,8 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
tg3_phy_toggle_apd(tp, true);
}
- err = (data[0] | data[1] | data[2]) ? -EIO : 0;
+ err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
+ data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
done:
tp->phy_flags |= eee_cap;
@@ -12441,11 +12748,11 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
if (tg3_test_nvram(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[0] = 1;
+ data[TG3_NVRAM_TEST] = 1;
}
if (!doextlpbk && tg3_test_link(tp)) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[1] = 1;
+ data[TG3_LINK_TEST] = 1;
}
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int err, err2 = 0, irq_sync = 0;
@@ -12457,7 +12764,6 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_lock(tp, irq_sync);
-
tg3_halt(tp, RESET_KIND_SUSPEND, 1);
err = tg3_nvram_lock(tp);
tg3_halt_cpu(tp, RX_CPU_BASE);
@@ -12471,25 +12777,25 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
if (tg3_test_registers(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[2] = 1;
+ data[TG3_REGISTER_TEST] = 1;
}
if (tg3_test_memory(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[3] = 1;
+ data[TG3_MEMORY_TEST] = 1;
}
if (doextlpbk)
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
- if (tg3_test_loopback(tp, &data[4], doextlpbk))
+ if (tg3_test_loopback(tp, data, doextlpbk))
etest->flags |= ETH_TEST_FL_FAILED;
tg3_full_unlock(tp);
if (tg3_test_interrupt(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
- data[7] = 1;
+ data[TG3_INTERRUPT_TEST] = 1;
}
tg3_full_lock(tp, 0);
@@ -12512,6 +12818,96 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
+static int tg3_hwtstamp_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct hwtstamp_config stmpconf;
+
+ if (!tg3_flag(tp, PTP_CAPABLE))
+ return -EINVAL;
+
+ if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+ return -EFAULT;
+
+ if (stmpconf.flags)
+ return -EINVAL;
+
+ switch (stmpconf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ tg3_flag_set(tp, TX_TSTAMP_EN);
+ break;
+ case HWTSTAMP_TX_OFF:
+ tg3_flag_clear(tp, TX_TSTAMP_EN);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (stmpconf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tp->rxptpctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+ TG3_RX_PTP_CTL_ALL_V1_EVENTS;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+ TG3_RX_PTP_CTL_SYNC_EVNT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
+ TG3_RX_PTP_CTL_DELAY_REQ;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+ TG3_RX_PTP_CTL_ALL_V2_EVENTS;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+ TG3_RX_PTP_CTL_SYNC_EVNT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+ TG3_RX_PTP_CTL_SYNC_EVNT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+ TG3_RX_PTP_CTL_SYNC_EVNT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
+ TG3_RX_PTP_CTL_DELAY_REQ;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
+ TG3_RX_PTP_CTL_DELAY_REQ;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
+ TG3_RX_PTP_CTL_DELAY_REQ;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(dev) && tp->rxptpctl)
+ tw32(TG3_RX_PTP_CTL,
+ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+
+ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+ -EFAULT : 0;
+}
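For reference, the handler above is reached through the generic SIOCSHWTSTAMP ioctl; a minimal sketch of the userspace side, assuming an already-open AF_INET socket fd and an illustrative interface name (not part of the patch):

	#include <linux/net_tstamp.h>	/* struct hwtstamp_config */
	#include <linux/sockios.h>	/* SIOCSHWTSTAMP */
	#include <net/if.h>		/* struct ifreq, IFNAMSIZ */
	#include <string.h>
	#include <sys/ioctl.h>

	static int request_hw_tstamps(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}

On success the driver echoes the accepted configuration back through the same structure, which is what the copy_to_user() at the end of tg3_hwtstamp_ioctl() implements.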
+
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -12562,6 +12958,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
+ case SIOCSHWTSTAMP:
+ return tg3_hwtstamp_ioctl(dev, ifr, cmd);
+
default:
/* do nothing */
break;
@@ -12663,7 +13062,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.set_rxfh_indir = tg3_set_rxfh_indir,
.get_channels = tg3_get_channels,
.set_channels = tg3_set_channels,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
@@ -12779,7 +13178,7 @@ static const struct net_device_ops tg3_netdev_ops = {
#endif
};
-static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+static void tg3_get_eeprom_size(struct tg3 *tp)
{
u32 cursize, val, magic;
@@ -12813,7 +13212,7 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
tp->nvram_size = cursize;
}
-static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+static void tg3_get_nvram_size(struct tg3 *tp)
{
u32 val;
@@ -12846,7 +13245,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
-static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+static void tg3_get_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -12897,7 +13296,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
}
}
-static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
case FLASH_5752PAGE_SIZE_256:
@@ -12924,7 +13323,7 @@ static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
}
}
-static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
+static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -12965,7 +13364,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
}
}
-static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
u32 nvcfg1, protect = 0;
@@ -13021,7 +13420,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
}
}
-static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -13059,7 +13458,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
}
}
-static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
+static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
u32 nvcfg1, protect = 0;
@@ -13134,14 +13533,14 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
}
}
-static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
tp->nvram_jedecnum = JEDEC_ATMEL;
tg3_flag_set(tp, NVRAM_BUFFERED);
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
-static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
+static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -13214,7 +13613,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
}
-static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
+static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
u32 nvcfg1;
@@ -13292,7 +13691,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
-static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
+static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
u32 nvcfg1, nvmpinstrp;
@@ -13405,7 +13804,7 @@ static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
-static void __devinit tg3_nvram_init(struct tg3 *tp)
+static void tg3_nvram_init(struct tg3 *tp)
{
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
@@ -13475,7 +13874,7 @@ struct subsys_tbl_ent {
u32 phy_id;
};
-static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
+static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
/* Broadcom boards. */
{ TG3PCI_SUBVENDOR_ID_BROADCOM,
TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
@@ -13539,7 +13938,7 @@ static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
-static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
+static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
int i;
@@ -13553,7 +13952,7 @@ static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
return NULL;
}
-static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
u32 val;
@@ -13753,7 +14152,7 @@ done:
device_set_wakeup_capable(&tp->pdev->dev, false);
}
-static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
int i;
u32 val;
@@ -13776,7 +14175,7 @@ static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
* configuration is a 32-bit value that straddles the alignment boundary.
* We do two 32-bit reads and then shift and merge the results.
*/
-static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
u32 bhalf_otp, thalf_otp;
@@ -13802,7 +14201,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
-static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+static void tg3_phy_init_link_config(struct tg3 *tp)
{
u32 adv = ADVERTISED_Autoneg;
@@ -13829,7 +14228,7 @@ static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
tp->old_link = -1;
}
-static int __devinit tg3_phy_probe(struct tg3 *tp)
+static int tg3_phy_probe(struct tg3 *tp)
{
u32 hw_phy_id_1, hw_phy_id_2;
u32 hw_phy_id, hw_phy_id_masked;
@@ -13957,7 +14356,7 @@ skip_phy_reset:
return err;
}
-static void __devinit tg3_read_vpd(struct tg3 *tp)
+static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
unsigned int block_end, rosize, len;
@@ -14026,7 +14425,8 @@ out_not_found:
out_no_vpd:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
strcpy(tp->board_part_number, "BCM5718");
@@ -14077,7 +14477,7 @@ nomatch:
}
}
-static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
u32 val;
@@ -14090,7 +14490,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
return 1;
}
-static void __devinit tg3_read_bc_ver(struct tg3 *tp)
+static void tg3_read_bc_ver(struct tg3 *tp)
{
u32 val, offset, start, ver_offset;
int i, dst_off;
@@ -14142,7 +14542,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
+static void tg3_read_hwsb_ver(struct tg3 *tp)
{
u32 val, major, minor;
@@ -14158,7 +14558,7 @@ static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
-static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
u32 offset, major, minor, build;
@@ -14213,7 +14613,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
}
}
-static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
+static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
u32 val, offset, start;
int i, vlen;
@@ -14265,7 +14665,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_probe_ncsi(struct tg3 *tp)
+static void tg3_probe_ncsi(struct tg3 *tp)
{
u32 apedata;
@@ -14281,7 +14681,7 @@ static void __devinit tg3_probe_ncsi(struct tg3 *tp)
tg3_flag_set(tp, APE_HAS_NCSI);
}
-static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+static void tg3_read_dash_ver(struct tg3 *tp)
{
int vlen;
u32 apedata;
@@ -14304,7 +14704,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
(apedata & APE_FW_VERSION_BLDMSK));
}
-static void __devinit tg3_read_fw_ver(struct tg3 *tp)
+static void tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
bool vpd_vers = false;
@@ -14357,7 +14757,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
{ },
};
-static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
+static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
struct pci_dev *peer;
unsigned int func, devnr = tp->pdev->devfn & ~7;
@@ -14385,7 +14785,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
return peer;
}
-static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
+static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
@@ -14397,6 +14797,7 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tg3_flag_set(tp, CPMU_PRESENT);
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
@@ -14424,6 +14825,9 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
@@ -14462,7 +14866,29 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tg3_flag_set(tp, 5705_PLUS);
}
-static int __devinit tg3_get_invariants(struct tg3 *tp)
+static bool tg3_10_100_only_device(struct tg3 *tp,
+ const struct pci_device_id *ent)
+{
+ u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ return true;
+
+ if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
+ return true;
+ } else {
+ return true;
+ }
+ }
+
+ return false;
+}
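The helper above keys off the .driver_data flags added to tg3_pci_tbl earlier in this patch; the same pci_device_id pattern in isolation, with purely hypothetical names for illustration:

	#include <linux/pci.h>

	#define MY_QUIRK_10_100_ONLY	0x0001	/* hypothetical flag */

	static const struct pci_device_id my_ids[] = {
		{ PCI_DEVICE(0x14e4, 0x1234), .driver_data = MY_QUIRK_10_100_ONLY },
		{ }
	};

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		/* ent points at the table entry that matched this device */
		if (ent->driver_data & MY_QUIRK_10_100_ONLY)
			dev_info(&pdev->dev, "limited to 10/100 operation\n");
		return 0;
	}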
+
+static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
u32 misc_ctrl_reg;
u32 pci_state_reg, grc_misc_cfg;
@@ -15141,22 +15567,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->mac_mode = 0;
- /* these are limited to 10/100 only */
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
- (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
- (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
- (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
- tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
- (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ if (tg3_10_100_only_device(tp, ent))
tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
err = tg3_phy_probe(tp);
@@ -15236,7 +15647,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
#ifdef CONFIG_SPARC
-static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
struct pci_dev *pdev = tp->pdev;
@@ -15253,7 +15664,7 @@ static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
return -ENODEV;
}
-static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
+static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -15263,7 +15674,7 @@ static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
}
#endif
-static int __devinit tg3_get_device_address(struct tg3 *tp)
+static int tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
@@ -15342,7 +15753,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
#define BOUNDARY_SINGLE_CACHELINE 1
#define BOUNDARY_MULTI_CACHELINE 2
-static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
int cacheline_size;
u8 byte;
@@ -15483,7 +15894,8 @@ out:
return val;
}
-static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
+static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
+ int size, int to_device)
{
struct tg3_internal_buffer_desc test_desc;
u32 sram_dma_descs;
@@ -15570,7 +15982,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
{ },
};
-static int __devinit tg3_test_dma(struct tg3 *tp)
+static int tg3_test_dma(struct tg3 *tp)
{
dma_addr_t buf_dma;
u32 *buf, saved_dma_rwctrl;
@@ -15760,7 +16172,7 @@ out_nofree:
return ret;
}
-static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
+static void tg3_init_bufmgr_config(struct tg3 *tp)
{
if (tg3_flag(tp, 57765_PLUS)) {
tp->bufmgr_config.mbuf_read_dma_low_water =
@@ -15816,7 +16228,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
-static char * __devinit tg3_phy_string(struct tg3 *tp)
+static char *tg3_phy_string(struct tg3 *tp)
{
switch (tp->phy_id & TG3_PHY_ID_MASK) {
case TG3_PHY_ID_BCM5400: return "5400";
@@ -15847,7 +16259,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
}
}
-static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+static char *tg3_bus_string(struct tg3 *tp, char *str)
{
if (tg3_flag(tp, PCI_EXPRESS)) {
strcpy(str, "PCI Express");
@@ -15883,7 +16295,7 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
return str;
}
-static void __devinit tg3_init_coal(struct tg3 *tp)
+static void tg3_init_coal(struct tg3 *tp)
{
struct ethtool_coalesce *ec = &tp->coal;
@@ -15914,7 +16326,7 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
}
}
-static int __devinit tg3_init_one(struct pci_dev *pdev,
+static int tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -15970,6 +16382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pm_cap = pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->irq_sync = 1;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
@@ -16013,6 +16426,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
@@ -16034,7 +16448,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
dev->netdev_ops = &tg3_netdev_ops;
dev->irq = pdev->irq;
- err = tg3_get_invariants(tp);
+ err = tg3_get_invariants(tp, ent);
if (err) {
dev_err(&pdev->dev,
"Problem fetching invariants of chip, aborting\n");
@@ -16209,6 +16623,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_flag_set(tp, PTP_CAPABLE);
+
if (tg3_flag(tp, 5717_PLUS)) {
/* Resume a low-power mode */
tg3_frob_aux_power(tp, false);
@@ -16293,7 +16711,7 @@ err_out_disable_pdev:
return err;
}
-static void __devexit tg3_remove_one(struct pci_dev *pdev)
+static void tg3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -16534,8 +16952,8 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, 1);
- tg3_full_unlock(tp);
if (err) {
+ tg3_full_unlock(tp);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -16546,6 +16964,8 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
+ tg3_full_unlock(tp);
+
tg3_phy_start(tp);
done:
@@ -16562,7 +16982,7 @@ static struct pci_driver tg3_driver = {
.name = DRV_MODULE_NAME,
.id_table = tg3_pci_tbl,
.probe = tg3_init_one,
- .remove = __devexit_p(tg3_remove_one),
+ .remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
.driver.pm = TG3_PM_OPS,
};
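
Note on the tg3_io_resume() hunk above: the unconditional tg3_full_unlock() that used to sit between tg3_restart_hw() and the error check is replaced by an unlock on the error path plus a second unlock after tg3_netif_start(), so the full lock is now held until the interface is restarted. A minimal sketch of the resulting pattern, assuming the helpers behave as they do elsewhere in this driver; the sketch function name is illustrative, not the driver's:

/* Illustrative only: the corrected lock scope in the I/O resume path. */
static void io_resume_lock_sketch(struct tg3 *tp, struct net_device *netdev)
{
	int err;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);	/* error path drops the lock early */
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		return;
	}
	/* timer and IRQ re-arm elided */
	tg3_netif_start(tp);
	tg3_full_unlock(tp);		/* lock released only after netif start */
	tg3_phy_start(tp);		/* runs outside the full lock, as above */
}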
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d9308c32102e..d330e81f5793 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -44,12 +44,14 @@
#define TG3PCI_DEVICE_TIGON3_5761S 0x1688
#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689
#define TG3PCI_DEVICE_TIGON3_57780 0x1692
+#define TG3PCI_DEVICE_TIGON3_5787M 0x1693
#define TG3PCI_DEVICE_TIGON3_57760 0x1690
#define TG3PCI_DEVICE_TIGON3_57790 0x1694
#define TG3PCI_DEVICE_TIGON3_57788 0x1691
#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
#define TG3PCI_DEVICE_TIGON3_5717 0x1655
+#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665
#define TG3PCI_DEVICE_TIGON3_5718 0x1656
#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
@@ -95,6 +97,10 @@
#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_A 0x0601
+#define TG3PCI_SUBDEVICE_ID_ACER_57780_B 0x0612
+#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M 0x3056
+
/* 0x30 --> 0x64 unused */
#define TG3PCI_MSI_DATA 0x00000064
/* 0x66 --> 0x68 unused */
@@ -149,6 +155,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_5717_C0 0x05717200
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
@@ -765,7 +772,10 @@
#define SG_DIG_MAC_ACK_STATUS 0x00000004
#define SG_DIG_AUTONEG_COMPLETE 0x00000002
#define SG_DIG_AUTONEG_ERROR 0x00000001
-/* 0x5b8 --> 0x600 unused */
+#define TG3_TX_TSTAMP_LSB 0x000005c0
+#define TG3_TX_TSTAMP_MSB 0x000005c4
+#define TG3_TSTAMP_MASK 0x7fffffffffffffff
+/* 0x5c8 --> 0x600 unused */
#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
/* 0x624 --> 0x670 unused */
@@ -782,7 +792,36 @@
#define MAC_RSS_HASH_KEY_7 0x0000068c
#define MAC_RSS_HASH_KEY_8 0x00000690
#define MAC_RSS_HASH_KEY_9 0x00000694
-/* 0x698 --> 0x800 unused */
+/* 0x698 --> 0x6b0 unused */
+
+#define TG3_RX_TSTAMP_LSB 0x000006b0
+#define TG3_RX_TSTAMP_MSB 0x000006b4
+/* 0x6b8 --> 0x6c8 unused */
+
+#define TG3_RX_PTP_CTL 0x000006c8
+#define TG3_RX_PTP_CTL_SYNC_EVNT 0x00000001
+#define TG3_RX_PTP_CTL_DELAY_REQ 0x00000002
+#define TG3_RX_PTP_CTL_PDLAY_REQ 0x00000004
+#define TG3_RX_PTP_CTL_PDLAY_RES 0x00000008
+#define TG3_RX_PTP_CTL_ALL_V1_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \
+ TG3_RX_PTP_CTL_DELAY_REQ)
+#define TG3_RX_PTP_CTL_ALL_V2_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \
+ TG3_RX_PTP_CTL_DELAY_REQ | \
+ TG3_RX_PTP_CTL_PDLAY_REQ | \
+ TG3_RX_PTP_CTL_PDLAY_RES)
+#define TG3_RX_PTP_CTL_FOLLOW_UP 0x00000100
+#define TG3_RX_PTP_CTL_DELAY_RES 0x00000200
+#define TG3_RX_PTP_CTL_PDRES_FLW_UP 0x00000400
+#define TG3_RX_PTP_CTL_ANNOUNCE 0x00000800
+#define TG3_RX_PTP_CTL_SIGNALING 0x00001000
+#define TG3_RX_PTP_CTL_MANAGEMENT 0x00002000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN 0x00800000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN 0x01000000
+#define TG3_RX_PTP_CTL_RX_PTP_V2_EN (TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \
+ TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN)
+#define TG3_RX_PTP_CTL_RX_PTP_V1_EN 0x02000000
+#define TG3_RX_PTP_CTL_HWTS_INTERLOCK 0x04000000
+/* 0x6cc --> 0x800 unused */
#define MAC_TX_STATS_OCTETS 0x00000800
#define MAC_TX_STATS_RESV1 0x00000804
@@ -1662,6 +1701,7 @@
#define GRC_MODE_HOST_STACKUP 0x00010000
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_HTX2B_ENABLE 0x00040000
+#define GRC_MODE_TIME_SYNC_ENABLE 0x00080000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
#define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1764,7 +1804,17 @@
#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000
#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
-/* 0x6c00 --> 0x7000 unused */
+#define TG3_EAV_REF_CLCK_LSB 0x00006900
+#define TG3_EAV_REF_CLCK_MSB 0x00006904
+#define TG3_EAV_REF_CLCK_CTL 0x00006908
+#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
+#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
+#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
+#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
+#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
+
+#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
+/* 0x690c --> 0x7000 unused */
/* NVRAM Control registers */
#define NVRAM_CMD 0x00007000
@@ -2432,6 +2482,7 @@ struct tg3_tx_buffer_desc {
#define TXD_FLAG_IP_FRAG 0x0008
#define TXD_FLAG_JMB_PKT 0x0008
#define TXD_FLAG_IP_FRAG_END 0x0010
+#define TXD_FLAG_HWTSTAMP 0x0020
#define TXD_FLAG_VLAN 0x0040
#define TXD_FLAG_COAL_NOW 0x0080
#define TXD_FLAG_CPU_PRE_DMA 0x0100
@@ -2473,6 +2524,9 @@ struct tg3_rx_buffer_desc {
#define RXD_FLAG_IP_CSUM 0x1000
#define RXD_FLAG_TCPUDP_CSUM 0x2000
#define RXD_FLAG_IS_TCP 0x4000
+#define RXD_FLAG_PTPSTAT_MASK 0x0210
+#define RXD_FLAG_PTPSTAT_PTPV1 0x0010
+#define RXD_FLAG_PTPSTAT_PTPV2 0x0200
u32 ip_tcp_csum;
#define RXD_IPCSUM_MASK 0xffff0000
@@ -2963,9 +3017,11 @@ enum TG3_FLAGS {
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_TX_TSTAMP_EN,
TG3_FLAG_4K_FIFO_LIMIT,
TG3_FLAG_5719_RDMA_BUG,
TG3_FLAG_RESET_TASK_PENDING,
+ TG3_FLAG_PTP_CAPABLE,
TG3_FLAG_5705_PLUS,
TG3_FLAG_IS_5788,
TG3_FLAG_5750_PLUS,
@@ -3034,6 +3090,10 @@ struct tg3 {
u32 coal_now;
u32 msg_enable;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ s64 ptp_adjust;
+
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
@@ -3101,6 +3161,7 @@ struct tg3 {
u32 dma_rwctrl;
u32 coalesce_mode;
u32 pwrmgmt_thresh;
+ u32 rxptpctl;
/* PCI block */
u32 pci_chip_rev_id;
@@ -3262,6 +3323,7 @@ struct tg3 {
#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon_dev;
#endif
+ bool link_up;
};
#endif /* !(_T3_H) */
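
The new TG3_TX_TSTAMP_LSB/MSB, TG3_RX_TSTAMP_LSB/MSB and TG3_TSTAMP_MASK definitions describe a 64-bit hardware timestamp split across two 32-bit registers, with the top bit masked off. A hedged sketch of how such a value could be assembled, reusing tr32() which this diff already uses as the register read helper; the helper name below is hypothetical:

/* Hypothetical helper: combine a split 32/32 hardware timestamp. */
static u64 tg3_read_tstamp_sketch(struct tg3 *tp, u32 lsb_reg, u32 msb_reg)
{
	u64 stamp;

	stamp  = tr32(lsb_reg);			/* e.g. TG3_TX_TSTAMP_LSB */
	stamp |= (u64)tr32(msb_reg) << 32;	/* e.g. TG3_TX_TSTAMP_MSB */
	return stamp & TG3_TSTAMP_MASK;		/* top bit is reserved */
}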
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 959c58ef972a..3227fdde521b 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2273,7 +2273,6 @@ bfa_ioc_get_type(struct bfa_ioc *ioc)
static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
- memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
memcpy(serial_num,
(void *)ioc->attr->brcd_serialnum,
BFA_ADAPTER_SERIAL_NUM_LEN);
@@ -2282,7 +2281,6 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
- memset(fw_ver, 0, BFA_VERSION_LEN);
memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
@@ -2304,7 +2302,6 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
- memset(optrom_ver, 0, BFA_VERSION_LEN);
memcpy(optrom_ver, ioc->attr->optrom_version,
BFA_VERSION_LEN);
}
@@ -2312,7 +2309,6 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index eef6e1f8aecc..7d10e335c27d 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -787,6 +787,7 @@ struct bfi_enet_stats_bpc {
/* MAC Rx Statistics */
struct bfi_enet_stats_mac {
+ u64 stats_clr_cnt; /* number of times stats were cleared */
u64 frame_64; /* both rx and tx counter */
u64 frame_65_127; /* both rx and tx counter */
u64 frame_128_255; /* both rx and tx counter */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index ede532b4e9db..25dae757e9c4 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -138,6 +138,8 @@ do { \
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)
+
#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
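
BNA_QE_INDX_INC is a single-step wrapper around BNA_QE_INDX_ADD; both assume the queue depth is a power of two so the wrap-around reduces to a mask. A small illustration (the depth value is chosen only for the example):

u32 idx = 0, q_depth = 64;		/* q_depth must be a power of two */

BNA_QE_INDX_ADD(idx, 3, q_depth);	/* idx == 3 */
BNA_QE_INDX_INC(idx, q_depth);		/* idx == 4 */

idx = q_depth - 1;
BNA_QE_INDX_INC(idx, q_depth);		/* wraps back to 0 */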
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index b8c4e21fbf4c..af3f7bb0b3b8 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -46,7 +46,8 @@
#define BFI_MAX_INTERPKT_COUNT 0xFF
#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
-#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */
+#define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */
#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 71144b396e02..ea6f4a036401 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1355,6 +1355,8 @@ bfa_fsm_state_decl(bna_rx, stopped,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, start_stop_wait,
+ struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
@@ -1432,7 +1434,7 @@ static void bna_rx_sm_start_wait(struct bna_rx *rx,
{
switch (event) {
case RX_E_STOP:
- bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
+ bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
break;
case RX_E_FAIL:
@@ -1488,6 +1490,29 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
}
+static void
+bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ case RX_E_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ case RX_E_STARTED:
+ bna_rx_enet_stop(rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
@@ -1908,6 +1933,9 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
+ u8 *kva;
+ u64 dma;
+ struct bna_dma_addr bna_dma;
int i;
rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -1917,13 +1945,21 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq,
rxq->qpt.page_size = page_size;
rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+ rxq->rcb->sw_q = page_mem->kva;
+
+ kva = page_mem->kva;
+ BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < rxq->qpt.page_count; i++) {
- rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ rxq->rcb->sw_qpt[i] = kva;
+ kva += PAGE_SIZE;
+
+ BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
+ bna_dma.lsb;
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
+ bna_dma.msb;
+ dma += PAGE_SIZE;
}
}
@@ -1935,6 +1971,9 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
+ u8 *kva;
+ u64 dma;
+ struct bna_dma_addr bna_dma;
int i;
rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -1944,14 +1983,21 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp,
rxp->cq.qpt.page_size = page_size;
rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+ rxp->cq.ccb->sw_q = page_mem->kva;
+
+ kva = page_mem->kva;
+ BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < rxp->cq.qpt.page_count; i++) {
- rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+ rxp->cq.ccb->sw_qpt[i] = kva;
+ kva += PAGE_SIZE;
+ BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
+ bna_dma.lsb;
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
+ bna_dma.msb;
+ dma += PAGE_SIZE;
}
}
@@ -2250,8 +2296,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = cpage_count * q_cfg->num_paths;
+ mem_info->len = PAGE_SIZE * cpage_count;
+ mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
@@ -2268,8 +2314,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = dpage_count * q_cfg->num_paths;
+ mem_info->len = PAGE_SIZE * dpage_count;
+ mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
@@ -2286,8 +2332,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = (hpage_count ? PAGE_SIZE : 0);
- mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+ mem_info->len = PAGE_SIZE * hpage_count;
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
@@ -2332,7 +2378,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
- int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
+ int i;
int dpage_count, hpage_count, rcb_idx;
if (!bna_rx_res_check(rx_mod, rx_cfg))
@@ -2352,14 +2398,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
- page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
+ PAGE_SIZE;
- dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
+ PAGE_SIZE;
- hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
- rx_cfg->num_paths;
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
+ PAGE_SIZE;
rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
rx->bna = bna;
@@ -2446,10 +2492,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
- &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
- q0->rcb->page_idx = dpage_idx;
- q0->rcb->page_count = dpage_count;
- dpage_idx += dpage_count;
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q0->rcb);
@@ -2475,10 +2518,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
&hqpt_mem[i], &hsqpt_mem[i],
- &hpage_mem[hpage_idx]);
- q1->rcb->page_idx = hpage_idx;
- q1->rcb->page_count = hpage_count;
- hpage_idx += hpage_count;
+ &hpage_mem[i]);
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q1->rcb);
@@ -2510,10 +2550,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
rxp->cq.ccb->id = i;
bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
- &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
- rxp->cq.ccb->page_idx = cpage_idx;
- rxp->cq.ccb->page_count = page_count;
- cpage_idx += page_count;
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
if (rx->ccb_setup_cbfn)
rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
@@ -3230,6 +3267,9 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
+ u8 *kva;
+ u64 dma;
+ struct bna_dma_addr bna_dma;
int i;
txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -3239,14 +3279,21 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
txq->qpt.page_size = page_size;
txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+ txq->tcb->sw_q = page_mem->kva;
+
+ kva = page_mem->kva;
+ BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < page_count; i++) {
- txq->tcb->sw_qpt[i] = page_mem[i].kva;
+ txq->tcb->sw_qpt[i] = kva;
+ kva += PAGE_SIZE;
+ BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
- page_mem[i].dma.lsb;
+ bna_dma.lsb;
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
- page_mem[i].dma.msb;
+ bna_dma.msb;
+ dma += PAGE_SIZE;
}
}
@@ -3430,8 +3477,8 @@ bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
- mem_info->len = PAGE_SIZE;
- mem_info->num = num_txq * page_count;
+ mem_info->len = PAGE_SIZE * page_count;
+ mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
@@ -3457,14 +3504,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_txq *txq;
struct list_head *qe;
int page_count;
- int page_size;
- int page_idx;
int i;
intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
- page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
- tx_cfg->num_txq;
- page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
+ PAGE_SIZE;
/**
* Get resources
@@ -3529,7 +3573,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
/* TxQ */
i = 0;
- page_idx = 0;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->tcb = (struct bna_tcb *)
@@ -3551,7 +3594,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
if (intr_info->intr_type == BNA_INTR_T_INTX)
txq->ib.intr_vector = (1 << txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
- txq->ib.interpkt_timeo = 0; /* Not used */
+ txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
/* TCB */
@@ -3569,14 +3612,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
txq->tcb->id = i;
/* QPT, SWQPT, Pages */
- bna_txq_qpt_setup(txq, page_count, page_size,
+ bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_PAGE].
- res_u.mem_info.mdl[page_idx]);
- txq->tcb->page_idx = page_idx;
- txq->tcb->page_count = page_count;
- page_idx += page_count;
+ res_u.mem_info.mdl[i]);
/* Callback to bnad for setting up TCB */
if (tx->tcb_setup_cbfn)
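
The bna_tx_rx.c changes above replace the one-page-per-mdl[] layout with one contiguous multi-page allocation per queue path: the res_info len becomes PAGE_SIZE * page_count, num becomes the number of paths, and the QPT setup walks the single allocation with kva/dma cursors instead of indexing page_mem[i]. A condensed sketch of that fill loop, where sw_qpt and qpt stand in for the per-queue arrays named in the diff:

/* Sketch of the new QPT fill: one contiguous allocation per path,
 * walked page by page (compare bna_txq_qpt_setup/bna_rxq_qpt_setup above).
 */
u8 *kva = page_mem->kva;
u64 dma;
struct bna_dma_addr bna_dma;
int i;

BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < page_count; i++) {
	sw_qpt[i] = kva;			/* host-side page pointer */
	kva += PAGE_SIZE;

	BNA_SET_DMA_ADDR(dma, &bna_dma);	/* split into lsb/msb halves */
	qpt[i].lsb = bna_dma.lsb;
	qpt[i].msb = bna_dma.msb;
	dma += PAGE_SIZE;
}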
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d3eb8bddfb2a..dc50f7836b6d 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -430,6 +430,7 @@ struct bna_ib {
struct bna_tcb {
/* Fast path */
void **sw_qpt;
+ void *sw_q;
void *unmap_q;
u32 producer_index;
u32 consumer_index;
@@ -437,8 +438,6 @@ struct bna_tcb {
u32 q_depth;
void __iomem *q_dbell;
struct bna_ib_dbell *i_dbell;
- int page_idx;
- int page_count;
/* Control path */
struct bna_txq *txq;
struct bnad *bnad;
@@ -563,13 +562,12 @@ struct bna_tx_mod {
struct bna_rcb {
/* Fast path */
void **sw_qpt;
+ void *sw_q;
void *unmap_q;
u32 producer_index;
u32 consumer_index;
u32 q_depth;
void __iomem *q_dbell;
- int page_idx;
- int page_count;
/* Control path */
struct bna_rxq *rxq;
struct bna_ccb *ccb;
@@ -626,6 +624,7 @@ struct bna_pkt_rate {
struct bna_ccb {
/* Fast path */
void **sw_qpt;
+ void *sw_q;
u32 producer_index;
volatile u32 *hw_producer_index;
u32 q_depth;
@@ -633,8 +632,6 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
- int page_idx;
- int page_count;
/* Control path */
struct bna_cq *cq;
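
The new sw_q member added to bna_tcb, bna_rcb and bna_ccb points at the start of the contiguous queue memory, so the fast path can index entries directly rather than walking the sw_qpt page pointers, for example:

/* Direct indexing into the flat queue memory, as used in bnad.c below */
struct bna_rxq_entry *rxent =
	&((struct bna_rxq_entry *)rcb->sw_q)[rcb->producer_index];
struct bna_cq_entry *cmpl =
	&((struct bna_cq_entry *)ccb->sw_q)[ccb->producer_index];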
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ce1eac529470..7cce42dc2f20 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -61,23 +61,17 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
* Local MACROS
*/
-#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
-
-#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
-
#define BNAD_GET_MBOX_IRQ(_bnad) \
(((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
((_bnad)->pcidev->irq))
-#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
do { \
(_res_info)->res_type = BNA_RES_T_MEM; \
(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
(_res_info)->res_u.mem_info.num = (_num); \
- (_res_info)->res_u.mem_info.len = \
- sizeof(struct bnad_unmap_q) + \
- (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+ (_res_info)->res_u.mem_info.len = (_size); \
} while (0)
static void
@@ -103,48 +97,58 @@ bnad_remove_from_list(struct bnad *bnad)
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
- struct bna_cq_entry *cmpl, *next_cmpl;
- unsigned int wi_range, wis = 0, ccb_prod = 0;
+ struct bna_cq_entry *cmpl;
int i;
- BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
- wi_range);
-
for (i = 0; i < ccb->q_depth; i++) {
- wis++;
- if (likely(--wi_range))
- next_cmpl = cmpl + 1;
- else {
- BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
- wis = 0;
- BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
- next_cmpl, wi_range);
- }
+ cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
cmpl->valid = 0;
- cmpl = next_cmpl;
}
}
+/* Tx Datapath functions */
+
+
+/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
-bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
- u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+bnad_tx_buff_unmap(struct bnad *bnad,
+ struct bnad_tx_unmap *unmap_q,
+ u32 q_depth, u32 index)
{
- int j;
- array[index].skb = NULL;
-
- dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
- skb_headlen(skb), DMA_TO_DEVICE);
- dma_unmap_addr_set(&array[index], dma_addr, 0);
- BNA_QE_INDX_ADD(index, 1, depth);
+ struct bnad_tx_unmap *unmap;
+ struct sk_buff *skb;
+ int vector, nvecs;
+
+ unmap = &unmap_q[index];
+ nvecs = unmap->nvecs;
+
+ skb = unmap->skb;
+ unmap->skb = NULL;
+ unmap->nvecs = 0;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vectors[0], dma_addr),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
+ nvecs--;
+
+ vector = 0;
+ while (nvecs) {
+ vector++;
+ if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
+ vector = 0;
+ BNA_QE_INDX_INC(index, q_depth);
+ unmap = &unmap_q[index];
+ }
- for (j = 0; j < frag; j++) {
- dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&array[index], dma_addr, 0);
- BNA_QE_INDX_ADD(index, 1, depth);
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vectors[vector], dma_addr),
+ skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
+ nvecs--;
}
+ BNA_QE_INDX_INC(index, q_depth);
+
return index;
}
@@ -154,80 +158,64 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
* so DMA unmap & freeing is fine.
*/
static void
-bnad_txq_cleanup(struct bnad *bnad,
- struct bna_tcb *tcb)
+bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
- u32 unmap_cons;
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
- struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb = NULL;
- int q;
-
- unmap_array = unmap_q->unmap_array;
+ struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
+ struct sk_buff *skb;
+ int i;
- for (q = 0; q < unmap_q->q_depth; q++) {
- skb = unmap_array[q].skb;
+ for (i = 0; i < tcb->q_depth; i++) {
+ skb = unmap_q[i].skb;
if (!skb)
continue;
-
- unmap_cons = q;
- unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
- unmap_cons, unmap_q->q_depth, skb,
- skb_shinfo(skb)->nr_frags);
+ bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
dev_kfree_skb_any(skb);
}
}
-/* Data Path Handlers */
-
/*
* bnad_txcmpl_process : Frees the Tx bufs on Tx completion
* Can be called in a) Interrupt context
* b) Sending context
*/
static u32
-bnad_txcmpl_process(struct bnad *bnad,
- struct bna_tcb *tcb)
+bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
- u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
- u16 wis, updated_hw_cons;
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
- struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb;
+ u32 sent_packets = 0, sent_bytes = 0;
+ u32 wis, unmap_wis, hw_cons, cons, q_depth;
+ struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
+ struct bnad_tx_unmap *unmap;
+ struct sk_buff *skb;
/* Just return if TX is stopped */
if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
return 0;
- updated_hw_cons = *(tcb->hw_consumer_index);
-
- wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
- updated_hw_cons, tcb->q_depth);
+ hw_cons = *(tcb->hw_consumer_index);
+ cons = tcb->consumer_index;
+ q_depth = tcb->q_depth;
+ wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
- unmap_array = unmap_q->unmap_array;
- unmap_cons = unmap_q->consumer_index;
-
- prefetch(&unmap_array[unmap_cons + 1]);
while (wis) {
- skb = unmap_array[unmap_cons].skb;
+ unmap = &unmap_q[cons];
+
+ skb = unmap->skb;
sent_packets++;
sent_bytes += skb->len;
- wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
- unmap_cons, unmap_q->q_depth, skb,
- skb_shinfo(skb)->nr_frags);
+ unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
+ wis -= unmap_wis;
+ cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
dev_kfree_skb_any(skb);
}
/* Update consumer pointers. */
- tcb->consumer_index = updated_hw_cons;
- unmap_q->consumer_index = unmap_cons;
+ tcb->consumer_index = hw_cons;
tcb->txq->tx_packets += sent_packets;
tcb->txq->tx_bytes += sent_bytes;
@@ -278,133 +266,306 @@ bnad_msix_tx(int irq, void *data)
return IRQ_HANDLED;
}
-static void
-bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
+static inline void
+bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
- struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+
+ unmap_q->reuse_pi = -1;
+ unmap_q->alloc_order = -1;
+ unmap_q->map_size = 0;
+ unmap_q->type = BNAD_RXBUF_NONE;
+}
+
+/* Default is page-based allocation. Multi-buffer support - TBD */
+static int
+bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+ int mtu, order;
+
+ bnad_rxq_alloc_uninit(bnad, rcb);
+
+ mtu = bna_enet_mtu_get(&bnad->bna.enet);
+ order = get_order(mtu);
+
+ if (bna_is_small_rxq(rcb->id)) {
+ unmap_q->alloc_order = 0;
+ unmap_q->map_size = rcb->rxq->buffer_size;
+ } else {
+ unmap_q->alloc_order = order;
+ unmap_q->map_size =
+ (rcb->rxq->buffer_size > 2048) ?
+ PAGE_SIZE << order : 2048;
+ }
+
+ BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
+
+ unmap_q->type = BNAD_RXBUF_PAGE;
+
+ return 0;
+}
+
+static inline void
+bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
+{
+ if (!unmap->page)
+ return;
+
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vector, dma_addr),
+ unmap->vector.len, DMA_FROM_DEVICE);
+ put_page(unmap->page);
+ unmap->page = NULL;
+ dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
+ unmap->vector.len = 0;
+}
- rcb->producer_index = 0;
- rcb->consumer_index = 0;
+static inline void
+bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
+{
+ if (!unmap->skb)
+ return;
- unmap_q->producer_index = 0;
- unmap_q->consumer_index = 0;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vector, dma_addr),
+ unmap->vector.len, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(unmap->skb);
+ unmap->skb = NULL;
+ dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
+ unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
- struct bnad_unmap_q *unmap_q;
- struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb;
- int unmap_cons;
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+ int i;
- unmap_q = rcb->unmap_q;
- unmap_array = unmap_q->unmap_array;
- for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
- skb = unmap_array[unmap_cons].skb;
- if (!skb)
- continue;
- unmap_array[unmap_cons].skb = NULL;
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- rcb->rxq->buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ for (i = 0; i < rcb->q_depth; i++) {
+ struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
+
+ if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
+ bnad_rxq_cleanup_page(bnad, unmap);
+ else
+ bnad_rxq_cleanup_skb(bnad, unmap);
}
- bnad_rcb_cleanup(bnad, rcb);
+ bnad_rxq_alloc_uninit(bnad, rcb);
}
-static void
-bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
+static u32
+bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
- u16 to_alloc, alloced, unmap_prod, wi_range;
- struct bnad_unmap_q *unmap_q = rcb->unmap_q;
- struct bnad_skb_unmap *unmap_array;
+ u32 alloced, prod, q_depth;
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_rx_unmap *unmap, *prev;
struct bna_rxq_entry *rxent;
- struct sk_buff *skb;
+ struct page *page;
+ u32 page_offset, alloc_size;
dma_addr_t dma_addr;
+ prod = rcb->producer_index;
+ q_depth = rcb->q_depth;
+
+ alloc_size = PAGE_SIZE << unmap_q->alloc_order;
alloced = 0;
- to_alloc =
- BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
- unmap_array = unmap_q->unmap_array;
- unmap_prod = unmap_q->producer_index;
+ while (nalloc--) {
+ unmap = &unmap_q->unmap[prod];
- BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+ if (unmap_q->reuse_pi < 0) {
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ unmap_q->alloc_order);
+ page_offset = 0;
+ } else {
+ prev = &unmap_q->unmap[unmap_q->reuse_pi];
+ page = prev->page;
+ page_offset = prev->page_offset + unmap_q->map_size;
+ get_page(page);
+ }
+
+ if (unlikely(!page)) {
+ BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ rcb->rxq->rxbuf_alloc_failed++;
+ goto finishing;
+ }
+
+ dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
+ unmap_q->map_size, DMA_FROM_DEVICE);
+
+ unmap->page = page;
+ unmap->page_offset = page_offset;
+ dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
+ unmap->vector.len = unmap_q->map_size;
+ page_offset += unmap_q->map_size;
+
+ if (page_offset < alloc_size)
+ unmap_q->reuse_pi = prod;
+ else
+ unmap_q->reuse_pi = -1;
+
+ rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_INC(prod, q_depth);
+ alloced++;
+ }
+
+finishing:
+ if (likely(alloced)) {
+ rcb->producer_index = prod;
+ smp_mb();
+ if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
+ bna_rxq_prod_indx_doorbell(rcb);
+ }
+
+ return alloced;
+}
+
+static u32
+bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
+{
+ u32 alloced, prod, q_depth, buff_sz;
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_rx_unmap *unmap;
+ struct bna_rxq_entry *rxent;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ buff_sz = rcb->rxq->buffer_size;
+ prod = rcb->producer_index;
+ q_depth = rcb->q_depth;
+
+ alloced = 0;
+ while (nalloc--) {
+ unmap = &unmap_q->unmap[prod];
+
+ skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
- while (to_alloc--) {
- if (!wi_range)
- BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
- wi_range);
- skb = netdev_alloc_skb_ip_align(bnad->netdev,
- rcb->rxq->buffer_size);
if (unlikely(!skb)) {
BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
- unmap_array[unmap_prod].skb = skb;
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
- rcb->rxq->buffer_size,
- DMA_FROM_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
- dma_addr);
- BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
- BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ buff_sz, DMA_FROM_DEVICE);
+
+ unmap->skb = skb;
+ dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
+ unmap->vector.len = buff_sz;
- rxent++;
- wi_range--;
+ rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_INC(prod, q_depth);
alloced++;
}
finishing:
if (likely(alloced)) {
- unmap_q->producer_index = unmap_prod;
- rcb->producer_index = unmap_prod;
+ rcb->producer_index = prod;
smp_mb();
if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
bna_rxq_prod_indx_doorbell(rcb);
}
+
+ return alloced;
}
static inline void
-bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
- struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
+ u32 to_alloc;
- if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
- if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
- >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
- bnad_rxq_post(bnad, rcb);
- smp_mb__before_clear_bit();
- clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
+ if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
+ return;
+
+ if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
+ bnad_rxq_refill_page(bnad, rcb, to_alloc);
+ else
+ bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+}
+
+#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
+ BNA_CQ_EF_IPV6 | \
+ BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
+ BNA_CQ_EF_L4_CKSUM_OK)
+
+#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
+ BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
+#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
+ BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
+#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
+ BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
+#define flags_udp6 (BNA_CQ_EF_IPV6 | \
+ BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
+
+static inline struct sk_buff *
+bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
+ struct bnad_rx_unmap_q *unmap_q,
+ struct bnad_rx_unmap *unmap,
+ u32 length, u32 flags)
+{
+ struct bnad *bnad = rx_ctrl->bnad;
+ struct sk_buff *skb;
+
+ if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
+ skb = napi_get_frags(&rx_ctrl->napi);
+ if (unlikely(!skb))
+ return NULL;
+
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vector, dma_addr),
+ unmap->vector.len, DMA_FROM_DEVICE);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ unmap->page, unmap->page_offset, length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+
+ unmap->page = NULL;
+ unmap->vector.len = 0;
+
+ return skb;
}
+
+ skb = unmap->skb;
+ BUG_ON(!skb);
+
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap->vector, dma_addr),
+ unmap->vector.len, DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+
+ skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+ unmap->skb = NULL;
+ unmap->vector.len = 0;
+ return skb;
}
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
- struct bna_cq_entry *cmpl, *next_cmpl;
+ struct bna_cq_entry *cq, *cmpl;
struct bna_rcb *rcb = NULL;
- unsigned int wi_range, packets = 0, wis = 0;
- struct bnad_unmap_q *unmap_q;
- struct bnad_skb_unmap *unmap_array;
+ struct bnad_rx_unmap_q *unmap_q;
+ struct bnad_rx_unmap *unmap;
struct sk_buff *skb;
- u32 flags, unmap_cons;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
- struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
-
- if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
- return 0;
+ struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
+ u32 packets = 0, length = 0, flags, masked_flags;
prefetch(bnad->netdev);
- BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
- wi_range);
- BUG_ON(!(wi_range <= ccb->q_depth));
- while (cmpl->valid && packets < budget) {
+
+ cq = ccb->sw_q;
+ cmpl = &cq[ccb->producer_index];
+
+ while (cmpl->valid && (packets < budget)) {
packets++;
+ flags = ntohl(cmpl->flags);
+ length = ntohs(cmpl->length);
BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
if (bna_is_small_rxq(cmpl->rxq_id))
@@ -413,83 +574,63 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[0];
unmap_q = rcb->unmap_q;
- unmap_array = unmap_q->unmap_array;
- unmap_cons = unmap_q->consumer_index;
-
- skb = unmap_array[unmap_cons].skb;
- BUG_ON(!(skb));
- unmap_array[unmap_cons].skb = NULL;
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- rcb->rxq->buffer_size,
- DMA_FROM_DEVICE);
- BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
-
- /* Should be more efficient ? Performance ? */
- BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
-
- wis++;
- if (likely(--wi_range))
- next_cmpl = cmpl + 1;
- else {
- BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
- wis = 0;
- BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
- next_cmpl, wi_range);
- BUG_ON(!(wi_range <= ccb->q_depth));
- }
- prefetch(next_cmpl);
+ unmap = &unmap_q->unmap[rcb->consumer_index];
+
+ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+ BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
+ bnad_rxq_cleanup_page(bnad, unmap);
+ else
+ bnad_rxq_cleanup_skb(bnad, unmap);
- flags = ntohl(cmpl->flags);
- if (unlikely
- (flags &
- (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
- BNA_CQ_EF_TOO_LONG))) {
- dev_kfree_skb_any(skb);
rcb->rxq->rx_packets_with_error++;
goto next;
}
- skb_put(skb, ntohs(cmpl->length));
+ skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
+ length, flags);
+
+ if (unlikely(!skb))
+ break;
+
+ masked_flags = flags & flags_cksum_prot_mask;
+
if (likely
((bnad->netdev->features & NETIF_F_RXCSUM) &&
- (((flags & BNA_CQ_EF_IPV4) &&
- (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
- (flags & BNA_CQ_EF_IPV6)) &&
- (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
- (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+ ((masked_flags == flags_tcp4) ||
+ (masked_flags == flags_udp4) ||
+ (masked_flags == flags_tcp6) ||
+ (masked_flags == flags_udp6))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
rcb->rxq->rx_packets++;
- rcb->rxq->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, bnad->netdev);
+ rcb->rxq->rx_bytes += length;
if (flags & BNA_CQ_EF_VLAN)
__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ctrl->napi, skb);
+ if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
+ napi_gro_frags(&rx_ctrl->napi);
else
netif_receive_skb(skb);
next:
cmpl->valid = 0;
- cmpl = next_cmpl;
+ BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
+ BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+ cmpl = &cq[ccb->producer_index];
}
- BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
-
+ napi_gro_flush(&rx_ctrl->napi, false);
if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
bna_ib_ack_disable_irq(ccb->i_dbell, packets);
- bnad_refill_rxq(bnad, ccb->rcb[0]);
+ bnad_rxq_post(bnad, ccb->rcb[0]);
if (ccb->rcb[1])
- bnad_refill_rxq(bnad, ccb->rcb[1]);
-
- clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+ bnad_rxq_post(bnad, ccb->rcb[1]);
return packets;
}
@@ -764,12 +905,9 @@ bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
struct bnad_tx_info *tx_info =
(struct bnad_tx_info *)tcb->txq->tx->priv;
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ tcb->priv = tcb;
tx_info->tcb[tcb->id] = tcb;
- unmap_q->producer_index = 0;
- unmap_q->consumer_index = 0;
- unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}
static void
@@ -783,16 +921,6 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
}
static void
-bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
-{
- struct bnad_unmap_q *unmap_q = rcb->unmap_q;
-
- unmap_q->producer_index = 0;
- unmap_q->consumer_index = 0;
- unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
-}
-
-static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_info *rx_info =
@@ -878,10 +1006,9 @@ bnad_tx_cleanup(struct delayed_work *work)
struct bnad_tx_info *tx_info =
container_of(work, struct bnad_tx_info, tx_cleanup_work);
struct bnad *bnad = NULL;
- struct bnad_unmap_q *unmap_q;
struct bna_tcb *tcb;
unsigned long flags;
- uint32_t i, pending = 0;
+ u32 i, pending = 0;
for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
tcb = tx_info->tcb[i];
@@ -897,10 +1024,6 @@ bnad_tx_cleanup(struct delayed_work *work)
bnad_txq_cleanup(bnad, tcb);
- unmap_q = tcb->unmap_q;
- unmap_q->producer_index = 0;
- unmap_q->consumer_index = 0;
-
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}
@@ -916,7 +1039,6 @@ bnad_tx_cleanup(struct delayed_work *work)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
-
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
@@ -965,7 +1087,7 @@ bnad_rx_cleanup(void *work)
struct bnad_rx_ctrl *rx_ctrl;
struct bnad *bnad = NULL;
unsigned long flags;
- uint32_t i;
+ u32 i;
for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
@@ -1022,9 +1144,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
struct bna_ccb *ccb;
struct bna_rcb *rcb;
struct bnad_rx_ctrl *rx_ctrl;
- struct bnad_unmap_q *unmap_q;
- int i;
- int j;
+ int i, j;
for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
@@ -1039,19 +1159,10 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
if (!rcb)
continue;
+ bnad_rxq_alloc_init(bnad, rcb);
set_bit(BNAD_RXQ_STARTED, &rcb->flags);
set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
- unmap_q = rcb->unmap_q;
-
- /* Now allocate & post buffers for this RCB */
- /* !!Allocation in callback context */
- if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
- if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
- >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
- bnad_rxq_post(bnad, rcb);
- smp_mb__before_clear_bit();
- clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
- }
+ bnad_rxq_post(bnad, rcb);
}
}
}
@@ -1775,10 +1886,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(
- &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
- bnad->num_txq_per_tx,
- BNAD_TX_UNMAPQ_DEPTH);
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
+ bnad->txq_depth));
/* Allocate resources */
err = bnad_tx_res_alloc(bnad, res_info, tx_id);
@@ -1916,7 +2026,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
static const struct bna_rx_event_cbfn rx_cbfn = {
- .rcb_setup_cbfn = bnad_cb_rcb_setup,
+ .rcb_setup_cbfn = NULL,
.rcb_destroy_cbfn = NULL,
.ccb_setup_cbfn = bnad_cb_ccb_setup,
.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
@@ -1938,11 +2048,12 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Fill Unmap Q memory requirements */
- BNAD_FILL_UNMAPQ_MEM_REQ(
- &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
+ BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
rx_config->num_paths +
- ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
- rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
+ ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_config->num_paths),
+ ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
+ sizeof(struct bnad_rx_unmap_q)));
/* Allocate resource */
err = bnad_rx_res_alloc(bnad, res_info, rx_id);
@@ -2523,125 +2634,34 @@ bnad_stop(struct net_device *netdev)
}
/* TX */
-/*
- * bnad_start_xmit : Netdev entry point for Transmit
- * Called under lock held by net_device
- */
-static netdev_tx_t
-bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+/* Returns 0 for success */
+static int
+bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
+ struct sk_buff *skb, struct bna_txq_entry *txqent)
{
- struct bnad *bnad = netdev_priv(netdev);
- u32 txq_id = 0;
- struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
-
- u16 txq_prod, vlan_tag = 0;
- u32 unmap_prod, wis, wis_used, wi_range;
- u32 vectors, vect_id, i, acked;
- int err;
- unsigned int len;
- u32 gso_size;
-
- struct bnad_unmap_q *unmap_q = tcb->unmap_q;
- dma_addr_t dma_addr;
- struct bna_txq_entry *txqent;
- u16 flags;
-
- if (unlikely(skb->len <= ETH_HLEN)) {
- dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
- return NETDEV_TX_OK;
- }
- if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
- dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
- return NETDEV_TX_OK;
- }
- if (unlikely(skb_headlen(skb) == 0)) {
- dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
- return NETDEV_TX_OK;
- }
-
- /*
- * Takes care of the Tx that is scheduled between clearing the flag
- * and the netif_tx_stop_all_queues() call.
- */
- if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
- dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
- return NETDEV_TX_OK;
- }
-
- vectors = 1 + skb_shinfo(skb)->nr_frags;
- if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
- dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
- return NETDEV_TX_OK;
- }
- wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
- acked = 0;
- if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
- vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
- if ((u16) (*tcb->hw_consumer_index) !=
- tcb->consumer_index &&
- !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
- acked = bnad_txcmpl_process(bnad, tcb);
- if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
- bna_ib_ack(tcb->i_dbell, acked);
- smp_mb__before_clear_bit();
- clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
- } else {
- netif_stop_queue(netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_stop);
- }
-
- smp_mb();
- /*
- * Check again to deal with race condition between
- * netif_stop_queue here, and netif_wake_queue in
- * interrupt handler which is not inside netif tx lock.
- */
- if (likely
- (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
- vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
- BNAD_UPDATE_CTR(bnad, netif_queue_stop);
- return NETDEV_TX_BUSY;
- } else {
- netif_wake_queue(netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
- }
- }
-
- unmap_prod = unmap_q->producer_index;
- flags = 0;
-
- txq_prod = tcb->producer_index;
- BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
- txqent->hdr.wi.reserved = 0;
- txqent->hdr.wi.num_vectors = vectors;
+ u16 flags = 0;
+ u32 gso_size;
+ u16 vlan_tag = 0;
if (vlan_tx_tag_present(skb)) {
- vlan_tag = (u16) vlan_tx_tag_get(skb);
+ vlan_tag = (u16)vlan_tx_tag_get(skb);
flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
}
if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
- vlan_tag =
- (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+ vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
+ | (vlan_tag & 0x1fff);
flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
}
-
txqent->hdr.wi.vlan_tag = htons(vlan_tag);
if (skb_is_gso(skb)) {
gso_size = skb_shinfo(skb)->gso_size;
-
- if (unlikely(gso_size > netdev->mtu)) {
- dev_kfree_skb(skb);
+ if (unlikely(gso_size > bnad->netdev->mtu)) {
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ tcp_hdrlen(skb)) >= skb->len)) {
txqent->hdr.wi.opcode =
__constant_htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
@@ -2652,25 +2672,22 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
txqent->hdr.wi.lso_mss = htons(gso_size);
}
- err = bnad_tso_prepare(bnad, skb);
- if (unlikely(err)) {
- dev_kfree_skb(skb);
+ if (bnad_tso_prepare(bnad, skb)) {
BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
+
flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (tcp_hdrlen(skb) >> 2,
- skb_transport_offset(skb)));
- } else {
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
+ tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
+ } else {
txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
- if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
- dev_kfree_skb(skb);
+ if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2678,11 +2695,13 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb->protocol == __constant_htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
+#ifdef NETIF_F_IPV6_CSUM
else if (skb->protocol ==
__constant_htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
+#endif
if (proto == IPPROTO_TCP) {
flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
txqent->hdr.wi.l4_hdr_size_n_offset =
@@ -2692,12 +2711,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) + tcp_hdrlen(skb))) {
- dev_kfree_skb(skb);
+ skb_transport_offset(skb) +
+ tcp_hdrlen(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
-
} else if (proto == IPPROTO_UDP) {
flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
txqent->hdr.wi.l4_hdr_size_n_offset =
@@ -2706,51 +2724,149 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNAD_UPDATE_CTR(bnad, udpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
+ skb_transport_offset(skb) +
sizeof(struct udphdr))) {
- dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
} else {
- dev_kfree_skb(skb);
+
BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
- return NETDEV_TX_OK;
+ return -EINVAL;
}
- } else {
+ } else
txqent->hdr.wi.l4_hdr_size_n_offset = 0;
- }
}
txqent->hdr.wi.flags = htons(flags);
-
txqent->hdr.wi.frame_length = htonl(skb->len);
- unmap_q->unmap_array[unmap_prod].skb = skb;
+ return 0;
+}
+
+/*
+ * bnad_start_xmit : Netdev entry point for Transmit
+ * Called under lock held by net_device
+ */
+static netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 txq_id = 0;
+ struct bna_tcb *tcb = NULL;
+ struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
+ u32 prod, q_depth, vect_id;
+ u32 wis, vectors, len;
+ int i;
+ dma_addr_t dma_addr;
+ struct bna_txq_entry *txqent;
+
len = skb_headlen(skb);
- txqent->vector[0].length = htons(len);
- dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
- dma_addr);
- BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
- BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ /* Sanity checks for the skb */
- vect_id = 0;
- wis_used = 1;
+ if (unlikely(skb->len <= ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(len == 0)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
+ return NETDEV_TX_OK;
+ }
+
+ tcb = bnad->tx_info[0].tcb[txq_id];
+ q_depth = tcb->q_depth;
+ prod = tcb->producer_index;
+
+ unmap_q = tcb->unmap_q;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ /*
+ * Takes care of the Tx that is scheduled between clearing the flag
+ * and the netif_tx_stop_all_queues() call.
+ */
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
+ return NETDEV_TX_OK;
+ }
+
+ vectors = 1 + skb_shinfo(skb)->nr_frags;
+ wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
+
+ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
+ return NETDEV_TX_OK;
+ }
+
+ /* Check for available TxQ resources */
+ if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
+ if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
+ !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ u32 sent;
+ sent = bnad_txcmpl_process(bnad, tcb);
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, sent);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ } else {
+ netif_stop_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+
+ smp_mb();
+ /*
+ * Check again to deal with race condition between
+ * netif_stop_queue here, and netif_wake_queue in
+ * interrupt handler which is not inside netif tx lock.
+ */
+ if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ return NETDEV_TX_BUSY;
+ } else {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
+ head_unmap = &unmap_q[prod];
+
+ /* Program the opcode, flags, frame_len, num_vectors in WI */
+ if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txqent->hdr.wi.reserved = 0;
+ txqent->hdr.wi.num_vectors = vectors;
+
+ head_unmap->skb = skb;
+ head_unmap->nvecs = 0;
+
+ /* Program the vectors */
+ unmap = head_unmap;
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
+ txqent->vector[0].length = htons(len);
+ dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
+ head_unmap->nvecs++;
+
+ for (i = 0, vect_id = 0; i < vectors - 1; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
u16 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
- unmap_prod = unmap_q->producer_index;
-
- unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
- unmap_q->unmap_array,
- unmap_prod, unmap_q->q_depth, skb,
- i);
+ /* Undo the changes starting at tcb->producer_index */
+ bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
+ tcb->producer_index);
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
@@ -2758,47 +2874,35 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
len += size;
- if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
+ vect_id++;
+ if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
vect_id = 0;
- if (--wi_range)
- txqent++;
- else {
- BNA_QE_INDX_ADD(txq_prod, wis_used,
- tcb->q_depth);
- wis_used = 0;
- BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
- txqent, wi_range);
- }
- wis_used++;
+ BNA_QE_INDX_INC(prod, q_depth);
+ txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
txqent->hdr.wi_ext.opcode =
__constant_htons(BNA_TXQ_WI_EXTENSION);
+ unmap = &unmap_q[prod];
}
- BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
- txqent->vector[vect_id].length = htons(size);
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
- dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
- BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ txqent->vector[vect_id].length = htons(size);
+ dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
+ dma_addr);
+ head_unmap->nvecs++;
}
if (unlikely(len != skb->len)) {
- unmap_prod = unmap_q->producer_index;
-
- unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
- unmap_q->unmap_array, unmap_prod,
- unmap_q->q_depth, skb,
- skb_shinfo(skb)->nr_frags);
+ /* Undo the changes starting at tcb->producer_index */
+ bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
dev_kfree_skb(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}
- unmap_q->producer_index = unmap_prod;
- BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
- tcb->producer_index = txq_prod;
+ BNA_QE_INDX_INC(prod, q_depth);
+ tcb->producer_index = prod;
smp_mb();
@@ -3226,7 +3330,7 @@ bnad_pci_uninit(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __devinit
+static int
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
@@ -3320,7 +3424,6 @@ bnad_pci_probe(struct pci_dev *pdev,
if (err)
goto res_free;
-
/* Set up timers */
setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
((unsigned long)bnad));
@@ -3426,7 +3529,7 @@ unlock_mutex:
return err;
}
-static void __devexit
+static void
bnad_pci_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3490,7 +3593,7 @@ static struct pci_driver bnad_pci_driver = {
.name = BNAD_NAME,
.id_table = bnad_pci_id_table,
.probe = bnad_pci_probe,
- .remove = __devexit_p(bnad_pci_remove),
+ .remove = bnad_pci_remove,
};
static int __init
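
The reworked bnad_start_xmit() above sizes its work-item budget with BNA_QE_FREE_CNT() before posting, and re-checks it after stopping the queue. The standalone sketch below models the kind of power-of-two ring index arithmetic such macros typically use; Q_DEPTH, ring_advance() and ring_free_cnt() are invented names for illustration, not the driver's definitions.

/*
 * Standalone model (made-up names, not driver code) of power-of-two
 * ring index arithmetic in the style of BNA_QE_INDX_INC() and
 * BNA_QE_FREE_CNT().
 */
#include <assert.h>
#include <stdio.h>

#define Q_DEPTH 8u				/* must be a power of two */

/* Advance an index with a mask instead of a modulo. */
static unsigned int ring_advance(unsigned int idx)
{
	return (idx + 1) & (Q_DEPTH - 1);
}

/* Entries the producer may still fill; one slot is always kept empty. */
static unsigned int ring_free_cnt(unsigned int prod, unsigned int cons)
{
	return (cons - prod - 1) & (Q_DEPTH - 1);
}

int main(void)
{
	unsigned int prod = 0, cons = 0, i;

	assert(ring_free_cnt(prod, cons) == Q_DEPTH - 1);	/* empty ring */

	for (i = 0; i < 5; i++)
		prod = ring_advance(prod);	/* post five work items */
	printf("free after 5 posts: %u\n", ring_free_cnt(prod, cons));	/* 2 */

	cons = ring_advance(cons);		/* hardware completes one */
	printf("free after 1 completion: %u\n", ring_free_cnt(prod, cons)); /* 3 */
	return 0;
}

Keeping one slot permanently empty lets a plain index comparison distinguish a full ring from an empty one without a separate element counter.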
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index d78339224751..c1d0bc059bfd 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.0.23.0"
+#define BNAD_VERSION "3.1.2.1"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -83,12 +83,9 @@ struct bnad_rx_ctrl {
#define BNAD_IOCETH_TIMEOUT 10000
-#define BNAD_MAX_Q_DEPTH 0x10000
-#define BNAD_MIN_Q_DEPTH 0x200
-
-#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
-/* keeping MAX TX and RX Q depth equal */
-#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH
+#define BNAD_MIN_Q_DEPTH 512
+#define BNAD_MAX_RXQ_DEPTH 2048
+#define BNAD_MAX_TXQ_DEPTH 2048
#define BNAD_JUMBO_MTU 9000
@@ -101,9 +98,8 @@ struct bnad_rx_ctrl {
#define BNAD_TXQ_TX_STARTED 1
/* Bit positions for rcb->flags */
-#define BNAD_RXQ_REFILL 0
-#define BNAD_RXQ_STARTED 1
-#define BNAD_RXQ_POST_OK 2
+#define BNAD_RXQ_STARTED 0
+#define BNAD_RXQ_POST_OK 1
/* Resource limits */
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
@@ -221,18 +217,43 @@ struct bnad_rx_info {
struct work_struct rx_cleanup_work;
} ____cacheline_aligned;
-/* Unmap queues for Tx / Rx cleanup */
-struct bnad_skb_unmap {
+struct bnad_tx_vector {
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct bnad_tx_unmap {
struct sk_buff *skb;
+ u32 nvecs;
+ struct bnad_tx_vector vectors[BFI_TX_MAX_VECTORS_PER_WI];
+};
+
+struct bnad_rx_vector {
DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ u32 len;
+};
+
+struct bnad_rx_unmap {
+ struct page *page;
+ u32 page_offset;
+ struct sk_buff *skb;
+ struct bnad_rx_vector vector;
};
-struct bnad_unmap_q {
- u32 producer_index;
- u32 consumer_index;
- u32 q_depth;
- /* This should be the last one */
- struct bnad_skb_unmap unmap_array[1];
+enum bnad_rxbuf_type {
+ BNAD_RXBUF_NONE = 0,
+ BNAD_RXBUF_SKB = 1,
+ BNAD_RXBUF_PAGE = 2,
+ BNAD_RXBUF_MULTI = 3
+};
+
+#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
+
+struct bnad_rx_unmap_q {
+ int reuse_pi;
+ int alloc_order;
+ u32 map_size;
+ enum bnad_rxbuf_type type;
+ struct bnad_rx_unmap unmap[0];
};
/* Bit mask values for bnad->cfg_flags */
@@ -252,11 +273,6 @@ struct bnad_unmap_q {
#define BNAD_RF_STATS_TIMER_RUNNING 5
#define BNAD_RF_TX_PRIO_SET 6
-
-/* Define for Fast Path flags */
-/* Defined as bit positions */
-#define BNAD_FP_IN_RX_PATH 0
-
struct bnad {
struct net_device *netdev;
u32 id;
@@ -284,8 +300,8 @@ struct bnad {
u8 tx_coalescing_timeo;
u8 rx_coalescing_timeo;
- struct bna_rx_config rx_config[BNAD_MAX_RX];
- struct bna_tx_config tx_config[BNAD_MAX_TX];
+ struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
+ struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;
void __iomem *bar0; /* BAR0 address */
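
The new bnad_tx_unmap layout above keeps the skb and a vector count on the head entry, while the DMA addresses for the fragments spill over consecutive ring entries, at most BFI_TX_MAX_VECTORS_PER_WI per entry. A rough userspace model of that bookkeeping is sketched below; the structure and helper names are invented for illustration, and the real unwind logic lives in bnad_tx_buff_unmap().

/*
 * Userspace model (hypothetical names) of per-packet unmap bookkeeping:
 * the head entry owns the skb and the vector count; vectors are spread
 * over consecutive ring entries, VECTORS_PER_ENTRY at a time.
 */
#include <stdio.h>

#define RING_DEPTH        8u	/* power of two */
#define VECTORS_PER_ENTRY 4u	/* mirrors BFI_TX_MAX_VECTORS_PER_WI */

struct unmap_entry {
	int has_skb;		/* stands in for the skb pointer */
	unsigned int nvecs;	/* meaningful only on the head entry */
};

/* Release one packet's vectors starting at 'index'; return the next index. */
static unsigned int unmap_packet(struct unmap_entry *ring, unsigned int index)
{
	struct unmap_entry *head = &ring[index];
	unsigned int nvecs = head->nvecs;
	unsigned int vec = 0;

	head->has_skb = 0;
	head->nvecs = 0;

	while (nvecs) {
		printf("unmap ring[%u] vector %u\n", index, vec);
		nvecs--;
		if (++vec == VECTORS_PER_ENTRY) {	/* spill to next entry */
			vec = 0;
			index = (index + 1) & (RING_DEPTH - 1);
		}
	}
	if (vec)					/* finish a partial entry */
		index = (index + 1) & (RING_DEPTH - 1);
	return index;
}

int main(void)
{
	struct unmap_entry ring[RING_DEPTH] = { 0 };

	ring[6].has_skb = 1;
	ring[6].nvecs = 6;	/* six vectors -> spans entries 6 and 7 */
	printf("next index: %u\n", unmap_packet(ring, 6));	/* wraps to 0 */
	return 0;
}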
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 40e1e84f4984..455b5a2e59d4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -102,6 +102,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"rx_unmap_q_alloc_failed",
"rxbuf_alloc_failed",
+ "mac_stats_clr_cnt",
"mac_frame_64",
"mac_frame_65_127",
"mac_frame_128_255",
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 32e8f178ab76..14ca9317c915 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw.bin"
-#define CNA_FW_FILE_CT2 "ct2fw.bin"
+#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
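
With the firmware images renamed to versioned files, the matching blobs have to be present under /lib/firmware for the driver to find them. Below is a hedged sketch of how such a file is typically pulled in through the request_firmware() interface; cna_check_firmware() is an invented helper, not the driver's actual loader.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

/* Invented helper: confirm that the versioned CT2 image can be loaded. */
static int cna_check_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "ct2fw-3.1.0.0.bin", dev);
	if (err) {
		dev_err(dev, "ct2fw-3.1.0.0.bin not found in /lib/firmware\n");
		return err;
	}

	dev_info(dev, "firmware image is %zu bytes\n", fw->size);
	release_firmware(fw);	/* drop the kernel's copy once consumed */
	return 0;
}

/* Advertise the blobs so initramfs tooling knows to bundle them. */
MODULE_FIRMWARE("ctfw-3.1.0.0.bin");
MODULE_FIRMWARE("ct2fw-3.1.0.0.bin");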
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index db931916da08..ceb0de0cf62c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,13 +2,10 @@
# Atmel device configuration
#
-config HAVE_NET_MACB
- bool
-
config NET_CADENCE
bool "Cadence devices"
+ depends on HAS_IOMEM
default y
- depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
Make sure you know the name of your card. Read the Ethernet-HOWTO,
@@ -25,16 +22,14 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on ARM && ARCH_AT91RM9200
select NET_CORE
- select MII
+ select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
ethernet support, then you should always answer Y to this.
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAVE_NET_MACB
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4e980a7886fb..3becdb2deb46 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -6,11 +6,6 @@
* Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
* Initial version by Rick Bronson 01/11/2003
*
- * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
- * (Polaroid Corporation)
- *
- * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -20,7 +15,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -31,956 +25,248 @@
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/mach-types.h>
-
-#include <mach/at91rm9200_emac.h>
-#include <asm/gpio.h>
-#include <mach/board.h>
-
-#include "at91_ether.h"
-
-#define DRV_NAME "at91_ether"
-#define DRV_VERSION "1.0"
-
-#define LINK_POLL_INTERVAL (HZ)
-
-/* ..................................................................... */
-
-/*
- * Read from a EMAC register.
- */
-static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
-{
- return __raw_readl(lp->emac_base + reg);
-}
-
-/*
- * Write to a EMAC register.
- */
-static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
-{
- __raw_writel(value, lp->emac_base + reg);
-}
-
-/* ........................... PHY INTERFACE ........................... */
-
-/*
- * Enable the MDIO bit in MAC control register
- * When not called from an interrupt-handler, access to the PHY must be
- * protected by a spinlock.
- */
-static void enable_mdi(struct at91_private *lp)
-{
- unsigned long ctl;
-
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
-}
-
-/*
- * Disable the MDIO bit in the MAC control register
- */
-static void disable_mdi(struct at91_private *lp)
-{
- unsigned long ctl;
-
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
-}
-
-/*
- * Wait until the PHY operation is complete.
- */
-static inline void at91_phy_wait(struct at91_private *lp)
-{
- unsigned long timeout = jiffies + 2;
-
- while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
- if (time_after(jiffies, timeout)) {
- printk("at91_ether: MIO timeout\n");
- break;
- }
- cpu_relax();
- }
-}
-
-/*
- * Write value to the a PHY register
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
-{
- at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
- | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
-
- /* Wait until IDLE bit in Network Status register is cleared */
- at91_phy_wait(lp);
-}
-
-/*
- * Read value stored in a PHY register.
- * Note: MDI interface is assumed to already have been enabled.
- */
-static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
-{
- at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
- | ((phy_addr & 0x1f) << 23) | (address << 18));
-
- /* Wait until IDLE bit in Network Status register is cleared */
- at91_phy_wait(lp);
-
- *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
-}
-
-/* ........................... PHY MANAGEMENT .......................... */
-
-/*
- * Access the PHY to determine the current link speed and mode, and update the
- * MAC accordingly.
- * If no link or auto-negotiation is busy, then no changes are made.
- */
-static void update_linkspeed(struct net_device *dev, int silent)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int bmsr, bmcr, lpa, mac_cfg;
- unsigned int speed, duplex;
-
- if (!mii_link_ok(&lp->mii)) { /* no link */
- netif_carrier_off(dev);
- if (!silent)
- printk(KERN_INFO "%s: Link down.\n", dev->name);
- return;
- }
-
- /* Link up, or auto-negotiation still in progress */
- read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
- read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
- if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
- if (!(bmsr & BMSR_ANEGCOMPLETE))
- return; /* Do nothing - another interrupt generated when negotiation complete */
-
- read_phy(lp, lp->phy_address, MII_LPA, &lpa);
- if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
- else speed = SPEED_10;
- if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
- else duplex = DUPLEX_HALF;
- } else {
- speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
- duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
- }
-
- /* Update the MAC */
- mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
- if (speed == SPEED_100) {
- if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
- mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
- else /* 100 Half Duplex */
- mac_cfg |= AT91_EMAC_SPD;
- } else {
- if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
- mac_cfg |= AT91_EMAC_FD;
- else {} /* 10 Half Duplex */
- }
- at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
-
- if (!silent)
- printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
- netif_carrier_on(dev);
-}
-
-/*
- * Handle interrupts from the PHY
- */
-static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned int phy;
-
- /*
- * This handler is triggered on both edges, but the PHY chips expect
- * level-triggering. We therefore have to check if the PHY actually has
- * an IRQ pending.
- */
- enable_mdi(lp);
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
- if (!(phy & (1 << 0)))
- goto done;
- }
- else if (lp->phy_type == MII_LXT971A_ID) {
- read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
- if (!(phy & (1 << 2)))
- goto done;
- }
- else if (lp->phy_type == MII_BCM5221_ID) {
- read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
- if (!(phy & (1 << 0)))
- goto done;
- }
- else if (lp->phy_type == MII_KS8721_ID) {
- read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
- if (!(phy & ((1 << 2) | 1)))
- goto done;
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
- if (!(phy & ((1 << 2) | 1)))
- goto done;
- }
- else if (lp->phy_type == MII_DP83848_ID) {
- read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
- if (!(phy & (1 << 7)))
- goto done;
- }
-
- update_linkspeed(dev, 0);
-
-done:
- disable_mdi(lp);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Initialize and enable the PHY interrupt for link-state changes
- */
-static void enable_phyirq(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int dsintr, irq_number;
- int status;
-
- if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
- /*
- * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
- * or board does not have it connected.
- */
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
- return;
- }
-
- irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
- status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
- if (status) {
- printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
- return;
- }
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
- dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
- write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
- read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
- dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
- write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
- }
- else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
- dsintr = (1 << 15) | ( 1 << 14);
- write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
- dsintr = (1 << 10) | ( 1 << 8);
- write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
- dsintr = dsintr | 0x500; /* set bits 8, 10 */
- write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
- }
- else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
- read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
- dsintr = dsintr | 0x3c; /* set bits 2..5 */
- write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
- read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
- dsintr = dsintr | 0x3; /* set bits 0,1 */
- write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
- }
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-
-/*
- * Disable the PHY interrupt
- */
-static void disable_phyirq(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int dsintr;
- unsigned int irq_number;
-
- if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
- del_timer_sync(&lp->check_timer);
- return;
- }
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
- read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
- dsintr = dsintr | 0xf00; /* set bits 8..11 */
- write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
- read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
- dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
- write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
- }
- else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
- read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
- dsintr = ~(1 << 14);
- write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
- }
- else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
- read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
- dsintr = ~((1 << 10) | (1 << 8));
- write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
- }
- else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
- read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
- dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
- write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
- }
- else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
- read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
- dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
- write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
- read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
- dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
- write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
- }
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
- free_irq(irq_number, dev); /* Free interrupt handler */
-}
-
-/*
- * Perform a software reset of the PHY.
- */
-#if 0
-static void reset_phy(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int bmcr;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- /* Perform PHY reset */
- write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
-
- /* Wait until PHY reset is complete */
- do {
- read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
- } while (!(bmcr & BMCR_RESET));
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-#endif
-
-static void at91ether_check_link(unsigned long dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
-
- enable_mdi(lp);
- update_linkspeed(dev, 1);
- disable_mdi(lp);
-
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
-}
-
-/*
- * Perform any PHY-specific initialization.
- */
-static void __init initialize_phy(struct at91_private *lp)
-{
- unsigned int val;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
- read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
- if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
- lp->phy_media = PORT_FIBRE;
- } else if (machine_is_csb337()) {
- /* mix link activity status into LED2 link state */
- write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
- } else if (machine_is_ecbat91())
- write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-}
-
-/* ......................... ADDRESS MANAGEMENT ........................ */
-
-/*
- * NOTE: Your bootloader must always set the MAC address correctly before
- * booting into Linux.
- *
- * - It must always set the MAC address after reset, even if it doesn't
- * happen to access the Ethernet while it's booting. Some versions of
- * U-Boot on the AT91RM9200-DK do not do this.
- *
- * - Likewise it must store the addresses in the correct byte order.
- * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
- * continues to do so, for bug-compatibility).
- */
-
-static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
-{
- char addr[6];
-
- if (machine_is_csb337()) {
- addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
- addr[4] = (lo & 0xff00) >> 8;
- addr[3] = (lo & 0xff0000) >> 16;
- addr[2] = (lo & 0xff000000) >> 24;
- addr[1] = (hi & 0xff);
- addr[0] = (hi & 0xff00) >> 8;
- }
- else {
- addr[0] = (lo & 0xff);
- addr[1] = (lo & 0xff00) >> 8;
- addr[2] = (lo & 0xff0000) >> 16;
- addr[3] = (lo & 0xff000000) >> 24;
- addr[4] = (hi & 0xff);
- addr[5] = (hi & 0xff00) >> 8;
- }
-
- if (is_valid_ether_addr(addr)) {
- memcpy(dev->dev_addr, &addr, 6);
- return 1;
- }
- return 0;
-}
-
-/*
- * Set the ethernet MAC address in dev->dev_addr
- */
-static void __init get_mac_address(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- /* Check Specific-Address 1 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
- return;
- /* Check Specific-Address 2 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
- return;
- /* Check Specific-Address 3 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
- return;
- /* Check Specific-Address 4 */
- if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
- return;
-
- printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
-}
-
-/*
- * Program the hardware MAC address from dev->dev_addr.
- */
-static void update_mac_address(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
- at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
-
- at91_emac_write(lp, AT91_EMAC_SA2L, 0);
- at91_emac_write(lp, AT91_EMAC_SA2H, 0);
-}
-
-/*
- * Store the new hardware address in dev->dev_addr, and update the MAC.
- */
-static int set_mac_address(struct net_device *dev, void* addr)
-{
- struct sockaddr *address = addr;
-
- if (!is_valid_ether_addr(address->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
- update_mac_address(dev);
-
- printk("%s: Setting MAC address to %pM\n", dev->name,
- dev->dev_addr);
+#include "macb.h"
- return 0;
-}
-
-static int inline hash_bit_value(int bitnr, __u8 *addr)
-{
- if (addr[bitnr / 8] & (1 << (bitnr % 8)))
- return 1;
- return 0;
-}
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ 0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR 9
-/*
- * The hash address register is 64 bits long and takes up two locations in the memory map.
- * The least significant bits are stored in EMAC_HSL and the most significant
- * bits in EMAC_HSH.
- *
- * The unicast hash enable and the multicast hash enable bits in the network configuration
- * register enable the reception of hash matched frames. The destination address is
- * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
- * The hash function is an exclusive or of every sixth bit of the destination address.
- * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
- * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
- * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
- * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
- * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
- * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
- * da[0] represents the least significant bit of the first byte received, that is, the multicast/
- * unicast indicator, and da[47] represents the most significant bit of the last byte
- * received.
- * If the hash index points to a bit that is set in the hash register then the frame will be
- * matched according to whether the frame is multicast or unicast.
- * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
- * the hash index points to a bit set in the hash register.
- * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
- * hash index points to a bit set in the hash register.
- * To receive all multicast frames, the hash register should be set with all ones and the
- * multicast hash enable bit should be set in the network configuration register.
- */
-
-/*
- * Return the hash index value for the specified address.
- */
-static int hash_get_index(__u8 *addr)
-{
- int i, j, bitval;
- int hash_index = 0;
-
- for (j = 0; j < 6; j++) {
- for (i = 0, bitval = 0; i < 8; i++)
- bitval ^= hash_bit_value(i*6 + j, addr);
-
- hash_index |= (bitval << j);
- }
-
- return hash_index;
-}
-
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
-static void at91ether_sethashtable(struct net_device *dev)
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- unsigned long mc_filter[2];
- unsigned int bitnr;
-
- mc_filter[0] = mc_filter[1] = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- bitnr = hash_get_index(ha->addr);
- mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
- }
-
- at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
- at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
-}
+ struct macb *lp = netdev_priv(dev);
+ dma_addr_t addr;
+ u32 ctl;
+ int i;
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
-static void at91ether_set_multicast_list(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long cfg;
-
- cfg = at91_emac_read(lp, AT91_EMAC_CFG);
-
- if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
- cfg |= AT91_EMAC_CAF;
- else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
- cfg &= ~AT91_EMAC_CAF;
-
- if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
- at91_emac_write(lp, AT91_EMAC_HSH, -1);
- at91_emac_write(lp, AT91_EMAC_HSL, -1);
- cfg |= AT91_EMAC_MTI;
- } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
- at91ether_sethashtable(dev);
- cfg |= AT91_EMAC_MTI;
- } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
- at91_emac_write(lp, AT91_EMAC_HSH, 0);
- at91_emac_write(lp, AT91_EMAC_HSL, 0);
- cfg &= ~AT91_EMAC_MTI;
+ lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring) {
+ netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ return -ENOMEM;
}
- at91_emac_write(lp, AT91_EMAC_CFG, cfg);
-}
-
-/* ......................... ETHTOOL SUPPORT ........................... */
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct at91_private *lp = netdev_priv(dev);
- unsigned int value;
-
- read_phy(lp, phy_id, location, &value);
- return value;
-}
-
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct at91_private *lp = netdev_priv(dev);
-
- write_phy(lp, phy_id, location, value);
-}
-
-static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
+ lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
+ if (!lp->rx_buffers) {
+ netdev_err(dev, "unable to alloc rx data DMA buffer\n");
- ret = mii_ethtool_gset(&lp->mii, cmd);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
- cmd->supported = SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+ return -ENOMEM;
}
- return ret;
-}
-
-static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- ret = mii_ethtool_sset(&lp->mii, cmd);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return ret;
-}
-
-static int at91ether_nwayreset(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ret;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
-
- ret = mii_nway_restart(&lp->mii);
-
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return ret;
-}
-
-static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
-}
-
-static const struct ethtool_ops at91ether_ethtool_ops = {
- .get_settings = at91ether_get_settings,
- .set_settings = at91ether_set_settings,
- .get_drvinfo = at91ether_get_drvinfo,
- .nway_reset = at91ether_nwayreset,
- .get_link = ethtool_op_get_link,
-};
-
-static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct at91_private *lp = netdev_priv(dev);
- int res;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- return res;
-}
-
-/* ................................ MAC ................................ */
-
-/*
- * Initialize and start the Receiver and Transmit subsystems
- */
-static void at91ether_start(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- struct recv_desc_bufs *dlist, *dlist_phys;
- int i;
- unsigned long ctl;
-
- dlist = lp->dlist;
- dlist_phys = lp->dlist_phys;
-
+ addr = lp->rx_buffers_dma;
for (i = 0; i < MAX_RX_DESCR; i++) {
- dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
- dlist->descriptors[i].size = 0;
+ lp->rx_ring[i].addr = addr;
+ lp->rx_ring[i].ctrl = 0;
+ addr += MAX_RBUFF_SZ;
}
/* Set the Wrap bit on the last descriptor */
- dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
+ lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rxBuffIndex = 0;
+ lp->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
+ macb_writel(lp, RBQP, lp->rx_ring_dma);
/* Enable Receive and Transmit */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+ return 0;
}
-/*
- * Open the ethernet interface
- */
+/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long ctl;
-
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
-
- clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
+ int ret;
/* Clear internal statistics */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
- /* Update the MAC address (in case user has changed it) */
- update_mac_address(dev);
+ macb_set_hwaddr(lp);
- /* Enable PHY interrupt */
- enable_phyirq(dev);
+ ret = at91ether_start(dev);
+ if (ret)
+ return ret;
/* Enable MAC interrupts */
- at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
- | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
- | AT91_EMAC_ROVR | AT91_EMAC_ABT);
-
- /* Determine current link speed */
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- update_linkspeed(dev, 0);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
-
- at91ether_start(dev);
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* schedule a link state check */
+ phy_start(lp->phy_dev);
+
netif_start_queue(dev);
+
return 0;
}
-/*
- * Close the interface
- */
+/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- unsigned long ctl;
+ struct macb *lp = netdev_priv(dev);
+ u32 ctl;
/* Disable Receiver and Transmitter */
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
-
- /* Disable PHY interrupt */
- disable_phyirq(dev);
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
/* Disable MAC interrupts */
- at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
- | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
- | AT91_EMAC_ROVR | AT91_EMAC_ABT);
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
netif_stop_queue(dev);
- clk_disable(lp->ether_clk); /* Disable Peripheral clock */
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+
+ dma_free_coherent(&lp->pdev->dev,
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ lp->rx_buffers, lp->rx_buffers_dma);
+ lp->rx_buffers = NULL;
return 0;
}
-/*
- * Transmit packet.
- */
+/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
+ struct macb *lp = netdev_priv(dev);
- if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
+ if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
netif_stop_queue(dev);
/* Store packet information (to free when Tx completed) */
lp->skb = skb;
lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
- dev->stats.tx_bytes += skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+ DMA_TO_DEVICE);
/* Set address of the data in the Transmit Address register */
- at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
+ macb_writel(lp, TAR, lp->skb_physaddr);
/* Set length of the packet in the Transmit Control register */
- at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
+ macb_writel(lp, TCR, skb->len);
} else {
- printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
- return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
- on this skb, he also reports -ENETDOWN and printk's, so either
- we free and return(0) or don't free and return 1 */
+ netdev_err(dev, "%s called, but device is busy!\n", __func__);
+ return NETDEV_TX_BUSY;
}
return NETDEV_TX_OK;
}
-/*
- * Update the current statistics from the internal statistics registers.
- */
-static struct net_device_stats *at91ether_stats(struct net_device *dev)
-{
- struct at91_private *lp = netdev_priv(dev);
- int ale, lenerr, seqe, lcol, ecol;
-
- if (netif_running(dev)) {
- dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
- ale = at91_emac_read(lp, AT91_EMAC_ALE);
- dev->stats.rx_frame_errors += ale; /* Alignment errors */
- lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
- dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
- seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
- dev->stats.rx_crc_errors += seqe; /* CRC error */
- dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
- dev->stats.rx_errors += (ale + lenerr + seqe
- + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
-
- dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
- dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
- dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
- dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
-
- lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
- ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
- dev->stats.tx_window_errors += lcol; /* Late collisions */
- dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
-
- dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
- }
- return &dev->stats;
-}
-
-/*
- * Extract received frame from buffer descriptors and sent to upper layers.
+/* Extract received frames from buffer descriptors and send them to the upper layers.
* (Called from interrupt context)
*/
static void at91ether_rx(struct net_device *dev)
{
- struct at91_private *lp = netdev_priv(dev);
- struct recv_desc_bufs *dlist;
+ struct macb *lp = netdev_priv(dev);
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- dlist = lp->dlist;
- while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
- p_recv = dlist->recv_buf[lp->rxBuffIndex];
- pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
+ while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+ pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_bytes += pktlen;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pktlen;
netif_rx(skb);
+ } else {
+ lp->stats.rx_dropped++;
+ netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
- else {
- dev->stats.rx_dropped += 1;
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- }
- if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
- dev->stats.multicast++;
+ if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ lp->stats.multicast++;
+
+ /* reset ownership bit */
+ lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
- dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
- if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
- lp->rxBuffIndex = 0;
+ /* wrap after last buffer */
+ if (lp->rx_tail == MAX_RX_DESCR - 1)
+ lp->rx_tail = 0;
else
- lp->rxBuffIndex++;
+ lp->rx_tail++;
}
}
-/*
- * MAC interrupt handler
- */
+/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned long intstatus, ctl;
+ struct net_device *dev = dev_id;
+ struct macb *lp = netdev_priv(dev);
+ u32 intstatus, ctl;
/* MAC Interrupt Status register indicates what interrupts are pending.
- It is automatically cleared once read. */
- intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
+ * It is automatically cleared once read.
+ */
+ intstatus = macb_readl(lp, ISR);
- if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
+ /* Receive complete */
+ if (intstatus & MACB_BIT(RCOMP))
at91ether_rx(dev);
- if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
- /* The TCOM bit is set even if the transmission failed. */
- if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
- dev->stats.tx_errors += 1;
+ /* Transmit complete */
+ if (intstatus & MACB_BIT(TCOMP)) {
+ /* The TCOM bit is set even if the transmission failed */
+ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+ lp->stats.tx_errors++;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += lp->skb_length;
}
netif_wake_queue(dev);
}
- /* Work-around for Errata #11 */
- if (intstatus & AT91_EMAC_RBNA) {
- ctl = at91_emac_read(lp, AT91_EMAC_CTL);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
- at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
+ /* Work-around for EMAC Errata section 41.3.1 */
+ if (intstatus & MACB_BIT(RXUBR)) {
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+ macb_writel(lp, NCR, ctl | MACB_BIT(RE));
}
- if (intstatus & AT91_EMAC_ROVR)
- printk("%s: ROVR error\n", dev->name);
+ if (intstatus & MACB_BIT(ISR_ROVR))
+ netdev_err(dev, "ROVR error\n");
return IRQ_HANDLED;
}
@@ -1000,10 +286,10 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = at91ether_stats,
- .ndo_set_rx_mode = at91ether_set_multicast_list,
- .ndo_set_mac_address = set_mac_address,
- .ndo_do_ioctl = at91ether_ioctl,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1011,237 +297,195 @@ static const struct net_device_ops at91ether_netdev_ops = {
#endif
};
-/*
- * Detect the PHY type, and its address.
- */
-static int __init at91ether_phy_detect(struct at91_private *lp)
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+ { .compatible = "cdns,at91rm9200-emac" },
+ { .compatible = "cdns,emac" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
{
- unsigned int phyid1, phyid2;
- unsigned long phy_id;
- unsigned short phy_address = 0;
-
- while (phy_address < PHY_MAX_ADDR) {
- /* Read the PHY ID registers */
- enable_mdi(lp);
- read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
- read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
- disable_mdi(lp);
-
- phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
- switch (phy_id) {
- case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
- case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
- case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
- case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
- case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
- case MII_DP83847_ID: /* National Semiconductor DP83847: */
- case MII_DP83848_ID: /* National Semiconductor DP83848: */
- case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
- case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
- case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
- case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
- /* store detected values */
- lp->phy_type = phy_id; /* Type of PHY connected */
- lp->phy_address = phy_address; /* MDI address of PHY */
- return 1;
- }
+ struct device_node *np = pdev->dev.of_node;
- phy_address++;
- }
+ if (np)
+ return of_get_phy_mode(np);
- return 0; /* not detected */
+ return -ENODEV;
}
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+ struct device_node *np = bp->pdev->dev.of_node;
-/*
- * Detect MAC & PHY and perform ethernet interface initialization
- */
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac) {
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+#else
+static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+static int at91ether_get_hwaddr_dt(struct macb *bp)
+{
+ return -ENODEV;
+}
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
static int __init at91ether_probe(struct platform_device *pdev)
{
struct macb_platform_data *board_data = pdev->dev.platform_data;
struct resource *regs;
struct net_device *dev;
- struct at91_private *lp;
+ struct phy_device *phydev;
+ struct pinctrl *pinctrl;
+ struct macb *lp;
int res;
+ u32 reg;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENOENT;
- dev = alloc_etherdev(sizeof(struct at91_private));
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ res = PTR_ERR(pinctrl);
+ if (res == -EPROBE_DEFER)
+ return res;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
+ dev = alloc_etherdev(sizeof(struct macb));
if (!dev)
return -ENOMEM;
lp = netdev_priv(dev);
- lp->board_data = *board_data;
+ lp->pdev = pdev;
+ lp->dev = dev;
spin_lock_init(&lp->lock);
- dev->base_addr = regs->start; /* physical base address */
- lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
- if (!lp->emac_base) {
+ /* physical base address */
+ dev->base_addr = regs->start;
+ lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!lp->regs) {
res = -ENOMEM;
goto err_free_dev;
}
/* Clock */
- lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
- if (IS_ERR(lp->ether_clk)) {
- res = PTR_ERR(lp->ether_clk);
- goto err_ioumap;
+ lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(lp->pclk)) {
+ res = PTR_ERR(lp->pclk);
+ goto err_free_dev;
}
- clk_enable(lp->ether_clk);
+ clk_enable(lp->pclk);
/* Install the interrupt handler */
dev->irq = platform_get_irq(pdev, 0);
- if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
- res = -EBUSY;
+ res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+ if (res)
goto err_disable_clock;
- }
-
- /* Allocate memory for DMA Receive descriptors */
- lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
- if (lp->dlist == NULL) {
- res = -ENOMEM;
- goto err_free_irq;
- }
ether_setup(dev);
dev->netdev_ops = &at91ether_netdev_ops;
- dev->ethtool_ops = &at91ether_ethtool_ops;
+ dev->ethtool_ops = &macb_ethtool_ops;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
- update_mac_address(dev); /* Program ethernet address into MAC */
-
- at91_emac_write(lp, AT91_EMAC_CTL, 0);
+ res = at91ether_get_hwaddr_dt(lp);
+ if (res < 0)
+ macb_get_hwaddr(lp);
- if (board_data->is_rmii)
- at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
- else
- at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
-
- /* Detect PHY */
- if (!at91ether_phy_detect(lp)) {
- printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
- res = -ENODEV;
- goto err_free_dmamem;
+ res = at91ether_get_phy_mode_dt(pdev);
+ if (res < 0) {
+ if (board_data && board_data->is_rmii)
+ lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ lp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ lp->phy_interface = res;
}
- initialize_phy(lp);
+ macb_writel(lp, NCR, 0);
+
+ reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+ if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ reg |= MACB_BIT(RM9200_RMII);
- lp->mii.dev = dev; /* Support for ethtool */
- lp->mii.mdio_read = mdio_read;
- lp->mii.mdio_write = mdio_write;
- lp->mii.phy_id = lp->phy_address;
- lp->mii.phy_id_mask = 0x1f;
- lp->mii.reg_num_mask = 0x1f;
+ macb_writel(lp, NCFGR, reg);
/* Register the network interface */
res = register_netdev(dev);
if (res)
- goto err_free_dmamem;
-
- /* Determine current link speed */
- spin_lock_irq(&lp->lock);
- enable_mdi(lp);
- update_linkspeed(dev, 0);
- disable_mdi(lp);
- spin_unlock_irq(&lp->lock);
- netif_carrier_off(dev); /* will be enabled in open() */
-
- /* If board has no PHY IRQ, use a timer to poll the PHY */
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- gpio_request(board_data->phy_irq_pin, "ethernet_phy");
- } else {
- /* If board has no PHY IRQ, use a timer to poll the PHY */
- init_timer(&lp->check_timer);
- lp->check_timer.data = (unsigned long)dev;
- lp->check_timer.function = at91ether_check_link;
- }
+ goto err_disable_clock;
+
+ if (macb_mii_init(lp) != 0)
+ goto err_out_unregister_netdev;
+
+ /* will be enabled in open() */
+ netif_carrier_off(dev);
+
+ phydev = lp->phy_dev;
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev),
+ phydev->irq);
/* Display ethernet banner */
- printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
- dev->name, (uint) dev->base_addr, dev->irq,
- at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
- at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
- dev->dev_addr);
- if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
- printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
- else if (lp->phy_type == MII_LXT971A_ID)
- printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
- else if (lp->phy_type == MII_RTL8201_ID)
- printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
- else if (lp->phy_type == MII_BCM5221_ID)
- printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
- else if (lp->phy_type == MII_DP83847_ID)
- printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
- else if (lp->phy_type == MII_DP83848_ID)
- printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
- else if (lp->phy_type == MII_AC101L_ID)
- printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
- else if (lp->phy_type == MII_KS8721_ID)
- printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
- else if (lp->phy_type == MII_T78Q21x3_ID)
- printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
- else if (lp->phy_type == MII_LAN83C185_ID)
- printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
-
- clk_disable(lp->ether_clk); /* Disable Peripheral clock */
+ netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+ dev->base_addr, dev->irq, dev->dev_addr);
return 0;
-
-err_free_dmamem:
- platform_set_drvdata(pdev, NULL);
- dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
-err_free_irq:
- free_irq(dev->irq, dev);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->ether_clk);
- clk_put(lp->ether_clk);
-err_ioumap:
- iounmap(lp->emac_base);
+ clk_disable(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
}
-static int __devexit at91ether_remove(struct platform_device *pdev)
+static int at91ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(dev);
+ struct macb *lp = netdev_priv(dev);
- if (gpio_is_valid(lp->board_data.phy_irq_pin))
- gpio_free(lp->board_data.phy_irq_pin);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
- dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
- clk_put(lp->ether_clk);
-
- platform_set_drvdata(pdev, NULL);
+ clk_disable(lp->pclk);
free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
#ifdef CONFIG_PM
-
static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(net_dev);
+ struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
- disable_irq(phy_irq);
- }
-
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->ether_clk);
+ clk_disable(lp->pclk);
}
return 0;
}
@@ -1249,34 +493,29 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
- struct at91_private *lp = netdev_priv(net_dev);
+ struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->ether_clk);
+ clk_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
-
- if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
- int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
- enable_irq(phy_irq);
- }
}
return 0;
}
-
#else
#define at91ether_suspend NULL
#define at91ether_resume NULL
#endif
static struct platform_driver at91ether_driver = {
- .remove = __devexit_p(at91ether_remove),
+ .remove = at91ether_remove,
.suspend = at91ether_suspend,
.resume = at91ether_resume,
.driver = {
- .name = DRV_NAME,
+ .name = "at91_ether",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91ether_dt_ids),
},
};
@@ -1296,4 +535,4 @@ module_exit(at91ether_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
MODULE_AUTHOR("Andrew Victor");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
deleted file mode 100644
index 0ef6328fa7f8..000000000000
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Ethernet driver for the Atmel AT91RM9200 (Thunder)
- *
- * Copyright (C) SAN People (Pty) Ltd
- *
- * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
- * Initial version by Rick Bronson.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef AT91_ETHERNET
-#define AT91_ETHERNET
-
-
-/* Davicom 9161 PHY */
-#define MII_DM9161_ID 0x0181b880
-#define MII_DM9161A_ID 0x0181b8a0
-#define MII_DSCR_REG 16
-#define MII_DSCSR_REG 17
-#define MII_DSINTR_REG 21
-
-/* Intel LXT971A PHY */
-#define MII_LXT971A_ID 0x001378E0
-#define MII_ISINTE_REG 18
-#define MII_ISINTS_REG 19
-#define MII_LEDCTRL_REG 20
-
-/* Realtek RTL8201 PHY */
-#define MII_RTL8201_ID 0x00008200
-
-/* Broadcom BCM5221 PHY */
-#define MII_BCM5221_ID 0x004061e0
-#define MII_BCMINTR_REG 26
-
-/* National Semiconductor DP83847 */
-#define MII_DP83847_ID 0x20005c30
-
-/* National Semiconductor DP83848 */
-#define MII_DP83848_ID 0x20005c90
-#define MII_DPPHYSTS_REG 16
-#define MII_DPMICR_REG 17
-#define MII_DPMISR_REG 18
-
-/* Altima AC101L PHY */
-#define MII_AC101L_ID 0x00225520
-
-/* Micrel KS8721 PHY */
-#define MII_KS8721_ID 0x00221610
-
-/* Teridian 78Q2123/78Q2133 */
-#define MII_T78Q21x3_ID 0x000e7230
-#define MII_T78Q21INT_REG 17
-
-/* SMSC LAN83C185 */
-#define MII_LAN83C185_ID 0x0007C0A0
-
-/* ........................................................................ */
-
-#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
-#define MAX_RX_DESCR 9 /* max number of receive buffers */
-
-#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
-#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
-
-#define EMAC_BROADCAST 0x80000000 /* broadcast address */
-#define EMAC_MULTICAST 0x40000000 /* multicast address */
-#define EMAC_UNICAST 0x20000000 /* unicast address */
-
-struct rbf_t
-{
- unsigned int addr;
- unsigned long size;
-};
-
-struct recv_desc_bufs
-{
- struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
- char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
-};
-
-struct at91_private
-{
- struct mii_if_info mii; /* ethtool support */
- struct macb_platform_data board_data; /* board-specific
- * configuration (shared with
- * macb for common data */
- void __iomem *emac_base; /* base register address */
- struct clk *ether_clk; /* clock */
-
- /* PHY */
- unsigned long phy_type; /* type of PHY (PHY_ID) */
- spinlock_t lock; /* lock for MDI interface */
- short phy_media; /* media interface type */
- unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
- struct timer_list check_timer; /* Poll link status */
-
- /* Transmit */
- struct sk_buff *skb; /* holds skb until xmit interrupt completes */
- dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
- int skb_length; /* saved skb length for pci_unmap_single */
-
- /* Receive */
- int rxBuffIndex; /* index into receive descriptor list */
- struct recv_desc_bufs *dlist; /* descriptor list address */
- struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
-};
-
-#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b7b576..a9b0830fb39d 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -14,8 +14,10 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,37 +28,74 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
#include "macb.h"
#define RX_BUFFER_SIZE 128
-#define RX_RING_SIZE 512
-#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE 512 /* must be power of 2 */
+#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET 2
+#define TX_RING_SIZE 128 /* must be power of 2 */
+#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
-#define TX_RING_SIZE 128
-#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
-#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
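+/* (occupancy is computed as CIRC_CNT(tx_head, tx_tail, TX_RING_SIZE)) */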
-#define TX_RING_GAP(bp) \
- (TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp) \
- (((bp)->tx_tail <= (bp)->tx_head) ? \
- (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
- (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+ | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
-#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT 1230
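+/* 1518-byte frame + preamble + IFG = 1538 octets; 1538 * 8 bits / 10 Mbit/s ~= 1230 us */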
-/* minimum number of free TX descriptors before waking up TX process */
-#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
+/* Ring buffer accessors */
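+/*
+ * tx_head, tx_tail and rx_tail are free-running counters; masking them
+ * with (ring size - 1) maps them onto the rings, which is why both ring
+ * sizes must be powers of 2.
+ */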
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+ return index & (TX_RING_SIZE - 1);
+}
-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
- | MACB_BIT(ISR_ROVR))
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
-static void __macb_set_hwaddr(struct macb *bp)
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+ return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+ dma_addr_t offset;
+
+ offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+ return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+ return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+ return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
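+/* RX buffers live in one contiguous coherent allocation, RX_BUFFER_SIZE bytes per descriptor */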
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+ return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
+
+void macb_set_hwaddr(struct macb *bp)
{
u32 bottom;
u16 top;
@@ -65,31 +104,58 @@ static void __macb_set_hwaddr(struct macb *bp)
macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+
+ /* Clear unused address register sets */
+ macb_or_gem_writel(bp, SA2B, 0);
+ macb_or_gem_writel(bp, SA2T, 0);
+ macb_or_gem_writel(bp, SA3B, 0);
+ macb_or_gem_writel(bp, SA3T, 0);
+ macb_or_gem_writel(bp, SA4B, 0);
+ macb_or_gem_writel(bp, SA4T, 0);
}
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
-static void __init macb_get_hwaddr(struct macb *bp)
+void macb_get_hwaddr(struct macb *bp)
{
+ struct macb_platform_data *pdata;
u32 bottom;
u16 top;
u8 addr[6];
+ int i;
- bottom = macb_or_gem_readl(bp, SA1B);
- top = macb_or_gem_readl(bp, SA1T);
+ pdata = bp->pdev->dev.platform_data;
- addr[0] = bottom & 0xff;
- addr[1] = (bottom >> 8) & 0xff;
- addr[2] = (bottom >> 16) & 0xff;
- addr[3] = (bottom >> 24) & 0xff;
- addr[4] = top & 0xff;
- addr[5] = (top >> 8) & 0xff;
+ /* Check all 4 address register sets for a valid address */
+ for (i = 0; i < 4; i++) {
+ bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+ top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+ if (pdata && pdata->rev_eth_addr) {
+ addr[5] = bottom & 0xff;
+ addr[4] = (bottom >> 8) & 0xff;
+ addr[3] = (bottom >> 16) & 0xff;
+ addr[2] = (bottom >> 24) & 0xff;
+ addr[1] = top & 0xff;
+ addr[0] = (top & 0xff00) >> 8;
+ } else {
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+ }
- if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
- } else {
- netdev_info(bp->dev, "invalid hw address, using random\n");
- eth_hw_addr_random(bp->dev);
+ if (is_valid_ether_addr(addr)) {
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ return;
+ }
}
+
+ netdev_info(bp->dev, "invalid hw address, using random\n");
+ eth_hw_addr_random(bp->dev);
}
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
@@ -152,13 +218,17 @@ static void macb_handle_link_change(struct net_device *dev)
reg = macb_readl(bp, NCFGR);
reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+ if (macb_is_gem(bp))
+ reg &= ~GEM_BIT(GBE);
if (phydev->duplex)
reg |= MACB_BIT(FD);
if (phydev->speed == SPEED_100)
reg |= MACB_BIT(SPD);
+ if (phydev->speed == SPEED_1000)
+ reg |= GEM_BIT(GBE);
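+ /* GBE is a GEM-only NCFGR bit, hence macb_or_gem_writel() below */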
- macb_writel(bp, NCFGR, reg);
+ macb_or_gem_writel(bp, NCFGR, reg);
bp->speed = phydev->speed;
bp->duplex = phydev->duplex;
@@ -196,7 +266,9 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_platform_data *pdata;
struct phy_device *phydev;
+ int phy_irq;
int ret;
phydev = phy_find_first(bp->mii_bus);
@@ -205,7 +277,14 @@ static int macb_mii_probe(struct net_device *dev)
return -1;
}
- /* TODO : add pin_irq */
+ pdata = dev_get_platdata(&bp->pdev->dev);
+ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+ }
+ }
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
@@ -216,7 +295,10 @@ static int macb_mii_probe(struct net_device *dev)
}
/* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
+ if (macb_is_gem(bp))
+ phydev->supported &= PHY_GBIT_FEATURES;
+ else
+ phydev->supported &= PHY_BASIC_FEATURES;
phydev->advertising = phydev->supported;
@@ -228,7 +310,7 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mii_init(struct macb *bp)
+int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
int err = -ENXIO, i;
@@ -284,6 +366,7 @@ err_out_free_mdiobus:
err_out:
return err;
}
+EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
@@ -297,93 +380,148 @@ static void macb_update_stats(struct macb *bp)
*p += __raw_readl(reg);
}
-static void macb_tx(struct macb *bp)
+static int macb_halt_tx(struct macb *bp)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
+ unsigned long halt_time, timeout;
+ u32 status;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
+ timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+ do {
+ halt_time = jiffies;
+ status = macb_readl(bp, TSR);
+ if (!(status & MACB_BIT(TGO)))
+ return 0;
- if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
- int i;
- netdev_err(bp->dev, "TX %s, resetting buffers\n",
- status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ usleep_range(10, 250);
+ } while (time_before(halt_time, timeout));
- /* Transfer ongoing, disable transmitter, to avoid confusion */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+ return -ETIMEDOUT;
+}
- head = bp->tx_head;
+static void macb_tx_error_task(struct work_struct *work)
+{
+ struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ unsigned int tail;
- /*Mark all the buffer as used to avoid sending a lost buffer*/
- for (i = 0; i < TX_RING_SIZE; i++)
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+ bp->tx_tail, bp->tx_head);
- /* Add wrap bit */
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ /* Make sure nobody is trying to queue up new packets */
+ netif_stop_queue(bp->dev);
- /* free transmit buffer in upper layer*/
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
+ /*
+ * Stop transmission now
+ * (in case we have just queued new packets)
+ */
+ if (macb_halt_tx(bp))
+ /* Just complain for now; reinitializing the TX path would be a possible improvement */
+ netdev_err(bp->dev, "BUG: halt tx timed out\n");
- BUG_ON(skb == NULL);
+ /* No need for the lock here as nobody will interrupt us anymore */
- rmb();
+ /*
+ * Treat frames in TX queue including the ones that caused the error.
+ * Free transmit buffers in upper layer.
+ */
+ for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+ struct macb_dma_desc *desc;
+ u32 ctrl;
+
+ desc = macb_tx_desc(bp, tail);
+ ctrl = desc->ctrl;
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ if (ctrl & MACB_BIT(TX_USED)) {
+ netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ } else {
+ /*
+ * "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about those.
+ * Statistics are updated by hardware.
+ */
+ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+ netdev_err(bp->dev,
+ "BUG: TX buffers exhausted mid-frame\n");
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
- DMA_TO_DEVICE);
- rp->skb = NULL;
- dev_kfree_skb_irq(skb);
+ desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- bp->tx_head = bp->tx_tail = 0;
-
- /* Enable the transmitter again */
- if (status & MACB_BIT(TGO))
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+ DMA_TO_DEVICE);
+ tx_skb->skb = NULL;
+ dev_kfree_skb(skb);
}
- if (!(status & MACB_BIT(COMP)))
- /*
- * This may happen when a buffer becomes complete
- * between reading the ISR and scanning the
- * descriptors. Nothing to worry about.
- */
- return;
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ /* Reinitialize the TX desc queue */
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+ /* Make TX ring reflect state of hardware */
+ bp->tx_head = bp->tx_tail = 0;
+
+ /* Now we are ready to start transmission again */
+ netif_wake_queue(bp->dev);
+
+ /* Clear stale TX status before re-enabling the TX interrupts */
+ macb_writel(bp, TSR, macb_readl(bp, TSR));
+ macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+ (unsigned long)status);
head = bp->tx_head;
- for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
- struct ring_info *rp = &bp->tx_skb[tail];
- struct sk_buff *skb = rp->skb;
- u32 bufstat;
+ for (tail = bp->tx_tail; tail != head; tail++) {
+ struct macb_tx_skb *tx_skb;
+ struct sk_buff *skb;
+ struct macb_dma_desc *desc;
+ u32 ctrl;
- BUG_ON(skb == NULL);
+ desc = macb_tx_desc(bp, tail);
+ /* Make hw descriptor updates visible to CPU */
rmb();
- bufstat = bp->tx_ring[tail].ctrl;
- if (!(bufstat & MACB_BIT(TX_USED)))
+ ctrl = desc->ctrl;
+
+ if (!(ctrl & MACB_BIT(TX_USED)))
break;
- netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
- dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ tx_skb = macb_tx_skb(bp, tail);
+ skb = tx_skb->skb;
+
+ netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+ macb_tx_ring_wrap(tail), skb->data);
+ dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
- rp->skb = NULL;
+ tx_skb->skb = NULL;
dev_kfree_skb_irq(skb);
}
bp->tx_tail = tail;
- if (netif_queue_stopped(bp->dev) &&
- TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+ if (netif_queue_stopped(bp->dev)
+ && CIRC_CNT(bp->tx_head, bp->tx_tail,
+ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
netif_wake_queue(bp->dev);
}
@@ -392,31 +530,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
{
unsigned int len;
unsigned int frag;
- unsigned int offset = 0;
+ unsigned int offset;
struct sk_buff *skb;
+ struct macb_dma_desc *desc;
- len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+ desc = macb_rx_desc(bp, last_frag);
+ len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
- netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
+ /*
+ * The ethernet header starts NET_IP_ALIGN bytes into the
+ * first buffer. Since the header is 14 bytes, this makes the
+ * payload word-aligned.
+ *
+ * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+ * the two padding bytes into the skb so that we avoid hitting
+ * the slowpath in memcpy(), and pull them off afterwards.
+ */
+ skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
bp->stats.rx_dropped++;
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ for (frag = first_frag; ; frag++) {
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+
+ /* Make descriptor updates visible to hardware */
wmb();
+
return 1;
}
- skb_reserve(skb, RX_OFFSET);
+ offset = 0;
+ len += NET_IP_ALIGN;
skb_checksum_none_assert(skb);
skb_put(skb, len);
- for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ for (frag = first_frag; ; frag++) {
unsigned int frag_len = RX_BUFFER_SIZE;
if (offset + frag_len > len) {
@@ -424,22 +579,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- (bp->rx_buffers +
- (RX_BUFFER_SIZE * frag)),
- frag_len);
+ macb_rx_buffer(bp, frag), frag_len);
offset += RX_BUFFER_SIZE;
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
- wmb();
+ desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
}
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ __skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
bp->stats.rx_packets++;
- bp->stats.rx_bytes += len;
- netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ bp->stats.rx_bytes += skb->len;
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
@@ -452,8 +609,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
{
unsigned int frag;
- for (frag = begin; frag != end; frag = NEXT_RX(frag))
- bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ for (frag = begin; frag != end; frag++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
+
+ /* Make descriptor updates visible to hardware */
wmb();
/*
@@ -466,15 +627,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
static int macb_rx(struct macb *bp, int budget)
{
int received = 0;
- unsigned int tail = bp->rx_tail;
+ unsigned int tail;
int first_frag = -1;
- for (; budget > 0; tail = NEXT_RX(tail)) {
+ for (tail = bp->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
u32 addr, ctrl;
+ /* Make hw descriptor updates visible to CPU */
rmb();
- addr = bp->rx_ring[tail].addr;
- ctrl = bp->rx_ring[tail].ctrl;
+
+ addr = desc->addr;
+ ctrl = desc->ctrl;
if (!(addr & MACB_BIT(RX_USED)))
break;
@@ -517,7 +681,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
@@ -552,10 +716,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
while (status) {
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
break;
}
+ netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
@@ -567,14 +733,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
- netdev_dbg(bp->dev, "scheduling RX softirq\n");
+ netdev_vdbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
- if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE)))
- macb_tx(bp);
+ if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+ macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&bp->tx_error_task);
+ break;
+ }
+
+ if (status & MACB_BIT(TCOMP))
+ macb_tx_interrupt(bp);
/*
* Link change detection isn't possible with RMII, so we'll
@@ -626,11 +797,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb *bp = netdev_priv(dev);
dma_addr_t mapping;
unsigned int len, entry;
+ struct macb_dma_desc *desc;
+ struct macb_tx_skb *tx_skb;
u32 ctrl;
unsigned long flags;
-#ifdef DEBUG
- netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev,
"start_xmit: len %u head %p data %p tail %p end %p\n",
skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +815,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(bp) < 1) {
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +824,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- entry = bp->tx_head;
- netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+ entry = macb_tx_ring_wrap(bp->tx_head);
+ bp->tx_head++;
+ netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
- bp->tx_skb[entry].skb = skb;
- bp->tx_skb[entry].mapping = mapping;
- netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+
+ tx_skb = &bp->tx_skb[entry];
+ tx_skb->skb = skb;
+ tx_skb->mapping = mapping;
+ netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +841,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (entry == (TX_RING_SIZE - 1))
ctrl |= MACB_BIT(TX_WRAP);
- bp->tx_ring[entry].addr = mapping;
- bp->tx_ring[entry].ctrl = ctrl;
- wmb();
+ desc = &bp->tx_ring[entry];
+ desc->addr = mapping;
+ desc->ctrl = ctrl;
- entry = NEXT_TX(entry);
- bp->tx_head = entry;
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
skb_tx_timestamp(skb);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (TX_BUFFS_AVAIL(bp) < 1)
+ if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -712,7 +888,7 @@ static int macb_alloc_consistent(struct macb *bp)
{
int size;
- size = TX_RING_SIZE * sizeof(struct ring_info);
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
bp->tx_skb = kmalloc(size, GFP_KERNEL);
if (!bp->tx_skb)
goto out_err;
@@ -775,9 +951,6 @@ static void macb_init_rings(struct macb *bp)
static void macb_reset_hw(struct macb *bp)
{
- /* Make sure we have the write buffer for ourselves */
- wmb();
-
/*
* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
@@ -788,11 +961,11 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
/* Clear all status flags */
- macb_writel(bp, TSR, ~0UL);
- macb_writel(bp, RSR, ~0UL);
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
/* Disable all interrupts */
- macb_writel(bp, IDR, ~0UL);
+ macb_writel(bp, IDR, -1);
macb_readl(bp, ISR);
}
@@ -860,8 +1033,12 @@ static u32 macb_dbw(struct macb *bp)
}
/*
- * Configure the receive DMA engine to use the correct receive buffer size.
- * This is a configurable parameter for GEM.
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - allow the use of INCR16 AHB bursts
+ * (if not supported by the FIFO, it falls back to the default burst length)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
*/
static void macb_configure_dma(struct macb *bp)
{
@@ -870,6 +1047,8 @@ static void macb_configure_dma(struct macb *bp)
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+ dmacfg |= GEM_BF(FBLDO, 16);
+ dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
gem_writel(bp, DMACFG, dmacfg);
}
}
@@ -879,9 +1058,10 @@ static void macb_init_hw(struct macb *bp)
u32 config;
macb_reset_hw(bp);
- __macb_set_hwaddr(bp);
+ macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
+ config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -891,6 +1071,8 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ bp->speed = SPEED_10;
+ bp->duplex = DUPLEX_HALF;
macb_configure_dma(bp);
@@ -902,13 +1084,8 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
/* Enable interrupts */
- macb_writel(bp, IER, (MACB_BIT(RCOMP)
- | MACB_BIT(RXUBR)
- | MACB_BIT(ISR_TUND)
- | MACB_BIT(ISR_RLE)
- | MACB_BIT(TXERR)
- | MACB_BIT(TCOMP)
- | MACB_BIT(ISR_ROVR)
+ macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+ | MACB_TX_INT_FLAGS
| MACB_BIT(HRESP)));
}
@@ -996,7 +1173,7 @@ static void macb_sethashtable(struct net_device *dev)
/*
* Enable/Disable promiscuous and multicast modes.
*/
-static void macb_set_rx_mode(struct net_device *dev)
+void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1205,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, NCFGR, cfg);
}
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
static int macb_open(struct net_device *dev)
{
@@ -1043,9 +1221,6 @@ static int macb_open(struct net_device *dev)
if (!bp->phy_dev)
return -EAGAIN;
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
-
err = macb_alloc_consistent(bp);
if (err) {
netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1135,7 +1310,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
return nstat;
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1356,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
+EXPORT_SYMBOL_GPL(macb_get_stats);
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1204,25 +1380,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return phy_ethtool_sset(phydev, cmd);
}
-static void macb_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
+{
+ return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
{
struct macb *bp = netdev_priv(dev);
+ unsigned int tail, head;
+ u32 *regs_buff = p;
+
+ regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+ | MACB_GREGS_VERSION;
+
+ tail = macb_tx_ring_wrap(bp->tx_tail);
+ head = macb_tx_ring_wrap(bp->tx_head);
+
+ regs_buff[0] = macb_readl(bp, NCR);
+ regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
+ regs_buff[2] = macb_readl(bp, NSR);
+ regs_buff[3] = macb_readl(bp, TSR);
+ regs_buff[4] = macb_readl(bp, RBQP);
+ regs_buff[5] = macb_readl(bp, TBQP);
+ regs_buff[6] = macb_readl(bp, RSR);
+ regs_buff[7] = macb_readl(bp, IMR);
- strcpy(info->driver, bp->pdev->dev.driver->name);
- strcpy(info->version, "$Revision: 1.14 $");
- strcpy(info->bus_info, dev_name(&bp->pdev->dev));
+ regs_buff[8] = tail;
+ regs_buff[9] = head;
+ regs_buff[10] = macb_tx_dma(bp, tail);
+ regs_buff[11] = macb_tx_dma(bp, head);
+
+ if (macb_is_gem(bp)) {
+ regs_buff[12] = gem_readl(bp, USRIO);
+ regs_buff[13] = gem_readl(bp, DMACFG);
+ }
}
-static const struct ethtool_ops macb_ethtool_ops = {
+const struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
- .get_drvinfo = macb_get_drvinfo,
+ .get_regs_len = macb_get_regs_len,
+ .get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
};
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
-static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1441,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
+EXPORT_SYMBOL_GPL(macb_ioctl);
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
@@ -1263,7 +1470,7 @@ static const struct of_device_id macb_dt_ids[] = {
MODULE_DEVICE_TABLE(of, macb_dt_ids);
-static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+static int macb_get_phy_mode_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1273,7 +1480,7 @@ static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
return -ENODEV;
}
-static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+static int macb_get_hwaddr_dt(struct macb *bp)
{
struct device_node *np = bp->pdev->dev.of_node;
if (np) {
@@ -1287,11 +1494,11 @@ static int __devinit macb_get_hwaddr_dt(struct macb *bp)
return -ENODEV;
}
#else
-static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+static int macb_get_phy_mode_dt(struct platform_device *pdev)
{
return -ENODEV;
}
-static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+static int macb_get_hwaddr_dt(struct macb *bp)
{
return -ENODEV;
}
@@ -1306,6 +1513,7 @@ static int __init macb_probe(struct platform_device *pdev)
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
+ struct pinctrl *pinctrl;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1313,6 +1521,15 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ err = PTR_ERR(pinctrl);
+ if (err == -EPROBE_DEFER)
+ goto err_out;
+
+ dev_warn(&pdev->dev, "No pinctrl provided\n");
+ }
+
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
if (!dev)
@@ -1328,6 +1545,7 @@ static int __init macb_probe(struct platform_device *pdev)
bp->dev = dev;
spin_lock_init(&bp->lock);
+ INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
bp->phy_interface = err;
}
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+ else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
MACB_BIT(CLKEN)));
@@ -1398,8 +1618,6 @@ static int __init macb_probe(struct platform_device *pdev)
macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
- bp->tx_pending = DEF_TX_RING_PENDING;
-
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 335e288f5314..570908b93578 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,10 +10,15 @@
#ifndef _MACB_H
#define _MACB_H
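+/* Size (in u32 words) and layout version of the ethtool register dump */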
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
/* MACB register offsets */
#define MACB_NCR 0x0000
#define MACB_NCFGR 0x0004
#define MACB_NSR 0x0008
+#define MACB_TAR 0x000c /* AT91RM9200 only */
+#define MACB_TCR 0x0010 /* AT91RM9200 only */
#define MACB_TSR 0x0014
#define MACB_RBQP 0x0018
#define MACB_TBQP 0x001c
@@ -69,6 +74,12 @@
#define GEM_HRT 0x0084
#define GEM_SA1B 0x0088
#define GEM_SA1T 0x008C
+#define GEM_SA2B 0x0090
+#define GEM_SA2T 0x0094
+#define GEM_SA3B 0x0098
+#define GEM_SA3T 0x009C
+#define GEM_SA4B 0x00A0
+#define GEM_SA4T 0x00A4
#define GEM_OTX 0x0100
#define GEM_DCFG1 0x0280
#define GEM_DCFG2 0x0284
@@ -133,6 +144,8 @@
#define MACB_RTY_SIZE 1
#define MACB_PAE_OFFSET 13
#define MACB_PAE_SIZE 1
+#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
#define MACB_RBOF_OFFSET 14
#define MACB_RBOF_SIZE 2
#define MACB_RLCE_OFFSET 16
@@ -145,6 +158,8 @@
#define MACB_IRXFCS_SIZE 1
/* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET 10
+#define GEM_GBE_SIZE 1
#define GEM_CLK_OFFSET 18
#define GEM_CLK_SIZE 3
#define GEM_DBW_OFFSET 21
@@ -156,8 +171,19 @@
#define GEM_DBW128 2
/* Bitfields in DMACFG. */
+#define GEM_FBLDO_OFFSET 0
+#define GEM_FBLDO_SIZE 5
+#define GEM_RXBMS_OFFSET 8
+#define GEM_RXBMS_SIZE 2
+#define GEM_TXPBMS_OFFSET 10
+#define GEM_TXPBMS_SIZE 1
+#define GEM_TXCOEN_OFFSET 11
+#define GEM_TXCOEN_SIZE 1
#define GEM_RXBS_OFFSET 16
#define GEM_RXBS_SIZE 8
+#define GEM_DDRP_OFFSET 24
+#define GEM_DDRP_SIZE 1
+
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0
@@ -178,6 +204,8 @@
#define MACB_TGO_SIZE 1
#define MACB_BEX_OFFSET 4
#define MACB_BEX_SIZE 1
+#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
#define MACB_COMP_OFFSET 5
#define MACB_COMP_SIZE 1
#define MACB_UND_OFFSET 6
@@ -246,6 +274,8 @@
/* Bitfields in USRIO (AT91) */
#define MACB_RMII_OFFSET 0
#define MACB_RMII_SIZE 1
+#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
+#define GEM_RGMII_SIZE 1
#define MACB_CLKEN_OFFSET 1
#define MACB_CLKEN_SIZE 1
@@ -352,7 +382,12 @@
__v; \
})
-struct dma_desc {
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
u32 addr;
u32 ctrl;
};
@@ -417,7 +452,12 @@ struct dma_desc {
#define MACB_TX_USED_OFFSET 31
#define MACB_TX_USED_SIZE 1
-struct ring_info {
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
struct sk_buff *skb;
dma_addr_t mapping;
};
@@ -502,12 +542,12 @@ struct macb {
void __iomem *regs;
unsigned int rx_tail;
- struct dma_desc *rx_ring;
+ struct macb_dma_desc *rx_ring;
void *rx_buffers;
unsigned int tx_head, tx_tail;
- struct dma_desc *tx_ring;
- struct ring_info *tx_skb;
+ struct macb_dma_desc *tx_ring;
+ struct macb_tx_skb *tx_skb;
spinlock_t lock;
struct platform_device *pdev;
@@ -515,6 +555,7 @@ struct macb {
struct clk *hclk;
struct net_device *dev;
struct napi_struct napi;
+ struct work_struct tx_error_task;
struct net_device_stats stats;
union {
struct macb_stats macb;
@@ -525,8 +566,6 @@ struct macb {
dma_addr_t tx_ring_dma;
dma_addr_t rx_buffers_dma;
- unsigned int rx_pending, tx_pending;
-
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
unsigned int link;
@@ -534,8 +573,22 @@ struct macb {
unsigned int duplex;
phy_interface_t phy_interface;
+
+ /* AT91RM9200 transmit */
+ struct sk_buff *skb; /* holds skb until xmit interrupt completes */
+ dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
+ int skb_length; /* saved skb length for pci_unmap_single */
};
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int macb_mii_init(struct macb *bp);
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+struct net_device_stats *macb_get_stats(struct net_device *dev);
+void macb_set_rx_mode(struct net_device *dev);
+void macb_set_hwaddr(struct macb *bp);
+void macb_get_hwaddr(struct macb *bp);
+
static inline bool macb_is_gem(struct macb *bp)
{
return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b34d4b6..f7f02900f650 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
/* DMA Normal interrupt */
#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
@@ -210,7 +211,7 @@
#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TUE)
+ DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@ struct xgmac_priv {
struct sk_buff **tx_skbuff;
unsigned int tx_head;
unsigned int tx_tail;
+ int tx_irq_cnt;
void __iomem *base;
unsigned int dma_buf_sz;
@@ -546,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
return -1;
}
+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
@@ -663,6 +669,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
{
struct xgmac_dma_desc *p;
dma_addr_t paddr;
+ int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
int entry = priv->rx_head;
@@ -671,13 +678,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
if (unlikely(skb == NULL))
break;
priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
+ bufsz, DMA_FROM_DEVICE);
desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
}
@@ -701,10 +708,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
unsigned int bfsize;
/* Set the Buffer size according to the MTU;
- * indeed, in case of jumbo we need to bump-up the buffer sizes.
+ * The total buffer size including any IP offset must be a multiple
+ * of 8 bytes.
*/
- bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
- 64);
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
@@ -845,9 +852,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
int i;
- void __iomem *ioaddr = priv->base;
-
- writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
unsigned int entry = priv->tx_tail;
@@ -888,7 +892,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
}
if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
- TX_THRESH)
+ MAX_SKB_FRAGS)
netif_wake_queue(priv->dev);
}
@@ -965,8 +969,7 @@ static int xgmac_hw_init(struct net_device *dev)
ctrl |= XGMAC_CONTROL_IPC;
writel(ctrl, ioaddr + XGMAC_CONTROL);
- value = DMA_CONTROL_DFF;
- writel(value, ioaddr + XGMAC_DMA_CONTROL);
+ writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1063,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct xgmac_priv *priv = netdev_priv(dev);
unsigned int entry;
int i;
+ u32 irq_flag;
int nfrags = skb_shinfo(skb)->nr_frags;
struct xgmac_dma_desc *desc, *first;
unsigned int desc_flags;
unsigned int len;
dma_addr_t paddr;
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
- (nfrags + 1)) {
- writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
- priv->base + XGMAC_DMA_INTR_ENA);
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
+ priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
+ irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
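+ /* Request a Tx completion interrupt only on every (DMA_TX_RING_SZ / 4)th frame */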
desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
TXDESC_CSUM_ALL : 0;
@@ -1113,9 +1112,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Interrupt on completion only for the last segment */
if (desc != first)
desc_set_tx_owner(desc, desc_flags |
- TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ TXDESC_LAST_SEG | irq_flag);
else
- desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+ desc_flags |= TXDESC_LAST_SEG | irq_flag;
/* Set owner on first desc last to avoid race condition */
wmb();
@@ -1124,6 +1123,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
writel(1, priv->base + XGMAC_DMA_TX_POLL);
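+ /* Stop the queue while a fully fragmented skb might not fit; xgmac_tx_complete() restarts it */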
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ MAX_SKB_FRAGS)
+ netif_stop_queue(dev);
return NETDEV_TX_OK;
}
@@ -1139,9 +1141,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
- writel(DMA_STATUS_RI | DMA_STATUS_NIS,
- priv->base + XGMAC_DMA_STATUS);
-
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@@ -1180,8 +1179,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
xgmac_rx_refill(priv);
- writel(1, priv->base + XGMAC_DMA_RX_POLL);
-
return count;
}
@@ -1205,7 +1202,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
return work_done;
}
@@ -1350,7 +1347,7 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
struct xgmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
if (intr_status & XGMAC_INT_STAT_PMT) {
netdev_dbg(priv->dev, "received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1365,9 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
struct xgmac_extra_stats *x = &priv->xstats;
/* read the status register (CSR5) */
- intr_status = readl(priv->base + XGMAC_DMA_STATUS);
- intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
- writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+ intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
+ __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
/* It displays the DMA process states (CSR5 register) */
/* ABNORMAL interrupts */
@@ -1405,8 +1402,8 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
}
/* TX/RX NORMAL interrupts */
- if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
- writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
+ __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
napi_schedule(&priv->napi);
}
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 2de50f95798f..d40c994a4f6a 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_CHELSIO
bool "Chelsio devices"
default y
- depends on PCI || INET
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d17c92f2dda..c8fdeaae56c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -974,8 +974,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
#endif
};
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -1332,7 +1331,7 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct adapter *adapter = dev->ml_priv;
@@ -1361,7 +1360,7 @@ static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = t1_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
};
static int __init t1_init_module(void)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47a84359d4e4..d84872e88171 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -367,18 +367,6 @@ void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
#endif /* 0 */
-
-/*
- * get_clock() implements a ns clock (see ktime_get)
- */
-static inline ktime_t get_clock(void)
-{
- struct timespec ts;
-
- ktime_get_ts(&ts);
- return timespec_to_ktime(ts);
-}
-
/*
* tx_sched_init() allocates resources and does basic initialization.
*/
@@ -411,7 +399,7 @@ static int tx_sched_init(struct sge *sge)
static inline int sched_update_avail(struct sge *sge)
{
struct sched *s = sge->tx_sched;
- ktime_t now = get_clock();
+ ktime_t now = ktime_get();
unsigned int i;
long long delta_time_ns;
@@ -2071,8 +2059,7 @@ static void espibug_workaround(unsigned long data)
/*
* Creates a t1_sge structure and returns suggested resource parameters.
*/
-struct sge * __devinit t1_sge_create(struct adapter *adapter,
- struct sge_params *p)
+struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
{
struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 8a43c7e19701..e0a03a31e7c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -892,8 +892,8 @@ static void power_sequence_xpak(adapter_t* adapter)
}
}
-int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
- struct adapter_params *p)
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+ struct adapter_params *p)
{
p->chip_version = bi->chip_term;
p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
@@ -992,7 +992,7 @@ out_err:
/*
* Determine a card's PCI mode.
*/
-static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
{
static const unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode;
@@ -1028,8 +1028,8 @@ void t1_free_sw_modules(adapter_t *adapter)
t1_espi_destroy(adapter->espi);
}
-static void __devinit init_link_config(struct link_config *lc,
- const struct board_info *bi)
+static void init_link_config(struct link_config *lc,
+ const struct board_info *bi)
{
lc->supported = bi->caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
@@ -1049,8 +1049,7 @@ static void __devinit init_link_config(struct link_config *lc,
* Allocate and initialize the data structures that hold the SW state of
* the Terminator HW modules.
*/
-int __devinit t1_init_sw_modules(adapter_t *adapter,
- const struct board_info *bi)
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
{
unsigned int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
index 8bed4a59e65f..b146acabf982 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.c
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -55,7 +55,7 @@ void t1_tp_destroy(struct petp *tp)
kfree(tp);
}
-struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p)
{
struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b6343241..8c82248ce416 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
#include <linux/mdio.h>
#include "version.h"
-#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
-#define CH_ALERT(adap, fmt, ...) \
- dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 9c9f3260344a..f15ee326d5c1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3078,7 +3078,7 @@ static void set_nqsets(struct adapter *adap)
}
}
-static int __devinit cxgb_enable_msix(struct adapter *adap)
+static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
@@ -3108,8 +3108,7 @@ static int __devinit cxgb_enable_msix(struct adapter *adap)
return err;
}
-static void __devinit print_port_info(struct adapter *adap,
- const struct adapter_info *ai)
+static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
static const char *pci_variant[] = {
"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
@@ -3165,7 +3164,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
#endif
};
-static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
+static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -3176,8 +3175,7 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -3381,7 +3379,7 @@ out:
return err;
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -3425,7 +3423,7 @@ static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = cxgb3_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
.err_handler = &t3_err_handler,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 2dbbcbb450d3..942dace361d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1382,7 +1382,7 @@ static inline int adap2type(struct adapter *adapter)
return type;
}
-void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
+void cxgb3_adapter_ofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
@@ -1396,7 +1396,7 @@ void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
register_tdev(tdev);
}
-void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
+void cxgb3_adapter_unofld(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index aef45d3113ba..3dee68612c9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3307,7 +3307,7 @@ static void config_pcie(struct adapter *adap)
G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
log2_width = fls(adap->params.pci.width) - 1;
acklat = ack_lat[log2_width][pldsize];
- if (val & 1) /* check LOsEnable */
+ if (val & PCI_EXP_LNKCTL_ASPM_L0S) /* check LOsEnable */
acklat += fst_trn_tx * 4;
rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 378988b5709a..6db997c78a5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -35,6 +35,8 @@
#ifndef __CXGB4_H__
#define __CXGB4_H__
+#include "t4_hw.h"
+
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -212,6 +214,8 @@ struct tp_err_stats {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned short tx_modq_map; /* TX modulation scheduler queue to */
+ /* channel map */
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
@@ -526,6 +530,7 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ u32 filter_mode;
unsigned int l2t_start;
unsigned int l2t_end;
struct l2t_data *l2t;
@@ -545,6 +550,129 @@ struct adapter {
spinlock_t stats_lock;
};
+/* Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/* Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ *
+ * Most of the following data structures are modeled on T4 capabilities.
+ * Drivers for earlier chips use the subsets which make sense for those chips.
+ * We really need to come up with a hardware-independent mechanism to
+ * represent hardware filter capabilities ...
+ */
+struct ch_filter_tuple {
+ /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /* Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+};
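/* Editorial sketch, not part of the patch: with the ((field & mask) == value)
 * rule described above, a (0, 0) tuple is a wildcard and a full-width mask
 * gives an exact match.  The helper below is purely illustrative; it assumes
 * <linux/string.h> for memset() and <linux/if_ether.h> for ETH_P_IP.
 */
static inline void example_ipv4_only_tuple(struct ch_filter_tuple *val,
					   struct ch_filter_tuple *mask)
{
	memset(val, 0, sizeof(*val));		/* zeroed tuples: "don't care" */
	memset(mask, 0, sizeof(*mask));
	val->ethtype = ETH_P_IP;		/* match the IPv4 Ethertype ... */
	mask->ethtype = 0xffff;			/* ... exactly */
}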
+
+/* A filter ioctl command.
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter.
+ */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /* Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+
+ /* Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t rpttid:1; /* report TID in RSS hash field */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
+ uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
+ /* 1 => TCB contains IQ ID */
+
+ /* Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some Ethernet header rewriting.
+ */
+ uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t newdmac:1; /* rewrite destination MAC address */
+ uint32_t newsmac:1; /* rewrite source MAC address */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
+ uint8_t smac[ETH_ALEN]; /* new source MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /* Filter rule value/mask pairs.
+ */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum {
+ VLAN_NOCHANGE = 0, /* default */
+ VLAN_REMOVE,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
+
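/* Editorial sketch, not part of the patch: a minimal specification built from
 * the structures and enums above -- an IPv4 rule that counts hits and drops
 * any packet whose protocol tuple matches TCP exactly.  The helper name is
 * illustrative and IPPROTO_TCP comes from <uapi/linux/in.h>.
 */
static inline void example_drop_tcp_spec(struct ch_filter_specification *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->hitcnts = 1;			/* count hits in the TCB */
	fs->type = 0;				/* 0 => IPv4 */
	fs->action = FILTER_DROP;		/* drop on match */
	fs->val.proto = IPPROTO_TCP;		/* protocol tuple value ... */
	fs->mask.proto = 0xff;			/* ... with a full 8-bit mask */
}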
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -701,6 +829,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+
+struct fw_filter_wr;
+
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +871,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0df1284df497..c306df7d4568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -175,6 +175,30 @@ enum {
MIN_FL_ENTRIES = 16
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -325,6 +349,9 @@ enum {
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+module_param(tp_vlan_pri_map, uint, 0644);
+MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
+
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -506,8 +533,67 @@ static int link_start(struct net_device *dev)
return ret;
}
-/*
- * Response queue handler for the FW event queue.
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+	/* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/* Handle a filter write/deletion reply.
+ */
+static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int idx = GET_TID(rpl);
+ unsigned int nidx = idx - adap->tids.ftid_base;
+ unsigned int ret;
+ struct filter_entry *f;
+
+ if (idx >= adap->tids.ftid_base && nidx <
+ (adap->tids.nftids + adap->tids.nsftids)) {
+ idx = nidx;
+ ret = GET_TCB_COOKIE(rpl->cookie);
+ f = &adap->tids.ftid_tab[idx];
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ }
+ }
+}
+
+/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
@@ -542,6 +628,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (void *)rsp;
+
+ filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
@@ -983,6 +1073,148 @@ static void t4_free_mem(void *addr)
kfree(addr);
}
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode, which will prevent any further actions against
+ * it until we get a reply from the firmware on the completion status of the
+ * request.
+ */
+static int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int ftid;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter->l2t);
+ if (f->l2t == NULL)
+ return -EAGAIN;
+ if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
+ f->fs.eport, f->fs.dmac)) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
+ fwr->tid_to_iq =
+ htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
+ V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
+ V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+ V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
+ V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
+ V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
+ V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+ V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
+ V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
+ V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
+ V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Delete the filter at a specified index.
+ */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct sk_buff *skb;
+ struct fw_filter_wr *fwr;
+ unsigned int len, ftid;
+
+ len = sizeof(*fwr);
+ ftid = adapter->tids.ftid_base + fidx;
+
+ skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -1762,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
-
- return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
- c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ struct sge_rspq *q;
+ int i;
+ int r = 0;
+
+ for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+ q = &adap->sge.ethrxq[i].rspq;
+ r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
+ if (r) {
+ dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+ break;
+ }
+ }
+ return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2148,8 +2391,8 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
- unsigned int idx, unsigned int size_mb)
+static void add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
@@ -2159,7 +2402,7 @@ static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
de->d_inode->i_size = size_mb << 20;
}
-static int __devinit setup_debugfs(struct adapter *adap)
+static int setup_debugfs(struct adapter *adap)
{
int i;
@@ -2195,7 +2438,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data)
if (t->afree) {
union aopen_entry *p = t->afree;
- atid = p - t->atid_tab;
+ atid = (p - t->atid_tab) + t->atid_base;
t->afree = p->next;
p->data = data;
t->atids_in_use++;
@@ -2210,7 +2453,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid);
*/
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
- union aopen_entry *p = &t->atid_tab[atid];
+ union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
spin_lock_bh(&t->atid_lock);
p->next = t->afree;
@@ -2249,8 +2492,34 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
-/*
- * Release a server TID.
+/* Allocate a server filter TID and set it to the supplied value.
+ */
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_next_zero_bit(t->stid_bmap,
+ t->nstids + t->nsftids, t->nstids);
+ if (stid < (t->nstids + t->nsftids))
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_sftid);
+
+/* Release a server TID.
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
@@ -2362,18 +2631,26 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
static int tid_init(struct tid_info *t)
{
size_t size;
+ unsigned int stid_bmap_size;
unsigned int natids = t->natids;
- size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
- BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->nsftids * sizeof(*t->stid_tab) +
+ stid_bmap_size * sizeof(long) +
+ t->nftids * sizeof(*t->ftid_tab) +
+ t->nsftids * sizeof(*t->ftid_tab);
+
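+	/* The single allocation below is carved up, in order, into tid_tab,
+	 * atid_tab, stid_tab (including the server-filter stids), the stid
+	 * bitmap and finally ftid_tab, matching the pointer arithmetic that
+	 * follows.
+	 */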
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
return -ENOMEM;
t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
- t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
+ t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
@@ -2388,7 +2665,7 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids);
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
return 0;
}
@@ -2404,7 +2681,8 @@ static int tid_init(struct tid_info *t)
* Returns <0 on error and one of the %NET_XMIT_* values on success.
*/
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue)
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue)
{
unsigned int chan;
struct sk_buff *skb;
@@ -2750,6 +3028,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
struct cxgb4_lld_info lli;
+ unsigned short i;
lli.pdev = adap->pdev;
lli.l2t = adap->l2t;
@@ -2776,10 +3055,16 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
+ lli.filt_mode = adap->filter_mode;
+ /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
+ for (i = 0; i < NCHAN; i++)
+ lli.tx_modq[i] = i;
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
lli.fw_vers = adap->params.fw_vers;
lli.dbfifo_int_thresh = dbfifo_int_thresh;
+ lli.sge_pktshift = adap->sge.pktshift;
+ lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2999,6 +3284,126 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
+/* Return an error number if the indicated filter isn't writable ...
+ */
+static int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this like the filter being locked, currently
+ * pending in another operation, etc.
+ */
+static int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue, unsigned char port, unsigned char mask)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+ int i;
+ u8 *val;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
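+	/* The server-filter entries occupy the region of ftid_tab that
+	 * follows the nftids "normal" filters, hence the re-basing above.
+	 */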
+
+ /* Check to make sure the filter requested is writable ...
+ */
+ f = &adap->tids.ftid_tab[stid];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adap, f);
+
+ /* Clear out filter specifications */
+ memset(&f->fs, 0, sizeof(struct ch_filter_specification));
+ f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.mask.lport = ~0;
+ val = (u8 *)&sip;
+ if ((val[0] | val[1] | val[2] | val[3]) != 0) {
+ for (i = 0; i < 4; i++) {
+ f->fs.val.lip[i] = val[i];
+ f->fs.mask.lip[i] = ~0;
+ }
+ if (adap->filter_mode & F_PORT) {
+ f->fs.val.iport = port;
+ f->fs.mask.iport = mask;
+ }
+ }
+
+ f->fs.dirsteer = 1;
+ f->fs.iq = queue;
+ /* Mark filter as locked */
+ f->locked = 1;
+ f->fs.rpttid = 1;
+
+ ret = set_filter_wr(adap, stid);
+ if (ret) {
+ clear_filter(adap, f);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_create_server_filter);
+
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ int ret;
+ struct filter_entry *f;
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+
+ /* Adjust stid to correct filter index */
+ stid -= adap->tids.nstids;
+ stid += adap->tids.nftids;
+
+ f = &adap->tids.ftid_tab[stid];
+ /* Unlock the filter */
+ f->locked = 0;
+
+ ret = delete_filter(adap, stid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_remove_server_filter);
+
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *ns)
{
@@ -3203,7 +3608,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- c->retval_len16 = htonl(FW_LEN16(*c));
+ c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3245,6 +3650,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
v = t4_read_reg(adap, TP_PIO_DATA);
t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ /* first 4 Tx modulation queues point to consecutive Tx channels */
+ adap->params.tp.tx_modq_map = 0xE4;
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
+
+ /* associate each Tx modulation queue with consecutive Tx channels */
+ v = 0x84218421;
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_HDR);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_FIFO);
+ t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, A_TP_TX_SCHED_PCMD);
+
+#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
+ if (is_offload(adap)) {
+ t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
+ V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
+ V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
+ }
+
/* get basic stuff going */
return t4_early_init(adap, adap->fn);
}
@@ -3397,7 +3830,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -3422,7 +3855,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
NULL);
if (ret < 0)
@@ -3497,7 +3930,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -3929,7 +4362,7 @@ static int adap_init0(struct adapter *adap)
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
if (ret < 0)
@@ -4035,6 +4468,10 @@ static int adap_init0(struct adapter *adap)
for (j = 0; j < NCHAN; j++)
adap->params.tp.tx_modq[j] = j;
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->filter_mode, 1,
+ TP_VLAN_PRI_MAP);
+
adap->flags |= FW_OK;
return 0;
@@ -4173,7 +4610,7 @@ static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void __devinit cfg_queues(struct adapter *adap)
+static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
@@ -4257,7 +4694,7 @@ static void __devinit cfg_queues(struct adapter *adap)
* Reduce the number of Ethernet queues across all ports to at most n.
* n provides at least one queue per port.
*/
-static void __devinit reduce_ethqs(struct adapter *adap, int n)
+static void reduce_ethqs(struct adapter *adap, int n)
{
int i;
struct port_info *pi;
@@ -4284,7 +4721,7 @@ static void __devinit reduce_ethqs(struct adapter *adap, int n)
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
-static int __devinit enable_msix(struct adapter *adap)
+static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
int i, err, want, need;
@@ -4333,7 +4770,7 @@ static int __devinit enable_msix(struct adapter *adap)
#undef EXTRA_VECS
-static int __devinit init_rss(struct adapter *adap)
+static int init_rss(struct adapter *adap)
{
unsigned int i, j;
@@ -4349,7 +4786,7 @@ static int __devinit init_rss(struct adapter *adap)
return 0;
}
-static void __devinit print_port_info(const struct net_device *dev)
+static void print_port_info(const struct net_device *dev)
{
static const char *base[] = {
"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
@@ -4386,7 +4823,7 @@ static void __devinit print_port_info(const struct net_device *dev)
adap->params.vpd.sn, adap->params.vpd.ec);
}
-static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
@@ -4419,8 +4856,7 @@ static void free_some_resources(struct adapter *adapter)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
-static int __devinit init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err;
struct port_info *pi;
@@ -4640,7 +5076,7 @@ sriov:
return err;
}
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -4662,6 +5098,17 @@ static void __devexit remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ for (i = 0; i < (adapter->tids.nftids +
+ adapter->tids.nsftids); i++, f++)
+ if (f->valid)
+ clear_filter(adapter, f);
+ }
+
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
@@ -4680,7 +5127,7 @@ static struct pci_driver cxgb4_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4_pci_tbl,
.probe = init_one,
- .remove = __devexit_p(remove_one),
+ .remove = remove_one,
.err_handler = &cxgb4_eeh,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 39bec73ff87c..e2bbc7f3e2de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
#include <linux/atomic.h>
/* CPL message priority levels */
@@ -97,7 +98,9 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
+ unsigned int atid_base;
+ struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -129,7 +132,7 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
stid -= t->stid_base;
- return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+ return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
@@ -141,6 +144,7 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
@@ -148,8 +152,14 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, unsigned int queue);
-
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue);
+int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, __be16 vlan,
+ unsigned int queue,
+ unsigned char port, unsigned char mask);
+int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -221,9 +231,16 @@ struct cxgb4_lld_info {
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned short filt_mode; /* filter optional components */
+ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
+ /* scheduler queue */
void __iomem *gts_reg; /* address of GTS register */
void __iomem *db_reg; /* address of kernel doorbell */
int dbfifo_int_thresh; /* doorbell fifo int threshold */
+ unsigned int sge_pktshift; /* Padding between CPL and */
+ /* packet data */
+ bool enable_fw_ofld_conn; /* Enable connection through fw */
+ /* WR */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6ac77a62f361..29878098101e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
handle_failed_resolution(adap, arpq);
}
+/* Allocate an L2T entry for use by a switching rule. Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ write_lock_bh(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_set(&e->refcnt, 1);
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&d->lock);
+ return e;
+}
+
+/* Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETH_ALEN);
+ return write_l2e(adap, e, 0);
+}
+
struct l2t_data *t4_init_l2t(void)
{
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c6410..108c0f1fce1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+ u8 port, u8 *eth_addr);
struct l2t_data *t4_init_l2t(void);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 3ecc087d732d..fe9a2ea3588b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -508,7 +508,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= 8) {
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
q->pend_cred &= 7;
}
@@ -2082,10 +2082,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PADEN(1));
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
c.fl0size = htons(flsz);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 730ae2cfa49e..22f3af5166bf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals,
unsigned int nregs, unsigned int start_idx)
{
@@ -648,12 +648,12 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
- ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, SF_DATA);
return ret;
@@ -676,14 +676,14 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
{
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
- if (t4_read_reg(adapter, SF_OP) & BUSY)
+ if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
return -EBUSY;
cont = cont ? SF_CONT : 0;
lock = lock ? SF_LOCK : 0;
t4_write_reg(adapter, SF_DATA, val);
t4_write_reg(adapter, SF_OP, lock |
cont | BYTECNT(byte_cnt - 1) | OP_WR);
- return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
/**
@@ -2003,7 +2003,7 @@ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
*
* Initialize the congestion control parameters.
*/
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
@@ -2252,14 +2252,14 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
t4_write_reg(adap, EPIO_REG(DATA0), mask0);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
/* write CRC */
t4_write_reg(adap, EPIO_REG(DATA0), crc);
t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
return -ETIMEDOUT;
}
#undef EPIO_REG
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return 0;
}
+/* t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
@@ -2405,7 +2425,7 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_mbasyncnot = htonl(
+ c.err_to_clearinit = htonl(
FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
@@ -2426,7 +2446,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_mbasyncnot);
+ v = ntohl(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
if (state) {
if (v & FW_HELLO_CMD_ERR)
@@ -2774,7 +2794,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_READ);
- caps_cmd.retval_len16 =
+ caps_cmd.cfvalid_to_len16 =
htonl(FW_CAPS_CONFIG_CMD_CFVALID |
FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
@@ -2797,7 +2817,7 @@ int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST |
FW_CMD_WRITE);
- caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}
@@ -3440,8 +3460,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
return 0;
}
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
u16 val;
@@ -3460,8 +3479,7 @@ static void __devinit get_pci_mode(struct adapter *adapter,
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
@@ -3485,7 +3503,7 @@ int t4_wait_dev_ready(struct adapter *adap)
return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}
-static int __devinit get_flash_params(struct adapter *adap)
+static int get_flash_params(struct adapter *adap)
{
int ret;
u32 info;
@@ -3521,7 +3539,7 @@ static int __devinit get_flash_params(struct adapter *adap)
* values for some adapter tunables, take PHYs out of reset, and
* initialize the MDIO interface.
*/
-int __devinit t4_prep_adapter(struct adapter *adapter)
+int t4_prep_adapter(struct adapter *adapter)
{
int ret;
@@ -3549,7 +3567,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
return 0;
}
-int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
int ret, i, j = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index eb71b8250b91..261d17703adc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -193,8 +193,24 @@ struct work_request_hdr {
__be64 wr_lo;
};
+/* wr_hi fields */
+#define S_WR_OP 24
+#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+
#define WR_HDR struct work_request_hdr wr
+/* option 0 fields */
+#define S_MSS_IDX 60
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define M_RSS_QUEUE 0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
struct cpl_pass_open_req {
WR_HDR;
union opcode_tid ot;
@@ -204,12 +220,14 @@ struct cpl_pass_open_req {
__be32 peer_ip;
__be64 opt0;
#define TX_CHAN(x) ((x) << 2)
+#define NO_CONG(x) ((x) << 4)
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
+#define TCAM_BYPASS(x) ((u64)(x) << 48)
#define NAGLE(x) ((u64)(x) << 49)
#define WND_SCALE(x) ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
@@ -247,8 +265,10 @@ struct cpl_pass_accept_rpl {
#define RSS_QUEUE_VALID (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x) ((x) << 12)
+#define PACE(x) ((x) << 16)
#define TX_QUEUE(x) ((x) << 23)
#define RX_CHANNEL(x) ((x) << 26)
+#define CCTRL_ECN(x) ((x) << 27)
#define WND_SCALE_EN(x) ((x) << 28)
#define TSTAMPS_EN(x) ((x) << 29)
#define SACK_EN(x) ((x) << 30)
@@ -292,6 +312,9 @@ struct cpl_pass_establish {
union opcode_tid ot;
__be32 rsvd;
__be32 tos_stid;
+#define PASS_OPEN_TID(x) ((x) << 0)
+#define PASS_OPEN_TOS(x) ((x) << 24)
+#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
__be16 mac_idx;
@@ -332,6 +355,7 @@ struct cpl_set_tcb_field {
__be16 word_cookie;
#define TCB_WORD(x) ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
+#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
__be64 mask;
__be64 val;
};
@@ -536,6 +560,37 @@ struct cpl_rx_pkt {
__be16 err_vec;
};
+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN 0
+#define M_RX_ETHHDR_LEN 0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_MACIDX 8
+#define M_RX_MACIDX 0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RXF_SYN 21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN V_RXF_SYN(1U)
+
+#define S_RX_CHAN 28
+#define M_RX_CHAN 0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN 0
+#define M_RX_TCPHDR_LEN 0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN 6
+#define M_RX_IPHDR_LEN 0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
struct cpl_trace_pkt {
u8 opcode;
u8 intf;
@@ -634,6 +689,17 @@ struct cpl_fw6_msg {
/* cpl_fw6_msg.type values */
enum {
FW6_TYPE_CMD_RPL = 0,
+ FW6_TYPE_WR_RPL = 1,
+ FW6_TYPE_CQE = 2,
+ FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+};
+
+struct cpl_fw6_msg_ofld_connection_wr_rpl {
+ __u64 cookie;
+ __be32 tid; /* or atid in case of active failure */
+ __u8 t_state;
+ __u8 retval;
+ __u8 rsvd[2];
};
enum {
@@ -658,6 +724,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
#define ULPTX_CMD(x) ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
+#define ULPTX_MORE (1U << 23)
__be32 len0;
__be64 addr0;
struct ulptx_sge_pair sge[0];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1a8b57200f6..83ec5f7844ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -67,7 +67,7 @@
#define QID_MASK 0xffff8000U
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
-#define DBPRIO 0x00004000U
+#define DBPRIO(x) ((x) << 14)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
@@ -193,6 +193,12 @@
#define SGE_FL_BUFFER_SIZE1 0x1048
#define SGE_FL_BUFFER_SIZE2 0x104c
#define SGE_FL_BUFFER_SIZE3 0x1050
+#define SGE_FL_BUFFER_SIZE4 0x1054
+#define SGE_FL_BUFFER_SIZE5 0x1058
+#define SGE_FL_BUFFER_SIZE6 0x105c
+#define SGE_FL_BUFFER_SIZE7 0x1060
+#define SGE_FL_BUFFER_SIZE8 0x1064
+
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
@@ -217,6 +223,17 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define SGE_DBFIFO_STATUS 0x10a4
+#define HP_INT_THRESH_SHIFT 28
+#define HP_INT_THRESH_MASK 0xfU
+#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT)
+#define LP_INT_THRESH_SHIFT 12
+#define LP_INT_THRESH_MASK 0xfU
+#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT)
+
+#define SGE_DOORBELL_CONTROL 0x10a8
+#define ENABLE_DROP (1 << 13)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -277,6 +294,10 @@
#define A_SGE_CTXT_CMD 0x11fc
#define A_SGE_DBQ_CTXT_BADDR 0x1084
+#define PCIE_PF_CFG 0x40
+#define AIVEC(x) ((x) << 4)
+#define AIVEC_MASK 0x3ffU
+
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
#define UNXSPLCPLERR 0x20000000U
@@ -322,6 +343,13 @@
#define PCIE_MEM_ACCESS_OFFSET 0x306c
#define PCIE_FW 0x30b8
+#define PCIE_FW_ERR 0x80000000U
+#define PCIE_FW_INIT 0x40000000U
+#define PCIE_FW_HALT 0x20000000U
+#define PCIE_FW_MASTER_VLD 0x00008000U
+#define PCIE_FW_MASTER(x) ((x) << 12)
+#define PCIE_FW_MASTER_MASK 0x7
+#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
@@ -432,6 +460,9 @@
#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+#define CIM_PF_HOST_INT_ENABLE 0x288
+#define MBMSGRDYINTEN(x) ((x) << 19)
+
#define CIM_PF_HOST_INT_CAUSE 0x28c
#define MBMSGRDYINT 0x00080000U
@@ -922,7 +953,7 @@
#define SF_DATA 0x193f8
#define SF_OP 0x193fc
-#define BUSY 0x80000000U
+#define SF_BUSY 0x80000000U
#define SF_LOCK 0x00000010U
#define SF_CONT 0x00000008U
#define BYTECNT_MASK 0x00000006U
@@ -981,6 +1012,7 @@
#define I2CM 0x00000002U
#define CIM 0x00000001U
+#define PL_INT_ENABLE 0x19410
#define PL_INT_MAP0 0x19414
#define PL_RST 0x19428
#define PIORST 0x00000002U
@@ -1032,4 +1064,41 @@
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
#define XGMAC_PORT_INT_CAUSE 0x10dc
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+
+#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+
+#define S_TX_MODQ_WEIGHT3 24
+#define M_TX_MODQ_WEIGHT3 0xffU
+#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+
+#define S_TX_MODQ_WEIGHT2 16
+#define M_TX_MODQ_WEIGHT2 0xffU
+#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+
+#define S_TX_MODQ_WEIGHT1 8
+#define M_TX_MODQ_WEIGHT1 0xffU
+#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+
+#define S_TX_MODQ_WEIGHT0 0
+#define M_TX_MODQ_WEIGHT0 0xffU
+#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+
+#define A_TP_TX_SCHED_HDR 0x23
+
+#define A_TP_TX_SCHED_FIFO 0x24
+
+#define A_TP_TX_SCHED_PCMD 0x25
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a6364632b490..a0dcccd846c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -35,6 +35,45 @@
#ifndef _T4FW_INTERFACE_H_
#define _T4FW_INTERFACE_H_
+enum fw_retval {
+	FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+	FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+ FW_SCSI_ABORT_REQUESTED = 128, /* */
+ FW_SCSI_ABORT_TIMEDOUT = 129, /* */
+ FW_SCSI_ABORTED = 130, /* */
+ FW_SCSI_CLOSE_REQUESTED = 131, /* */
+ FW_ERR_LINK_DOWN = 132, /* */
+ FW_RDEV_NOT_READY = 133, /* */
+ FW_ERR_RDEV_LOST = 134, /* */
+ FW_ERR_RDEV_LOGO = 135, /* */
+ FW_FCOE_NO_XCHG = 136, /* */
+ FW_SCSI_RSP_ERR = 137, /* */
+ FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_DDP_ERR = 141, /* DDP error*/
+ FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
+};
+
#define FW_T4VF_SGE_BASE_ADDR 0x0000
#define FW_T4VF_MPS_BASE_ADDR 0x0100
#define FW_T4VF_PL_BASE_ADDR 0x0200
@@ -46,6 +85,7 @@ enum fw_wr_opcodes {
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
+ FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
FW_CMD_WR = 0x10,
@@ -68,6 +108,7 @@ struct fw_wr_hdr {
};
#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_WR_ATOMIC(x) ((x) << 23)
#define FW_WR_FLUSH(x) ((x) << 22)
#define FW_WR_COMPL(x) ((x) << 21)
@@ -80,6 +121,282 @@ struct fw_wr_hdr {
#define FW_WR_LEN16(x) ((x) << 0)
#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
+#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
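+/* The reply cookie is extracted with GET_TCB_COOKIE() on the CPL_SET_TCB_RPL
+ * and handled by filter_rpl() in cxgb4_main.c.
+ */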
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
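+/* Field accessor convention used below: S_* is a field's bit offset, M_* its
+ * mask (after shifting down), V_*(x) places a value into the field, G_*(x)
+ * extracts it, and F_* is the single-bit flag form.
+ */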
+#define S_FW_FILTER_WR_TID 12
+#define M_FW_FILTER_WR_TID 0xfffff
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+#define G_FW_FILTER_WR_TID(x) \
+ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define M_FW_FILTER_WR_RQTYPE 0x1
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+#define G_FW_FILTER_WR_RQTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
+#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define M_FW_FILTER_WR_NOREPLY 0x1
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+#define G_FW_FILTER_WR_NOREPLY(x) \
+ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
+#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
+
+#define S_FW_FILTER_WR_IQ 0
+#define M_FW_FILTER_WR_IQ 0x3ff
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+#define G_FW_FILTER_WR_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define M_FW_FILTER_WR_DEL_FILTER 0x1
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define G_FW_FILTER_WR_DEL_FILTER(x) \
+ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define M_FW_FILTER_WR_RPTTID 0x1
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+#define G_FW_FILTER_WR_RPTTID(x) \
+ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
+#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
+
+#define S_FW_FILTER_WR_DROP 24
+#define M_FW_FILTER_WR_DROP 0x1
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+#define G_FW_FILTER_WR_DROP(x) \
+ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
+#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define M_FW_FILTER_WR_DIRSTEER 0x1
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+#define G_FW_FILTER_WR_DIRSTEER(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
+#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define M_FW_FILTER_WR_MASKHASH 0x1
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+#define G_FW_FILTER_WR_MASKHASH(x) \
+ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
+#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
+#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define M_FW_FILTER_WR_LPBK 0x1
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+#define G_FW_FILTER_WR_LPBK(x) \
+ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
+#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define M_FW_FILTER_WR_DMAC 0x1
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+#define G_FW_FILTER_WR_DMAC(x) \
+ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
+#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define M_FW_FILTER_WR_SMAC 0x1
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+#define G_FW_FILTER_WR_SMAC(x) \
+ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
+#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define M_FW_FILTER_WR_INSVLAN 0x1
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+#define G_FW_FILTER_WR_INSVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
+#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define M_FW_FILTER_WR_RMVLAN 0x1
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+#define G_FW_FILTER_WR_RMVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
+#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define M_FW_FILTER_WR_HITCNTS 0x1
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+#define G_FW_FILTER_WR_HITCNTS(x) \
+ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
+#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define M_FW_FILTER_WR_TXCHAN 0x3
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+#define G_FW_FILTER_WR_TXCHAN(x) \
+ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define M_FW_FILTER_WR_PRIO 0x1
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+#define G_FW_FILTER_WR_PRIO(x) \
+ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
+#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define M_FW_FILTER_WR_L2TIX 0xfff
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+#define G_FW_FILTER_WR_L2TIX(x) \
+ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define M_FW_FILTER_WR_FRAG 0x1
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+#define G_FW_FILTER_WR_FRAG(x) \
+ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
+#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define M_FW_FILTER_WR_FRAGM 0x1
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+#define G_FW_FILTER_WR_FRAGM(x) \
+ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
+#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define M_FW_FILTER_WR_IVLAN_VLD 0x1
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+#define G_FW_FILTER_WR_IVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
+#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define M_FW_FILTER_WR_OVLAN_VLD 0x1
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+#define G_FW_FILTER_WR_OVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
+#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
+#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
+#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define M_FW_FILTER_WR_RX_CHAN 0x1
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+#define G_FW_FILTER_WR_RX_CHAN(x) \
+ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
+#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define M_FW_FILTER_WR_MACI 0x1ff
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+#define G_FW_FILTER_WR_MACI(x) \
+ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define M_FW_FILTER_WR_MACIM 0x1ff
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+#define G_FW_FILTER_WR_MACIM(x) \
+ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define M_FW_FILTER_WR_FCOE 0x1
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+#define G_FW_FILTER_WR_FCOE(x) \
+ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
+#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define M_FW_FILTER_WR_FCOEM 0x1
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+#define G_FW_FILTER_WR_FCOEM(x) \
+ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
+#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
+
+#define S_FW_FILTER_WR_PORT 9
+#define M_FW_FILTER_WR_PORT 0x7
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+#define G_FW_FILTER_WR_PORT(x) \
+ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define M_FW_FILTER_WR_PORTM 0x7
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+#define G_FW_FILTER_WR_PORTM(x) \
+ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define M_FW_FILTER_WR_MATCHTYPE 0x7
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+#define G_FW_FILTER_WR_MATCHTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define M_FW_FILTER_WR_MATCHTYPEM 0x7
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define G_FW_FILTER_WR_MATCHTYPEM(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
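Every field added above follows the same five-macro pattern: S_* is the bit offset, M_* the field mask, V_*(x) shifts a value into position, G_*(x) extracts it again, and F_* is the convenience form for single-bit flags. A minimal sketch of the pattern in use, composing one 32-bit word of a filter work request from macros defined above (the helper and its argument names are illustrative only, not part of the patch):

	/* illustrative only: build and decode a word with the V_/G_/F_ macros */
	u32 build_filter_flags(unsigned int txchan, bool drop, bool hitcnts)
	{
		u32 w = 0;

		w |= V_FW_FILTER_WR_TXCHAN(txchan);	/* 2-bit field at bit 13 */
		if (drop)
			w |= F_FW_FILTER_WR_DROP;	/* single-bit flag at bit 24 */
		if (hitcnts)
			w |= F_FW_FILTER_WR_HITCNTS;	/* single-bit flag at bit 15 */
		return w;
	}

	unsigned int filter_txchan(u32 w)
	{
		return G_FW_FILTER_WR_TXCHAN(w);	/* mask the 2-bit field back out */
	}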
struct fw_ulptx_wr {
__be32 op_to_compl;
@@ -99,6 +416,108 @@ struct fw_eth_tx_pkt_wr {
__be64 r3;
};
+struct fw_ofld_connection_wr {
+ __be32 op_compl;
+ __be32 len16_pkd;
+ __u64 cookie;
+ __be64 r2;
+ __be64 r3;
+ struct fw_ofld_connection_le {
+ __be32 version_cpl;
+ __be32 filter;
+ __be32 r1;
+ __be16 lport;
+ __be16 pport;
+ union fw_ofld_connection_leip {
+ struct fw_ofld_connection_le_ipv4 {
+ __be32 pip;
+ __be32 lip;
+ __be64 r0;
+ __be64 r1;
+ __be64 r2;
+ } ipv4;
+ struct fw_ofld_connection_le_ipv6 {
+ __be64 pip_hi;
+ __be64 pip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ } ipv6;
+ } u;
+ } le;
+ struct fw_ofld_connection_tcb {
+ __be32 t_state_to_astid;
+ __be16 cplrxdataack_cplpassacceptrpl;
+ __be16 rcv_adv;
+ __be32 rcv_nxt;
+ __be32 tx_max;
+ __be64 opt0;
+ __be32 opt2;
+ __be32 r1;
+ __be64 r2;
+ __be64 r3;
+ } tcb;
+};
+
+#define S_FW_OFLD_CONNECTION_WR_VERSION 31
+#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
+#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
+#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
+ M_FW_OFLD_CONNECTION_WR_VERSION)
+#define F_FW_OFLD_CONNECTION_WR_VERSION \
+ V_FW_OFLD_CONNECTION_WR_VERSION(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPL 30
+#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
+#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
+#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
+#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
+#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
+#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
+ M_FW_OFLD_CONNECTION_WR_T_STATE)
+
+#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
+#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
+#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
+ M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
+
+#define S_FW_OFLD_CONNECTION_WR_ASTID 0
+#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
+#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
+#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
+#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
+ M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
+#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
+ V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
+
+#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
+#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
+#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
+ (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
+ M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
+#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
+ V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
+
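A hedged sketch of how the new fw_ofld_connection_wr and its field macros fit together; the locals (tcp_state, atid) are assumptions for illustration, only the structure and macros come from this patch:

	/* illustrative only: fill the version word and the TCB state/astid word */
	struct fw_ofld_connection_wr wr = { };

	wr.le.version_cpl = cpu_to_be32(F_FW_OFLD_CONNECTION_WR_VERSION);
	wr.tcb.t_state_to_astid =
		cpu_to_be32(V_FW_OFLD_CONNECTION_WR_T_STATE(tcp_state) |
			    V_FW_OFLD_CONNECTION_WR_ASTID(atid));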
enum fw_flowc_mnem {
FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
FW_FLOWC_MNEM_CH,
@@ -222,6 +641,7 @@ struct fw_cmd_hdr {
#define FW_CMD_OP(x) ((x) << 24)
#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_REQUEST_GET(x) (((x) >> 23) & 0x1)
#define FW_CMD_READ (1U << 22)
#define FW_CMD_WRITE (1U << 21)
#define FW_CMD_EXEC (1U << 20)
@@ -229,6 +649,7 @@ struct fw_cmd_hdr {
#define FW_CMD_RETVAL(x) ((x) << 8)
#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
#define FW_CMD_LEN16(x) ((x) << 0)
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
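FW_LEN16() simply expresses a command structure's size in the 16-byte units the firmware expects in its len16 field. A hedged sketch of how it combines with the header macros above when preparing a command; FW_RESET_CMD is assumed here as the matching opcode constant (it is not visible in this hunk):

	struct fw_reset_cmd c = { };

	c.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
				    FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));	/* sizeof(c) / 16 */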
enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
@@ -241,7 +662,8 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_TP_MIB = 0x0012,
FW_LDST_ADDRSPC_MDIO = 0x0018,
FW_LDST_ADDRSPC_MPS = 0x0020,
- FW_LDST_ADDRSPC_FUNC = 0x0028
+ FW_LDST_ADDRSPC_FUNC = 0x0028,
+ FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
};
enum fw_ldst_mps_fid {
@@ -303,6 +725,16 @@ struct fw_ldst_cmd {
__be64 data0;
__be64 data1;
} func;
+ struct fw_ldst_pcie {
+ u8 ctrl_to_fn;
+ u8 bnum;
+ u8 r;
+ u8 ext_r;
+ u8 select_naccess;
+ u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
} u;
};
@@ -312,6 +744,9 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID(x) ((x) << 15)
#define FW_LDST_CMD_CTL(x) ((x) << 0)
#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+#define FW_LDST_CMD_LC (1U << 4)
+#define FW_LDST_CMD_NACCESS(x) ((x) << 0)
+#define FW_LDST_CMD_FN(x) ((x) << 0)
struct fw_reset_cmd {
__be32 op_to_write;
@@ -333,7 +768,7 @@ enum fw_hellow_cmd {
struct fw_hello_cmd {
__be32 op_to_write;
__be32 retval_len16;
- __be32 err_to_mbasyncnot;
+ __be32 err_to_clearinit;
#define FW_HELLO_CMD_ERR (1U << 31)
#define FW_HELLO_CMD_INIT (1U << 30)
#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
@@ -343,6 +778,7 @@ struct fw_hello_cmd {
#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
#define FW_HELLO_CMD_MBMASTER_GET(x) \
(((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
+#define FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << 23)
#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
#define FW_HELLO_CMD_CLEARINIT (1U << 16)
@@ -428,6 +864,7 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_fcoe {
FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+ FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004,
};
enum fw_memtype_cf {
@@ -440,7 +877,7 @@ enum fw_memtype_cf {
struct fw_caps_config_cmd {
__be32 op_to_write;
- __be32 retval_len16;
+ __be32 cfvalid_to_len16;
__be32 r2;
__be32 hwmbitmap;
__be16 nbmcaps;
@@ -701,8 +1138,8 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
-#define FW_IQ_CMD_FL0PADEN (1U << 2)
-#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0PADEN(x) ((x) << 2)
+#define FW_IQ_CMD_FL0PACKEN(x) ((x) << 1)
#define FW_IQ_CMD_FL0CONGEN (1U << 0)
#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
@@ -1190,6 +1627,14 @@ enum fw_port_dcb_cfg_rc {
FW_PORT_DCB_CFG_ERROR = 0x1
};
+enum fw_port_dcb_type {
+ FW_PORT_DCB_TYPE_PGID = 0x00,
+ FW_PORT_DCB_TYPE_PGRATE = 0x01,
+ FW_PORT_DCB_TYPE_PRIORATE = 0x02,
+ FW_PORT_DCB_TYPE_PFC = 0x03,
+ FW_PORT_DCB_TYPE_APP_ID = 0x04,
+};
+
struct fw_port_cmd {
__be32 op_to_portid;
__be32 action_to_len16;
@@ -1257,6 +1702,7 @@ struct fw_port_cmd {
#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSTATUS_GET(x) (((x) >> 31) & 0x1)
#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
#define FW_PORT_CMD_TXPAUSE (1U << 23)
@@ -1305,6 +1751,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
FW_PORT_MOD_TYPE_LRM,
+ FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_MASK - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_MASK - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_MASK - 1,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9dad56101e23..0188df705719 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2023,7 +2023,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
* directory (debugfs_root) has already been set up.
*/
-static int __devinit setup_debugfs(struct adapter *adapter)
+static int setup_debugfs(struct adapter *adapter)
{
int i;
@@ -2064,7 +2064,7 @@ static void cleanup_debugfs(struct adapter *adapter)
* adapter parameters we're going to be using and initialize basic adapter
* hardware support.
*/
-static int __devinit adap_init0(struct adapter *adapter)
+static int adap_init0(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
@@ -2266,7 +2266,7 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
* be modified by the admin via ethtool and cxgbtool prior to the adapter
* being brought up for the first time.
*/
-static void __devinit cfg_queues(struct adapter *adapter)
+static void cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
@@ -2361,7 +2361,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* Reduce the number of Ethernet queues across all ports to at most n.
* n provides at least one queue per port.
*/
-static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+static void reduce_ethqs(struct adapter *adapter, int n)
{
int i;
struct port_info *pi;
@@ -2400,7 +2400,7 @@ static void __devinit reduce_ethqs(struct adapter *adapter, int n)
* for our "extras". Note that this process may lower the maximum number of
* allowed Queue Sets ...
*/
-static int __devinit enable_msix(struct adapter *adapter)
+static int enable_msix(struct adapter *adapter)
{
int i, err, want, need;
struct msix_entry entries[MSIX_ENTRIES];
@@ -2462,8 +2462,8 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* state needed to manage the device. This routine is called "init_one" in
* the PF Driver ...
*/
-static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cxgb4vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int version_printed;
@@ -2769,7 +2769,7 @@ err_disable_device:
* "probe" routine and quiesce the device (disable interrupts, etc.). (Note
* that this is called "remove_one" in the PF Driver.)
*/
-static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
+static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
@@ -2835,7 +2835,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
* "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
* delivery.
*/
-static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
{
struct adapter *adapter;
int pidx;
@@ -2905,8 +2905,8 @@ static struct pci_driver cxgb4vf_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
- .remove = __devexit_p(cxgb4vf_pci_remove),
- .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
+ .remove = cxgb4vf_pci_remove,
+ .shutdown = cxgb4vf_pci_shutdown,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f16745f4b36b..92170d50d9d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -536,7 +536,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
- DBPRIO |
+ DBPRIO(1) |
QID(fl->cntxt_id) |
PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
fl->pend_cred %= FL_PER_EQ_UNIT;
@@ -952,7 +952,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* Warn if we write doorbells with the wrong priority and write
* descriptors before telling HW.
*/
- WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
QID(tq->cntxt_id) | PIDX(n));
@@ -2126,8 +2126,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqns_to_fl0congen =
cpu_to_be32(
FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
- FW_IQ_CMD_FL0PACKEN |
- FW_IQ_CMD_FL0PADEN);
+ FW_IQ_CMD_FL0PACKEN(1) |
+ FW_IQ_CMD_FL0PADEN(1));
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index a65c80aed1f2..283f9d0d37fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -232,8 +232,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
-int __devinit t4vf_wait_dev_ready(struct adapter *);
-int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_wait_dev_ready(struct adapter *);
+int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fe3fd3dad6f7..7127c7b9efde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -46,7 +46,7 @@
* returning a value other than all 1's). Return an error if it doesn't
* become ready ...
*/
-int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+int t4vf_wait_dev_ready(struct adapter *adapter)
{
const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
const u32 notready1 = 0xffffffff;
@@ -253,8 +253,7 @@ static int hash_mac_addr(const u8 *addr)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = 0;
@@ -275,7 +274,7 @@ static void __devinit init_link_config(struct link_config *lc,
* @adapter: the adapter
* @pidx: the adapter port index
*/
-int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
+int t4vf_port_init(struct adapter *adapter, int pidx)
{
struct port_info *pi = adap2pinfo(adapter, pidx);
struct fw_vi_cmd vi_cmd, vi_rpl;
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
index 94606f7ee13a..1c7b884e3371 100644
--- a/drivers/net/ethernet/cisco/Kconfig
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_CISCO
bool "Cisco devices"
default y
- depends on PCI && INET
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index 9cc706a6cffd..b63f8d8a4261 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -4,6 +4,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
- depends on PCI && INET
+ depends on PCI
---help---
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ad1468b3ab91..64866ff1aea0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2275,8 +2275,7 @@ static void enic_iounmap(struct enic *enic)
iounmap(enic->bar[i].vaddr);
}
-static int __devinit enic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct net_device *netdev;
@@ -2552,7 +2551,7 @@ err_out_free_netdev:
return err;
}
-static void __devexit enic_remove(struct pci_dev *pdev)
+static void enic_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2584,7 +2583,7 @@ static struct pci_driver enic_driver = {
.name = DRV_NAME,
.id_table = enic_id_table,
.probe = enic_probe,
- .remove = __devexit_p(enic_remove),
+ .remove = enic_remove,
};
static int __init enic_init_module(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 36499d5edd95..c73472c369cd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -193,35 +193,35 @@ iow(board_info_t * db, int reg, int value)
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
- writesb(reg, data, count);
+ iowrite8_rep(reg, data, count);
}
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
- writesw(reg, data, (count+1) >> 1);
+ iowrite16_rep(reg, data, (count+1) >> 1);
}
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
- writesl(reg, data, (count+3) >> 2);
+ iowrite32_rep(reg, data, (count+3) >> 2);
}
/* input block from chip to memory */
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
- readsb(reg, data, count);
+ ioread8_rep(reg, data, count);
}
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
- readsw(reg, data, (count+1) >> 1);
+ ioread16_rep(reg, data, (count+1) >> 1);
}
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
- readsl(reg, data, (count+3) >> 2);
+ ioread32_rep(reg, data, (count+3) >> 2);
}
/* dump block from chip to null */
@@ -1359,7 +1359,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
/*
* Search DM9000 board, allocate space and register it
*/
-static int __devinit
+static int
dm9000_probe(struct platform_device *pdev)
{
struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -1661,7 +1661,7 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = {
.resume = dm9000_drv_resume,
};
-static int __devexit
+static int
dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1683,7 +1683,7 @@ static struct platform_driver dm9000_driver = {
.pm = &dm9000_drv_pm_ops,
},
.probe = dm9000_probe,
- .remove = __devexit_p(dm9000_drv_remove),
+ .remove = dm9000_drv_remove,
};
static int __init
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 17ae8c619680..9f992b95eddc 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -1910,9 +1910,8 @@ static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
static int ndevs;
static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
-/* '21' below should really be 'MAX_NUM_EWRK3S' */
module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
+module_param_array(irq, byte, NULL, 0);
MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 77335853ac36..eaab73cf27ca 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1700,7 +1700,7 @@ static const struct ethtool_ops de_ethtool_ops = {
.get_regs = de_get_regs,
};
-static void __devinit de21040_get_mac_address (struct de_private *de)
+static void de21040_get_mac_address(struct de_private *de)
{
unsigned i;
@@ -1721,7 +1721,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
}
}
-static void __devinit de21040_get_media_info(struct de_private *de)
+static void de21040_get_media_info(struct de_private *de)
{
unsigned int i;
@@ -1748,7 +1748,8 @@ static void __devinit de21040_get_media_info(struct de_private *de)
}
/* Note: this routine returns extra data bits for size detection. */
-static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+static unsigned tulip_read_eeprom(void __iomem *regs, int location,
+ int addr_len)
{
int i;
unsigned retval = 0;
@@ -1783,7 +1784,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in
return retval;
}
-static void __devinit de21041_get_srom_info (struct de_private *de)
+static void de21041_get_srom_info(struct de_private *de)
{
unsigned i, sa_offset = 0, ofs;
u8 ee_data[DE_EEPROM_SIZE + 6] = {};
@@ -1961,8 +1962,7 @@ static const struct net_device_ops de_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit de_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct de_private *de;
@@ -2099,7 +2099,7 @@ err_out_free:
return rc;
}
-static void __devexit de_remove_one (struct pci_dev *pdev)
+static void de_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct de_private *de = netdev_priv(dev);
@@ -2184,7 +2184,7 @@ static struct pci_driver de_driver = {
.name = DRV_NAME,
.id_table = de_pci_tbl,
.probe = de_init_one,
- .remove = __devexit_p(de_remove_one),
+ .remove = de_remove_one,
#ifdef CONFIG_PM
.suspend = de_suspend,
.resume = de_resume,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f879e9224846..4c830030fb06 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -479,7 +479,7 @@
#include "de4x5.h"
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
#define c_char const char
@@ -1092,7 +1092,7 @@ static const struct net_device_ops de4x5_netdev_ops = {
};
-static int __devinit
+static int
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
char name[DE4X5_NAME_LENGTH + 1];
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
return status;
}
-static int __devexit de4x5_eisa_remove (struct device *device)
+static int de4x5_eisa_remove(struct device *device)
{
struct net_device *dev;
u_long iobase;
@@ -2104,7 +2104,7 @@ static struct eisa_driver de4x5_eisa_driver = {
.driver = {
.name = "de4x5",
.probe = de4x5_eisa_probe,
- .remove = __devexit_p (de4x5_eisa_remove),
+ .remove = de4x5_eisa_remove,
}
};
MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
@@ -2118,7 +2118,7 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
** For single port cards this is a time waster...
*/
-static void __devinit
+static void
srom_search(struct net_device *dev, struct pci_dev *pdev)
{
u_char pb;
@@ -2192,8 +2192,8 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
** kernels use the V0.535[n] drivers.
*/
-static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int de4x5_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
u_char pb, pbus = 0, dev_num, dnum = 0, timer;
u_short vendor, status;
@@ -2314,7 +2314,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
return error;
}
-static void __devexit de4x5_pci_remove (struct pci_dev *pdev)
+static void de4x5_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev;
u_long iobase;
@@ -2344,7 +2344,7 @@ static struct pci_driver de4x5_pci_driver = {
.name = "de4x5",
.id_table = de4x5_pci_tbl,
.probe = de4x5_pci_probe,
- .remove = __devexit_p (de4x5_pci_remove),
+ .remove = de4x5_pci_remove,
};
#endif
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index d23755ea9bc7..83139307861c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -291,8 +291,8 @@ enum dmfe_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int __devinitdata printed_version;
-static const char version[] __devinitconst =
+static int printed_version;
+static const char version[] =
"Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
static int dmfe_debug;
@@ -367,8 +367,7 @@ static const struct net_device_ops netdev_ops = {
 * Search DM910X board, allocate space and register it
*/
-static int __devinit dmfe_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct dmfe_board_info *db; /* board information structure */
struct net_device *dev;
@@ -531,7 +530,7 @@ err_out_free:
}
-static void __devexit dmfe_remove_one (struct pci_dev *pdev)
+static void dmfe_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct dmfe_board_info *db = netdev_priv(dev);
@@ -2187,7 +2186,7 @@ static struct pci_driver dmfe_driver = {
.name = "dmfe",
.id_table = dmfe_pci_tbl,
.probe = dmfe_init_one,
- .remove = __devexit_p(dmfe_remove_one),
+ .remove = dmfe_remove_one,
.suspend = dmfe_suspend,
.resume = dmfe_resume
};
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index 44f7e8e82d85..df5a892fb49c 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -26,7 +26,7 @@
*/
/* Known cards that have old-style EEPROMs. */
-static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
+static struct eeprom_fixup eeprom_fixups[] = {
{"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
{"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
{NULL}};
-static const char *const block_name[] __devinitconst = {
+static const char *const block_name[] = {
"21140 non-MII",
"21140 MII PHY",
"21142 Serial PHY",
@@ -102,7 +102,7 @@ static const char *const block_name[] __devinitconst = {
* #ifdef __hppa__ should completely optimize this function away for
* non-parisc hardware.
*/
-static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
+static void tulip_build_fake_mediatable(struct tulip_private *tp)
{
#ifdef CONFIG_GSC
if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) {
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
#endif
}
-void __devinit tulip_parse_eeprom(struct net_device *dev)
+void tulip_parse_eeprom(struct net_device *dev)
{
/*
dev is not registered at this point, so logging messages can't
@@ -339,7 +339,7 @@ subsequent_board:
#define EE_READ_CMD (6)
/* Note: this routine returns extra data bits for size detection. */
-int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
+int tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
{
int i;
unsigned retval = 0;
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index ae937c6749e7..93a4afaa09f1 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -447,7 +447,7 @@ int tulip_check_duplex(struct net_device *dev)
return 0;
}
-void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
+void tulip_find_mii(struct net_device *dev, int board_idx)
{
struct tulip_private *tp = netdev_priv(dev);
int phyn, phy_idx = 0;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 885700a19978..1e9443d9fb57 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -37,7 +37,7 @@
#include <asm/prom.h>
#endif
-static char version[] __devinitdata =
+static char version[] =
"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
/* A few user-configurable values. */
@@ -1191,8 +1191,7 @@ static void set_rx_mode(struct net_device *dev)
}
#ifdef CONFIG_TULIP_MWI
-static void __devinit tulip_mwi_config (struct pci_dev *pdev,
- struct net_device *dev)
+static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
u8 cache;
@@ -1301,8 +1300,7 @@ DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
{ },
};
-static int __devinit tulip_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
@@ -1927,7 +1925,7 @@ static int tulip_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static void __devexit tulip_remove_one (struct pci_dev *pdev)
+static void tulip_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct tulip_private *tp;
@@ -1974,7 +1972,7 @@ static struct pci_driver tulip_driver = {
.name = DRV_NAME,
.id_table = tulip_pci_tbl,
.probe = tulip_init_one,
- .remove = __devexit_p(tulip_remove_one),
+ .remove = tulip_remove_one,
#ifdef CONFIG_PM
.suspend = tulip_suspend,
.resume = tulip_resume,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 75d45f8a37dc..93845afe1cea 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -204,8 +204,8 @@ enum uli526x_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int __devinitdata printed_version;
-static const char version[] __devinitconst =
+static int printed_version;
+static const char version[] =
"ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
static int uli526x_debug;
@@ -281,8 +281,8 @@ static const struct net_device_ops netdev_ops = {
* Search ULI526X board, allocate space and register it
*/
-static int __devinit uli526x_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int uli526x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
@@ -436,7 +436,7 @@ err_out_free:
}
-static void __devexit uli526x_remove_one (struct pci_dev *pdev)
+static void uli526x_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct uli526x_board_info *db = netdev_priv(dev);
@@ -1788,7 +1788,7 @@ static struct pci_driver uli526x_driver = {
.name = "uli526x",
.id_table = uli526x_pci_tbl,
.probe = uli526x_init_one,
- .remove = __devexit_p(uli526x_remove_one),
+ .remove = uli526x_remove_one,
.suspend = uli526x_suspend,
.resume = uli526x_resume,
};
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 7c1ec4d7920b..c7b04ecf5b49 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -236,7 +236,7 @@ struct pci_id_info {
int drv_flags; /* Driver use, intended as capability flags. */
};
-static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+static const struct pci_id_info pci_id_tbl[] = {
{ /* Sometime a Level-One switch card. */
"Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
{ "Winbond W89c840", CanHaveMII | HasBrokenTx},
@@ -358,8 +358,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit w840_probe1 (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct netdev_private *np;
@@ -1532,7 +1531,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static void __devexit w840_remove1 (struct pci_dev *pdev)
+static void w840_remove1(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -1647,7 +1646,7 @@ static struct pci_driver w840_driver = {
.name = DRV_NAME,
.id_table = w840_pci_tbl,
.probe = w840_probe1,
- .remove = __devexit_p(w840_remove1),
+ .remove = w840_remove1,
#ifdef CONFIG_PM
.suspend = w840_suspend,
.resume = w840_resume,
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 138bf83bc98e..88feced9a629 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -148,7 +148,7 @@ static struct pci_driver xircom_ops = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
- .remove = __devexit_p(xircom_remove),
+ .remove = xircom_remove,
};
@@ -190,7 +190,7 @@ static const struct net_device_ops netdev_ops = {
first two packets that get sent, and pump hates that.
*/
-static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *d = &pdev->dev;
struct net_device *dev = NULL;
@@ -312,7 +312,7 @@ err_disable:
Interrupts and such are already stopped in the "ifconfig ethX down"
code.
*/
-static void __devexit xircom_remove(struct pci_dev *pdev)
+static void xircom_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct xircom_private *card = netdev_priv(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a059f0c27e28..1d342d37915c 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -23,7 +23,7 @@
#define dr16(reg) ioread16(ioaddr + (reg))
#define dr8(reg) ioread8(ioaddr + (reg))
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
@@ -110,7 +110,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = change_mtu,
};
-static int __devinit
+static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -1727,7 +1727,7 @@ rio_close (struct net_device *dev)
return 0;
}
-static void __devexit
+static void
rio_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
@@ -1755,24 +1755,10 @@ static struct pci_driver rio_driver = {
.name = "dl2k",
.id_table = rio_pci_tbl,
.probe = rio_probe1,
- .remove = __devexit_p(rio_remove1),
+ .remove = rio_remove1,
};
-static int __init
-rio_init (void)
-{
- return pci_register_driver(&rio_driver);
-}
-
-static void __exit
-rio_exit (void)
-{
- pci_unregister_driver (&rio_driver);
-}
-
-module_init (rio_init);
-module_exit (rio_exit);
-
+module_pci_driver(rio_driver);
/*
Compile command:
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 3b83588e51f6..28fc11b2f1ea 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -102,7 +102,7 @@ static char *media[MAX_UNITS];
#include <linux/mii.h>
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
" Written by Donald Becker\n";
@@ -218,7 +218,7 @@ enum {
struct pci_id_info {
const char *name;
};
-static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+static const struct pci_id_info pci_id_tbl[] = {
{"D-Link DFE-550TX FAST Ethernet Adapter"},
{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
{"D-Link DFE-580TX 4 port Server Adapter"},
@@ -259,6 +259,7 @@ enum alta_offsets {
EECtrl = 0x36,
FlashAddr = 0x40,
FlashData = 0x44,
+ WakeEvent = 0x45,
TxStatus = 0x46,
TxFrameId = 0x47,
DownCounter = 0x18,
@@ -333,6 +334,14 @@ enum mac_ctrl1_bits {
RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
+/* Bits in WakeEvent register. */
+enum wake_event_bits {
+ WakePktEnable = 0x01,
+ MagicPktEnable = 0x02,
+ LinkEventEnable = 0x04,
+ WolEnable = 0x80,
+};
+
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
architectures. */
@@ -392,6 +401,7 @@ struct netdev_private {
unsigned int default_port:4; /* Last dev->if_port value. */
unsigned int an_enable:1;
unsigned int speed;
+ unsigned int wol_enabled:1; /* Wake on LAN enabled */
struct tasklet_struct rx_tasklet;
struct tasklet_struct tx_tasklet;
int budget;
@@ -472,8 +482,8 @@ static const struct net_device_ops netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit sundance_probe1 (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sundance_probe1(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev;
struct netdev_private *np;
@@ -701,7 +711,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
#define eeprom_delay(ee_addr) ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
-static int __devinit eeprom_read(void __iomem *ioaddr, int location)
+static int eeprom_read(void __iomem *ioaddr, int location)
{
int boguscnt = 10000; /* Typical 1900 ticks. */
iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
@@ -829,7 +839,7 @@ static int netdev_open(struct net_device *dev)
unsigned long flags;
int i;
- /* Do we need to reset the chip??? */
+ sundance_reset(dev, 0x00ff << 16);
i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
if (i)
@@ -877,6 +887,10 @@ static int netdev_open(struct net_device *dev)
iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+ /* Disable Wol */
+ iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
+ np->wol_enabled = 0;
+
if (netif_msg_ifup(np))
printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
"MAC Control %x, %4.4x %4.4x.\n",
@@ -1715,6 +1729,60 @@ static void get_ethtool_stats(struct net_device *dev,
data[i++] = np->xstats.rx_mcasts;
}
+#ifdef CONFIG_PM
+
+static void sundance_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ wol->wolopts = 0;
+
+ wol->supported = (WAKE_PHY | WAKE_MAGIC);
+ if (!np->wol_enabled)
+ return;
+
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ if (wol_bits & MagicPktEnable)
+ wol->wolopts |= WAKE_MAGIC;
+ if (wol_bits & LinkEventEnable)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int sundance_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ if (!device_can_wakeup(&np->pci_dev->dev))
+ return -EOPNOTSUPP;
+
+ np->wol_enabled = !!(wol->wolopts);
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ wol_bits &= ~(WakePktEnable | MagicPktEnable |
+ LinkEventEnable | WolEnable);
+
+ if (np->wol_enabled) {
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_bits |= (MagicPktEnable | WolEnable);
+ if (wol->wolopts & WAKE_PHY)
+ wol_bits |= (LinkEventEnable | WolEnable);
+ }
+ iowrite8(wol_bits, ioaddr + WakeEvent);
+
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
+
+ return 0;
+}
+#else
+#define sundance_get_wol NULL
+#define sundance_set_wol NULL
+#endif /* CONFIG_PM */
+
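Compiling the WoL handlers out under !CONFIG_PM and wiring NULL into ethtool_ops is sufficient because the ethtool core refuses the ioctl when the hook is absent; roughly (a paraphrase of the core check, not code from this patch, with dev and wol as assumed locals):

	/* rough paraphrase of the ethtool core's ETHTOOL_GWOL handling */
	if (!dev->ethtool_ops->get_wol)
		return -EOPNOTSUPP;
	dev->ethtool_ops->get_wol(dev, &wol);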
static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = get_drvinfo,
@@ -1722,6 +1790,8 @@ static const struct ethtool_ops ethtool_ops = {
.set_settings = set_settings,
.nway_reset = nway_reset,
.get_link = get_link,
+ .get_wol = sundance_get_wol,
+ .set_wol = sundance_set_wol,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
.get_strings = get_strings,
@@ -1844,7 +1914,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static void __devexit sundance_remove1 (struct pci_dev *pdev)
+static void sundance_remove1(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -1867,6 +1937,8 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
if (!netif_running(dev))
return 0;
@@ -1875,6 +1947,12 @@ static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
netif_device_detach(dev);
pci_save_state(pci_dev);
+ if (np->wol_enabled) {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ iowrite16(RxEnable, ioaddr + MACCtrl1);
+ }
+ pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
+ np->wol_enabled);
pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
return 0;
@@ -1890,6 +1968,7 @@ static int sundance_resume(struct pci_dev *pci_dev)
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
+ pci_enable_wake(pci_dev, PCI_D0, 0);
err = netdev_open(dev);
if (err) {
@@ -1910,7 +1989,7 @@ static struct pci_driver sundance_driver = {
.name = DRV_NAME,
.id_table = sundance_pci_tbl,
.probe = sundance_probe1,
- .remove = __devexit_p(sundance_remove1),
+ .remove = sundance_remove1,
#ifdef CONFIG_PM
.suspend = sundance_suspend,
.resume = sundance_resume,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 290b26f868c9..2c177b329c8b 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -72,7 +72,7 @@ static void __dnet_set_hwaddr(struct dnet *bp)
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
-static void __devinit dnet_get_hwaddr(struct dnet *bp)
+static void dnet_get_hwaddr(struct dnet *bp)
{
u16 tmp;
u8 addr[6];
@@ -664,9 +664,6 @@ static int dnet_open(struct net_device *dev)
if (!bp->phy_dev)
return -EAGAIN;
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EADDRNOTAVAIL;
-
napi_enable(&bp->napi);
dnet_init_hw(bp);
@@ -829,7 +826,7 @@ static const struct net_device_ops dnet_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit dnet_probe(struct platform_device *pdev)
+static int dnet_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
@@ -945,7 +942,7 @@ err_out:
return err;
}
-static int __devexit dnet_remove(struct platform_device *pdev)
+static int dnet_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -971,7 +968,7 @@ static int __devexit dnet_remove(struct platform_device *pdev)
static struct platform_driver dnet_driver = {
.probe = dnet_probe,
- .remove = __devexit_p(dnet_remove),
+ .remove = dnet_remove,
.driver = {
.name = "dnet",
},
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
index 7a28a6433944..1b8d638c6cb1 100644
--- a/drivers/net/ethernet/emulex/Kconfig
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_EMULEX
bool "Emulex devices"
default y
- depends on PCI && INET
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 804db04a2bd0..231129dd1764 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,6 +1,6 @@
config BE2NET
tristate "ServerEngines' 10Gbps NIC - BladeEngine"
- depends on PCI && INET
+ depends on PCI
---help---
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf4c05bdf5fe..f1b3df167ff2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,15 +34,15 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.4.31.0u"
+#define DRV_VER "4.4.161.0u"
#define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -53,6 +53,7 @@
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
+#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1 0xE602
#define OC_SUBSYS_DEVICE_ID2 0xE642
#define OC_SUBSYS_DEVICE_ID3 0xE612
@@ -71,6 +72,7 @@ static inline char *nic_name(struct pci_dev *pdev)
case BE_DEVICE_ID2:
return BE3_NAME;
case OC_DEVICE_ID5:
+ case OC_DEVICE_ID6:
return OC_NAME_SH;
default:
return BE_NAME;
@@ -188,6 +190,7 @@ struct be_eq_obj {
u8 idx; /* array index */
u16 tx_budget;
+ u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;
@@ -346,7 +349,6 @@ struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
- u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
@@ -374,11 +376,8 @@ struct be_adapter {
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- u8 eq_next_idx;
struct be_drv_stats drv_stats;
-
u16 vlans_added;
- u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_N_VID];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
@@ -391,6 +390,7 @@ struct be_adapter {
struct delayed_work func_recovery_work;
u32 flags;
+ u32 cmd_privileges;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
int if_handle; /* Used to configure filtering */
@@ -408,10 +408,8 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool stats_cmd_sent;
- u8 generation; /* BladeEngine ASIC generation */
u32 if_type;
struct {
- u8 __iomem *base; /* Door Bell */
u32 size;
u32 total_size;
u64 io_addr;
@@ -434,10 +432,18 @@ struct be_adapter {
struct phy_info phy;
u8 wol_cap;
bool wol;
- u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
int be_get_temp_freq;
+ u16 max_mcast_mac;
+ u16 max_tx_queues;
+ u16 max_rss_queues;
+ u16 max_rx_queues;
+ u16 max_pmac_cnt;
+ u16 max_vlans;
+ u16 max_event_queues;
+ u32 if_cap_flags;
+ u8 pf_number;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -448,21 +454,25 @@ struct be_adapter {
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
i++, vf_cfg++)
-/* BladeEngine Generation numbers */
-#define BE_GEN2 2
-#define BE_GEN3 3
-
#define ON 1
#define OFF 0
-#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
- (adapter->pdev->device == OC_DEVICE_ID4))
-#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
+ adapter->pdev->device == OC_DEVICE_ID4)
+
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
+ adapter->pdev->device == OC_DEVICE_ID6)
+
+#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
+ adapter->pdev->device == OC_DEVICE_ID2)
+#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
+ adapter->pdev->device == OC_DEVICE_ID1)
-#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
- adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
- (adapter->function_mode & RDMA_ENABLED))
+#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
+
+#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
+ (adapter->function_mode & RDMA_ENABLED))
extern const struct ethtool_ops be_ethtool_ops;
@@ -607,7 +617,7 @@ static inline bool be_error(struct be_adapter *adapter)
return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}
-static inline bool be_crit_error(struct be_adapter *adapter)
+static inline bool be_hw_error(struct be_adapter *adapter)
{
return adapter->eeh_error || adapter->hw_error;
}
@@ -637,12 +647,6 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
}
}
-static inline bool be_type_2_3(struct be_adapter *adapter)
-{
- return (adapter->if_type == SLI_INTF_TYPE_2 ||
- adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
-}
-
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index af60bb26e330..8a250c38fb82 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,55 @@
#include "be.h"
#include "be_cmds.h"
+static struct be_cmd_priv_map cmd_priv_map[] = {
+ {
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ }
+};
+
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
+ u8 subsystem)
+{
+ int i;
+ int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ u32 cmd_privileges = adapter->cmd_privileges;
+
+ for (i = 0; i < num_entries; i++)
+ if (opcode == cmd_priv_map[i].opcode &&
+ subsystem == cmd_priv_map[i].subsystem)
+ if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
+ return false;
+
+ return true;
+}
+
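The privilege map gates commands that a function without the required privilege level must not issue. The pattern each caller follows appears later in this patch (lancer_cmd_get_pport_stats); a minimal sketch of the same check, with the opcode chosen purely for illustration:

	/* illustrative only: bail out early when the function lacks privilege */
	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;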
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -249,7 +298,12 @@ void be_async_mcc_enable(struct be_adapter *adapter)
void be_async_mcc_disable(struct be_adapter *adapter)
{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
adapter->mcc_obj.rearm_cq = false;
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
}
int be_process_mcc(struct be_adapter *adapter)
@@ -419,14 +473,13 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
u32 sem;
+ u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
+ SLIPORT_SEMAPHORE_OFFSET_BE;
- if (lancer_chip(adapter))
- sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
- else
- sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+ pci_read_config_dword(adapter->pdev, reg, &sem);
+ *stage = sem & POST_STAGE_MASK;
- *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
- if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+ if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
return -1;
else
return 0;
@@ -452,10 +505,33 @@ int lancer_wait_ready(struct be_adapter *adapter)
return status;
}
+static bool lancer_provisioning_error(struct be_adapter *adapter)
+{
+ u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ sliport_err1 = ioread32(adapter->db +
+ SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db +
+ SLIPORT_ERROR2_OFFSET);
+
+ if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
+ sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
+ return true;
+ }
+ return false;
+}
+
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
int status;
u32 sliport_status, err, reset_needed;
+ bool resource_error;
+
+ resource_error = lancer_provisioning_error(adapter);
+ if (resource_error)
+ return -1;
+
status = lancer_wait_ready(adapter);
if (!status) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -477,6 +553,14 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
status = -1;
}
}
+ /* Stop error recovery if the error is not recoverable.
+ * A no-resource error is temporary and will go away once
+ * the PF provisions resources.
+ */
+ resource_error = lancer_provisioning_error(adapter);
+ if (status == -1 && !resource_error)
+ adapter->eeh_error = true;
+
return status;
}
@@ -601,6 +685,9 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_mcc_wrb *wrb;
+ if (!mccq->created)
+ return NULL;
+
if (atomic_read(&mccq->used) >= mccq->len) {
dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
return NULL;
@@ -1155,8 +1242,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
- if (!status)
- q->created = false;
+ q->created = false;
mutex_unlock(&adapter->mbox_lock);
return status;
@@ -1183,8 +1269,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
- if (!status)
- q->created = false;
+ q->created = false;
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1281,7 +1366,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
- if (adapter->generation == BE_GEN3)
+ /* version 1 of the cmd is supported on all chips except BE2 */
+ if (!BE2_chip(adapter))
hdr->version = 1;
be_mcc_notify(adapter);
@@ -1301,6 +1387,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct lancer_cmd_req_pport_stats *req;
int status = 0;
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1367,7 +1457,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
- if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ /* version 1 of the cmd is supported on all chips except BE2 */
+ if (!BE2_chip(adapter))
req->hdr.version = 1;
req->hdr.domain = dom;
@@ -1658,9 +1749,9 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
- if (!lancer_chip(adapter) || be_physfn(adapter))
- req->if_flags_mask |=
- cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags_mask |=
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
+ adapter->if_cap_flags);
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
@@ -1680,6 +1771,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
struct be_cmd_req_set_flow_control *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1709,6 +1804,10 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
struct be_cmd_req_get_flow_control *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -2067,7 +2166,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req;
+ struct be_cmd_read_flash_crc *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2080,7 +2179,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
+ wrb, NULL);
req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2089,7 +2189,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
status = be_mcc_notify_wait(adapter);
if (!status)
- memcpy(flashed_crc, req->params.data_buf, 4);
+ memcpy(flashed_crc, req->crc, 4);
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -2275,6 +2375,10 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
struct be_dma_mem cmd;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -2434,6 +2538,42 @@ err:
return status;
}
+/* Get privilege(s) for a function */
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fn_privileges *resp =
+ embedded_payload(wrb);
+ *privilege = le32_to_cpu(resp->privilege_mask);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id, u8 domain)
@@ -2651,6 +2791,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
int payload_len = sizeof(*req);
struct be_dma_mem cmd;
+ if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
+ CMD_SUBSYSTEM_ETH))
+ return -EPERM;
+
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2792,6 +2936,240 @@ err:
return status;
}
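+/* Scan the FW resource-descriptor list for the NIC descriptor; returns
+ * NULL if it is not found within desc_count entries or max_buf_size.
+ */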
+static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+ u32 max_buf_size)
+{
+ struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ desc->desc_len = RESOURCE_DESC_SIZE;
+ if (((void *)desc + desc->desc_len) >
+ (void *)(buf + max_buf_size)) {
+ desc = NULL;
+ break;
+ }
+
+ if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
+ break;
+
+ desc = (void *)desc + desc->desc_len;
+ }
+
+ if (!desc || i == MAX_RESOURCE_DESC)
+ return NULL;
+
+ return desc;
+}
+
+/* Uses Mbox */
+int be_cmd_get_func_config(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_func_config *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FUNC_CONFIG,
+ cmd.size, wrb, &cmd);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_func_config *resp = cmd.va;
+ u32 desc_count = le32_to_cpu(resp->desc_count);
+ struct be_nic_resource_desc *desc;
+
+ desc = be_get_nic_desc(resp->func_param, desc_count,
+ sizeof(resp->func_param));
+ if (!desc) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ adapter->pf_number = desc->pf_num;
+ adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
+ adapter->max_vlans = le16_to_cpu(desc->vlan_count);
+ adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
+ adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
+ adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
+
+ adapter->max_event_queues = le16_to_cpu(desc->eq_count);
+ adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_profile_config *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PROFILE_CONFIG,
+ cmd.size, wrb, &cmd);
+
+ req->type = ACTIVE_PROFILE_TYPE;
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_profile_config *resp = cmd.va;
+ u32 desc_count = le32_to_cpu(resp->desc_count);
+ struct be_nic_resource_desc *desc;
+
+ desc = be_get_nic_desc(resp->func_param, desc_count,
+ sizeof(resp->func_param));
+
+ if (!desc) {
+ status = -EINVAL;
+ goto err;
+ }
+ *cap_flags = le32_to_cpu(desc->cap_flags);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+ u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_profile_config *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->desc_count = cpu_to_le32(1);
+
+ req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
+ req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+ req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
+ req->nic_desc.pf_num = adapter->pf_number;
+ req->nic_desc.vf_num = domain;
+
+ /* Mark fields invalid */
+ req->nic_desc.unicast_mac_count = 0xFFFF;
+ req->nic_desc.mcc_count = 0xFFFF;
+ req->nic_desc.vlan_count = 0xFFFF;
+ req->nic_desc.mcast_mac_count = 0xFFFF;
+ req->nic_desc.txq_count = 0xFFFF;
+ req->nic_desc.rq_count = 0xFFFF;
+ req->nic_desc.rssq_count = 0xFFFF;
+ req->nic_desc.lro_count = 0xFFFF;
+ req->nic_desc.cq_count = 0xFFFF;
+ req->nic_desc.toe_conn_count = 0xFFFF;
+ req->nic_desc.eq_count = 0xFFFF;
+ req->nic_desc.link_param = 0xFF;
+ req->nic_desc.bw_min = 0xFFFFFFFF;
+ req->nic_desc.acpi_params = 0xFF;
+ req->nic_desc.wol_param = 0x0F;
+
+ /* Change BW */
+ req->nic_desc.bw_min = cpu_to_le32(bps);
+ req->nic_desc.bw_max = cpu_to_le32(bps);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_enable_disable_vf *req;
+ int status;
+
+ if (!lancer_chip(adapter))
+ return 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+ req->enable = 1;
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0936e21e3cff..d6552e19ffee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -196,9 +196,14 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
#define OPCODE_COMMON_GET_HSW_CONFIG 152
+#define OPCODE_COMMON_GET_FUNC_CONFIG 160
+#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
+#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
#define OPCODE_COMMON_SET_HSW_CONFIG 153
+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
@@ -1151,14 +1156,22 @@ struct flashrom_params {
u32 op_type;
u32 data_buf_size;
u32 offset;
- u8 data_buf[4];
};
struct be_cmd_write_flashrom {
struct be_cmd_req_hdr hdr;
struct flashrom_params params;
-};
+ u8 data_buf[32768];
+ u8 rsvd[4];
+} __packed;
+/* cmd to read flash crc */
+struct be_cmd_read_flash_crc {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+ u8 crc[4];
+ u8 rsvd[4];
+};
/**************** Lancer Firmware Flash ************/
struct amap_lancer_write_obj_context {
u8 write_length[24];
@@ -1429,6 +1442,41 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/*********************** Function Privileges ***********************/
+enum {
+ BE_PRIV_DEFAULT = 0x1,
+ BE_PRIV_LNKQUERY = 0x2,
+ BE_PRIV_LNKSTATS = 0x4,
+ BE_PRIV_LNKMGMT = 0x8,
+ BE_PRIV_LNKDIAG = 0x10,
+ BE_PRIV_UTILQUERY = 0x20,
+ BE_PRIV_FILTMGMT = 0x40,
+ BE_PRIV_IFACEMGMT = 0x80,
+ BE_PRIV_VHADM = 0x100,
+ BE_PRIV_DEVCFG = 0x200,
+ BE_PRIV_DEVSEC = 0x400
+};
+#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
+ BE_PRIV_DEVSEC)
+#define MIN_PRIVILEGES BE_PRIV_DEFAULT
+
+struct be_cmd_priv_map {
+ u8 opcode;
+ u8 subsystem;
+ u32 priv_mask;
+};
+
+struct be_cmd_req_get_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_fn_privileges {
+ struct be_cmd_resp_hdr hdr;
+ u32 privilege_mask;
+};
+
+
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
struct be_cmd_req_get_mac_list {
@@ -1608,33 +1656,6 @@ struct be_cmd_resp_get_stats_v1 {
struct be_hw_stats_v1 hw_stats;
};
-static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- } else {
- struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- }
-}
-
-static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- }
-}
-
-
/************** get fat capabilites *******************/
#define MAX_MODULES 27
#define MAX_MODES 4
@@ -1684,6 +1705,96 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
+#define RESOURCE_DESC_SIZE 72
+#define NIC_RESOURCE_DESC_TYPE_ID 0x41
+#define MAX_RESOURCE_DESC 4
+
+/* QOS unit number */
+#define QUN 4
+/* Immediate */
+#define IMM 6
+/* No save */
+#define NOSV 7
+
+struct be_nic_resource_desc {
+ u8 desc_type;
+ u8 desc_len;
+ u8 rsvd1;
+ u8 flags;
+ u8 vf_num;
+ u8 rsvd2;
+ u8 pf_num;
+ u8 rsvd3;
+ u16 unicast_mac_count;
+ u8 rsvd4[6];
+ u16 mcc_count;
+ u16 vlan_count;
+ u16 mcast_mac_count;
+ u16 txq_count;
+ u16 rq_count;
+ u16 rssq_count;
+ u16 lro_count;
+ u16 cq_count;
+ u16 toe_conn_count;
+ u16 eq_count;
+ u32 rsvd5;
+ u32 cap_flags;
+ u8 link_param;
+ u8 rsvd6[3];
+ u32 bw_min;
+ u32 bw_max;
+ u8 acpi_params;
+ u8 wol_param;
+ u16 rsvd7;
+ u32 rsvd8[3];
+};
+
+struct be_cmd_req_get_func_config {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_func_config {
+ struct be_cmd_req_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+#define ACTIVE_PROFILE_TYPE 0x2
+struct be_cmd_req_get_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd;
+ u8 type;
+ u16 rsvd1;
+};
+
+struct be_cmd_resp_get_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+};
+
+struct be_cmd_req_set_profile_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+ u32 desc_count;
+ struct be_nic_resource_desc nic_desc;
+};
+
+struct be_cmd_resp_set_profile_config {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_enable_disable_vf {
+ struct be_cmd_req_hdr hdr;
+ u8 enable;
+ u8 rsvd[3];
+};
+
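+/* true if the function holds any of the privileges requested in 'flags' */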
+static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
+{
+ return flags & adapter->cmd_privileges ? true : false;
+}
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1780,6 +1891,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
+ u32 *privilege, u32 domain);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
@@ -1798,4 +1911,10 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
extern int lancer_wait_ready(struct be_adapter *adapter);
extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+extern int be_cmd_get_func_config(struct be_adapter *adapter);
+extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ u8 domain);
+extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
+ u8 domain);
+extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8e6fb0ba6aa9..00454a10f88d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -261,6 +261,9 @@ be_get_reg_len(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return 0;
+
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
@@ -525,6 +528,10 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
u8 link_status;
u16 link_speed = 0;
int status;
+ u32 auto_speeds;
+ u32 fixed_speeds;
+ u32 dac_cable_len;
+ u16 interface_type;
if (adapter->phy.link_speed < 0) {
status = be_cmd_link_status_query(adapter, &link_speed,
@@ -534,39 +541,46 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, link_speed);
status = be_cmd_get_phy_info(adapter);
- if (status)
- return status;
-
- ecmd->supported =
- convert_to_et_setting(adapter->phy.interface_type,
- adapter->phy.auto_speeds_supported |
- adapter->phy.fixed_speeds_supported);
- ecmd->advertising =
- convert_to_et_setting(adapter->phy.interface_type,
- adapter->phy.auto_speeds_supported);
-
- ecmd->port = be_get_port_type(adapter->phy.interface_type,
- adapter->phy.dac_cable_len);
-
- if (adapter->phy.auto_speeds_supported) {
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |= ADVERTISED_Autoneg;
- }
+ if (!status) {
+ interface_type = adapter->phy.interface_type;
+ auto_speeds = adapter->phy.auto_speeds_supported;
+ fixed_speeds = adapter->phy.fixed_speeds_supported;
+ dac_cable_len = adapter->phy.dac_cable_len;
+
+ ecmd->supported =
+ convert_to_et_setting(interface_type,
+ auto_speeds |
+ fixed_speeds);
+ ecmd->advertising =
+ convert_to_et_setting(interface_type,
+ auto_speeds);
+
+ ecmd->port = be_get_port_type(interface_type,
+ dac_cable_len);
+
+ if (adapter->phy.auto_speeds_supported) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ }
- if (be_pause_supported(adapter)) {
ecmd->supported |= SUPPORTED_Pause;
- ecmd->advertising |= ADVERTISED_Pause;
- }
-
- switch (adapter->phy.interface_type) {
- case PHY_TYPE_KR_10GB:
- case PHY_TYPE_KX4_10GB:
- ecmd->transceiver = XCVR_INTERNAL;
- break;
- default:
- ecmd->transceiver = XCVR_EXTERNAL;
- break;
+ if (be_pause_supported(adapter))
+ ecmd->advertising |= ADVERTISED_Pause;
+
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_KR_10GB:
+ case PHY_TYPE_KX4_10GB:
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_OTHER;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
}
/* Save for future use */
@@ -787,6 +801,10 @@ static int
be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return 0;
+
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index b755f7061dce..541d4530d5bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -31,12 +31,12 @@
#define MPU_EP_CONTROL 0
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+/********** MPU semphore: used for SH & BE *************/
+#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
+#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
+#define POST_STAGE_MASK 0x0000FFFF
+#define POST_ERR_MASK 0x1
+#define POST_ERR_SHIFT 31
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
@@ -59,6 +59,9 @@
#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
#define PHYSDEV_CONTROL_INP_MASK 0x40000000
+#define SLIPORT_ERROR_NO_RESOURCE1 0x2
+#define SLIPORT_ERROR_NO_RESOURCE2 0x9
+
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -102,11 +105,6 @@
#define SLI_INTF_TYPE_2 2
#define SLI_INTF_TYPE_3 3
-/* SLI family */
-#define BE_SLI_FAMILY 0x0
-#define LANCER_A0_SLI_FAMILY 0xA
-#define SKYHAWK_SLI_FAMILY 0x2
-
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1b6cc587639..4d6f3c54427a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;
@@ -44,6 +44,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,23 +238,46 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
int status = 0;
u8 current_mac[ETH_ALEN];
u32 pmac_id = adapter->pmac_id[0];
+ bool active_mac = true;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- status = be_cmd_mac_addr_query(adapter, current_mac, false,
- adapter->if_handle, 0);
+ /* For BE VF, MAC address is already activated by PF.
+ * Hence only operation left is updating netdev->devaddr.
+ * Update it if user is passing the same MAC which was used
+ * during configuring VF MAC from PF(Hypervisor).
+ */
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, current_mac,
+ false, adapter->if_handle, 0);
+ if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
+ goto done;
+ else
+ goto err;
+ }
+
+ if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
+ goto done;
+
+ /* For Lancer check if any MAC is active.
+ * If active, get its mac id.
+ */
+ if (lancer_chip(adapter) && !be_physfn(adapter))
+ be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
+ &pmac_id, 0);
+
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+
if (status)
goto err;
- if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id[0], 0);
- if (status)
- goto err;
-
- be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
- }
+ if (active_mac)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ pmac_id, 0);
+done:
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
return 0;
err:
@@ -261,7 +285,35 @@ err:
return status;
}
-static void populate_be2_stats(struct be_adapter *adapter)
+/* BE2 supports only v0 cmd */
+static void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ }
+}
+
+/* BE2 supports only v0 cmd */
+static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (BE2_chip(adapter)) {
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ }
+}
+
+static void populate_be_v0_stats(struct be_adapter *adapter)
{
struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -310,7 +362,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
-static void populate_be3_stats(struct be_adapter *adapter)
+static void populate_be_v1_stats(struct be_adapter *adapter)
{
struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -412,28 +464,25 @@ void be_parse_stats(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int i;
- if (adapter->generation == BE_GEN3) {
- if (lancer_chip(adapter))
- populate_lancer_stats(adapter);
- else
- populate_be3_stats(adapter);
+ if (lancer_chip(adapter)) {
+ populate_lancer_stats(adapter);
} else {
- populate_be2_stats(adapter);
- }
-
- if (lancer_chip(adapter))
- goto done;
+ if (BE2_chip(adapter))
+ populate_be_v0_stats(adapter);
+ else
+ /* for BE3 and Skyhawk */
+ populate_be_v1_stats(adapter);
- /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
- for_all_rx_queues(adapter, rxo, i) {
- /* below erx HW counter can actually wrap around after
- * 65535. Driver accumulates a 32-bit value
- */
- accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
- (u16)erx->rx_drops_no_fragments[rxo->q.id]);
+ /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+ for_all_rx_queues(adapter, rxo, i) {
+ /* below erx HW counter can actually wrap around after
+ * 65535. Driver accumulates a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx->rx_drops_no_fragments \
+ [rxo->q.id]);
+ }
}
-done:
- return;
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -597,16 +646,6 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
hdr, skb_shinfo(skb)->gso_size);
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
- if (lancer_chip(adapter) && adapter->sli_family ==
- LANCER_A0_SLI_FAMILY) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
- if (is_tcp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- udpcs, hdr, 1);
- }
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -856,11 +895,15 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!be_physfn(adapter)) {
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
status = -EINVAL;
goto ret;
}
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto ret;
+
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
status = be_vid_config(adapter);
@@ -878,11 +921,15 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!be_physfn(adapter)) {
+ if (!lancer_chip(adapter) && !be_physfn(adapter)) {
status = -EINVAL;
goto ret;
}
+ /* Packets with VID 0 are always received by Lancer by default */
+ if (lancer_chip(adapter) && vid == 0)
+ goto ret;
+
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
status = be_vid_config(adapter);
@@ -917,7 +964,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > BE_MAX_MC) {
+ netdev_mc_count(netdev) > adapter->max_mcast_mac) {
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
@@ -962,6 +1009,9 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
+ bool active_mac = false;
+ u32 pmac_id;
+ u8 old_mac[ETH_ALEN];
if (!sriov_enabled(adapter))
return -EPERM;
@@ -970,6 +1020,12 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return -EINVAL;
if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, vf + 1);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ pmac_id, vf + 1);
+
status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
} else {
status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
@@ -1062,7 +1118,10 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return -EINVAL;
}
- status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
+ if (lancer_chip(adapter))
+ status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
+ else
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
@@ -1616,24 +1675,6 @@ static inline int events_get(struct be_eq_obj *eqo)
return num;
}
-static int event_handle(struct be_eq_obj *eqo)
-{
- bool rearm = false;
- int num = events_get(eqo);
-
- /* Deal with any spurious interrupts that come without events */
- if (!num)
- rearm = true;
-
- if (num || msix_enabled(eqo->adapter))
- be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
-
- if (num)
- napi_schedule(&eqo->napi);
-
- return num;
-}
-
/* Leaves the EQ is disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
@@ -1648,15 +1689,41 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+ int flush_wait = 0;
u16 tail;
- /* First cleanup pending rx completions */
- while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(rxo, rxcp);
- be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
+ /* Consume pending rx completions.
+ * Wait for the flush completion (identified by zero num_rcvd)
+ * to arrive. Notify CQ even when there are no more CQ entries
+ * for HW to flush partially coalesced CQ entries.
+ * In Lancer, there is no need to wait for flush compl.
+ */
+ for (;;) {
+ rxcp = be_rx_compl_get(rxo);
+ if (rxcp == NULL) {
+ if (lancer_chip(adapter))
+ break;
+
+ if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ dev_warn(&adapter->pdev->dev,
+ "did not receive flush compl\n");
+ break;
+ }
+ be_cq_notify(adapter, rx_cq->id, true, 0);
+ mdelay(1);
+ } else {
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, true, 1);
+ if (rxcp->num_rcvd == 0)
+ break;
+ }
}
- /* Then free posted rx buffer that were not used */
+ /* After cleanup, leave the CQ in unarmed state */
+ be_cq_notify(adapter, rx_cq->id, false, 0);
+
+ /* Then free posted rx buffers that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
page_info = get_rx_page_info(rxo, tail);
@@ -1837,12 +1904,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if (sriov_want(adapter) || be_is_mc(adapter) ||
- lancer_chip(adapter) || !be_physfn(adapter) ||
- adapter->generation == BE_GEN2)
+ if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
+ be_is_mc(adapter) ||
+ (!lancer_chip(adapter) && !be_physfn(adapter)) ||
+ BE2_chip(adapter))
return 1;
else
- return MAX_TX_QS;
+ return adapter->max_tx_queues;
}
static int be_tx_cqs_create(struct be_adapter *adapter)
@@ -1954,12 +2022,31 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
static irqreturn_t be_intx(int irq, void *dev)
{
- struct be_adapter *adapter = dev;
- int num_evts;
+ struct be_eq_obj *eqo = dev;
+ struct be_adapter *adapter = eqo->adapter;
+ int num_evts = 0;
+
+ /* IRQ is not expected when NAPI is scheduled as the EQ
+ * will not be armed.
+ * But, this can happen on Lancer INTx where it takes
+ * a while to de-assert INTx or in BE2 where occasionally
+ * an interrupt may be raised even when EQ is unarmed.
+ * If NAPI is already scheduled, then counting & notifying
+ * events will orphan them.
+ */
+ if (napi_schedule_prep(&eqo->napi)) {
+ num_evts = events_get(eqo);
+ __napi_schedule(&eqo->napi);
+ if (num_evts)
+ eqo->spurious_intr = 0;
+ }
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- /* With INTx only one EQ is used */
- num_evts = event_handle(&adapter->eq_obj[0]);
- if (num_evts)
+ /* Return IRQ_HANDLED only for the first spurious intr
+ * after a valid intr to stop the kernel from branding
+ * this irq as a bad one!
+ */
+ if (num_evts || eqo->spurious_intr++ == 0)
return IRQ_HANDLED;
else
return IRQ_NONE;
@@ -1969,7 +2056,8 @@ static irqreturn_t be_msix(int irq, void *dev)
{
struct be_eq_obj *eqo = dev;
- event_handle(eqo);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ napi_schedule(&eqo->napi);
return IRQ_HANDLED;
}
@@ -2065,9 +2153,11 @@ int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
- int max_work = 0, work, i;
+ int max_work = 0, work, i, num_evts;
bool tx_done;
+ num_evts = events_get(eqo);
+
/* Process all TXQs serviced by this EQ */
for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
@@ -2090,10 +2180,10 @@ int be_poll(struct napi_struct *napi, int budget)
if (max_work < budget) {
napi_complete(napi);
- be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
} else {
/* As we'll continue in polling mode, count and clear events */
- be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
+ be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
}
return max_work;
}
@@ -2104,7 +2194,7 @@ void be_detect_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (be_crit_error(adapter))
+ if (be_hw_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2177,9 +2267,11 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
u32 num = 0;
+
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !sriov_want(adapter) && be_physfn(adapter)) {
- num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ (lancer_chip(adapter) ||
+ (!sriov_want(adapter) && be_physfn(adapter)))) {
+ num = adapter->max_rss_queues;
num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
}
return num;
@@ -2277,10 +2369,10 @@ static int be_irq_register(struct be_adapter *adapter)
return status;
}
- /* INTx */
+ /* INTx: only the first EQ is used */
netdev->irq = adapter->pdev->irq;
status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
- adapter);
+ &adapter->eq_obj[0]);
if (status) {
dev_err(&adapter->pdev->dev,
"INTx request IRQ failed - err %d\n", status);
@@ -2302,7 +2394,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* INTx */
if (!msix_enabled(adapter)) {
- free_irq(netdev->irq, adapter);
+ free_irq(netdev->irq, &adapter->eq_obj[0]);
goto done;
}
@@ -2343,13 +2435,22 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- be_async_mcc_disable(adapter);
-
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
- for_all_evt_queues(adapter, eqo, i) {
+ for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
+
+ be_async_mcc_disable(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ be_tx_compl_clean(adapter);
+
+ be_rx_qs_destroy(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
synchronize_irq(be_msix_vec_get(adapter, eqo));
else
@@ -2359,12 +2460,6 @@ static int be_close(struct net_device *netdev)
be_irq_unregister(adapter);
- /* Wait for all pending tx completions to arrive so that
- * all tx skbs are freed.
- */
- be_tx_compl_clean(adapter);
-
- be_rx_qs_destroy(adapter);
return 0;
}
@@ -2579,10 +2674,30 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+
be_msix_disable(adapter);
return 0;
}
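+/* On Lancer, derive the VF interface capability flags from the active
+ * profile; otherwise fall back to untagged + broadcast + multicast.
+ */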
+static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
+ u32 *cap_flags, u8 domain)
+{
+ bool profile_present = false;
+ int status;
+
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, cap_flags, domain);
+ if (!status)
+ profile_present = true;
+ }
+
+ if (!profile_present)
+ *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+}
+
static int be_vf_setup_init(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
@@ -2634,9 +2749,13 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
+ be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
+
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
&vf_cfg->if_handle, vf + 1);
if (status)
@@ -2661,6 +2780,8 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
+
+ be_cmd_enable_vf(adapter, vf + 1);
}
return 0;
err:
@@ -2674,7 +2795,10 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->if_handle = -1;
adapter->be3_native = false;
adapter->promiscuous = false;
- adapter->eq_next_idx = 0;
+ if (be_physfn(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+ else
+ adapter->cmd_privileges = MIN_PRIVILEGES;
}
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2712,12 +2836,93 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
return status;
}
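+/* Fill in per-function resource limits: on Lancer from GET_FUNC_CONFIG
+ * (capped to driver-supported maximums), otherwise from fixed BE2/BE3
+ * defaults.
+ */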
+static void be_get_resources(struct be_adapter *adapter)
+{
+ int status;
+ bool profile_present = false;
+
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_func_config(adapter);
+
+ if (!status)
+ profile_present = true;
+ }
+
+ if (profile_present) {
+ /* Sanity-check and cap the FW-reported Lancer limits */
+ adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
+ BE_UC_PMAC_COUNT);
+ adapter->max_vlans = min_t(u16, adapter->max_vlans,
+ BE_NUM_VLANS_SUPPORTED);
+ adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
+ BE_MAX_MC);
+ adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
+ MAX_TX_QS);
+ adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
+ BE3_MAX_RSS_QS);
+ adapter->max_event_queues = min_t(u16,
+ adapter->max_event_queues,
+ BE3_MAX_RSS_QS);
+
+ if (adapter->max_rss_queues &&
+ adapter->max_rss_queues == adapter->max_rx_queues)
+ adapter->max_rss_queues -= 1;
+
+ if (adapter->max_event_queues < adapter->max_rss_queues)
+ adapter->max_rss_queues = adapter->max_event_queues;
+
+ } else {
+ if (be_physfn(adapter))
+ adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
+ else
+ adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
+
+ if (adapter->function_mode & FLEX10_MODE)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
+ adapter->max_mcast_mac = BE_MAX_MC;
+ adapter->max_tx_queues = MAX_TX_QS;
+ adapter->max_rss_queues = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ adapter->max_event_queues = BE3_MAX_RSS_QS;
+
+ adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+ adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+ }
+}
+
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
- int pos;
+ int pos, status;
u16 dev_num_vfs;
+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+ &adapter->function_mode,
+ &adapter->function_caps);
+ if (status)
+ goto err;
+
+ be_get_resources(adapter);
+
+ /* primary mac needs 1 pmac entry */
+ adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!adapter->pmac_id) {
+ status = -ENOMEM;
+ goto err;
+ }
+
pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
@@ -2726,13 +2931,14 @@ static int be_get_config(struct be_adapter *adapter)
dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
adapter->dev_num_vfs = dev_num_vfs;
}
- return 0;
+err:
+ return status;
}
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 cap_flags, en_flags;
+ u32 en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
@@ -2740,9 +2946,12 @@ static int be_setup(struct be_adapter *adapter)
be_setup_init(adapter);
- be_get_config(adapter);
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
- be_cmd_req_native_mode(adapter);
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
be_msix_enable(adapter);
@@ -2762,24 +2971,22 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+ /* In UMC mode the FW does not return the right privileges.
+ * Override with privileges equivalent to the PF's.
+ */
+ if (be_is_mc(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
- cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
- cap_flags |= BE_IF_FLAGS_RSS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
en_flags |= BE_IF_FLAGS_RSS;
- }
- if (lancer_chip(adapter) && !be_physfn(adapter)) {
- en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
- cap_flags = en_flags;
- }
+ en_flags = en_flags & adapter->if_cap_flags;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
&adapter->if_handle, 0);
if (status != 0)
goto err;
@@ -2827,8 +3034,8 @@ static int be_setup(struct be_adapter *adapter)
dev_warn(dev, "device doesn't support SRIOV\n");
}
- be_cmd_get_phy_info(adapter);
- if (be_pause_supported(adapter))
+ status = be_cmd_get_phy_info(adapter);
+ if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -2846,8 +3053,10 @@ static void be_netpoll(struct net_device *netdev)
struct be_eq_obj *eqo;
int i;
- for_all_evt_queues(adapter, eqo, i)
- event_handle(eqo);
+ for_all_evt_queues(adapter, eqo, i) {
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ napi_schedule(&eqo->napi);
+ }
return;
}
@@ -2895,7 +3104,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
int i = 0, img_type = 0;
struct flash_section_info_g2 *fsec_g2 = NULL;
- if (adapter->generation != BE_GEN3)
+ if (BE2_chip(adapter))
fsec_g2 = (struct flash_section_info_g2 *)fsec;
for (i = 0; i < MAX_FLASH_COMP; i++) {
@@ -2928,7 +3137,49 @@ struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
return NULL;
}
-static int be_flash_data(struct be_adapter *adapter,
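+/* Write one image to the FLASHROM in chunks of at most 32KB; intermediate
+ * chunks use the SAVE op and the final chunk uses the FLASH op (PHY
+ * variants for PHY firmware).
+ */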
+static int be_flash(struct be_adapter *adapter, const u8 *img,
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
+{
+ u32 total_bytes = 0, flash_op, num_bytes = 0;
+ int status = 0;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+
+ total_bytes = img_size;
+ while (total_bytes) {
+ num_bytes = min_t(u32, 32*1024, total_bytes);
+
+ total_bytes -= num_bytes;
+
+ if (!total_bytes) {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (optype == OPTYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+
+ memcpy(req->data_buf, img, num_bytes);
+ img += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
+ flash_op, num_bytes);
+ if (status) {
+ if (status == ILLEGAL_IOCTL_REQ &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return status;
+ }
+ }
+ return 0;
+}
+
+/* For BE2 and BE3 */
+static int be_flash_BEx(struct be_adapter *adapter,
const struct firmware *fw,
struct be_dma_mem *flash_cmd,
int num_of_images)
@@ -2936,12 +3187,9 @@ static int be_flash_data(struct be_adapter *adapter,
{
int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- u32 total_bytes = 0, flash_op;
- int num_bytes;
const u8 *p = fw->data;
- struct be_cmd_write_flashrom *req = flash_cmd->va;
const struct flash_comp *pflashcomp;
- int num_comp, hdr_size;
+ int num_comp, redboot;
struct flash_section_info *fsec = NULL;
struct flash_comp gen3_flash_types[] = {
@@ -2986,7 +3234,7 @@ static int be_flash_data(struct be_adapter *adapter,
FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
};
- if (adapter->generation == BE_GEN3) {
+ if (BE3_chip(adapter)) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
num_comp = ARRAY_SIZE(gen3_flash_types);
@@ -2995,6 +3243,7 @@ static int be_flash_data(struct be_adapter *adapter,
filehdr_size = sizeof(struct flash_file_hdr_g2);
num_comp = ARRAY_SIZE(gen2_flash_types);
}
+
/* Get flash section info*/
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
@@ -3010,70 +3259,105 @@ static int be_flash_data(struct be_adapter *adapter,
memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
continue;
- if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
- if (!phy_flashing_required(adapter))
+ if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
+ !phy_flashing_required(adapter))
continue;
- }
-
- hdr_size = filehdr_size +
- (num_of_images * sizeof(struct image_hdr));
- if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
- (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
- pflashcomp[i].size, hdr_size)))
- continue;
+ if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
+ redboot = be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size + img_hdrs_size);
+ if (!redboot)
+ continue;
+ }
- /* Flash the component */
p = fw->data;
p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
- total_bytes = pflashcomp[i].size;
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
- if (!total_bytes) {
- if (pflashcomp[i].optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_FLASH;
- else
- flash_op = FLASHROM_OPER_FLASH;
- } else {
- if (pflashcomp[i].optype == OPTYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_SAVE;
- else
- flash_op = FLASHROM_OPER_SAVE;
- }
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- pflashcomp[i].optype, flash_op, num_bytes);
- if (status) {
- if ((status == ILLEGAL_IOCTL_REQ) &&
- (pflashcomp[i].optype ==
- OPTYPE_PHY_FW))
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
- return -1;
- }
+
+ status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
+ pflashcomp[i].size);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Flashing section type %d failed.\n",
+ pflashcomp[i].img_type);
+ return status;
}
}
return 0;
}
-static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
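+/* Flash a Skyhawk (gen-4 UFI) image: for each flash section entry, map the
+ * image type to its flash op-type and write it via be_flash(); unknown
+ * types are skipped and REDBOOT is flashed only when required.
+ */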
+static int be_flash_skyhawk(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- if (fhdr == NULL)
- return 0;
- if (fhdr->build[0] == '3')
- return BE_GEN3;
- else if (fhdr->build[0] == '2')
- return BE_GEN2;
- else
- return 0;
+ int status = 0, i, filehdr_size = 0;
+ int img_offset, img_size, img_optype, redboot;
+ int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
+ const u8 *p = fw->data;
+ struct flash_section_info *fsec = NULL;
+
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+ if (!fsec) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid Cookie. UFI corrupted ?\n");
+ return -1;
+ }
+
+ for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
+ img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
+ img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+
+ switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
+ case IMAGE_FIRMWARE_iSCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCoE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_iSCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ default:
+ continue;
+ }
+
+ if (img_optype == OPTYPE_REDBOOT) {
+ redboot = be_flash_redboot(adapter, fw->data,
+ img_offset, img_size,
+ filehdr_size + img_hdrs_size);
+ if (!redboot)
+ continue;
+ }
+
+ p = fw->data;
+ p += filehdr_size + img_offset + img_hdrs_size;
+ if (p + img_size > fw->data + fw->size)
+ return -1;
+
+ status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Flashing section type %d failed.\n",
+ fsec->fsec_entry[i].type);
+ return status;
+ }
+ }
+ return 0;
}
static int lancer_wait_idle(struct be_adapter *adapter)
@@ -3207,6 +3491,28 @@ lancer_fw_exit:
return status;
}
+#define UFI_TYPE2 2
+#define UFI_TYPE3 3
+#define UFI_TYPE4 4
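+/* Map the build character in the UFI file header to a UFI type and check
+ * that it matches the adapter family; returns -1 (and logs) on a mismatch.
+ */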
+static int be_get_ufi_type(struct be_adapter *adapter,
+ struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ goto be_get_ufi_exit;
+
+ if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
+ return UFI_TYPE4;
+ else if (BE3_chip(adapter) && fhdr->build[0] == '3')
+ return UFI_TYPE3;
+ else if (BE2_chip(adapter) && fhdr->build[0] == '2')
+ return UFI_TYPE2;
+
+be_get_ufi_exit:
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ return -1;
+}
+
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
struct flash_file_hdr_g2 *fhdr;
@@ -3214,12 +3520,9 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
const u8 *p;
- int status = 0, i = 0, num_imgs = 0;
-
- p = fw->data;
- fhdr = (struct flash_file_hdr_g2 *) p;
+ int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
- flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
&flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
@@ -3229,27 +3532,32 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
goto be_fw_exit;
}
- if ((adapter->generation == BE_GEN3) &&
- (get_ufigen_type(fhdr) == BE_GEN3)) {
- fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
- num_imgs = le32_to_cpu(fhdr3->num_imgs);
- for (i = 0; i < num_imgs; i++) {
- img_hdr_ptr = (struct image_hdr *) (fw->data +
- (sizeof(struct flash_file_hdr_g3) +
- i * sizeof(struct image_hdr)));
- if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
- status = be_flash_data(adapter, fw, &flash_cmd,
- num_imgs);
+ p = fw->data;
+ fhdr = (struct flash_file_hdr_g2 *)p;
+
+ ufi_type = be_get_ufi_type(adapter, fhdr);
+
+ fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *)(fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
+ if (ufi_type == UFI_TYPE4)
+ status = be_flash_skyhawk(adapter, fw,
+ &flash_cmd, num_imgs);
+ else if (ufi_type == UFI_TYPE3)
+ status = be_flash_BEx(adapter, fw, &flash_cmd,
+ num_imgs);
}
- } else if ((adapter->generation == BE_GEN2) &&
- (get_ufigen_type(fhdr) == BE_GEN2)) {
- status = be_flash_data(adapter, fw, &flash_cmd, 0);
- } else {
- dev_err(&adapter->pdev->dev,
- "UFI and Interface are not compatible for flashing\n");
- status = -1;
}
+ if (ufi_type == UFI_TYPE2)
+ status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
+ else if (ufi_type == -1)
+ status = -1;
+
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
flash_cmd.dma);
if (status) {
@@ -3344,80 +3652,47 @@ static void be_netdev_init(struct net_device *netdev)
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
- if (adapter->csr)
- iounmap(adapter->csr);
if (adapter->db)
- iounmap(adapter->db);
- if (adapter->roce_db.base)
- pci_iounmap(adapter->pdev, adapter->roce_db.base);
+ pci_iounmap(adapter->pdev, adapter->db);
}
-static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
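+/* Doorbell registers are in BAR 0 on Lancer and on VFs, BAR 4 otherwise */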
+static int db_bar(struct be_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
- u8 __iomem *addr;
-
- addr = pci_iomap(pdev, 2, 0);
- if (addr == NULL)
- return -ENOMEM;
+ if (lancer_chip(adapter) || !be_physfn(adapter))
+ return 0;
+ else
+ return 4;
+}
- adapter->roce_db.base = addr;
- adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
- adapter->roce_db.size = 8192;
- adapter->roce_db.total_size = pci_resource_len(pdev, 2);
+static int be_roce_map_pci_bars(struct be_adapter *adapter)
+{
+ if (skyhawk_chip(adapter)) {
+ adapter->roce_db.size = 4096;
+ adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
+ db_bar(adapter));
+ adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
+ db_bar(adapter));
+ }
return 0;
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int db_reg;
-
- if (lancer_chip(adapter)) {
- if (be_type_2_3(adapter)) {
- addr = ioremap_nocache(
- pci_resource_start(adapter->pdev, 0),
- pci_resource_len(adapter->pdev, 0));
- if (addr == NULL)
- return -ENOMEM;
- adapter->db = addr;
- }
- if (adapter->if_type == SLI_INTF_TYPE_3) {
- if (lancer_roce_map_pci_bars(adapter))
- goto pci_map_err;
- }
- return 0;
- }
+ u32 sli_intf;
- if (be_physfn(adapter)) {
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
- }
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
- if (adapter->generation == BE_GEN2) {
- db_reg = 4;
- } else {
- if (be_physfn(adapter))
- db_reg = 4;
- else
- db_reg = 0;
- }
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
- pci_resource_len(adapter->pdev, db_reg));
+ addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
if (addr == NULL)
goto pci_map_err;
adapter->db = addr;
- if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
- adapter->roce_db.size = 4096;
- adapter->roce_db.io_addr =
- pci_resource_start(adapter->pdev, db_reg);
- adapter->roce_db.total_size =
- pci_resource_len(adapter->pdev, db_reg);
- }
+
+ be_roce_map_pci_bars(adapter);
return 0;
+
pci_map_err:
be_unmap_pci_bars(adapter);
return -ENOMEM;
@@ -3437,7 +3712,6 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
if (mem->va)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
- kfree(adapter->pmac_id);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -3445,8 +3719,14 @@ static int be_ctrl_init(struct be_adapter *adapter)
struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
struct be_dma_mem *rx_filter = &adapter->rx_filter;
+ u32 sli_intf;
int status;
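+ /* SLI_INTF is a config-space register that identifies the SLI
+ * family of the chip and whether this function is a VF.
+ */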
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT;
+ adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+
status = be_map_pci_bars(adapter);
if (status)
goto done;
@@ -3473,13 +3753,6 @@ static int be_ctrl_init(struct be_adapter *adapter)
goto free_mbox;
}
memset(rx_filter->va, 0, rx_filter->size);
-
- /* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3512,14 +3785,14 @@ static int be_stats_init(struct be_adapter *adapter)
{
struct be_dma_mem *cmd = &adapter->stats_cmd;
- if (adapter->generation == BE_GEN2) {
+ if (lancer_chip(adapter))
+ cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else if (BE2_chip(adapter))
cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- } else {
- if (lancer_chip(adapter))
- cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
- else
- cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- }
+ else
+ /* BE3 and Skyhawk */
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
GFP_KERNEL);
if (cmd->va == NULL)
@@ -3528,7 +3801,7 @@ static int be_stats_init(struct be_adapter *adapter)
return 0;
}
-static void __devexit be_remove(struct pci_dev *pdev)
+static void be_remove(struct pci_dev *pdev)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
@@ -3573,6 +3846,9 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
u32 level = 0;
int j;
+ if (lancer_chip(adapter))
+ return 0;
+
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
@@ -3598,26 +3874,12 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
err:
return level;
}
+
static int be_get_initial_config(struct be_adapter *adapter)
{
int status;
u32 level;
- status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
- &adapter->function_mode, &adapter->function_caps);
- if (status)
- return status;
-
- if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
-
- if (be_physfn(adapter))
- adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
- else
- adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
@@ -3642,55 +3904,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
return 0;
}
-static int be_dev_type_check(struct be_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u32 sli_intf = 0, if_type;
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- case OC_DEVICE_ID3:
- case OC_DEVICE_ID4:
- pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
- if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
- if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
- !be_type_2_3(adapter)) {
- dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
- return -EINVAL;
- }
- adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT);
- adapter->generation = BE_GEN3;
- break;
- case OC_DEVICE_ID5:
- pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
- dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
- return -EINVAL;
- }
- adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT);
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
- return 0;
-}
-
static int lancer_recover_func(struct be_adapter *adapter)
{
int status;
@@ -3721,8 +3934,9 @@ static int lancer_recover_func(struct be_adapter *adapter)
"Adapter SLIPORT recovery succeeded\n");
return 0;
err:
- dev_err(&adapter->pdev->dev,
- "Adapter SLIPORT recovery failed\n");
+ if (adapter->eeh_error)
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
return status;
}
@@ -3820,8 +4034,7 @@ static inline char *func_name(struct be_adapter *adapter)
return be_physfn(adapter) ? "PF" : "VF";
}
-static int __devinit be_probe(struct pci_dev *pdev,
- const struct pci_device_id *pdev_id)
+static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
int status = 0;
struct be_adapter *adapter;
@@ -3845,11 +4058,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
adapter = netdev_priv(netdev);
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
-
- status = be_dev_type_check(adapter);
- if (status)
- goto free_netdev;
-
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -4023,9 +4231,6 @@ static void be_shutdown(struct pci_dev *pdev)
netif_device_detach(adapter->netdev);
- if (adapter->wol)
- be_setup_wol(adapter, true);
-
be_cmd_reset_function(adapter);
pci_disable_device(pdev);
@@ -4061,9 +4266,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
/* The error could cause the FW to trigger a flash debug dump.
* Resetting the card while flash dump is in progress
- * can cause it not to recover; wait for it to finish
+ * can cause it not to recover; wait for it to finish.
+ * Wait only for the first function, since the wait is needed only
+ * once per adapter.
*/
- ssleep(30);
+ if (pdev->devfn == 0)
+ ssleep(30);
+
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index deecc44b3617..55d32aa0a093 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -47,10 +47,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
dev_info.dpp_unmapped_len = 0;
}
dev_info.pdev = adapter->pdev;
- if (adapter->sli_family == SKYHAWK_SLI_FAMILY)
- dev_info.db = adapter->db;
- else
- dev_info.db = adapter->roce_db.base;
+ dev_info.db = adapter->db;
dev_info.unmapped_db = adapter->roce_db.io_addr;
dev_info.db_page_size = adapter->roce_db.size;
dev_info.db_total_size = adapter->roce_db.total_size;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 94b7bfcdb24e..8db1c06008de 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -665,7 +665,7 @@ static void ethoc_mdio_poll(struct net_device *dev)
{
}
-static int __devinit ethoc_mdio_probe(struct net_device *dev)
+static int ethoc_mdio_probe(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
struct phy_device *phy;
@@ -905,7 +905,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
* ethoc_probe - initialize OpenCores ethernet MAC
* pdev: platform device
*/
-static int __devinit ethoc_probe(struct platform_device *pdev)
+static int ethoc_probe(struct platform_device *pdev)
{
struct net_device *netdev = NULL;
struct resource *res = NULL;
@@ -1143,7 +1143,7 @@ out:
* ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
-static int __devexit ethoc_remove(struct platform_device *pdev)
+static int ethoc_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ethoc *priv = netdev_priv(netdev);
@@ -1190,7 +1190,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove = __devexit_p(ethoc_remove),
+ .remove = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 0e4a0ac86aa8..c706b7a9397e 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
@@ -150,7 +150,7 @@ struct chip_info {
int flags;
};
-static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
+static const struct chip_info skel_netdrv_tbl[] = {
{ "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
@@ -477,8 +477,8 @@ static const struct net_device_ops netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit fealnx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int fealnx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct netdev_private *np;
int i, option, err, irq;
@@ -684,7 +684,7 @@ err_out_res:
}
-static void __devexit fealnx_remove_one(struct pci_dev *pdev)
+static void fealnx_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -1950,7 +1950,7 @@ static struct pci_driver fealnx_driver = {
.name = "fealnx",
.id_table = fealnx_pci_tbl,
.probe = fealnx_init_one,
- .remove = __devexit_p(fealnx_remove_one),
+ .remove = fealnx_remove_one,
};
static int __init fealnx_init(void)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index feff51664dcf..ec490d741fc0 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -92,4 +92,12 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
+config FEC_PTP
+ bool "PTP Hardware Clock (PHC)"
+ depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5
+ select PTP_1588_CLOCK
+ ---help---
+ Say Y here if you want to use PTP Hardware Clock (PHC) in the
+ driver. Only the basic clock operations have been implemented.
+
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3d1839afff65..d4d19b3d00ae 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_PTP) += fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fffd20528b5d..0704bcab178a 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -140,21 +140,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
- * to keep them that size.
- * We don't need to allocate pages for the transmitter. We just use
- * the skbuffer directly.
- */
-#define FEC_ENET_RX_PAGES 8
-#define FEC_ENET_RX_FRSIZE 2048
-#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
-#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
-#define FEC_ENET_TX_FRSIZE 2048
-#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 16 /* Must be power of two */
-#define TX_RING_MOD_MASK 15 /* for this to work */
-
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
@@ -179,9 +164,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
#define PKT_MAXBLR_SIZE 1520
-/* This device has up to three irqs on some platforms */
-#define FEC_IRQ_NUM 3
-
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define OPT_FRAME_SIZE 0
#endif
-/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
- * tx_bd_base always point to the base of the buffer descriptors. The
- * cur_rx and cur_tx point to the currently available buffer.
- * The dirty_tx tracks the current buffer that is being sent by the
- * controller. The cur_tx and dirty_tx are equal under both completely
- * empty and completely full conditions. The empty/ready indicator in
- * the buffer descriptor determines the actual condition.
- */
-struct fec_enet_private {
- /* Hardware registers of the FEC device */
- void __iomem *hwp;
-
- struct net_device *netdev;
-
- struct clk *clk_ipg;
- struct clk *clk_ahb;
-
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
- unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff* tx_skbuff[TX_RING_SIZE];
- struct sk_buff* rx_skbuff[RX_RING_SIZE];
- ushort skb_cur;
- ushort skb_dirty;
-
- /* CPM dual port RAM relative addresses */
- dma_addr_t bd_dma;
- /* Address of Rx and Tx buffers */
- struct bufdesc *rx_bd_base;
- struct bufdesc *tx_bd_base;
- /* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
- /* The ring entries to be free()ed */
- struct bufdesc *dirty_tx;
-
- uint tx_full;
- /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
- spinlock_t hw_lock;
-
- struct platform_device *pdev;
-
- int opened;
- int dev_id;
-
- /* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
- int mii_timeout;
- uint phy_speed;
- phy_interface_t phy_interface;
- int link;
- int full_duplex;
- struct completion mdio_done;
- int irq[FEC_IRQ_NUM];
-};
-
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
@@ -353,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
+#ifdef CONFIG_FEC_PTP
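+ /* Enhanced descriptors only: request a hardware TX timestamp when
+ * the skb asks for one and timestamping is enabled, and mark the
+ * skb so fec_enet_tx() can report the captured time.
+ */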
+ bdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ fep->hwts_tx_en)) {
+ bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else {
+ bdp->cbd_esc = BD_ENET_TX_INT;
+ }
+#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -510,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
+#ifdef CONFIG_FEC_PTP
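+ /* Bit 4 of ECR (EN1588 on i.MX ENET) enables the enhanced buffer
+ * descriptors that carry the 1588 timestamps.
+ */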
+ ecntl |= (1 << 4);
+#endif
+
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+#ifdef CONFIG_FEC_PTP
+ fec_ptp_start_cyclecounter(ndev);
+#endif
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
@@ -599,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_packets++;
}
+#ifdef CONFIG_FEC_PTP
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned long flags;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ shhwtstamps.hwtstamp = ns_to_ktime(
+ timecounter_cyc2time(&fep->tc, bdp->ts));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+#endif
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
@@ -725,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
+#ifdef CONFIG_FEC_PTP
+ /* Attach the hardware receive timestamp from the descriptor to the skb */
+ if (fep->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps(skb);
+ unsigned long flags;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ shhwtstamps->hwtstamp = ns_to_ktime(
+ timecounter_cyc2time(&fep->tc, bdp->ts));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+#endif
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
}
@@ -739,6 +712,12 @@ rx_processing_done:
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+ bdp->cbd_prot = 0;
+ bdp->cbd_bdu = 0;
+#endif
+
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
+#ifdef CONFIG_FEC_PTP
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_ioctl(ndev, rq, cmd);
+#endif
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1224,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
bdp++;
}
@@ -1237,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
+
+#ifdef CONFIG_FEC_PTP
+ bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
bdp++;
}
@@ -1494,7 +1484,7 @@ static int fec_enet_init(struct net_device *ndev)
}
#ifdef CONFIG_OF
-static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
+static int fec_get_phy_mode_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1504,7 +1494,7 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
return -ENODEV;
}
-static void __devinit fec_reset_phy(struct platform_device *pdev)
+static void fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
int msec = 1;
@@ -1543,7 +1533,7 @@ static inline void fec_reset_phy(struct platform_device *pdev)
}
#endif /* CONFIG_OF */
-static int __devinit
+static int
fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
@@ -1638,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
+#ifdef CONFIG_FEC_PTP
+ fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ if (IS_ERR(fep->clk_ptp)) {
+ ret = PTR_ERR(fep->clk_ptp);
+ goto failed_clk;
+ }
+#endif
+
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-
+#ifdef CONFIG_FEC_PTP
+ clk_prepare_enable(fep->clk_ptp);
+#endif
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
+#ifdef CONFIG_FEC_PTP
+ fec_ptp_init(ndev, pdev);
+#endif
+
return 0;
failed_register:
@@ -1677,6 +1681,9 @@ failed_init:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
+#ifdef CONFIG_FEC_PTP
+ clk_disable_unprepare(fep->clk_ptp);
+#endif
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1694,7 +1701,7 @@ failed_alloc_etherdev:
return ret;
}
-static int __devexit
+static int
fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1709,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
+#ifdef CONFIG_FEC_PTP
+ del_timer_sync(&fep->time_keep);
+ clk_disable_unprepare(fep->clk_ptp);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
@@ -1777,7 +1790,7 @@ static struct platform_driver fec_driver = {
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove = __devexit_p(fec_drv_remove),
+ .remove = fec_drv_remove,
};
module_platform_driver(fec_driver);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8408c627b195..c5a3bc1475c7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,6 +13,12 @@
#define FEC_H
/****************************************************************************/
+#ifdef CONFIG_FEC_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif
+
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@ struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
+#ifdef CONFIG_FEC_PTP
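+ /* Extra fields of the enhanced (1588-capable) descriptor layout:
+ * extended control/status and the hardware-captured timestamp.
+ */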
+ unsigned long cbd_esc;
+ unsigned long cbd_prot;
+ unsigned long cbd_bdu;
+ unsigned long ts;
+ unsigned short res0[4];
+#endif
};
#else
struct bufdesc {
@@ -147,6 +160,112 @@ struct bufdesc {
#define BD_ENET_TX_CSL ((ushort)0x0001)
#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+/* Enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+
+#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 16 /* Must be power of two */
+#define TX_RING_MOD_MASK 15 /* for this to work */
+
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ void __iomem *hwp;
+
+ struct net_device *netdev;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+#ifdef CONFIG_FEC_PTP
+ struct clk *clk_ptp;
+#endif
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ ushort skb_cur;
+ ushort skb_dirty;
+
+ /* CPM dual port RAM relative addresses */
+ dma_addr_t bd_dma;
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *rx_bd_base;
+ struct bufdesc *tx_bd_base;
+ /* The next free ring entry */
+ struct bufdesc *cur_rx, *cur_tx;
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
+ uint tx_full;
+ /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+ spinlock_t hw_lock;
+
+ struct platform_device *pdev;
+
+ int opened;
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
+ int link;
+ int full_duplex;
+ struct completion mdio_done;
+ int irq[FEC_IRQ_NUM];
+
+#ifdef CONFIG_FEC_PTP
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ int rx_hwtstamp_filter;
+ u32 base_incval;
+ u32 cycle_speed;
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct timer_list time_keep;
+#endif
+
+};
+
+#ifdef CONFIG_FEC_PTP
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+#endif
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2933d08b036e..817d081d2cd8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -845,7 +845,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
/* OF Driver */
/* ======================================================================== */
-static int __devinit mpc52xx_fec_probe(struct platform_device *op)
+static int mpc52xx_fec_probe(struct platform_device *op)
{
int rv;
struct net_device *ndev;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 000000000000..c40526c78c20
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,383 @@
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE 0x00002000
+#define FEC_T_CTRL_CAPTURE 0x00000800
+#define FEC_T_CTRL_RESTART 0x00000200
+#define FEC_T_CTRL_PERIOD_RST 0x00000030
+#define FEC_T_CTRL_PERIOD_EN 0x00000010
+#define FEC_T_CTRL_ENABLE 0x00000001
+
+#define FEC_T_INC_MASK 0x0000007f
+#define FEC_T_INC_OFFSET 0
+#define FEC_T_INC_CORR_MASK 0x00007f00
+#define FEC_T_INC_CORR_OFFSET 8
+
+#define FEC_ATIME_CTRL 0x400
+#define FEC_ATIME 0x404
+#define FEC_ATIME_EVT_OFFSET 0x408
+#define FEC_ATIME_EVT_PERIOD 0x40c
+#define FEC_ATIME_CORR 0x410
+#define FEC_ATIME_INC 0x414
+#define FEC_TS_TIMESTAMP 0x418
+
+#define FEC_CC_MULT (1 << 31)
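+/* With cc.shift = 31 and cc.mult = (1 << 31), timecounter_cyc2time() returns
+ * ns = (cycles * mult) >> shift = cycles, i.e. one nanosecond per counter
+ * tick when the increment register is programmed for a 1 ns period.
+ */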
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
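+ /* Setting the CAPTURE bit latches the free-running 1588 counter
+ * into the ATIME register, which is read back below.
+ */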
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * this function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed point cycles registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
+ int inc;
+
+ inc = 1000000000 / clk_get_rate(fep->clk_ptp);
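+ /* inc is the counter period in ns; e.g. a 125 MHz ptp clock gives
+ * inc = 8, so the counter advances 8 ns per tick.
+ */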
+
+ /* grab the ptp lock */
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* 1ns counter */
+ writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+ /* use free running count */
+ writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+ writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
+
+ memset(&fep->cc, 0, sizeof(fep->cc));
+ fep->cc.read = fec_ptp_read;
+ fep->cc.mask = CLOCKSOURCE_MASK(32);
+ fep->cc.shift = 31;
+ fep->cc.mult = FEC_CC_MULT;
+
+ /* reset the ns time counter */
+ timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because ENET hardware frequency adjustment is complex,
+ * a software method is used instead.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ u32 mult = FEC_CC_MULT;
+
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ neg_adj = 1;
+ }
+
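+ /* Scale the nominal mult by ppb: e.g. ppb = 1000 gives
+ * diff = mult / 1000000, a 1 ppm change in the cycle-to-ns
+ * conversion done in software.
+ */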
+ diff = mult;
+ diff *= ppb;
+ diff = div_u64(diff, 1000000000ULL);
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /*
+ * Dummy read to update cycle_last in the timecounter to now, so
+ * the adjusted mult takes effect on the next timecounter_read()
+ * call.
+ */
+ timecounter_read(&fep->tc);
+
+ fep->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ unsigned long flags;
+ u64 now;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ now = timecounter_read(&fep->tc);
+ now += delta;
+
+ /* reset the timecounter */
+ timecounter_init(&fep->tc, &fep->cc, now);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct fec_enet_private *adapter =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ u64 ns;
+ unsigned long flags;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_ioctl - control hardware time stamping
+ * @ndev: pointer to net_device
+ * @ifr: ioctl data
+ * @cmd: particular ioctl requested
+ */
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ fep->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ fep->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (fep->hwts_rx_en)
+ fep->hwts_rx_en = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ default:
+ /*
+ * register RXMTRL must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages and hardware does not support
+ * timestamping all packets => return error
+ */
+ fep->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * fec_time_keep - call timecounter_read every second to avoid counter overrun
+ * because the ENET only provides a 32-bit counter, which wraps in about 4s
+ */
+static void fec_time_keep(unsigned long _data)
+{
+ struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ mod_timer(&fep->time_keep, jiffies + HZ);
+}
+
+/**
+ * fec_ptp_init
+ * @ndev: The FEC network adapter
+ * @pdev: The FEC platform device
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->ptp_caps.owner = THIS_MODULE;
+ snprintf(fep->ptp_caps.name, 16, "fec ptp");
+
+ fep->ptp_caps.max_adj = 250000000;
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.pps = 0;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+ fep->ptp_caps.gettime = fec_ptp_gettime;
+ fep->ptp_caps.settime = fec_ptp_settime;
+ fep->ptp_caps.enable = fec_ptp_enable;
+
+ spin_lock_init(&fep->tmreg_lock);
+
+ fec_ptp_start_cyclecounter(ndev);
+
+ init_timer(&fep->time_keep);
+ fep->time_keep.data = (unsigned long)fep;
+ fep->time_keep.function = fec_time_keep;
+ fep->time_keep.expires = jiffies + HZ;
+ add_timer(&fep->time_keep);
+
+ fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+ if (IS_ERR(fep->ptp_clock)) {
+ fep->ptp_clock = NULL;
+ pr_err("ptp_clock_register failed\n");
+ } else {
+ pr_info("registered PHC device on %s\n", ndev->name);
+ }
+}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2b7633f766d9..e9879c5af7ba 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1004,7 +1004,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
};
static struct of_device_id fs_enet_match[];
-static int __devinit fs_enet_probe(struct platform_device *ofdev)
+static int fs_enet_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct net_device *ndev;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 151453309401..2bafbd37c247 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -108,8 +108,7 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = mdio_read,
};
-static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
- struct device_node *np)
+static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
{
struct resource res;
const u32 *data;
@@ -150,7 +149,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *new_bus;
struct bb_info *bitbang;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index cdf702a59485..18e8ef203736 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
}
static struct of_device_id fs_enet_mdio_fec_match[];
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct resource res;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19ac096cb07b..bffb2edd6858 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -210,7 +210,7 @@ static int gfar_init_bds(struct net_device *ndev)
skb = gfar_new_skb(ndev);
if (!skb) {
netdev_err(ndev, "Can't allocate RX buffers\n");
- goto err_rxalloc_fail;
+ return -ENOMEM;
}
rx_queue->rx_skbuff[j] = skb;
@@ -223,10 +223,6 @@ static int gfar_init_bds(struct net_device *ndev)
}
return 0;
-
-err_rxalloc_fail:
- free_skb_resources(priv);
- return -ENOMEM;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -1359,7 +1355,11 @@ static int gfar_restore(struct device *dev)
return 0;
}
- gfar_init_bds(ndev);
+ if (gfar_init_bds(ndev)) {
+ free_skb_resources(priv);
+ return -ENOMEM;
+ }
+
init_registers(ndev);
gfar_set_mac_address(ndev);
gfar_init_mac(ndev);
@@ -1712,6 +1712,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[i] = NULL;
}
kfree(tx_queue->tx_skbuff);
+ tx_queue->tx_skbuff = NULL;
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
@@ -1735,6 +1736,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
rxbdp++;
}
kfree(rx_queue->rx_skbuff);
+ rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 1afb5ea2a984..418068b941b1 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -189,7 +189,7 @@ static int xgmac_mdio_reset(struct mii_bus *bus)
return ret;
}
-static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
@@ -240,7 +240,7 @@ err_ioremap:
return ret;
}
-static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+static int xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3f4391bede81..e3c7c697fc45 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -308,7 +308,7 @@ static void wait(void)
* Read board id and convert to string.
* Effectively same code as decode_eisa_sig
*/
-static __devinit const char *hp100_read_id(int ioaddr)
+static const char *hp100_read_id(int ioaddr)
{
int i;
static char str[HP100_SIG_LEN];
@@ -447,8 +447,8 @@ static const struct net_device_ops hp100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
- u_char bus, struct pci_dev *pci_dev)
+static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
+ struct pci_dev *pci_dev)
{
int i;
int err = -ENODEV;
@@ -2866,7 +2866,7 @@ static int __init hp100_eisa_probe (struct device *gendev)
return err;
}
-static int __devexit hp100_eisa_remove (struct device *gendev)
+static int hp100_eisa_remove(struct device *gendev)
{
struct net_device *dev = dev_get_drvdata(gendev);
cleanup_dev(dev);
@@ -2878,14 +2878,14 @@ static struct eisa_driver hp100_eisa_driver = {
.driver = {
.name = "hp100",
.probe = hp100_eisa_probe,
- .remove = __devexit_p (hp100_eisa_remove),
+ .remove = hp100_eisa_remove,
}
};
#endif
#ifdef CONFIG_PCI
-static int __devinit hp100_pci_probe (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int hp100_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev;
int ioaddr;
@@ -2937,7 +2937,7 @@ static int __devinit hp100_pci_probe (struct pci_dev *pdev,
return err;
}
-static void __devexit hp100_pci_remove (struct pci_dev *pdev)
+static void hp100_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -2950,7 +2950,7 @@ static struct pci_driver hp100_pci_driver = {
.name = "hp100",
.id_table = hp100_pci_tbl,
.probe = hp100_pci_probe,
- .remove = __devexit_p(hp100_pci_remove),
+ .remove = hp100_pci_remove,
};
#endif
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index 067db3f13e91..5d353c660068 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -72,7 +72,7 @@ static void ether1_timeout(struct net_device *dev);
/* ------------------------------------------------------------------------- */
-static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+static char version[] = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
#define BUS_16 16
#define BUS_8 8
@@ -250,7 +250,7 @@ ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsig
} while (thislen);
}
-static int __devinit
+static int
ether1_ramtest(struct net_device *dev, unsigned char byte)
{
unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
@@ -304,7 +304,7 @@ ether1_reset (struct net_device *dev)
return BUS_16;
}
-static int __devinit
+static int
ether1_init_2(struct net_device *dev)
{
int i;
@@ -638,12 +638,6 @@ ether1_txalloc (struct net_device *dev, int size)
static int
ether1_open (struct net_device *dev)
{
- if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
- dev->name);
- return -EINVAL;
- }
-
if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
return -EAGAIN;
@@ -972,7 +966,7 @@ ether1_setmulticastlist (struct net_device *dev)
/* ------------------------------------------------------------------------- */
-static void __devinit ether1_banner(void)
+static void ether1_banner(void)
{
static unsigned int version_printed = 0;
@@ -991,7 +985,7 @@ static const struct net_device_ops ether1_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit
+static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
@@ -1052,7 +1046,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit ether1_remove(struct expansion_card *ec)
+static void ether1_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
@@ -1070,7 +1064,7 @@ static const struct ecard_id ether1_ids[] = {
static struct ecard_driver ether1_driver = {
.probe = ether1_probe,
- .remove = __devexit_p(ether1_remove),
+ .remove = ether1_remove,
.id_table = ether1_ids,
.drv = {
.name = "ether1",
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 6eba352c52e0..f42f1b707733 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -150,7 +150,7 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
#define LAN_PROM_ADDR 0xF0810000
-static int __devinit
+static int
lan_init_chip(struct parisc_device *dev)
{
struct net_device *netdevice;
@@ -195,7 +195,7 @@ lan_init_chip(struct parisc_device *dev)
return retval;
}
-static int __devexit lan_remove_chip (struct parisc_device *pdev)
+static int lan_remove_chip(struct parisc_device *pdev)
{
struct net_device *dev = parisc_get_drvdata(pdev);
struct i596_private *lp = netdev_priv(dev);
@@ -219,10 +219,10 @@ static struct parisc_driver lan_driver = {
.name = "lasi_82596",
.id_table = lan_tbl,
.probe = lan_init_chip,
- .remove = __devexit_p(lan_remove_chip),
+ .remove = lan_remove_chip,
};
-static int __devinit lasi_82596_init(void)
+static int lasi_82596_init(void)
{
printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
return register_parisc_driver(&lan_driver);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 3efbd8dbb63d..f045ea4dc514 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1048,7 +1048,7 @@ static const struct net_device_ops i596_netdev_ops = {
#endif
};
-static int __devinit i82596_probe(struct net_device *dev)
+static int i82596_probe(struct net_device *dev)
{
int i;
struct i596_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6b2a88817473..4ceae9a30274 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -75,7 +75,7 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
}
-static int __devinit sni_82596_probe(struct platform_device *dev)
+static int sni_82596_probe(struct platform_device *dev)
{
struct net_device *netdevice;
struct i596_private *lp;
@@ -147,7 +147,7 @@ probe_failed_free_mpu:
return retval;
}
-static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
+static int sni_82596_driver_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct i596_private *lp = netdev_priv(dev);
@@ -163,14 +163,14 @@ static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
static struct platform_driver sni_82596_driver = {
.probe = sni_82596_probe,
- .remove = __devexit_p(sni_82596_driver_remove),
+ .remove = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
.owner = THIS_MODULE,
},
};
-static int __devinit sni_82596_init(void)
+static int sni_82596_init(void)
{
printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
return platform_driver_register(&sni_82596_driver);
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index b9773d229192..6529d31595a7 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_IBM
bool "IBM devices"
default y
depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \
- (IBMEBUS && INET && SPARSEMEM)
+ (IBMEBUS && SPARSEMEM)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -33,8 +33,7 @@ source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
tristate "eHEA Ethernet support"
- depends on IBMEBUS && INET && SPARSEMEM
- select INET_LRO
+ depends on IBMEBUS && SPARSEMEM
---help---
This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f4d2da0db1b1..19b64de7124b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -98,10 +98,10 @@ static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;
-static int __devinit ehea_probe_adapter(struct platform_device *dev,
- const struct of_device_id *id);
+static int ehea_probe_adapter(struct platform_device *dev,
+ const struct of_device_id *id);
-static int __devexit ehea_remove(struct platform_device *dev);
+static int ehea_remove(struct platform_device *dev);
static struct of_device_id ehea_device_table[] = {
{
@@ -2909,7 +2909,7 @@ static ssize_t ehea_show_port_id(struct device *dev,
static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
NULL);
-static void __devinit logical_port_release(struct device *dev)
+static void logical_port_release(struct device *dev)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
of_node_put(port->ofdev.dev.of_node);
@@ -3028,7 +3028,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ehea_set_ethtool_ops(dev);
dev->hw_features = NETIF_F_SG | NETIF_F_TSO
- | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
+ | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
@@ -3257,8 +3257,8 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
-static int __devinit ehea_probe_adapter(struct platform_device *dev,
- const struct of_device_id *id)
+static int ehea_probe_adapter(struct platform_device *dev,
+ const struct of_device_id *id)
{
struct ehea_adapter *adapter;
const u64 *adapter_handle;
@@ -3364,7 +3364,7 @@ out:
return ret;
}
-static int __devexit ehea_remove(struct platform_device *dev)
+static int ehea_remove(struct platform_device *dev)
{
struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
int i;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 8364815c32ff..99b6c2a38dbf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -39,26 +39,6 @@
* hcp_* - structures, variables and functions releated to Hypervisor Calls
*/
-static inline u32 get_longbusy_msecs(int long_busy_ret_code)
-{
- switch (long_busy_ret_code) {
- case H_LONG_BUSY_ORDER_1_MSEC:
- return 1;
- case H_LONG_BUSY_ORDER_10_MSEC:
- return 10;
- case H_LONG_BUSY_ORDER_100_MSEC:
- return 100;
- case H_LONG_BUSY_ORDER_1_SEC:
- return 1000;
- case H_LONG_BUSY_ORDER_10_SEC:
- return 10000;
- case H_LONG_BUSY_ORDER_100_SEC:
- return 100000;
- default:
- return 1;
- }
-}
-
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a0fe6e3fce61..256bdb8e1994 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2261,8 +2261,8 @@ struct emac_depentry {
#define EMAC_DEP_PREV_IDX 5
#define EMAC_DEP_COUNT 6
-static int __devinit emac_check_deps(struct emac_instance *dev,
- struct emac_depentry *deps)
+static int emac_check_deps(struct emac_instance *dev,
+ struct emac_depentry *deps)
{
int i, there = 0;
struct device_node *np;
@@ -2314,8 +2314,8 @@ static void emac_put_deps(struct emac_instance *dev)
of_dev_put(dev->tah_dev);
}
-static int __devinit emac_of_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
{
/* We are only intereted in device addition */
if (action == BUS_NOTIFY_BOUND_DRIVER)
@@ -2323,11 +2323,11 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
return 0;
}
-static struct notifier_block emac_of_bus_notifier __devinitdata = {
+static struct notifier_block emac_of_bus_notifier = {
.notifier_call = emac_of_bus_notify
};
-static int __devinit emac_wait_deps(struct emac_instance *dev)
+static int emac_wait_deps(struct emac_instance *dev)
{
struct emac_depentry deps[EMAC_DEP_COUNT];
int i, err;
@@ -2367,8 +2367,8 @@ static int __devinit emac_wait_deps(struct emac_instance *dev)
return err;
}
-static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
- u32 *val, int fatal)
+static int emac_read_uint_prop(struct device_node *np, const char *name,
+ u32 *val, int fatal)
{
int len;
const u32 *prop = of_get_property(np, name, &len);
@@ -2382,7 +2382,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
return 0;
}
-static int __devinit emac_init_phy(struct emac_instance *dev)
+static int emac_init_phy(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct net_device *ndev = dev->ndev;
@@ -2518,7 +2518,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
return 0;
}
-static int __devinit emac_init_config(struct emac_instance *dev)
+static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
@@ -2703,7 +2703,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_change_mtu = emac_change_mtu,
};
-static int __devinit emac_probe(struct platform_device *ofdev)
+static int emac_probe(struct platform_device *ofdev)
{
struct net_device *ndev;
struct emac_instance *dev;
@@ -2930,7 +2930,7 @@ static int __devinit emac_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit emac_remove(struct platform_device *ofdev)
+static int emac_remove(struct platform_device *ofdev)
{
struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 479e43e2f1ef..50ea12bfb579 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -33,8 +33,7 @@
static int mal_count;
-int __devinit mal_register_commac(struct mal_instance *mal,
- struct mal_commac *commac)
+int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
unsigned long flags;
@@ -517,7 +516,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
-static int __devinit mal_probe(struct platform_device *ofdev)
+static int mal_probe(struct platform_device *ofdev)
{
struct mal_instance *mal;
int err = 0, i, bd_size;
@@ -729,7 +728,7 @@ static int __devinit mal_probe(struct platform_device *ofdev)
return err;
}
-static int __devexit mal_remove(struct platform_device *ofdev)
+static int mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
@@ -738,13 +737,11 @@ static int __devexit mal_remove(struct platform_device *ofdev)
/* Synchronize with scheduled polling */
napi_disable(&mal->napi);
- if (!list_empty(&mal->list)) {
+ if (!list_empty(&mal->list))
/* This is *very* bad */
- printk(KERN_EMERG
+ WARN(1, KERN_EMERG
"mal%d: commac list is not empty on remove!\n",
mal->index);
- WARN_ON(1);
- }
dev_set_drvdata(&ofdev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index d3123282e18e..39251765b55d 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -93,7 +93,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
}
}
-int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
+int rgmii_attach(struct platform_device *ofdev, int input, int mode)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -228,7 +228,7 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
}
-static int __devinit rgmii_probe(struct platform_device *ofdev)
+static int rgmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
@@ -289,7 +289,7 @@ static int __devinit rgmii_probe(struct platform_device *ofdev)
return rc;
}
-static int __devexit rgmii_remove(struct platform_device *ofdev)
+static int rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 872912ef518d..795f1393e2b6 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -23,7 +23,7 @@
#include "emac.h"
#include "core.h"
-int __devinit tah_attach(struct platform_device *ofdev, int channel)
+int tah_attach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -87,7 +87,7 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit tah_probe(struct platform_device *ofdev)
+static int tah_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
@@ -135,7 +135,7 @@ static int __devinit tah_probe(struct platform_device *ofdev)
return rc;
}
-static int __devexit tah_remove(struct platform_device *ofdev)
+static int tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 415e9b4d5408..f91202f42125 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -82,7 +82,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
+int zmii_attach(struct platform_device *ofdev, int input, int *mode)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct zmii_regs __iomem *p = dev->base;
@@ -231,7 +231,7 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit zmii_probe(struct platform_device *ofdev)
+static int zmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
@@ -282,7 +282,7 @@ static int __devinit zmii_probe(struct platform_device *ofdev)
return rc;
}
-static int __devexit zmii_remove(struct platform_device *ofdev)
+static int zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b68d28a130e6..f2fdbb79837e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1324,8 +1324,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
#endif
};
-static int __devinit ibmveth_probe(struct vio_dev *dev,
- const struct vio_device_id *id)
+static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
int rc, i;
struct net_device *netdev;
@@ -1426,7 +1425,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
return 0;
}
-static int __devexit ibmveth_remove(struct vio_dev *dev)
+static int ibmveth_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev_get_drvdata(&dev->dev);
struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -1593,7 +1592,7 @@ static int ibmveth_resume(struct device *dev)
return 0;
}
-static struct vio_device_id ibmveth_device_table[] __devinitdata = {
+static struct vio_device_id ibmveth_device_table[] = {
{ "network", "IBM,l-lan"},
{ "", "" }
};
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1b563bb959c2..068d78151658 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2167,7 +2167,7 @@ static const struct ethtool_ops ipg_ethtool_ops = {
.nway_reset = ipg_nway_reset,
};
-static void __devexit ipg_remove(struct pci_dev *pdev)
+static void ipg_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ipg_nic_private *sp = netdev_priv(dev);
@@ -2199,8 +2199,7 @@ static const struct net_device_ops ipg_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit ipg_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned int i = id->driver_data;
struct ipg_nic_private *sp;
@@ -2296,7 +2295,7 @@ static struct pci_driver ipg_pci_driver = {
.name = IPG_DRIVER_NAME,
.id_table = ipg_pci_tbl,
.probe = ipg_probe,
- .remove = __devexit_p(ipg_remove),
+ .remove = ipg_remove,
};
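
The conversion above (and the matching ones for e100 and e1000 further down) all follow the same shape: with the hotplug-only init sections going away, probe/remove callbacks lose their __devinit/__devexit annotations and .remove is assigned directly instead of through the __devexit_p() wrapper. A minimal sketch of the resulting driver skeleton, using a made-up "foo" device purely for illustration (the vendor/device ID below is hypothetical):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id foo_pci_tbl[] = {
	{ PCI_DEVICE(0x8086, 0x1234) },	/* hypothetical ID for the sketch */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* enable the device, map BARs, register the netdev, ... */
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* tear down whatever foo_probe() set up */
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_pci_tbl,
	.probe		= foo_probe,	/* no __devinit annotation */
	.remove		= foo_remove,	/* no __devexit_p() wrapper */
};
module_pci_driver(foo_driver);
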
static int __init ipg_init_module(void)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4fe9406..ddee4060948a 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@ config E1000E
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
-config IGB_PTP
- bool "PTP Hardware Clock (PHC)"
- default n
- depends on IGB && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
- Every timestamp and clock read operations must consult the
- overflow counter to form a correct time value.
-
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
@@ -178,8 +166,9 @@ config IXGB
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
- depends on PCI && INET
+ depends on PCI
select MDIO
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
If unsure, say N.
-config IXGBE_PTP
- bool "PTP Clock Support"
- default n
- depends on IXGBE && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want support for 1588 Timestamping with a
- PHC device, using the PTP 1588 Clock support. This is
- required to enable timestamping support for the device.
-
- If unsure, say N.
-
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 29ce9bd27f94..a59f0779e1c3 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2829,8 +2829,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_set_features = e100_set_features,
};
-static int __devinit e100_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct nic *nic;
@@ -2981,7 +2980,7 @@ err_out_free_dev:
return err;
}
-static void __devexit e100_remove(struct pci_dev *pdev)
+static void e100_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3167,7 +3166,7 @@ static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
.probe = e100_probe,
- .remove = __devexit_p(e100_remove),
+ .remove = e100_remove,
#ifdef CONFIG_PM
/* Power Management hooks */
.suspend = e100_suspend,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 9089d00f1421..14e30515f6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1671,7 +1671,7 @@ static void e1000_get_wol(struct net_device *netdev,
/* apply any specific unsupported masks here */
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* KSP3 does not suppport UCAST wake-ups */
+ /* KSP3 does not support UCAST wake-ups */
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d6839528761..8fedd2451538 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
};
static DEFINE_SPINLOCK(e1000_eeprom_lock);
+static DEFINE_SPINLOCK(e1000_phy_lock);
/**
* e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_read_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
@@ -2965,19 +2972,25 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_write_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
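
For readers skimming the hunks above, the point of the new e1000_phy_lock is that an IGP multi-page PHY access is a two-step MDIC sequence (page select, then the actual register read or write), and the two steps must not be interleaved with another access. A minimal sketch of the critical section, illustrative only (the wrapper name below is invented; the _ex routines and constants are the driver's own):

static DEFINE_SPINLOCK(e1000_phy_lock);

static s32 phy_access_paged_read(struct e1000_hw *hw, u32 reg_addr,
				 u16 *phy_data)
{
	unsigned long flags;
	s32 ret_val = 0;

	spin_lock_irqsave(&e1000_phy_lock, flags);

	/* step 1: select the page the register lives on */
	if (reg_addr > MAX_PHY_MULTI_PAGE_REG)
		ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
						 (u16)reg_addr);

	/* step 2: access the register itself, still under the lock */
	if (!ret_val)
		ret_val = e1000_read_phy_reg_ex(hw,
						MAX_PHY_REG_ADDRESS & reg_addr,
						phy_data);

	spin_unlock_irqrestore(&e1000_phy_lock, flags);
	return ret_val;
}
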
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 222bfaff4622..294da56b824c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -111,7 +111,7 @@ void e1000_update_stats(struct e1000_adapter *adapter);
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void __devexit e1000_remove(struct pci_dev *pdev);
+static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
@@ -202,7 +202,7 @@ static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
- .remove = __devexit_p(e1000_remove),
+ .remove = e1000_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = e1000_suspend,
@@ -938,8 +938,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
@@ -1273,7 +1272,7 @@ err_pci_reg:
* memory.
**/
-static void __devexit e1000_remove(struct pci_dev *pdev)
+static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1309,7 +1308,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* e1000_init_hw_struct MUST be called before this function
**/
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1340,7 +1339,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
* number of queues at compile-time.
**/
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct e1000_tx_ring), GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 1301eba8b57a..750fc0194f37 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -45,7 +45,7 @@
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
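
To see what the dropped __devinitdata placement affects, it may help to expand the macro for one real option; the sketch below is approximately what E1000_PARAM(TxDescriptors, "Number of transmit descriptors") turns into after this change (expansion written out by hand, so treat it as illustrative):

static int TxDescriptors[E1000_MAX_NIC + 1] =
	{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };	/* E1000_PARAM_INIT */
static unsigned int num_TxDescriptors;
module_param_array_named(TxDescriptors, TxDescriptors, int,
			 &num_TxDescriptors, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

The only behavioral difference from before is that the parameter array now lives in ordinary .data rather than in the (potentially discarded) devinit data section.
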
@@ -205,9 +205,9 @@ struct e1000_option {
} arg;
};
-static int __devinit e1000_validate_option(unsigned int *value,
- const struct e1000_option *opt,
- struct e1000_adapter *adapter)
+static int e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -268,7 +268,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* in a variable in the adapter structure.
**/
-void __devinit e1000_check_options(struct e1000_adapter *adapter)
+void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
@@ -534,7 +534,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on fiber adapters
**/
-static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
@@ -560,7 +560,7 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on copper adapters
**/
-static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4dd18a1f45d2..e73c2c355993 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
*/
@@ -80,7 +79,8 @@
1 = 50-80M
2 = 80-110M
3 = 110-140M
- 4 = >140M */
+ 4 = >140M
+ */
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
@@ -95,8 +95,7 @@
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-/*
- * A table for the GG82563 cable length where the range is defined
+/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
@@ -183,8 +182,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -375,8 +373,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
if (!(swfw_sync & (fwmask | swmask)))
break;
- /*
- * Firmware currently using resource (fwmask)
+ /* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000e_put_hw_semaphore(hw);
@@ -442,8 +439,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
- /*
- * Use Alternative Page Select register to access
+ /* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -457,8 +453,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
+ /* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@@ -513,8 +508,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
- /*
- * Use Alternative Page Select register to access
+ /* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -528,8 +522,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
+ /* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@@ -618,8 +611,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -657,8 +649,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
return ret_val;
if (!link) {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1000e_phy_reset_dsp(hw);
@@ -677,8 +668,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Resetting the phy means we need to verify the TX_CLK corresponds
+ /* Resetting the phy means we need to verify the TX_CLK corresponds
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
*/
phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -687,8 +677,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
else
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -766,8 +755,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
s32 ret_val;
u16 kum_reg_data;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -899,8 +887,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -945,8 +932,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
reg |= (1 << 28);
ew32(TARC(1), reg);
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
reg = er32(RFCTL);
@@ -979,8 +965,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -1006,8 +991,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -1065,8 +1049,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Do not init these registers when the HW is in IAMT mode, since the
+ /* Do not init these registers when the HW is in IAMT mode, since the
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
@@ -1087,8 +1070,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Workaround: Disable padding in Kumeran interface in the MAC
+ /* Workaround: Disable padding in Kumeran interface in the MAC
* and in the PHY to avoid CRC errors.
*/
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1121,8 +1103,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
- /*
- * Set the mac to wait the maximum time between each
+ /* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
@@ -1352,8 +1333,7 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
+ /* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c98586408005..c77d010d5c59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 82571EB Gigabit Ethernet Controller
+/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
* 82571EB Gigabit Ethernet Controller (Fiber)
* 82571EB Dual Port Gigabit Mezzanine Adapter
@@ -191,8 +190,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
- /*
- * Autonomous Flash update bit must be cleared due
+ /* Autonomous Flash update bit must be cleared due
* to Flash update issue.
*/
eecd &= ~E1000_EECD_AUPDEN;
@@ -204,8 +202,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -291,8 +288,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
/* FWSM register */
mac->has_fwsm = true;
- /*
- * ARC supported; valid only if manageability features are
+ /* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid = !!(er32(FWSM) &
@@ -314,8 +310,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
break;
}
- /*
- * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
* first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
@@ -352,11 +347,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
}
- /*
- * Initialize device specific counter of SMBI acquisition
- * timeouts.
- */
- hw->dev_spec.e82571.smb_counter = 0;
+ /* Initialize device specific counter of SMBI acquisition timeouts. */
+ hw->dev_spec.e82571.smb_counter = 0;
return 0;
}
@@ -445,8 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * The 82571 firmware may still be configuring the PHY.
+ /* The 82571 firmware may still be configuring the PHY.
* In this case, we cannot access the PHY until the
* configuration is done. So we explicitly set the
* PHY ID.
@@ -492,8 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
- /*
- * If we have timedout 3 times on trying to acquire
+ /* If we have timed out 3 times on trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
@@ -787,8 +777,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * If our nvm is an EEPROM, then we're done
+ /* If our nvm is an EEPROM, then we're done;
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
@@ -806,8 +795,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
/* Reset the firmware if using STM opcode. */
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
- /*
- * The enabling of and the actual reset must be done
+ /* The enabling of and the actual reset must be done
* in two write cycles.
*/
ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -867,8 +855,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u32 i, eewr = 0;
s32 ret_val = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -957,8 +944,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -1002,8 +988,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -1021,8 +1006,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
usleep_range(10000, 20000);
- /*
- * Must acquire the MDIO ownership before MAC reset.
+ /* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
switch (hw->mac.type) {
@@ -1067,8 +1051,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* We don't want to continue accessing MAC registers. */
return ret_val;
- /*
- * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
@@ -1076,8 +1059,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * REQ and GNT bits need to be cleared when using AUTO_RD
+ /* REQ and GNT bits need to be cleared when using AUTO_RD
* to access the EEPROM.
*/
eecd = er32(EECD);
@@ -1138,8 +1120,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
e_dbg("Initializing the IEEE VLAN\n");
mac->ops.clear_vfta(hw);
- /* Setup the receive address. */
- /*
+ /* Setup the receive address.
* If, however, a locally administered address was assigned to the
* 82571, we must reserve a RAR for it to work around an issue where
* resetting one port will reload the MAC on the other port.
@@ -1183,8 +1164,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
}
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -1281,8 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
ew32(PBA_ECC, reg);
}
- /*
- * Workaround for hardware errata.
+ /* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
@@ -1291,8 +1270,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
ew32(CTRL_EXT, reg);
}
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
if (hw->mac.type <= e1000_82573) {
@@ -1309,8 +1287,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg |= (1 << 22);
ew32(GCR, reg);
- /*
- * Workaround for hardware errata.
+ /* Workaround for hardware errata documented in the errata docs:
 * fixes an issue where some error-prone or unreliable PCIe
 * completions occur, particularly with ASPM enabled.
@@ -1344,8 +1321,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
if (hw->mng_cookie.vlan_id != 0) {
- /*
- * The VFTA is a 4096b bit-field, each identifying
+ /* The VFTA is a 4096b bit-field, each identifying
* a single VLAN ID. The following operations
* determine which 32b entry (i.e. offset) into the
* array we want to set the VLAN ID (i.e. bit) of
@@ -1362,8 +1338,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
break;
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /*
- * If the offset we want to clear is the same offset of the
+ /* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
@@ -1401,8 +1376,7 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
ctrl = hw->mac.ledctl_mode2;
if (!(E1000_STATUS_LU & er32(STATUS))) {
- /*
- * If no link, then turn LED on by setting the invert bit
+ /* If no link, then turn LED on by setting the invert bit
* for each LED that's "on" (0x0E) in ledctl_mode2.
*/
for (i = 0; i < 4; i++)
@@ -1427,8 +1401,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
u16 receive_errors = 0;
s32 ret_val = 0;
- /*
- * Read PHY Receive Error counter first, if its is max - all F's then
+ /* Read PHY Receive Error counter first, if it is max - all F's then
 * read the Base1000T status register. If both are max then PHY is hung.
*/
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
@@ -1458,8 +1431,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
**/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
- /*
- * 82573 does not have a word in the NVM to determine
+ /* 82573 does not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
@@ -1526,8 +1498,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * If SerDes loopback mode is entered, there is no form
+ /* If SerDes loopback mode is entered, there is no form
* of reset to take the adapter out of that mode. So we
* have to explicitly take the adapter out of loopback
* mode. This prevents drivers from twiddling their thumbs
@@ -1584,8 +1555,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
if (!(status & E1000_STATUS_LU)) {
- /*
- * We have lost link, retry autoneg before
+ /* We have lost link, retry autoneg before
* reporting link failure
*/
mac->serdes_link_state =
@@ -1598,8 +1568,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
break;
case e1000_serdes_link_forced_up:
- /*
- * If we are receiving /C/ ordered sets, re-enable
+ /* If we are receiving /C/ ordered sets, re-enable
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
@@ -1619,8 +1588,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_autoneg_progress:
if (rxcw & E1000_RXCW_C) {
- /*
- * We received /C/ ordered sets, meaning the
+ /* We received /C/ ordered sets, meaning the
* link partner has autonegotiated, and we can
* trust the Link Up (LU) status bit.
*/
@@ -1636,8 +1604,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("AN_PROG -> DOWN\n");
}
} else {
- /*
- * The link partner did not autoneg.
+ /* The link partner did not autoneg.
* Force link up and full duplex, and change
* state to forced.
*/
@@ -1660,8 +1627,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_down:
default:
- /*
- * The link was down but the receiver has now gained
+ /* The link was down but the receiver has now gained
 * valid sync, so let's see if we can bring the link
* up.
*/
@@ -1679,8 +1645,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
mac->serdes_link_state = e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
} else {
- /*
- * Check several times, if SYNCH bit and CONFIG
+ /* Check several times, if SYNCH bit and CONFIG
* bit both are consistently 1 then simply ignore
* the IV bit and restart Autoneg
*/
@@ -1780,8 +1745,7 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
/* If workaround is activated... */
if (state)
- /*
- * Hold a copy of the LAA in RAR[14] This is done so that
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed, the actual LAA is in one of the RARs and no
* incoming packets directed to this port are dropped.
@@ -1810,8 +1774,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (nvm->type != e1000_nvm_flash_hw)
return 0;
- /*
- * Check bit 4 of word 10h. If it is 0, firmware is done updating
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
* 10h-12h. Checksum may need to be fixed.
*/
ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1819,8 +1782,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
return ret_val;
if (!(data & 0x10)) {
- /*
- * Read 0x23 and check bit 15. This bit is a 1
+ /* Read 0x23 and check bit 15. This bit is a 1
* when the checksum has already been fixed. If
* the checksum is still wrong and this bit is a
* 1, we need to return bad checksum. Otherwise,
@@ -1852,8 +1814,7 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
if (hw->mac.type == e1000_82571) {
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
+ /* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 76edbc1be33b..4dab6fc265a2 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -185,8 +185,7 @@
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-/*
- * Use byte values for the following shift parameters
+/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
@@ -233,6 +232,7 @@
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -242,8 +242,7 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/*
- * Bit definitions for the Management Data IO (MDIO) and Management Data
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
@@ -391,6 +390,12 @@
#define E1000_PBS_16K E1000_PBA_16K
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
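
The E1000_PBECCSTS_* masks added just above carry two 8-bit counters in one register: correctable errors in the low byte and uncorrectable errors in the byte above it. A minimal sketch of how the new adapter counters could be folded up from that register (the helper name is made up for illustration; er32() and the adapter fields are the ones introduced elsewhere in this patch):

static void count_pb_ecc_errors(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pbeccsts = er32(PBECCSTS);

	/* low byte: correctable error count */
	adapter->corr_errors +=
		pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;

	/* next byte up: uncorrectable error count */
	adapter->uncorr_errors +=
		(pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
}

The E1000_ICR_ECCER/E1000_IMS_ECCER bits added in the next hunks are what would let the interrupt handler know it is time to do such a read.
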
@@ -410,6 +415,7 @@
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -424,8 +430,7 @@
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
-/*
- * This defines the bits that are set in the Interrupt Mask
+/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
@@ -446,6 +451,7 @@
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
@@ -475,8 +481,7 @@
/* 802.1q VLAN Packet Size */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-/* Receive Address */
-/*
+/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
@@ -723,8 +728,7 @@
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
-/* Bit definitions for valid PHY IDs. */
-/*
+/* Bit definitions for valid PHY IDs.
* I = Integrated
* E = External
*/
@@ -762,8 +766,7 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/*
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
* 0=Normal 10BASE-T Rx Threshold
*/
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
@@ -779,14 +782,12 @@
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -808,8 +809,7 @@
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
((reg) & MAX_PHY_REG_ADDRESS))
-/*
- * Bits...
+/* Bits...
* 15-5: page
* 4-0: register offset
*/
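
As a quick worked example of the encoding described above: PHY_REG(2, 25) yields (2 << 5) | 25 = 0x59, i.e. page 2 in bits 15-5 and register offset 25 in bits 4-0, which a paged access routine can later split back apart using PHY_PAGE_SHIFT and MAX_PHY_REG_ADDRESS.
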
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 04668b47a1df..7e95f221d60b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,8 +161,7 @@ struct e1000_info;
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
-/*
- * Count for polling __E1000_RESET condition every 10-20msec.
+/* Count for polling __E1000_RESET condition every 10-20msec.
* Experimentation has shown the reset can take approximately 210msec.
*/
#define E1000_CHECK_RESET_COUNT 25
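
(As a sanity check on that constant: 25 polls at the 10-20 ms cadence works out to roughly 250-500 ms of total wait, which comfortably covers the ~210 ms reset time mentioned above.)
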
@@ -172,8 +171,7 @@ struct e1000_info;
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
-/*
- * in the case of WTHRESH, it appears at least the 82571/2 hardware
+/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
* WTHRESH=4, so a setting of 5 gives the most efficient bus
* utilization but to avoid possible Tx stalls, set it to 1
@@ -214,8 +212,7 @@ struct e1000_ps_page {
u64 dma; /* must be u64 - written to hw */
};
-/*
- * wrappers around a pointer to a socket buffer,
+/* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
@@ -305,15 +302,15 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
- /*
- * Tx
- */
+ /* Tx */
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;
+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;
@@ -340,9 +337,7 @@ struct e1000_adapter {
u32 tx_fifo_size;
u32 tx_dma_failed;
- /*
- * Rx
- */
+ /* Rx */
bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
int work_to_do) ____cacheline_aligned_in_smp;
void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c11ac2756667..fd4772a2691c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
};
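
Since e1000_gstrings_stats drives both the ethtool string table and the statistics copy-out, adding the two entries above should be all that is needed for the new ECC counters to appear in an ethtool -S dump alongside rx_dma_failed and tx_dma_failed.
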
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -214,7 +216,8 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -263,8 +266,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * When SoL/IDER sessions are active, autoneg/speed/duplex
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
@@ -273,8 +275,7 @@ static int e1000_set_settings(struct net_device *netdev,
return -EINVAL;
}
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -316,8 +317,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (ecmd->eth_tp_mdix_ctrl) {
- /*
- * fix up the value for auto (3 => 0) as zero is mapped
+ /* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -454,8 +454,8 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
/* ethtool doesn't use anything past this point, so all this
- * code is likely legacy junk for apps that may or may not
- * exist */
+ * code is likely legacy junk for apps that may or may not exist
+ */
if (hw->phy.type == e1000_phy_m88) {
e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (u32)phy_data; /* cable length */
@@ -598,8 +598,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (ret_val)
goto out;
- /*
- * Update the checksum over the first part of the EEPROM if needed
+ /* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for applicable controllers
*/
if ((first_word <= NVM_CHECKSUM_REG) ||
@@ -623,8 +622,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
- /*
- * EEPROM image version # is reported as firmware version # for
+ /* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -708,8 +706,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
e1000e_down(adapter);
- /*
- * We can't just free everything and then setup again, because the
+ /* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
* structs. First, attempt to allocate new resources...
*/
@@ -813,8 +810,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 mask;
u32 wlock_mac = 0;
- /*
- * The status register is Read Only, so a write should fail.
+ /* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
switch (mac->type) {
@@ -996,8 +992,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
- /*
- * Disable the interrupt to be reported in
+ /* Disable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was posted to the bus, the
@@ -1015,8 +1010,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
}
- /*
- * Enable the interrupt to be reported in
+ /* Enable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was not posted to the bus, the
@@ -1034,8 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
- /*
- * Disable the other interrupts to be reported in
+ /* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
* an interrupt was posted to the bus, the
@@ -1378,8 +1371,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
hw->phy.type == e1000_phy_m88) {
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else {
- /*
- * Set the ILOS bit on the fiber Nic if half duplex link is
+ /* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
if ((er32(STATUS) & E1000_STATUS_FD) == 0)
@@ -1388,8 +1380,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
- /*
- * Disable the receiver on the PHY so when a cable is plugged in, the
+ /* Disable the receiver on the PHY so that the PHY does not begin to
 * autoneg when a cable is reconnected to the NIC.
*/
if (hw->phy.type == e1000_phy_m88)
@@ -1408,8 +1399,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special requirements for 82571/82572 fiber adapters */
- /*
- * jump through hoops to make sure link is up because serdes
+ /* jump through hoops to make sure link is up because serdes
* link is hardwired up
*/
ctrl |= E1000_CTRL_SLU;
@@ -1429,8 +1419,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl);
}
- /*
- * special write to serdes control register to enable SerDes analog
+ /* special write to serdes control register to enable SerDes analog
* loopback
*/
#define E1000_SERDES_LB_ON 0x410
@@ -1448,8 +1437,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
u32 ctrlext = er32(CTRL_EXT);
u32 ctrl = er32(CTRL);
- /*
- * save CTRL_EXT to restore later, reuse an empty variable (unused
+ /* save CTRL_EXT to restore later, reuse an empty variable (unused
* on mac_type 80003es2lan)
*/
adapter->tx_fifo_head = ctrlext;
@@ -1585,8 +1573,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ew32(RDT(0), rx_ring->count - 1);
- /*
- * Calculate the loop count based on the largest descriptor ring
+ /* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
@@ -1627,8 +1614,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
l++;
if (l == rx_ring->count)
l = 0;
- /*
- * time + 20 msecs (200 msecs on 2.4) is more than
+ /* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
@@ -1649,10 +1635,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * PHY loopback cannot be performed if SoL/IDER
- * sessions are active
- */
+ /* PHY loopback cannot be performed if SoL/IDER sessions are active */
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
@@ -1686,8 +1669,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
int i = 0;
hw->mac.serdes_has_link = false;
- /*
- * On some blade server designs, link establishment
+ /* On some blade server designs, link establishment
* could take as long as 2-3 minutes
*/
do {
@@ -1701,8 +1683,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(hw);
if (hw->mac.autoneg)
- /*
- * On some Phy/switch combinations, link establishment
+ /* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
msleep(5000);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index d37bfd96c987..b88676ff3d86 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -77,6 +77,7 @@ enum e1e_registers {
#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
+ E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
@@ -85,8 +86,7 @@ enum e1e_registers {
E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/*
- * Convenience macros
+/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
@@ -800,8 +800,7 @@ struct e1000_mac_operations {
s32 (*read_mac_addr)(struct e1000_hw *);
};
-/*
- * When to use various PHY register access functions:
+/* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e3a7b07df629..24d9f61956f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 82562G 10/100 Network Connection
+/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
* 82562GT 10/100 Network Connection
* 82562GT-2 10/100 Network Connection
@@ -354,8 +353,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
return true;
}
- /*
- * In case the PHY needs to be in mdio slow mode,
+ /* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
hw->phy.ops.release(hw);
@@ -386,8 +384,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
@@ -396,8 +393,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (e1000_phy_is_accessible_pchlan(hw))
break;
- /*
- * Before toggling LANPHYPC, see if PHY is accessible by
+ /* Before toggling LANPHYPC, see if PHY is accessible by
* forcing MAC to SMBus mode first.
*/
mac_reg = er32(CTRL_EXT);
@@ -406,8 +402,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
- /*
- * Gate automatic PHY configuration by hardware on
+ /* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
if ((hw->mac.type == e1000_pch2lan) &&
@@ -474,8 +469,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
- /*
- * Reset the PHY before any access to it. Doing so, ensures
+ /* Reset the PHY before any access to it. Doing so ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
@@ -536,8 +530,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
- /*
- * In case the PHY needs to be in mdio slow mode,
+ /* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -593,8 +586,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
- /*
- * We may need to do this twice - once for IGP and if that fails,
+ /* We may need to do this twice - once for IGP and if that fails,
* we'll set BM func pointers and try again
*/
ret_val = e1000e_determine_phy_address(hw);
@@ -679,8 +671,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
gfpreg = er32flash(ICH_FLASH_GFPREG);
- /*
- * sector_X_addr is a "sector"-aligned address (4096 bytes)
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
* Add 1 to sector_end_addr since this sector is included in
* the overall size.
*/
@@ -690,8 +681,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* flash_base_addr is byte-aligned */
nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
- /*
- * find total size of the NVM, then cut in half since the total
+ /* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
@@ -788,8 +778,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /*
- * Gate automatic PHY configuration by hardware on managed
+ /* Gate automatic PHY configuration by hardware on managed
* 82579 and i217
*/
if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
@@ -840,8 +829,7 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
goto release;
e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
- /*
- * EEE is not supported in 100Half, so ignore partner's EEE
+ /* EEE is not supported in 100Half, so ignore partner's EEE
* in 100 ability if full-duplex is not advertised.
*/
e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
@@ -869,8 +857,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
bool link;
u16 phy_reg;
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@@ -878,8 +865,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (!mac->get_link_status)
return 0;
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@@ -914,8 +900,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Workaround for PCHx parts in half-duplex:
+ /* Workaround for PCHx parts in half-duplex:
* Set the number of preambles removed from the packet
* when it is passed from the PHY to the MAC to prevent
* the MAC from misinterpreting the packet type.
@@ -932,8 +917,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000e_check_downshift(hw);
@@ -943,22 +927,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
return -E1000_ERR_CONFIG;
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@@ -1000,8 +981,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- /*
- * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+ /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
* on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
*/
if ((adapter->hw.phy.type == e1000_phy_ife) ||
@@ -1191,8 +1171,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] |
@@ -1256,8 +1235,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
u32 rar_low, rar_high;
u32 wlock_mac;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -1277,8 +1255,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
return;
}
- /*
- * The manageability engine (ME) can lock certain SHRAR registers that
+ /* The manageability engine (ME) can lock certain SHRAR registers that
* it is using - those registers are unavailable for use.
*/
if (index < hw->mac.rar_entry_count) {
@@ -1387,8 +1364,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
- /*
- * Initialize the PHY from the NVM on ICH platforms. This
+ /* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
* not properly autoloaded after power transitions.
* Therefore, after each PHY reset, we will load the
@@ -1422,8 +1398,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
if (!(data & sw_cfg_mask))
goto release;
- /*
- * Make sure HW does not configure LCD from PHY
+ /* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL);
@@ -1443,8 +1418,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pchlan) &&
!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
(hw->mac.type > e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
+ /* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
@@ -1748,8 +1722,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
}
if (hw->phy.type == e1000_phy_82578) {
- /*
- * Return registers to default by doing a soft reset then
+ /* Return registers to default by doing a soft reset then
* writing 0x3140 to the control register.
*/
if (hw->phy.revision < 2) {
@@ -1769,8 +1742,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Configure the K1 Si workaround during phy reset assuming there is
+ /* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
@@ -1853,8 +1825,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /*
- * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
* SHRAL/H) and initial CRC values to the MAC
*/
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
@@ -2131,8 +2102,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
udelay(100);
} while ((!data) && --loop);
- /*
- * If basic configuration is incomplete before the above loop
+ /* If basic configuration is incomplete before the above loop
* count reaches 0, loading the configuration from NVM will
* leave the PHY in a bad state possibly resulting in no link.
*/
@@ -2299,8 +2269,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * Call gig speed drop workaround on LPLU before accessing
+ /* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -2319,8 +2288,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -2382,8 +2350,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -2420,8 +2387,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * Call gig speed drop workaround on LPLU before accessing
+ /* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -2589,8 +2555,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
- /*
- * Either we should have a hardware SPI cycle in progress
+ /* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
* FDONE bit should be changed in the hardware so that it
* is 1 after hardware reset, which can then be used as an
@@ -2599,8 +2564,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
*/
if (!hsfsts.hsf_status.flcinprog) {
- /*
- * There is no cycle running at present,
+ /* There is no cycle running at present,
* so we can start a cycle.
* Begin by setting Flash Cycle Done.
*/
@@ -2610,8 +2574,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
} else {
s32 i;
- /*
- * Otherwise poll for sometime so the current
+ /* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
*/
for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
@@ -2623,8 +2586,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
udelay(1);
}
if (!ret_val) {
- /*
- * Successful in waiting for previous cycle to timeout,
+ /* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
@@ -2753,8 +2715,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_READ_COMMAND_TIMEOUT);
- /*
- * Check if FCERR is set to 1, if set to 1, clear it
+ /* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
* read in (shift in) the Flash Data0, the order is
* least significant byte first msb to lsb
@@ -2767,8 +2728,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
*data = (u16)(flash_data & 0x0000FFFF);
break;
} else {
- /*
- * If we've gotten here, then things are probably
+ /* If we've gotten here, then things are probably
* completely hosed, but if the error condition is
* detected, it won't hurt to give it another try...
* ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -2849,8 +2809,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
nvm->ops.acquire(hw);
- /*
- * We're writing to the opposite bank so if we're on bank 1,
+ /* We're writing to the opposite bank so if we're on bank 1,
* write to bank 0 etc. We also need to erase the segment that
* is going to be written
*/
@@ -2875,8 +2834,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
- /*
- * Determine whether to write the value stored
+ /* Determine whether to write the value stored
* in the other NVM bank or a modified value stored
* in the shadow RAM
*/
@@ -2890,8 +2848,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * If the word is 0x13, then make sure the signature bits
+ /* If the word is 0x13, then make sure the signature bits
* (15:14) are 11b until the commit has completed.
* This will allow us to write 10b which indicates the
* signature is valid. We want to do this after the write
@@ -2920,8 +2877,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * Don't bother writing the segment valid bits if sector
+ /* Don't bother writing the segment valid bits if sector
* programming failed.
*/
if (ret_val) {
@@ -2930,8 +2886,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
goto release;
}
- /*
- * Finally validate the new segment by setting bit 15:14
+ /* Finally validate the new segment by setting bit 15:14
* to 10b in word 0x13 , this can be done without an
* erase as well since these bits are 11 to start with
* and we need to change bit 14 to 0b
@@ -2948,8 +2903,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /*
- * And invalidate the previously valid segment by setting
+ /* And invalidate the previously valid segment by setting
* its signature word (0x13) high_byte to 0b. This can be
* done without an erase because flash erase sets all bits
* to 1's. We can write 1's to 0's without an erase
@@ -2968,8 +2922,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
release:
nvm->ops.release(hw);
- /*
- * Reload the EEPROM, or else modifications will not appear
+ /* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
if (!ret_val) {
@@ -2997,8 +2950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- /*
- * Read 0x19 and check bit 6. If this bit is 0, the checksum
+ /* Read 0x19 and check bit 6. If this bit is 0, the checksum
* needs to be fixed. This bit is an indication that the NVM
* was prepared by OEM software and did not calculate the
* checksum...a likely scenario.
@@ -3048,8 +3000,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
pr0.range.wpe = true;
ew32flash(ICH_FLASH_PR0, pr0.regval);
- /*
- * Lock down a subset of GbE Flash Control Registers, e.g.
+ /* Lock down a subset of GbE Flash Control Registers, e.g.
* PR0 to prevent the write-protection from being lifted.
* Once FLOCKDN is set, the registers protected by it cannot
* be written until FLOCKDN is cleared by a hardware reset.
@@ -3109,8 +3060,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FDATA0, flash_data);
- /*
- * check if FCERR is set to 1 , if set to 1, clear it
+ /* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done
*/
ret_val = e1000_flash_cycle_ich8lan(hw,
@@ -3118,8 +3068,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (!ret_val)
break;
- /*
- * If we're here, then things are most likely
+ /* If we're here, then things are most likely
* completely hosed, but if the error condition
* is detected, it won't hurt to give it another
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -3207,8 +3156,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
- /*
- * Determine HW Sector size: Read BERASE bits of hw flash status
+ /* Determine HW Sector size: Read BERASE bits of hw flash status
* register
* 00: The Hw sector is 256 bytes, hence we need to erase 16
* consecutive sectors. The start index for the nth Hw sector
@@ -3253,16 +3201,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (ret_val)
return ret_val;
- /*
- * Write a value 11 (block Erase) in Flash
+ /* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
- /*
- * Write the last 24 bits of an index within the
+ /* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
* Address.
*/
@@ -3274,8 +3220,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (!ret_val)
break;
- /*
- * Check if FCERR is set to 1. If 1,
+ /* Check if FCERR is set to 1. If 1,
* clear it and try the whole sequence
* a few more times else Done
*/
@@ -3403,8 +3348,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_get_bus_info_pcie(hw);
- /*
- * ICH devices are "PCI Express"-ish. They have
+ /* ICH devices are "PCI Express"-ish. They have
* a configuration space, but do not contain
* PCI Express Capability registers, so bus width
* must be hardcoded.
@@ -3429,8 +3373,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
u32 ctrl, reg;
s32 ret_val;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -3440,8 +3383,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
- /*
- * Disable the Transmit and Receive units. Then delay to allow
+ /* Disable the Transmit and Receive units. Then delay to allow
* any pending transactions to complete before we hit the MAC
* with the global reset.
*/
@@ -3474,15 +3416,13 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!hw->phy.ops.check_reset_block(hw)) {
- /*
- * Full-chip reset requires MAC and PHY reset at the same
+ /* Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
- /*
- * Gate automatic PHY configuration by hardware on
+ /* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
if ((hw->mac.type == e1000_pch2lan) &&
@@ -3516,8 +3456,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * For PCH, this write will make sure that any noise
+ /* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
* as a bad packet to the DMA engine.
*/
@@ -3569,8 +3508,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
- /*
- * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and
* the ME. Disable wakeup by clearing the host wakeup bit.
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
@@ -3600,8 +3538,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
ew32(TXDCTL(1), txdctl);
- /*
- * ICH8 has opposite polarity of no_snoop bits.
+ /* ICH8 has opposite polarity of no_snoop bits.
* By default, we should use snoop behavior.
*/
if (mac->type == e1000_ich8lan)
@@ -3614,8 +3551,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -3676,20 +3612,29 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(STATUS, reg);
}
- /*
- * work-around descriptor data corruption issue during nfs v2 udp
+ /* work-around descriptor data corruption issue during nfs v2 udp
* traffic, just disable the nfs filtering capability
*/
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
}
/**
@@ -3709,8 +3654,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw))
return 0;
- /*
- * ICH parts do not have a word in the NVM to determine
+ /* ICH parts do not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
@@ -3722,8 +3666,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
hw->fc.requested_mode = e1000_fc_full;
}
- /*
- * Save off the requested flow control mode for use later. Depending
+ /* Save off the requested flow control mode for use later. Depending
* on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
@@ -3771,8 +3714,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
- /*
- * Set the mac to wait the maximum time between each iteration
+ /* Set the mac to wait the maximum time between each iteration
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps.
*/
@@ -3892,8 +3834,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
if (!dev_spec->kmrn_lock_loss_workaround_enabled)
return 0;
- /*
- * Make sure link is up before proceeding. If not just return.
+ /* Make sure link is up before proceeding. If not just return.
* Attempting this while link is negotiating fouled up link
* stability
*/
@@ -3925,8 +3866,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, phy_ctrl);
- /*
- * Call gig speed drop workaround on Gig disable before accessing
+ /* Call gig speed drop workaround on Gig disable before accessing
* any PHY registers
*/
e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -3983,8 +3923,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, reg);
- /*
- * Call gig speed drop workaround on Gig disable before
+ /* Call gig speed drop workaround on Gig disable before
* accessing any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -4078,8 +4017,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
goto release;
e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
- /*
- * Disable LPLU if both link partners support 100BaseT
+ /* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
@@ -4091,8 +4029,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_LPLU);
}
- /*
- * For i217 Intel Rapid Start Technology support,
+ /* For i217 Intel Rapid Start Technology support,
* when the system is going into Sx and no manageability engine
* is present, the driver must configure proxy to reset only on
* power good. LPI (Low Power Idle) state must also reset only
@@ -4106,8 +4043,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
- /*
- * Set bit enable LPI (EEE) to reset only on
+ /* Set bit enable LPI (EEE) to reset only on
* power good.
*/
e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
@@ -4120,8 +4056,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
}
- /*
- * Enable MTA to reset for Intel Rapid Start Technology
+ /* Enable MTA to reset for Intel Rapid Start Technology
* Support
*/
e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
@@ -4175,8 +4110,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
return;
}
- /*
- * For i217 Intel Rapid Start Technology support when the system
+ /* For i217 Intel Rapid Start Technology support when the system
* is transitioning from Sx and no manageability engine is present
* configure SMBus to restore on reset, disable proxy, and enable
* the reset on MTA (Multicast table array).
@@ -4191,8 +4125,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
}
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- /*
- * Restore clear on SMB if no manageability engine
+ /* Restore clear on SMB if no manageability engine
* is present
*/
ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
@@ -4298,8 +4231,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
u16 data = (u16)hw->mac.ledctl_mode2;
u32 i, led;
- /*
- * If no link, then turn LED on by setting the invert bit
+ /* If no link, then turn LED on by setting the invert bit
* for each LED whose mode is "link_up" in ledctl_mode2.
*/
if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4329,8 +4261,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
u16 data = (u16)hw->mac.ledctl_mode1;
u32 i, led;
- /*
- * If no link, then turn LED off by clearing the invert bit
+ /* If no link, then turn LED off by clearing the invert bit
* for each LED whose mode is "link_up" in ledctl_mode1.
*/
if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4375,8 +4306,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
} else {
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
- /*
- * When auto config read does not complete, do not
+ /* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index a13439928488..54d9dafaf126 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -73,8 +73,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
struct e1000_bus_info *bus = &hw->bus;
u32 reg;
- /*
- * The status register reports the correct function number
+ /* The status register reports the correct function number
* for the device regardless of function swap state.
*/
reg = er32(STATUS);
@@ -210,8 +209,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
- /*
- * We have a valid alternate MAC address, and we want to treat it the
+ /* We have a valid alternate MAC address, and we want to treat it the
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
@@ -233,8 +231,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -246,8 +243,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
- /*
- * Some bridges will combine consecutive 32-bit writes into
+ /* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
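The byte-order handling described in the comment above is small enough to show in isolation. A minimal standalone sketch of the RAL/RAH packing, mirroring the statements in this hunk; the Address Valid bit position (bit 31 of the high word) is an assumption here:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RAH_AV  (1u << 31)     /* assumed "Address Valid" bit */

/* Pack a 6-byte MAC address, given in network (big endian) order, into
 * the two little-endian 32-bit register values the hardware expects,
 * and mark the entry valid if the address is non-zero. */
static void pack_rar(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
        *rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                    ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        if (*rar_low || *rar_high)
                *rar_high |= EXAMPLE_RAH_AV;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t lo, hi;

        pack_rar(mac, &lo, &hi);
        printf("RAL=0x%08x RAH=0x%08x\n", (unsigned)lo, (unsigned)hi);
        return 0;
}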
@@ -273,15 +269,13 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
- /*
- * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
- /*
- * The portion of the address that is used for the hash table
+ /* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
@@ -423,8 +417,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
s32 ret_val;
bool link;
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@@ -432,8 +425,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
if (!mac->get_link_status)
return 0;
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@@ -446,28 +438,24 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = false;
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000e_check_downshift(hw);
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
return -E1000_ERR_CONFIG;
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@@ -498,8 +486,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), the cable is plugged in (we have signal),
* and our link partner is not trying to auto-negotiate with us (we
* are receiving idles or data), we need to force link up. We also
@@ -530,8 +517,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -565,8 +551,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -595,8 +580,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -607,8 +591,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -665,8 +648,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
s32 ret_val;
u16 nvm_data;
- /*
- * Read and store word 0x0F of the EEPROM. This word contains bits
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
* disabling auto-negotiation, and the direction of the
@@ -705,15 +687,13 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
s32 ret_val;
- /*
- * In the case of the phy reset being blocked, we already have a link.
+ /* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return 0;
- /*
- * If requested flow control is set to default, set flow control
+ /* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == e1000_fc_default) {
@@ -722,8 +702,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Save off the requested flow control mode for use later. Depending
+ /* Save off the requested flow control mode for use later. Depending
* on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
@@ -735,8 +714,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Initialize the flow control address, type, and PAUSE timer
+ /* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
@@ -763,8 +741,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
u32 txcw;
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the device accordingly. If auto-negotiation is enabled, then
* software will have to set the "PAUSE" bits to the correct value in
* the Transmit Config Word Register (TXCW) and re-start auto-
@@ -786,8 +763,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is disabled
+ /* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of Rx Pause ONLY, we will
* advertise that we support both symmetric and asymmetric Rx
@@ -797,15 +773,13 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is disabled,
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
* by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@@ -835,8 +809,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
u32 i, status;
s32 ret_val;
- /*
- * If we have a signal (the cable is plugged in, or assumed true for
+ /* If we have a signal (the cable is plugged in, or assumed true for
* serdes media) then poll for a "Link-Up" indication in the Device
* Status Register. Time-out if a link isn't seen in 500 milliseconds
* (Auto-negotiation should complete in less than 500
@@ -851,8 +824,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
if (i == FIBER_LINK_UP_LIMIT) {
e_dbg("Never got a valid link from auto-neg!!!\n");
mac->autoneg_failed = true;
- /*
- * AutoNeg failed to achieve a link, so we'll call
+ /* AutoNeg failed to achieve a link, so we'll call
* mac->check_for_link. This routine will force the
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
@@ -894,8 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Since auto-negotiation is enabled, take the link out of reset (the
+ /* Since auto-negotiation is enabled, take the link out of reset (the
* link will be in reset, because we previously reset the chip). This
* will restart auto-negotiation. If auto-negotiation is successful
* then the link-up status bit will be set and the flow control enable
@@ -907,8 +878,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
e1e_flush();
usleep_range(1000, 2000);
- /*
- * For these adapters, the SW definable pin 1 is set when the optics
+ /* For these adapters, the SW definable pin 1 is set when the optics
* detect a signal. If we have a signal, then poll for a "Link-Up"
* indication.
*/
@@ -954,16 +924,14 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
u32 fcrtl = 0, fcrth = 0;
- /*
- * Set the flow control receive threshold registers. Normally,
+ /* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & e1000_fc_tx_pause) {
- /*
- * We need to set up the Receive Threshold high and low water
+ /* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
@@ -995,8 +963,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
ctrl = er32(CTRL);
- /*
- * Because we didn't get link via the internal auto-negotiation
+ /* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit an
* receive flow control.
@@ -1057,8 +1024,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
- /*
- * Check for the case where we have fiber media and auto-neg failed
+ /* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
@@ -1076,15 +1042,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Check for the case where we have copper media and auto-neg is
+ /* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
- /*
- * Read the MII Status Register and check to see if AutoNeg
+ /* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
@@ -1100,8 +1064,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
- /*
- * The AutoNeg process has completed, so we now need to
+ /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto_Negotiation Base
* Page Ability Register (Address 5) to determine how
@@ -1115,8 +1078,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Two bits in the Auto Negotiation Advertisement Register
+ /* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
@@ -1151,8 +1113,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
+ /* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
@@ -1166,8 +1127,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg("Flow Control = Rx PAUSE frames only.\n");
}
}
- /*
- * For receiving PAUSE frames ONLY.
+ /* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1181,8 +1141,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
- /*
- * For transmitting PAUSE frames ONLY.
+ /* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1196,16 +1155,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
- /*
- * Per the IEEE spec, at this point flow control
+ /* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
e_dbg("Flow Control = NONE.\n");
}
- /*
- * Now we need to do one last check... If we auto-
+ /* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
@@ -1218,8 +1175,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = e1000_fc_none;
- /*
- * Now we call a subroutine to actually force the MAC
+ /* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = e1000e_force_mac_fc(hw);
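The PAUSE/ASM_DIR tables spelled out in the comments of this function reduce to one small decision helper. A standalone sketch assuming the usual IEEE 802.3 Annex 28B interpretation, with illustrative names rather than the driver's NWAY_* bit definitions:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the PAUSE and ASM_DIR bits each side
 * advertised.  'want_rx_only' models the case where Rx-only pause was
 * requested and therefore had to be advertised as FULL, then narrowed
 * back down after negotiation.  No pause is used in half duplex. */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm,
                               bool want_rx_only, bool half_duplex)
{
        enum fc_mode mode = FC_NONE;

        if (loc_pause && lp_pause)
                mode = want_rx_only ? FC_RX_PAUSE : FC_FULL;
        else if (!loc_pause && loc_asm && lp_pause && lp_asm)
                mode = FC_TX_PAUSE;
        else if (loc_pause && loc_asm && !lp_pause && lp_asm)
                mode = FC_RX_PAUSE;

        if (half_duplex)
                mode = FC_NONE;

        return mode;
}

int main(void)
{
        printf("both pause    -> %d\n", resolve_fc(true, true, true, true, false, false));
        printf("tx pause only -> %d\n", resolve_fc(false, true, true, true, false, false));
        printf("rx pause only -> %d\n", resolve_fc(true, true, false, true, false, false));
        return 0;
}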
@@ -1520,8 +1476,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
- /*
- * set the blink bit for each LED that's "on" (0x0E)
+ /* set the blink bit for each LED that's "on" (0x0E)
* in ledctl_mode2
*/
ledctl_blink = hw->mac.ledctl_mode2;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index bacc950fc684..6dc47beb3adc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -143,8 +143,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
return hw->mac.tx_pkt_filtering;
}
- /*
- * If we can't read from the host interface for whatever
+ /* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
@@ -163,8 +162,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
- /*
- * If either the checksums or signature don't match, then
+ /* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
@@ -252,8 +250,7 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
/* Calculate length in DWORDs */
length >>= 2;
- /*
- * The device driver writes the relevant command block into the
+ /* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0b76d8..643c883dd795 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -146,9 +146,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-/*
+/**
* e1000_regdump - register printout routine
- */
+ * @hw: pointer to the HW structure
+ * @reginfo: pointer to the register info table
+ **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
int n = 0;
@@ -196,9 +198,10 @@ static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
}
}
-/*
+/**
* e1000e_dump - Print registers, Tx-ring and Rx-ring
- */
+ * @adapter: board private structure
+ **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -623,8 +626,7 @@ map_skb:
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -692,8 +694,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
goto no_buffers;
}
}
- /*
- * Refresh the desc even if buffer_addrs
+ /* Refresh the desc even if buffer_addrs
* didn't change because each write-back
* erases this info.
*/
@@ -726,8 +727,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -817,7 +817,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i);
@@ -891,8 +892,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
length = le16_to_cpu(rx_desc->wb.upper.length);
- /*
- * !EOP means multiple descriptors were used to store a single
+ /* !EOP means multiple descriptors were used to store a single
* packet, if that's the case we need to toss it. In fact, we
* need to toss every packet with the EOP bit clear and the
* next frame that _does_ have the EOP bit set, as it is by
@@ -933,8 +933,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
total_rx_bytes += length;
total_rx_packets++;
- /*
- * code added for copybreak, this should improve
+ /* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack
*/
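The copybreak idea referenced above is independent of the descriptor plumbing: for short frames it is cheaper to copy the payload into a small fresh buffer and recycle the large receive buffer for DMA than to pass the large buffer up the stack. A minimal userspace model of that decision (threshold and helper names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256                  /* illustrative threshold in bytes */

/* Return the buffer to hand up the stack; *recycled tells the caller
 * whether the original DMA buffer can be immediately reposted. */
static uint8_t *receive_frame(uint8_t *rx_buf, size_t length, int *recycled)
{
        if (length <= COPYBREAK) {
                uint8_t *small = malloc(length);

                if (small) {
                        memcpy(small, rx_buf, length);
                        *recycled = 1;         /* big buffer stays with the ring */
                        return small;
                }
        }
        *recycled = 0;                         /* hand the original buffer up */
        return rx_buf;
}

int main(void)
{
        uint8_t dma_buf[2048] = { 0xde, 0xad };
        int recycled;
        uint8_t *pkt = receive_frame(dma_buf, 60, &recycled);

        printf("recycled=%d first byte=%02x\n", recycled, pkt[0]);
        if (recycled)
                free(pkt);
        return 0;
}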
@@ -1032,15 +1031,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (!adapter->tx_hang_recheck &&
(adapter->flags2 & FLAG2_DMA_BURST)) {
- /*
- * May be block on write-back, flush and detect again
+ /* May be block on write-back, flush and detect again
* flush pending descriptor writebacks to memory
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
/* execute the writes immediately */
e1e_flush();
- /*
- * Due to rare timing issues, write to TIDV again to ensure
+ /* Due to rare timing issues, write to TIDV again to ensure
* the write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1169,8 +1166,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
}
if (adapter->detect_tx_hung) {
- /*
- * Detect a transmit hang in hardware, this serializes the
+ /* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i
*/
adapter->detect_tx_hung = false;
@@ -1270,14 +1266,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
{
- /*
- * this looks ugly, but it seems compiler issues make
+ /* this looks ugly, but it seems compiler issues make
* it more efficient than reusing j
*/
int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
- /*
- * page alloc/put takes too long and effects small
+ /* page alloc/put takes too long and affects small
* packet throughput, so unsplit small packets and
* save the alloc/put only valid in softirq (napi)
* context to call kmap_*
@@ -1288,8 +1282,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
ps_page = &buffer_info->ps_pages[0];
- /*
- * there is no documentation about how to call
+ /* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long
*/
@@ -1486,14 +1479,16 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
@@ -1502,7 +1497,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
@@ -1656,22 +1652,17 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
- /*
- * read ICR disables interrupts using IAM
- */
-
+ /* read ICR disables interrupts using IAM */
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
- /*
- * 80003ES2LAN workaround-- For packet buffer work-around on
+ /* 80003ES2LAN workaround-- For packet buffer work-around on
* link down event; disable receives here in the ISR and reset
* adapter in watchdog
*/
@@ -1687,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1713,31 +1721,27 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (!icr || test_bit(__E1000_DOWN, &adapter->state))
return IRQ_NONE; /* Not our interrupt */
- /*
- * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt
*/
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
- /*
- * Interrupt Auto-Mask...upon reading ICR,
+ /* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write
*/
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
- /*
- * 80003ES2LAN workaround--
+ /* 80003ES2LAN workaround--
* For packet buffer work-around on link down event;
* disable receives here in the ISR and
* reset adapter in watchdog
@@ -1754,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -2117,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
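The ECC additions in the two interrupt handlers above accumulate both error counters from a single PBECCSTS read. A standalone sketch of that mask-and-shift split, with the field layout (8-bit correctable count in the low byte, 8-bit uncorrectable count above it) assumed for illustration rather than taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed PBECCSTS layout for this sketch. */
#define CORR_ERR_CNT_MASK     0x000000FFu
#define UNCORR_ERR_CNT_MASK   0x0000FF00u
#define UNCORR_ERR_CNT_SHIFT  8

/* Add the per-read counts to running totals, as the ISRs above do
 * before scheduling a reset on any uncorrectable error. */
static void account_ecc(uint32_t pbeccsts, uint64_t *corr, uint64_t *uncorr)
{
        *corr   += pbeccsts & CORR_ERR_CNT_MASK;
        *uncorr += (pbeccsts & UNCORR_ERR_CNT_MASK) >> UNCORR_ERR_CNT_SHIFT;
}

int main(void)
{
        uint64_t corr = 0, uncorr = 0;

        account_ecc(0x00000305u, &corr, &uncorr);   /* 5 correctable, 3 uncorrectable */
        printf("corr=%llu uncorr=%llu\n",
               (unsigned long long)corr, (unsigned long long)uncorr);
        return 0;
}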
@@ -2469,8 +2492,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
set_itr_now:
if (new_itr != adapter->itr) {
- /*
- * this attempts to bias the interrupt rate towards Bulk
+ /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing
*/
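The "intermediate steps" mentioned in the comment above mean that when the computed interrupt rate goes up, only part of the difference is applied per pass, so the rate climbs gradually and stays biased toward the bulk (lower-rate) setting, while decreases take effect immediately. A minimal sketch of that stepping (rates illustrative):

#include <stdint.h>
#include <stdio.h>

/* Step the current interrupts-per-second target toward a newly computed
 * one: on an increase, add only a quarter of the new target per pass
 * (capped at the target); on a decrease, switch immediately. */
static uint32_t step_itr(uint32_t cur, uint32_t target)
{
        if (target > cur) {
                uint32_t stepped = cur + (target >> 2);

                return stepped < target ? stepped : target;
        }
        return target;
}

int main(void)
{
        uint32_t itr = 4000;                   /* e.g. a bulk-traffic rate */

        itr = step_itr(itr, 20000);            /* aiming at a low-latency rate */
        printf("next itr = %u ints/s\n", (unsigned)itr);
        return 0;
}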
@@ -2517,7 +2539,7 @@ void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
int size = sizeof(struct e1000_ring);
@@ -2740,8 +2762,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
manc = er32(MANC);
- /*
- * enable receiving management packets to the host. this will probably
+ /* enable receiving management packets to the host. this will probably
* generate destination unreachable messages from the host OS, but
* the packets will be handled on SMBUS
*/
@@ -2754,8 +2775,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
break;
case e1000_82574:
case e1000_82583:
- /*
- * Check if IPMI pass-through decision filter already exists;
+ /* Check if IPMI pass-through decision filter already exists;
* if so, enable it.
*/
for (i = 0, j = 0; i < 8; i++) {
@@ -2827,8 +2847,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
u32 txdctl = er32(TXDCTL(0));
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
- /*
- * set up some performance related parameters to encourage the
+ /* set up some performance related parameters to encourage the
* hardware to use the bus more efficiently in bursts, depends
* on the tx_int_delay to be enabled,
* wthresh = 1 ==> burst write is disabled to avoid Tx stalls
@@ -2845,8 +2864,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
- /*
- * set the speed mode bit, we'll clear it if we're not at
+ /* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
#define SPEED_MODE_BIT (1 << 21)
@@ -2967,8 +2985,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rfctl |= E1000_RFCTL_EXTEN;
ew32(RFCTL, rfctl);
- /*
- * 82571 and greater support packet-split where the protocol
+ /* 82571 and greater support packet-split where the protocol
* header is placed in skb->data and the packet data is
* placed in pages hanging off of skb_shinfo(skb)->nr_frags.
* In the case of a non-split, skb->data is linearly filled,
@@ -3016,7 +3033,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3071,8 +3089,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
usleep_range(10000, 20000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
- /*
- * set the writeback threshold (only takes effect if the RDTR
+ /* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
* enable prefetching of 0x20 Rx descriptors
* granularity = 01
@@ -3083,8 +3100,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
- /*
- * override the delay timers for enabling bursting, only if
+ /* override the delay timers for enabling bursting, only if
* the value was not set by the user via module options
*/
if (adapter->rx_int_delay == DEFAULT_RDTR)
@@ -3108,8 +3124,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
- /*
- * Setup the HW Rx Head and Tail Descriptor Pointers and
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
rdba = rx_ring->dma;
@@ -3130,8 +3145,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
- /*
- * With jumbo frames, excessive C-state transition
+ /* With jumbo frames, excessive C-state transition
* latencies result in dropped transactions.
*/
if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3216,8 +3230,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- /*
- * write the addresses in reverse order to avoid write
+ /* write the addresses in reverse order to avoid write
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
@@ -3269,8 +3282,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
} else {
- /*
- * Write addresses to the MTA, if the attempt fails
+ /* Write addresses to the MTA, if the attempt fails
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
@@ -3279,8 +3291,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- /*
- * Write addresses to available RAR registers, if there is not
+ /* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
@@ -3315,8 +3326,7 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
for (i = 0; i < 32; i++)
ew32(RETA(i), 0);
- /*
- * Disable raw packet checksumming so that RSS hash is placed in
+ /* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback.
*/
rxcsum = er32(RXCSUM);
@@ -3408,8 +3418,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
- /*
- * To maintain wire speed transmits, the Tx FIFO should be
+ /* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
@@ -3421,8 +3430,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the Tx fifo also stores 16 bytes of information about the Tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -3435,8 +3443,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
min_rx_space = ALIGN(min_rx_space, 1024);
min_rx_space >>= 10;
- /*
- * If current Tx allocation is less than the min Tx FIFO size,
+ /* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation
*/
@@ -3444,8 +3451,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
((min_tx_space - tx_space) < pba)) {
pba -= min_tx_space - tx_space;
- /*
- * if short on Rx space, Rx wins and must trump Tx
+ /* if short on Rx space, Rx wins and must trump Tx
* adjustment
*/
if (pba < min_rx_space)
@@ -3455,8 +3461,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
}
- /*
- * flow control settings
+ /* flow control settings
*
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
@@ -3490,8 +3495,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = fc->high_water - 8;
break;
case e1000_pchlan:
- /*
- * Workaround PCH LOM adapter hangs with certain network
+ /* Workaround PCH LOM adapter hangs with certain network
* loads. If hangs persist, try disabling Tx flow control.
*/
if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3516,8 +3520,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
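For the flow-control comment above, a minimal standalone sketch of one way to pick the water marks that satisfies the stated constraint: the high water mark sits at least one max-size frame (and below roughly 90% of the buffer) under the top of the Rx packet buffer, with 8-byte granularity and an 8-byte gap down to the low water mark. The 9/10 cap, the granularity, and the gap are assumptions loosely modelled on the driver's default case, not verbatim driver code.

#include <stdio.h>

int main(void)
{
	unsigned int rx_pba_bytes = 26 << 10;	/* example: 26 KB Rx allocation */
	unsigned int max_frame    = 1522;	/* VLAN-tagged 1500-byte MTU frame */
	unsigned int hwm, high, low;

	hwm = rx_pba_bytes - max_frame;		/* leave room for one full frame */
	if (hwm > rx_pba_bytes * 9 / 10)	/* and stay below ~90% of the buffer */
		hwm = rx_pba_bytes * 9 / 10;

	high = hwm & ~7u;			/* 8-byte granularity */
	low  = high - 8;			/* small hysteresis gap */

	printf("high=%u low=%u\n", high, low);
	return 0;
}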
- /*
- * Alignment of Tx data is on an arbitrary byte boundary with the
+ /* Alignment of Tx data is on an arbitrary byte boundary with the
* maximum size per Tx descriptor limited only to the transmit
* allocation of the packet buffer minus 96 bytes with an upper
* limit of 24KB due to receive synchronization limitations.
@@ -3525,8 +3528,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
24 << 10);
- /*
- * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
*/
if (adapter->itr_setting & 0x3) {
@@ -3549,8 +3551,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
- /*
- * For parts with AMT enabled, let the firmware know
+ /* For parts with AMT enabled, let the firmware know
* that the network interface is in control
*/
if (adapter->flags & FLAG_HAS_AMT)
@@ -3579,8 +3580,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
u16 phy_data = 0;
- /*
- * speed up time to link by disabling smart power down, ignore
+ /* speed up time to link by disabling smart power down, ignore
* the return value of this function because there is nothing
* different we would do if it failed
*/
@@ -3628,8 +3628,7 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
/* execute the writes immediately */
e1e_flush();
- /*
- * due to rare timing issues, write to TIDV/RDTR again to ensure the
+ /* due to rare timing issues, write to TIDV/RDTR again to ensure the
* write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -3647,8 +3646,7 @@ void e1000e_down(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 tctl, rctl;
- /*
- * signal that we're down so the interrupt handler does not
+ /* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer
*/
set_bit(__E1000_DOWN, &adapter->state);
@@ -3691,8 +3689,7 @@ void e1000e_down(struct e1000_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
- /*
- * TODO: for power management, we could drop the link and
+ /* TODO: for power management, we could drop the link and
* pci_disable_device here.
*/
}
@@ -3715,7 +3712,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+static int e1000_sw_init(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3755,8 +3752,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
e_dbg("icr is %08X\n", icr);
if (icr & E1000_ICR_RXSEQ) {
adapter->flags &= ~FLAG_MSI_TEST_FAILED;
- /*
- * Force memory writes to complete before acknowledging the
+ /* Force memory writes to complete before acknowledging the
* interrupt is handled.
*/
wmb();
@@ -3786,7 +3782,8 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
e1000e_reset_interrupt_capability(adapter);
/* Assume that the test fails, if it succeeds then the test
- * MSI irq handler will unset this flag */
+ * MSI irq handler will unset this flag
+ */
adapter->flags |= FLAG_MSI_TEST_FAILED;
err = pci_enable_msi(adapter->pdev);
@@ -3800,8 +3797,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
goto msi_test_failed;
}
- /*
- * Force memory writes to complete before enabling and firing an
+ /* Force memory writes to complete before enabling and firing an
* interrupt.
*/
wmb();
@@ -3901,8 +3897,7 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- /*
- * If AMT is enabled, let the firmware know that the network
+ /* If AMT is enabled, let the firmware know that the network
* interface is now open and reset the part to a known state.
*/
if (adapter->flags & FLAG_HAS_AMT) {
@@ -3923,8 +3918,7 @@ static int e1000_open(struct net_device *netdev)
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- /*
- * before we allocate an interrupt, we must be ready to handle it.
+ /* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
* clean_rx handler before we do so.
@@ -3935,8 +3929,7 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- /*
- * Work around PCIe errata with MSI interrupts causing some chipsets to
+ /* Work around PCIe errata with MSI interrupts causing some chipsets to
* ignore e1000e MSI messages, which means we need to test our MSI
* interrupt now
*/
@@ -4017,16 +4010,14 @@ static int e1000_close(struct net_device *netdev)
e1000e_free_tx_resources(adapter->tx_ring);
e1000e_free_rx_resources(adapter->rx_ring);
- /*
- * kill manageability vlan ID if supported, but not if a vlan with
+ /* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- /*
- * If AMT is enabled, let the firmware know that the network
+ /* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
if ((adapter->flags & FLAG_HAS_AMT) &&
@@ -4065,8 +4056,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
/* activate the work around */
e1000e_set_laa_state_82571(&adapter->hw, 1);
- /*
- * Hold a copy of the LAA in RAR[14] This is done so that
+ /* Hold a copy of the LAA in RAR[14] This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed (in e1000_watchdog), the actual LAA is in one
* of the RARs and no incoming packets directed to this port
@@ -4099,10 +4089,13 @@ static void e1000e_update_phy_task(struct work_struct *work)
e1000_get_phy_info(&adapter->hw);
}
-/*
+/**
+ * e1000_update_phy_info - timer call-back to update PHY info
+ * @data: pointer to adapter cast into an unsigned long
+ *
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
- */
+ **/
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -4129,8 +4122,7 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
if (ret_val)
return;
- /*
- * A page set is expensive so check if already on desired page.
+ /* A page set is expensive so check if already on desired page.
* If not, set to the page with the PHY status registers.
*/
hw->phy.addr = 1;
@@ -4201,8 +4193,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -4270,8 +4261,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
- /*
- * RLEC on some newer hardware can be incorrect so build
+ /* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -4297,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 pbeccsts = er32(PBECCSTS);
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
}
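A standalone sketch of the PBECCSTS decode added above, assuming from the mask/shift names that the low byte carries the correctable-error count and the next byte the uncorrectable count; the mask values here are illustrative placeholders, not taken from the datasheet.

#include <stdint.h>
#include <stdio.h>

#define PBECCSTS_CORR_ERR_CNT_MASK    0x000000FFu	/* assumed: low byte */
#define PBECCSTS_UNCORR_ERR_CNT_MASK  0x0000FF00u	/* assumed: next byte */
#define PBECCSTS_UNCORR_ERR_CNT_SHIFT 8

int main(void)
{
	uint32_t pbeccsts = 0x00000302u;	/* example register snapshot */
	uint32_t corr   = pbeccsts & PBECCSTS_CORR_ERR_CNT_MASK;
	uint32_t uncorr = (pbeccsts & PBECCSTS_UNCORR_ERR_CNT_MASK) >>
			  PBECCSTS_UNCORR_ERR_CNT_SHIFT;

	/* prints correctable=2 uncorrectable=3 for the sample value above */
	printf("correctable=%u uncorrectable=%u\n", corr, uncorr);
	return 0;
}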
/**
@@ -4323,8 +4323,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
if (ret_val)
e_warn("Error reading PHY register\n");
} else {
- /*
- * Do not read PHY registers if link is not up
+ /* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
*/
phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
@@ -4362,8 +4361,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
bool link_active = false;
s32 ret_val = 0;
- /*
- * get_link_status is set on LSC (link status) interrupt or
+ /* get_link_status is set on LSC (link status) interrupt or
* Rx sequence error interrupt. get_link_status will stay
* false until the check_for_link establishes link
* for copper adapters ONLY
@@ -4415,8 +4413,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * With 82574 controllers, PHY needs to be checked periodically
+ /* With 82574 controllers, PHY needs to be checked periodically
* for hung state and reset, if two calls return true
*/
if (e1000_check_phy_82574(hw))
@@ -4484,8 +4481,7 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
- /*
- * On supported PHYs, check for duplex mismatch only
+ /* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
@@ -4515,8 +4511,7 @@ static void e1000_watchdog_task(struct work_struct *work)
break;
}
- /*
- * workaround: re-program speed mode bit after
+ /* workaround: re-program speed mode bit after
* link-up event
*/
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
@@ -4527,8 +4522,7 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
- /*
- * disable TSO for pcie and 10/100 speeds, to avoid
+ /* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues
*/
if (!(adapter->flags & FLAG_TSO_FORCE)) {
@@ -4549,16 +4543,14 @@ static void e1000_watchdog_task(struct work_struct *work)
}
}
- /*
- * enable transmits in the hardware, need to do this
+ /* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
tctl = er32(TCTL);
tctl |= E1000_TCTL_EN;
ew32(TCTL, tctl);
- /*
- * Perform any post-link-up configuration before
+ /* Perform any post-link-up configuration before
* reporting link up.
*/
if (phy->ops.cfg_on_link_up)
@@ -4609,8 +4601,7 @@ link_up:
if (!netif_carrier_ok(netdev) &&
(e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /*
- * We've lost link, so the controller stops DMA,
+ /* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
@@ -4622,8 +4613,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
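A standalone sketch of the "simple mode" interpolation the comment describes; the helper name and scaling constants are assumptions chosen so a symmetric load maps to 2000 interrupts/s and a fully one-sided load to 8000, not the driver's verbatim arithmetic.

#include <stdio.h>

static unsigned int simple_mode_itr(unsigned long tx_bytes, unsigned long rx_bytes)
{
	unsigned long total = (tx_bytes + rx_bytes) / 10000;
	unsigned long diff  = (tx_bytes > rx_bytes ? tx_bytes - rx_bytes
						   : rx_bytes - tx_bytes) / 10000;

	/* diff/total is 0 for symmetric traffic, 1 for one-sided traffic */
	return total ? (unsigned int)(diff * 6000 / total + 2000) : 8000;
}

int main(void)
{
	printf("%u\n", simple_mode_itr(5000000, 5000000));	/* symmetric  -> 2000 */
	printf("%u\n", simple_mode_itr(9000000, 0));		/* one-sided  -> 8000 */
	return 0;
}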
@@ -4648,8 +4638,7 @@ link_up:
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = true;
- /*
- * With 82571 controllers, LAA may be overwritten due to controller
+ /* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
*/
if (e1000e_get_laa_state_82571(hw))
@@ -4948,8 +4937,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -4963,8 +4951,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
else
writel(i, tx_ring->tail);
- /*
- * we need this if more than one processor can write to our tail
+ /* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
@@ -5014,15 +5001,13 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
struct e1000_adapter *adapter = tx_ring->adapter;
netif_stop_queue(adapter->netdev);
- /*
- * Herbert's original patch had:
+ /* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it.
*/
smp_mb();
- /*
- * We need to check again in a case another CPU has just
+ /* We need to check again in a case another CPU has just
* made room available.
*/
if (e1000_desc_unused(tx_ring) < size)
@@ -5067,18 +5052,26 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* The minimum packet size with TCTL.PSP set is 17 bytes so
+ * pad skb in order to meet this minimum size requirement
+ */
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
+ }
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
u8 hdr_len;
- /*
- * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- /*
- * we do this workaround for ES2LAN, but it is un-necessary,
+ /* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
if (skb->data_len && (hdr_len == len)) {
@@ -5109,8 +5102,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
- /*
- * need: count + 2 desc gap to keep tail from touching
+ /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
if (e1000_maybe_stop_tx(tx_ring, count + 2))
@@ -5134,8 +5126,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
else if (e1000_tx_csum(tx_ring, skb))
tx_flags |= E1000_TX_FLAGS_CSUM;
- /*
- * Old method was to assume IPv4 packet by default if TSO was enabled.
+ /* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must.
*/
@@ -5222,8 +5213,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
/* Rx Errors */
- /*
- * RLEC on some newer hardware can be incorrect so build
+ /* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
@@ -5292,8 +5282,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
e1000e_down(adapter);
- /*
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
@@ -5555,8 +5544,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
e1000e_release_hw_control(adapter);
@@ -5583,8 +5571,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * The pci-e switch on some quad port adapters will report a
+ /* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
@@ -5613,8 +5600,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
- /*
- * Both device and parent should have the same ASPM setting.
+ /* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
@@ -5708,8 +5694,7 @@ static int __e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -5837,7 +5822,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
return IRQ_HANDLED;
}
-/*
+/**
+ * e1000_netpoll
+ * @netdev: network interface device structure
+ *
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
@@ -5962,8 +5950,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -6083,8 +6070,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
@@ -6262,14 +6248,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
- /*
- * before reading the NVM, reset the controller to
+ /* before reading the NVM, reset the controller to
* put the device in a known good starting state
*/
adapter->hw.mac.ops.reset_hw(&adapter->hw);
- /*
- * systems with ASPM and others may see the checksum fail on the first
+ /* systems with ASPM and others may see the checksum fail on the first
* attempt. Let's give it a few tries
*/
for (i = 0;; i++) {
@@ -6324,8 +6308,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->rx_ring->count = E1000_DEFAULT_RXD;
adapter->tx_ring->count = E1000_DEFAULT_TXD;
- /*
- * Initial Wake on LAN setting - If APM wake is enabled in
+ /* Initial Wake on LAN setting - If APM wake is enabled in
* the EEPROM, enable the ACPI Magic Packet filter
*/
if (adapter->flags & FLAG_APME_IN_WUC) {
@@ -6349,8 +6332,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
- /*
- * now that we have the eeprom settings, apply the special cases
+ /* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
* wake on lan on a particular port
*/
@@ -6367,8 +6349,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
e1000e_reset(adapter);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -6425,14 +6406,13 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit e1000_remove(struct pci_dev *pdev)
+static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
- /*
- * The timers may be rescheduled, so explicitly disable them
+ /* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
if (!down)
@@ -6457,8 +6437,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
e1000e_release_hw_control(adapter);
@@ -6578,7 +6557,7 @@ static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
- .remove = __devexit_p(e1000_remove),
+ .remove = e1000_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a969f1af1b4e..b6468804cb2e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -279,8 +279,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
e1e_flush();
udelay(1);
- /*
- * Read "Status Register" repeatedly until the LSB is cleared.
+ /* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
@@ -321,8 +320,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u32 i, eerd = 0;
s32 ret_val = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -364,8 +362,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 ret_val;
u16 widx = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -393,8 +390,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
e1000_standby_nvm(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
+ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
@@ -461,8 +457,7 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
return ret_val;
}
- /*
- * if nvm_data is not ptr guard the PBA must be in legacy format which
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
* means pba_ptr is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index dfbfa7fd98c3..89d536dd7ff5 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -32,11 +32,9 @@
#include "e1000.h"
-/*
- * This is the only thing that needs to be changed to adjust the
+/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
-
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
@@ -49,22 +47,19 @@ module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
-/*
- * All parameters are treated the same, as an integer array of values.
+/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
-
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int __devinitdata X[E1000_MAX_NIC+1] \
+ static int X[E1000_MAX_NIC+1] \
= E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
-/*
- * Transmit Interrupt Delay in units of 1.024 microseconds
+/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
@@ -74,8 +69,7 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
-/*
- * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@@ -84,8 +78,7 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
-/*
- * Receive Interrupt Delay in units of 1.024 microseconds
+/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
@@ -94,8 +87,7 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
-/*
- * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@@ -103,8 +95,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
-/*
- * Interrupt Throttle Rate (interrupts/sec)
+/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
*/
@@ -113,8 +104,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_ITR 100000
#define MIN_ITR 100
-/*
- * IntMode (Interrupt Mode)
+/* IntMode (Interrupt Mode)
*
* Valid Range: varies depending on kernel configuration & hardware support
*
@@ -132,8 +122,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
#define MIN_INTMODE 0
-/*
- * Enable Smart Power Down of the PHY
+/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
@@ -141,8 +130,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
-/*
- * Enable Kumeran Lock Loss workaround
+/* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*
@@ -150,8 +138,7 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-/*
- * Write Protect NVM
+/* Write Protect NVM
*
* Valid Range: 0, 1
*
@@ -159,8 +146,7 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*/
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
-/*
- * Enable CRC Stripping
+/* Enable CRC Stripping
*
* Valid Range: 0, 1
*
@@ -186,9 +172,9 @@ struct e1000_option {
} arg;
};
-static int __devinit e1000_validate_option(unsigned int *value,
- const struct e1000_option *opt,
- struct e1000_adapter *adapter)
+static int e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -249,7 +235,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-void __devinit e1000e_check_options(struct e1000_adapter *adapter)
+void e1000e_check_options(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int bd = adapter->bd_number;
@@ -351,8 +337,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
- /*
- * Make sure a message is printed for non-special
+ /* Make sure a message is printed for non-special
* values. And in case of an invalid option, display
* warning, use default and go through itr/itr_setting
* adjustment logic below
@@ -361,14 +346,12 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&adapter->itr, &opt, adapter))
adapter->itr = opt.def;
} else {
- /*
- * If no option specified, use default value and go
+ /* If no option specified, use default value and go
* through the logic below to adjust itr/itr_setting
*/
adapter->itr = opt.def;
- /*
- * Make sure a message is printed for non-special
+ /* Make sure a message is printed for non-special
* default values
*/
if (adapter->itr > 4)
@@ -400,8 +383,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
opt.name);
break;
default:
- /*
- * Save the setting, because the dynamic bits
+ /* Save the setting, because the dynamic bits
* change itr.
*
* Clear the lower two bits because
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fc62a3f3a5be..28b38ff37e84 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -193,8 +193,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -204,8 +203,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
ew32(MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
@@ -225,8 +223,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
}
*data = (u16) mdic;
- /*
- * Allow some time after each MDIC transaction to avoid
+ /* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
@@ -253,8 +250,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -265,8 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
ew32(MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
@@ -285,8 +280,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PHY;
}
- /*
- * Allow some time after each MDIC transaction to avoid
+ /* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
@@ -708,8 +702,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
- /*
- * Options:
+ /* Options:
* 0 - Auto (default)
* 1 - MDI mode
* 2 - MDI-X mode
@@ -754,8 +747,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (phy->type != e1000_phy_bm)
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -780,8 +772,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -818,8 +809,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if ((phy->type == e1000_phy_m88) &&
(phy->revision < E1000_REVISION_4) &&
(phy->id != BME1000_E_PHY_ID_R2)) {
- /*
- * Force TX_CLK in the Extended PHY Specific Control Register
+ /* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -899,8 +889,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
* timeout issues when LFS is enabled.
*/
msleep(100);
@@ -936,8 +925,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
- /*
- * when autonegotiation advertisement is only 1000Mbps then we
+ /* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default.
*/
@@ -1001,16 +989,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Need to parse both autoneg_advertised and fc and set up
+ /* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
- /*
- * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
@@ -1056,8 +1042,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1076,15 +1061,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
- /*
- * Flow control (Rx & Tx) is completely disabled by a
+ /* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled, and Tx Flow control is
+ /* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
@@ -1096,16 +1079,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
+ /* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1142,14 +1123,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- /*
- * Perform some bounds checking on the autoneg advertisement
+ /* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
- /*
- * If autoneg_advertised is zero, we assume it was not defaulted
+ /* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (!phy->autoneg_advertised)
@@ -1163,8 +1142,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
}
e_dbg("Restarting Auto-Neg\n");
- /*
- * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -1176,8 +1154,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Does the user want to wait for Auto-Neg to complete here, or
+ /* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
@@ -1208,16 +1185,14 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
bool link;
if (hw->mac.autoneg) {
- /*
- * Setup autoneg and flow control advertisement and perform
+ /* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
- /*
- * PHY will be set to 10H, 10F, 100H or 100F
+ /* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
e_dbg("Forcing Speed and Duplex\n");
@@ -1228,8 +1203,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
}
}
- /*
- * Check link status. Wait up to 100 microseconds for link to become
+ /* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
@@ -1273,8 +1247,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1328,8 +1301,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1370,8 +1342,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88) {
e_dbg("Link taking longer than expected.\n");
} else {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
@@ -1398,8 +1369,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Resetting the phy means we need to re-force TX_CLK in the
+ /* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
* the reset value of 2.5MHz.
*/
@@ -1408,8 +1378,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1573,8 +1542,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
if (ret_val)
return ret_val;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -1702,8 +1670,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
s32 ret_val;
u16 data, offset, mask;
- /*
- * Polarity is determined based on the speed of
+ /* Polarity is determined based on the speed of
* our connection.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1715,8 +1682,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
offset = IGP01E1000_PHY_PCS_INIT_REG;
mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
- /*
- * This really only applies to 10Mbps since
+ /* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1745,8 +1711,7 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, offset, mask;
- /*
- * Polarity is determined based on the reversal feature being enabled.
+ /* Polarity is determined based on the reversal feature being enabled.
*/
if (phy->polarity_correction) {
offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -1791,8 +1756,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
msleep(100);
}
- /*
- * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
@@ -1814,15 +1778,13 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /*
- * Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
- /*
- * If the first read fails, another entity may have
+ /* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
@@ -1913,8 +1875,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Getting bits 15:9, which represent the combination of
+ /* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
@@ -2285,15 +2246,13 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
e1e_wphy(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */
e1e_wphy(hw, 0x1798, 0xD008);
- /*
- * Change cg_icount + enable integbp + change prop_factor_master
+ /* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A
*/
e1e_wphy(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */
e1e_wphy(hw, 0x187A, 0x0800);
- /*
- * Enable LPLU and disable AN to 1000 in non-D0a states,
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B
*/
e1e_wphy(hw, 0x0019, 0x008D);
@@ -2417,8 +2376,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
e1000e_get_phy_id(hw);
phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
- /*
- * If phy_type is valid, break - we found our
+ /* If phy_type is valid, break - we found our
* PHY address
*/
if (phy_type != e1000_phy_unknown)
@@ -2478,8 +2436,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
- /*
- * Page select is register 31 for phy address 1 and 22 for
+ /* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
* phy address 1.
*/
@@ -2537,8 +2494,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
- /*
- * Page select is register 31 for phy address 1 and 22 for
+ /* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
* phy address 1.
*/
@@ -2683,8 +2639,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
return ret_val;
}
- /*
- * Enable both PHY wakeup mode and Wakeup register page writes.
+ /* Enable both PHY wakeup mode and Wakeup register page writes.
* Prevent a power state change by disabling ME and Host PHY wakeup.
*/
temp = *phy_reg;
@@ -2698,8 +2653,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
return ret_val;
}
- /*
- * Select Host Wakeup Registers page - caller now able to write
+ /* Select Host Wakeup Registers page - caller now able to write
* registers on the Wakeup registers page
*/
return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
@@ -3038,8 +2992,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page == HV_INTC_FC_PAGE_START)
page = 0;
- /*
- * Workaround MDIO accesses being disabled after entering IEEE
+ /* Workaround MDIO accesses being disabled after entering IEEE
* Power Down (when bit 11 of the PHY Control register is set)
*/
if ((hw->phy.type == e1000_phy_82578) &&
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197fd4a8e..624476cfa727 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+ e1000_i210.o igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e2f748..fdaaf2709d0a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
@@ -1027,6 +1028,15 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igb_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
} else {
ret_val = igb_check_for_copper_link(hw);
}
@@ -1277,12 +1287,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
+ u32 phpm_reg;
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit */
+ if (hw->mac.type >= e1000_82580) {
+ phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
ret_val = igb_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
@@ -1336,7 +1354,7 @@ out:
**/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 ctrl_ext, ctrl_reg, reg;
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
@@ -1424,27 +1442,45 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- /*
- * We force flow control to prevent the CTRL register values from being
- * overwritten by the autonegotiated flow control values
- */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = rd32(E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+ wr32(E1000_PCS_ANADV, anadv_reg);
+
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
wr32(E1000_PCS_LCTL, reg);
- if (!igb_sgmii_active_82575(hw))
+ if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
igb_force_mac_fc(hw);
return ret_val;
@@ -1918,6 +1954,12 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
hw->dev_spec._82575.global_device_reset = false;
+ /* due to hw errata, global device reset doesn't always
+ * work on 82580
+ */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
/* Get current control state. */
ctrl = rd32(E1000_CTRL);
@@ -2233,19 +2275,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
- E1000_IPCNFG_EEE_100M_AN);
- eeer |= (E1000_EEER_TX_LPI_EN |
- E1000_EEER_RX_LPI_EN |
+ u32 eee_su = rd32(E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
- /* keep the LPI clock running before EEE is enabled */
- if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
- u32 eee_su;
- eee_su = rd32(E1000_EEE_SU);
- eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
- wr32(E1000_EEE_SU, eee_su);
- }
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453f5428..44b76b3b6816 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41ec3c40..45dce06eff26 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -431,6 +431,10 @@
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
+/* Transmit Config Word */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
@@ -539,6 +543,9 @@
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
@@ -636,6 +643,7 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+#define NVM_VERSION 0x0005
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +661,19 @@
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
+/* NVM version defines */
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_BUILD_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
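A standalone decode of the packed firmware version word using the masks and shifts added above; that the fields live in NVM word 0x0005 (NVM_VERSION) with this layout is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define NVM_MAJOR_MASK  0xF000
#define NVM_MINOR_MASK  0x0FF0
#define NVM_BUILD_MASK  0x000F
#define NVM_MAJOR_SHIFT 12
#define NVM_MINOR_SHIFT 4

int main(void)
{
	uint16_t word = 0x1234;	/* hypothetical NVM_VERSION contents */

	printf("fw %u.%u.%u\n",
	       (word & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT,	/* 1 */
	       (word & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT,	/* 35 (0x23) */
	       word & NVM_BUILD_MASK);				/* 4 */
	return 0;
}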
@@ -860,6 +881,7 @@
#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f939bc74..fbcdbebb0b5f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -35,11 +35,42 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
+/**
+ * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 ret_val = E1000_SUCCESS;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(E1000_SWSM);
+ wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igb_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
/**
* igb_acquire_nvm_i210 - Request for access to EEPROM
@@ -68,6 +99,23 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
}
/**
+ * igb_put_hw_semaphore_i210 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ */
+static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(E1000_SWSM);
+
+ swsm &= ~E1000_SWSM_SWESMBI;
+
+ wr32(E1000_SWSM, swsm);
+}
+
+/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
@@ -138,60 +186,6 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
}
/**
- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- **/
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 ret_val = E1000_SUCCESS;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- udelay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- igb_put_hw_semaphore(hw);
- hw_dbg("Driver can't access the NVM\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * igb_put_hw_semaphore_i210 - Release hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Release hardware semaphore used to access the PHY or NVM
- **/
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
-
- swsm = rd32(E1000_SWSM);
-
- swsm &= ~E1000_SWSM_SWESMBI;
-
- wr32(E1000_SWSM, swsm);
-}
-
-/**
* igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
@@ -229,49 +223,6 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
- * @hw: pointer to the HW structure
- * @offset: offset within the Shadow RAM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow RAM
- *
- * Writes data to Shadow RAM at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function , the
- * data will not be committed to FLASH and also Shadow RAM will most likely
- * contain an invalid checksum.
- *
- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
- * partially written.
- **/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 i, count;
-
- /* We cannot hold synchronization semaphores for too long,
- * because of forceful takeover procedure. However it is more efficient
- * to write in bursts than synchronizing access for each word. */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = igb_write_nvm_srwr(hw, offset, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- return status;
-}
-
-/**
* igb_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
@@ -329,6 +280,50 @@ out:
}
/**
+ * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = igb_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
@@ -350,16 +345,40 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
- case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_INIT_CTRL_4:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_1_CFG:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
break;
- case NVM_COMPAT:
- *data = ID_LED_DEFAULT_I210;
- break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -423,6 +442,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
}
/**
+ * igb_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in the last location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
* igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
@@ -519,6 +632,28 @@ out:
}
/**
+ * igb_pool_flash_update_done_i210 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ */
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = rd32(E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -548,28 +683,6 @@ out:
}
/**
- * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
- * @hw: pointer to the HW structure
- *
- **/
-s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_NVM;
- u32 i, reg;
-
- for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
- reg = rd32(E1000_EECD);
- if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
- break;
- }
- udelay(5);
- }
-
- return ret_val;
-}
-
-/**
* igb_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3f50bc..1c89358a99ab 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
+extern s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
(ID_LED_OFF1_OFF2 << 4) | \
@@ -73,4 +84,10 @@ enum E1000_INVM_STRUCTURE_TYPE {
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
+/* NVM offset defaults for i211 device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+
#endif
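For the iNVM (i210/i211) path, the version is packed inside a 32-bit record rather than a dedicated NVM word. A sketch of pulling major/minor out of the first version field, assuming a raw record dword invm_dword read from E1000_INVM_DATA_REG(i) as in igb_read_invm_version() above:

	u16 version = (invm_dword & E1000_INVM_VER_FIELD_ONE) >> 3;
	u8  major = (version & E1000_INVM_MAJOR_MASK) >> E1000_INVM_MAJOR_SHIFT;
	u8  minor = version & E1000_INVM_MINOR_MASK;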
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145ac762..101e6e4da97f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -839,6 +839,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1040,6 +1041,129 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+ && mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = rd32(E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ hw_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = rd32(E1000_PCS_ANADV);
+ pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = igb_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
out:
return ret_val;
@@ -1391,6 +1515,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ /* All MDI settings are supported on 82580 and newer. */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
hw_dbg("Invalid MDI setting detected\n");
hw->phy.mdix = 1;
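The PAUSE/ASM_DIR table in the comment block above reduces to a small decision function. A stand-alone sketch (not part of the driver) that mirrors that table, assuming the e1000_fc_* flow control enumeration used by igb:

static enum e1000_fc_mode resolve_fc(bool loc_pause, bool loc_asm,
				     bool lp_pause, bool lp_asm,
				     enum e1000_fc_mode requested)
{
	if (loc_pause && lp_pause)	/* symmetric pause on both ends */
		return (requested == e1000_fc_full) ? e1000_fc_full
						    : e1000_fc_rx_pause;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return e1000_fc_tx_pause;
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return e1000_fc_rx_pause;
	return e1000_fc_none;		/* everything else: no flow control */
}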
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e51e30..e2b2c4b9c951 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_defines.h"
+#include "e1000_i210.h"
/*
* Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf3f357..fbb7604db364 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -438,7 +438,7 @@ out:
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/*
@@ -448,22 +448,21 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ return ret_val;
}
- ret_val = hw->nvm.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- msleep(10);
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = igb_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
igb_standby_nvm(hw);
@@ -497,13 +496,10 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(1000, 2000);
+ nvm->ops.release(hw);
}
- msleep(10);
-release:
- hw->nvm.ops.release(hw);
-
-out:
return ret_val;
}
@@ -710,3 +706,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igb_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output structure
+ *
+ * unsupported MAC types will return all 0 version structure
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 fw_version;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ break;
+ default:
+ return;
+ }
+ /* basic eeprom version numbers */
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+ /* etrack id */
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i350:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ ((comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT));
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
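A caller of igb_get_fw_version() typically only formats the result for display. A sketch of that use, loosely modelled on what igb_set_fw_version() in igb_main.c does with adapter->fw_version (the exact format string here is illustrative, and hw is assumed to be a valid struct e1000_hw *):

	struct e1000_fw_version fw;
	char buf[32];

	igb_get_fw_version(hw, &fw);
	if (fw.or_valid)
		snprintf(buf, sizeof(buf), "%d.%d, 0x%08x, %d.%d.%d",
			 fw.eep_major, fw.eep_minor, fw.etrack_id,
			 fw.or_major, fw.or_build, fw.or_patch);
	else
		snprintf(buf, sizeof(buf), "%d.%d, 0x%08x",
			 fw.eep_major, fw.eep_minor, fw.etrack_id);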
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b0228cac0..7012d458c6f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..fe76004aca4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- goto out;
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- goto out;
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ }
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
switch (hw->phy.id) {
case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299bfcb9..ed282f877d9a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
/* Enable flexible speed on link-up */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230c0592..17f1686ee411 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,16 +34,16 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#ifdef CONFIG_IGB_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
struct igb_adapter;
+#define E1000_PCS_CFG_IGN_SD 1
+
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
@@ -132,9 +132,10 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -151,11 +152,18 @@ struct vf_data_storage {
#define IGB_MNG_VLAN_NONE -1
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
@@ -174,11 +182,9 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
- u32 page_offset;
+ unsigned int page_offset;
};
struct igb_tx_queue_stats {
@@ -205,22 +211,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- char name[IFNAMSIZ + 9];
-};
-
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +222,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
- u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
- u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_clean;
u16 next_to_use;
+ u16 next_to_alloc;
union {
/* TX */
@@ -251,12 +243,30 @@ struct igb_ring {
};
/* RX */
struct {
+ struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
- /* Items past this point are only used during ring alloc / free */
- dma_addr_t dma; /* phys address of the ring */
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
@@ -362,8 +372,6 @@ struct igb_adapter {
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
- u32 eeprom_wol;
-
u16 tx_ring_count;
u16 rx_ring_count;
unsigned int vfs_allocated_count;
@@ -373,7 +381,6 @@ struct igb_adapter {
u32 wvbr;
u32 *shadow_vfta;
-#ifdef CONFIG_IGB_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct delayed_work ptp_overflow_work;
@@ -382,17 +389,19 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
char fw_version[32];
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -436,18 +445,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(q_vector, skb);
+}
+
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea012849825..bfe9208c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/highmem.h>
#include "igb.h"
@@ -1623,6 +1624,20 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
reg &= ~E1000_CONNSW_ENRGSRC;
wr32(E1000_CONNSW, reg);
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and i350 devices.
+ */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ wr32(E1000_PCS_CFG0, reg);
+ break;
+ default:
+ break;
+ }
+
/* Set PCS register for forced speed */
reg = rd32(E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
@@ -1685,16 +1700,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = kmap(rx_buffer->page);
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ kunmap(rx_buffer->page);
+
+ return match;
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
- struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
- unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1738,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- total_bytes += tx_buffer_info->bytecount;
- total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1746,8 +1770,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
- txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
- netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
@@ -1957,54 +1980,6 @@ static void igb_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2014,10 +1989,7 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
/* apply any specific unsupported masks here */
@@ -2045,8 +2017,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
@@ -2301,7 +2272,6 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
case e1000_82576:
case e1000_82580:
case e1000_i350:
@@ -2337,12 +2307,288 @@ static int igb_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
}
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ipcnfg, eeer;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+ edata->advertised = ADVERTISED_1000baseT_Full;
+
+ if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+ edata->advertised |= ADVERTISED_100baseT_Full;
+
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ /* Report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised = 0;
+ }
+
+ return 0;
+}
+
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI timer is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.advertised != edata->advertised) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE options are not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+ igb_set_eee_i350(hw);
+
+ /* reset link */
+ if (!netif_running(netdev))
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2629,10 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
.get_ts_info = igb_get_ts_info,
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
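The new get_eee/set_eee hooks are reachable from user space through the standard SIOCETHTOOL ioctl with the ETHTOOL_GEEE command. A minimal user-space sketch (interface name handling and error reporting kept deliberately simple):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int query_eee(const char *ifname, struct ethtool_eee *eee)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret;

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(eee, 0, sizeof(*eee));
	eee->cmd = ETHTOOL_GEEE;
	ifr.ifr_data = (void *)eee;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* fills eee_active, tx_lpi_enabled, ... */
	close(fd);
	return ret;
}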
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37ef12e..31cfe2ec75df 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
#include "igb.h"
#define MAJ 4
-#define MIN 0
-#define BUILD 1
+#define MIN 1
+#define BUILD 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -118,10 +118,11 @@ static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
+static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
@@ -228,7 +229,7 @@ static struct pci_driver igb_driver = {
.name = igb_driver_name,
.id_table = igb_pci_tbl,
.probe = igb_probe,
- .remove = __devexit_p(igb_remove),
+ .remove = igb_remove,
#ifdef CONFIG_PM
.driver.pm = &igb_pm_ops,
#endif
@@ -534,31 +535,27 @@ rx_ring_summary:
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX -------"
- "--------- %p%s\n", "RWB", i,
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb, next_desc);
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX"
- " %p%s\n", "R ", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb, next_desc);
+ next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->skb) {
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, buffer_info->skb->data,
- IGB_RX_HDR_LEN, true);
+ buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- PAGE_SIZE/2, true);
+ IGB_RX_BUFSZ, true);
}
}
}
@@ -656,80 +653,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
-static void igb_free_queues(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
- adapter->num_rx_queues = 0;
- adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
- adapter->tx_ring[i] = ring;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- /*
- * On i350, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- * */
- if (adapter->hw.mac.type >= e1000_i350)
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- adapter->rx_ring[i] = ring;
- }
-
- igb_cache_ring_register(adapter);
-
- return 0;
-
-err:
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -909,17 +832,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- int i, err = 0, vector = 0;
+ int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_other, 0, netdev->name, adapter);
if (err)
- goto out;
- vector++;
+ goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
+ vector++;
+
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx.ring && q_vector->tx.ring)
@@ -938,13 +862,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
igb_msix_ring, 0, q_vector->name,
q_vector);
if (err)
- goto out;
- vector++;
+ goto err_free;
}
igb_configure_msix(adapter);
return 0;
-out:
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
return err;
}
@@ -960,6 +893,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
}
/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+ * igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -969,17 +931,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
}
/**
@@ -990,7 +949,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
- igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
@@ -1001,11 +959,14 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
int err;
int numvecs, i;
+ if (!msix)
+ goto msi_only;
+
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
if (adapter->vfs_allocated_count)
@@ -1038,7 +999,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
- goto out;
+ return;
igb_reset_interrupt_capability(adapter);
@@ -1068,105 +1029,183 @@ msi_only:
adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
-out:
- /* Notify the stack of the (possibly) reduced queue counts. */
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- err = netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
- rtnl_unlock();
- return err;
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
}
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ int v_count, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
{
struct igb_q_vector *q_vector;
- struct e1000_hw *hw = &adapter->hw;
- int v_idx;
+ struct igb_ring *ring;
+ int ring_count, size;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
- q_vector->adapter = adapter;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
}
- return 0;
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
-err_out:
- igb_free_q_vectors(adapter);
- return -ENOMEM;
-}
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
- q_vector->rx.ring = adapter->rx_ring[ring_idx];
- q_vector->rx.ring->q_vector = q_vector;
- q_vector->rx.count++;
- q_vector->itr_val = adapter->rx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
-}
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /*
+ * On i350, i210, and i211, loopback VLAN packets
+ * have the tag byte-swapped.
+	 */
+ if (adapter->hw.mac.type >= e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
- q_vector->tx.ring = adapter->tx_ring[ring_idx];
- q_vector->tx.ring->q_vector = q_vector;
- q_vector->tx.count++;
- q_vector->itr_val = adapter->tx_itr_setting;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
}
+
/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function maps the recently allocated queues to vectors.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
- int i;
- int v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
- (adapter->num_q_vectors < adapter->num_tx_queues))
- return -ENOMEM;
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
- if (adapter->num_q_vectors >=
- (adapter->num_rx_queues + adapter->num_tx_queues)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
- } else {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (i < adapter->num_tx_queues)
- igb_map_tx_ring_to_vector(adapter, i, v_idx);
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
}
- for (; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
}
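
As a rough standalone sketch of the spreading math above (plain userspace C; the counts of 4 vectors, 4 Rx and 2 Tx rings are made-up examples, not driver defaults), the DIV_ROUND_UP terms hand each remaining ring type out evenly over the vectors that are left, so every vector ends up with at most one Tx and one Rx ring:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* example counts only: 4 vectors serving 4 Rx rings and 2 Tx rings */
	int q_vectors = 4, rxr_remaining = 4, txr_remaining = 2;
	int rxr_idx = 0, txr_idx = 0, v_idx;

	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		printf("vector %d: %d Tx ring(s) from index %d, %d Rx ring(s) from index %d\n",
		       v_idx, tqpv, txr_idx, rqpv, rxr_idx);

		/* consume what this vector took and move the indexes forward */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx += rqpv;
		txr_idx += tqpv;
	}
	return 0;
}
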
/**
@@ -1174,14 +1213,12 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter)
*
* This function initializes the interrupts and allocates all of the queues.
**/
-static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
struct pci_dev *pdev = adapter->pdev;
int err;
- err = igb_set_interrupt_capability(adapter);
- if (err)
- return err;
+ igb_set_interrupt_capability(adapter, msix);
err = igb_alloc_q_vectors(adapter);
if (err) {
@@ -1189,24 +1226,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- err = igb_map_ring_to_vector(adapter);
- if (err) {
- dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
- goto err_map_queues;
- }
-
+ igb_cache_ring_register(adapter);
return 0;
-err_map_queues:
- igb_free_queues(adapter);
-err_alloc_queues:
- igb_free_q_vectors(adapter);
+
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
@@ -1229,29 +1252,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err)
goto request_done;
/* fall back to MSI */
- igb_clear_interrupt_scheme(adapter);
- if (!pci_enable_msi(pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
- adapter->num_q_vectors = 1;
- err = igb_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for vectors\n");
- goto request_done;
- }
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for queues\n");
- igb_free_q_vectors(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
goto request_done;
- }
+
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
}
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1587,8 +1598,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 hwm;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -1663,7 +1673,7 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
@@ -1706,10 +1716,8 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
-#ifdef CONFIG_IGB_PTP
/* Re-enable PTP, where applicable. */
igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
igb_get_phy_info(hw);
}
@@ -1783,58 +1791,34 @@ static const struct net_device_ops igb_netdev_ops = {
void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 major, build, patch, fw_version;
- u32 etrack_id;
-
- hw->nvm.ops.read(hw, 5, 1, &fw_version);
- if (adapter->hw.mac.type != e1000_i211) {
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
- etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
-
- /* combo image version needs to be found */
- hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) &&
- (comb_offset != IGB_NVM_VER_INVALID)) {
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
- + 1), 1, &comb_verh);
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
- 1, &comb_verl);
-
- /* Only display Option Rom if it exists and is valid */
- if ((comb_verh && comb_verl) &&
- ((comb_verh != IGB_NVM_VER_INVALID) &&
- (comb_verl != IGB_NVM_VER_INVALID))) {
- major = comb_verl >> IGB_COMB_VER_SHFT;
- build = (comb_verl << IGB_COMB_VER_SHFT) |
- (comb_verh >> IGB_COMB_VER_SHFT);
- patch = comb_verh & IGB_COMB_VER_MASK;
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x, %d.%d.%d",
- (fw_version & IGB_MAJOR_MASK) >>
- IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >>
- IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK),
- etrack_id, major, build, patch);
- goto out;
- }
- }
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK), etrack_id);
- } else {
+ struct e1000_fw_version fw;
+
+ igb_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i211:
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK));
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+
+ default:
+		/* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ }
+ break;
}
-out:
return;
}
@@ -1849,8 +1833,7 @@ out:
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit igb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igb_adapter *adapter;
@@ -1861,7 +1844,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
- u16 eeprom_apme_mask = IGB_EEPROM_APME;
u8 part_str[E1000_PBANUM_LENGTH];
/* Catch broken hardware that put the wrong VF device ID in
@@ -2069,28 +2051,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_validate_mdi_setting(hw);
- /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
- * enable the ACPI Magic Packet filter
- */
-
+ /* By default, support wake on port A */
if (hw->bus.func == 0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support on non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
else if (hw->bus.func == 1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
- if (eeprom_data & eeprom_apme_mask)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
@@ -2098,24 +2079,38 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
@@ -2141,10 +2136,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
#endif
-#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
@@ -2212,16 +2205,14 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit igb_remove(struct pci_dev *pdev)
+static void igb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
/*
* The watchdog timer may be rescheduled, so explicitly
@@ -2294,7 +2285,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
-static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
+static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
@@ -2355,7 +2346,7 @@ out:
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit igb_sw_init(struct igb_adapter *adapter)
+static int igb_sw_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
@@ -2461,7 +2452,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
GFP_ATOMIC);
/* This call may decrease the number of queues */
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -2531,6 +2522,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
@@ -2560,6 +2562,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return 0;
+err_set_queues:
+ igb_free_irq(adapter);
err_req_irq:
igb_release_hw_control(adapter);
igb_power_down_link(adapter);
@@ -2637,10 +2641,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2777,18 +2779,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2893,18 +2893,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
- /*
- * Generate RSS hash based on TCP port numbers and/or
- * IPv4/v6 src and dst addresses since UDP cannot be
- * hashed reliably due to IP fragmentation
- */
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue */
@@ -3106,16 +3109,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#ifdef CONFIG_IGB_PTP
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3302,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
unsigned long size;
u16 i;
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- if (buffer_info->page_dma) {
- dma_unmap_page(rx_ring->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_offset = 0;
- }
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3331,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -4028,6 +4017,9 @@ static int igb_tso(struct igb_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -4148,26 +4140,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
+
+ /* set segmentation bits for TSO */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
-#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
- cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
- /* set segmentation bits for TSO */
- if (tx_flags & IGB_TX_FLAGS_TSO)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
return cmd_type;
}
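
The IGB_SET_FLAG() macro above trades a conditional branch for a multiply or divide by a constant power of two. A small standalone sketch (userspace C; the four mask values are made-up stand-ins rather than the real register bits) shows both directions of the translation:

#include <stdio.h>
#include <stdint.h>

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

#define EX_TX_FLAG_VLAN	0x00000002u	/* made-up input flag bit */
#define EX_DCMD_VLE	0x40000000u	/* made-up descriptor command bit */
#define EX_TX_FLAG_CSUM	0x00000100u	/* made-up input flag bit */
#define EX_POPTS_TXSM	0x00000008u	/* made-up descriptor option bit */

int main(void)
{
	uint32_t tx_flags = EX_TX_FLAG_VLAN | EX_TX_FLAG_CSUM;

	/* flag < result: the macro multiplies by a constant power of two */
	printf("VLE:  0x%08x\n", IGB_SET_FLAG(tx_flags, EX_TX_FLAG_VLAN, EX_DCMD_VLE));
	/* flag > result: the macro divides by a constant power of two */
	printf("TXSM: 0x%08x\n", IGB_SET_FLAG(tx_flags, EX_TX_FLAG_CSUM, EX_POPTS_TXSM));
	return 0;
}
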
@@ -4178,19 +4176,19 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
{
u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring if any offload is enabled */
- if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
- test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
/* insert L4 checksum */
- if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
- /* insert IPv4 checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- }
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
@@ -4209,33 +4207,37 @@ static void igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
dma_addr_t dma;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
- __le32 cmd_type;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IGB_TX_DESC(tx_ring, i);
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
- cmd_type = igb_tx_cmd_type(tx_flags);
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -4243,18 +4245,18 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IGB_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD;
- tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -4262,32 +4264,22 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
+ size, DMA_TO_DEVICE);
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.olinfo_status = 0;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- frag++;
}
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
@@ -4372,9 +4364,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
-#ifdef CONFIG_IGB_PTP
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4397,7 +4387,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
-#ifdef CONFIG_IGB_PTP
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4396,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
-#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4455,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4789,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -4811,7 +4799,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
wr32(E1000_EIMS, adapter->eims_other);
@@ -4851,45 +4838,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
}
#ifdef CONFIG_IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_TXCTRL_CPUID_SHIFT;
- }
- dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_RXCTRL_CPUID_SHIFT;
- }
- dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
- wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
q_vector->cpu = cpu;
out_no_update:
put_cpu();
@@ -5545,7 +5550,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5556,7 +5560,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5599,7 +5602,6 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5610,7 +5612,6 @@ static irqreturn_t igb_intr(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5840,6 +5841,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return !!budget;
}
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+{
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are the only owner of the page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
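
On machines with pages smaller than 8192 bytes, the reuse path above alternates between the two halves of a page by XOR-ing the offset with the buffer size. A tiny standalone sketch, assuming 4 KiB pages and 2 KiB Rx buffers (both made-up example values):

#include <stdio.h>

#define EX_PAGE_SIZE	4096u			/* assumed 4 KiB page */
#define EX_RX_BUFSZ	(EX_PAGE_SIZE / 2)	/* assumed 2 KiB Rx buffer */

int main(void)
{
	unsigned int page_offset = 0;
	int use;

	for (use = 0; use < 4; use++) {
		printf("rx buffer %d uses page offset %u\n", use, page_offset);
		/* hand one half to the stack, flip to the other half */
		page_offset ^= EX_RX_BUFSZ;
	}
	return 0;
}
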
+
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -5889,224 +6065,389 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-static void igb_rx_vlan(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it returns true
+ * indicating that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
{
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ u32 ntc = rx_ring->next_to_clean + 1;
- __vlan_hwaccel_put_tag(skb, vid);
- }
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
}
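
A minimal standalone sketch of the non-EOP walk above (userspace C with made-up status words): buffers are consumed in ring order, next_to_clean wraps at the ring size, and a frame only completes when a descriptor carrying the EOP bit is reached:

#include <stdio.h>

#define EX_RING_COUNT	4
#define EX_STAT_EOP	0x1u

int main(void)
{
	/* made-up status words: frame 0 spans buffers 0-2, frame 1 is buffer 3 */
	unsigned int status[EX_RING_COUNT] = { 0, 0, EX_STAT_EOP, EX_STAT_EOP };
	unsigned int next_to_clean = 0, frame = 0, i;

	for (i = 0; i < EX_RING_COUNT; i++) {
		unsigned int ntc = next_to_clean;

		/* advance and wrap next_to_clean, as igb_is_non_eop() does */
		next_to_clean = (ntc + 1 < EX_RING_COUNT) ? ntc + 1 : 0;

		printf("buffer %u chained into frame %u\n", ntc, frame);
		if (status[ntc] & EX_STAT_EOP)
			frame++;	/* EOP seen: frame complete */
	}
	return 0;
}
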
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
-{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!hdr.ipv4->frag_off)
+ nexthdr = hdr.ipv4->protocol;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return hdr.network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
*/
- u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IGB_RX_HDR_LEN)
- hlen = IGB_RX_HDR_LEN;
- return hlen;
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
}
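
As a rough standalone sketch of the header walk performed by igb_get_headlen() (userspace C over a hand-built Ethernet/IPv4/TCP frame; VLAN, IPv6, UDP and some of the driver's bounds checks are trimmed for brevity), the pull length is simply the sum of the recognized header sizes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static unsigned int ex_headlen(const unsigned char *data, unsigned int max_len)
{
	const unsigned char *p = data;
	uint16_t proto;
	unsigned int hlen;

	if (max_len < 14)			/* runt: no full Ethernet header */
		return max_len;
	memcpy(&proto, p + 12, sizeof(proto));	/* EtherType, network order */
	p += 14;
	if (proto != htons(0x0800))		/* this sketch only parses IPv4 */
		return p - data;

	hlen = (p[0] & 0x0f) << 2;		/* IPv4 IHL in bytes */
	if (hlen < 20)
		return p - data;
	if (p[9] != 6)				/* not TCP: stop after L3 */
		return p - data + hlen;
	p += hlen;

	hlen = (p[12] & 0xf0) >> 2;		/* TCP data offset in bytes */
	if (hlen < 20)
		return p - data;
	return (p - data) + hlen;
}

int main(void)
{
	unsigned char frame[64] = { 0 };

	frame[12] = 0x08; frame[13] = 0x00;	/* EtherType: IPv4 */
	frame[14] = 0x45;			/* version 4, 20-byte header */
	frame[23] = 6;				/* L4 protocol: TCP */
	frame[46] = 0x50;			/* TCP data offset: 20 bytes */

	printf("pull length = %u bytes\n", ex_headlen(frame, sizeof(frame)));
	return 0;
}
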
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- union e1000_adv_rx_desc *rx_desc;
- const int current_node = numa_node_id();
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
- u16 i = rx_ring->next_to_clean;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_desc = IGB_RX_DESC(rx_ring, i);
+ /*
+ * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- struct sk_buff *skb = buffer_info->skb;
- union e1000_adv_rx_desc *next_rxd;
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- buffer_info->skb = NULL;
- prefetch(skb->data);
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
- next_rxd = IGB_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (!skb_is_nonlinear(skb)) {
- __skb_put(skb, igb_get_hlen(rx_desc));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
}
+ }
- if (rx_desc->wb.upper.length) {
- u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += PAGE_SIZE / 2;
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
- if ((page_count(buffer_info->page) != 1) ||
- (page_to_nid(buffer_info->page) != current_node))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
+ return false;
+}
- dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
- struct igb_rx_buffer *next_buffer;
- next_buffer = &rx_ring->rx_buffer_info[i];
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
+ igb_rx_hash(rx_ring, rx_desc, skb);
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK))
- && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
- dev_kfree_skb_any(skb);
- goto next_desc;
- }
+ igb_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IGB_PTP
- igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
- igb_rx_hash(rx_ring, rx_desc, skb);
- igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_rx_vlan(rx_ring, rx_desc, skb);
+ igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
- total_bytes += skb->len;
- total_packets++;
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
- napi_gro_receive(&q_vector->napi, skb);
+ skb_record_rx_queue(skb, rx_ring->queue_index);
- budget--;
-next_desc:
- if (!budget)
- break;
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
- cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
- }
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- rx_ring->next_to_clean = i;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
- return !!budget;
-}
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct sk_buff *skb = bi->skb;
- dma_addr_t dma = bi->dma;
+ cleaned_count++;
- if (dma)
- return true;
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- bi->skb = skb;
- if (!skb) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- /* initialize skb for ring */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- }
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
- dma = dma_map_single(rx_ring->dev, skb->data,
- IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
- if (dma_mapping_error(rx_ring->dev, dma)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ napi_gro_receive(&q_vector->napi, skb);
- bi->dma = dma;
- return true;
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return (total_packets < budget);
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t page_dma = bi->page_dma;
- unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+ dma_addr_t dma;
- if (page_dma)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
return true;
- if (!page) {
- page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
- bi->page = page;
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ /* alloc new page for storage */
+ page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
}
- page_dma = dma_map_page(rx_ring->dev, page,
- page_offset, PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
- if (dma_mapping_error(rx_ring->dev, page_dma)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
- bi->page_dma = page_dma;
- bi->page_offset = page_offset;
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
return true;
}
@@ -6120,22 +6461,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
- while (cleaned_count--) {
- if (!igb_alloc_mapped_skb(rx_ring, bi))
- break;
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
+ do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -6148,17 +6490,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
- }
+
+ cleaned_count--;
+ } while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
rx_ring->next_to_use = i;
- /* Force memory writes to complete before letting h/w
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, rx_ring->tail);
}
@@ -6207,10 +6557,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
@@ -6478,7 +6826,7 @@ static int igb_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -6492,7 +6840,9 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
+ rtnl_lock();
err = __igb_open(netdev, true);
+ rtnl_unlock();
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445157a3..ab3429729bde 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
}
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
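
A small standalone sketch of the packet-timestamp layout described above (userspace C; the SYSTIM value is made up and a little-endian host is assumed): the timestamp is one 64-bit little-endian value beginning at byte 8 of the first buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char ts_hdr[16] = { 0 };		/* start of the first Rx buffer */
	uint64_t systim = 0x0000001234567890ULL;	/* made-up SYSTIM value */
	uint64_t regval;

	/* DWORDs 2 and 3 carry SYSTIML/SYSTIMH, i.e. one little-endian
	 * 64-bit value at byte 8 (little-endian host assumed here) */
	memcpy(ts_hdr + 8, &systim, sizeof(systim));

	memcpy(&regval, ts_hdr + 8, sizeof(regval));
	printf("recovered SYSTIM = 0x%016llx\n", (unsigned long long)regval);
	return 0;
}
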
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
@@ -532,18 +553,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
@@ -554,31 +563,33 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /* 82576 cannot timestamp all packets, which it needs to do to
+ * support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -596,6 +607,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
if ((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {
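The new igb_ptp_rx_pktstamp() above consumes a 64-bit little-endian SYSTIM value that the hardware places in the second quadword of the packet-prefix buffer (regval[1]). A small, self-contained sketch of that extraction, assuming a plain byte buffer rather than the driver's descriptor plumbing:

```c
/* Illustrative sketch (not the driver code): pull a 64-bit little-endian
 * timestamp out of the second 8 bytes of a packet-prefix buffer. */
#include <stdio.h>
#include <stdint.h>

static uint64_t le64_from_bytes(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];    /* byte 0 is least significant */
	return v;
}

int main(void)
{
	/* DWORDs 0-1 are reserved, DWORDs 2-3 hold SYSTIML/SYSTIMH */
	unsigned char va[16] = { 0 };
	uint64_t systim = 0x0000000123456789ULL;
	int i;

	for (i = 0; i < 8; i++)
		va[8 + i] = (uint8_t)(systim >> (8 * i));

	printf("timestamp = 0x%llx\n",
	       (unsigned long long)le64_from_bytes(va + 8));
	return 0;
}
```

In the driver the result is then converted to a kernel hwtstamp via the timecounter; the sketch stops at recovering the raw 64-bit value.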
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 3e18045d8f89..d9fa999b1685 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -46,6 +46,7 @@
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a895e2f7b34d..fdca7b672776 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -295,7 +295,7 @@ struct igbvf_info {
/* hardware capability, feature, and workaround flags */
#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f527a84..277f5dfe3d90 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.1-k"
+#define DRV_VERSION "2.0.2-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
@@ -107,12 +107,19 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct sk_buff *skb,
u32 status, u16 vlan)
{
+ u16 vid;
+
if (status & E1000_RXD_STAT_VP) {
- u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+ (status & E1000_RXDEXT_STATERR_LB))
+ vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ else
+ vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, vid);
}
- netif_receive_skb(skb);
+
+ napi_gro_receive(&adapter->rx_ring->napi, skb);
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -184,6 +191,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset,
PAGE_SIZE / 2,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ buffer_info->page_dma)) {
+ __free_page(buffer_info->page);
+ buffer_info->page = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
if (!buffer_info->skb) {
@@ -197,6 +211,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ goto no_buffers;
+ }
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -1078,7 +1098,7 @@ out:
* igbvf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
-static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
+static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1530,7 +1550,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
+static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
s32 rc;
@@ -2598,8 +2618,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit igbvf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igbvf_adapter *adapter;
@@ -2754,6 +2773,10 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
igbvf_reset(adapter);
+ /* set hardware-specific flags */
+ if (adapter->hw.mac.type == e1000_vfadapt_i350)
+ adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
+
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
@@ -2794,7 +2817,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit igbvf_remove(struct pci_dev *pdev)
+static void igbvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2851,7 +2874,7 @@ static struct pci_driver igbvf_driver = {
.name = igbvf_driver_name,
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
- .remove = __devexit_p(igbvf_remove),
+ .remove = igbvf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = igbvf_suspend,
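The igbvf_receive_skb() change above handles i350 VF loopback frames whose VLAN tag arrives in network (big-endian) order, while normal frames keep the little-endian descriptor layout. A hedged, self-contained sketch of that selection, with illustrative constants standing in for the E1000 defines:

```c
/* Sketch of the VLAN-tag handling: looped-back frames need a byte swap
 * before masking out the 12-bit VLAN ID.  Constants are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define RXD_STAT_VP		0x08        /* packet carries a VLAN tag */
#define RXDEXT_STATERR_LB	0x00040000  /* frame was looped back locally */
#define RXD_SPC_VLAN_MASK	0x0fff
#define FLAG_RX_LB_VLAN_BSWAP	(1 << 1)

static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* 'vlan' is the raw descriptor field as read from memory on a little-endian
 * CPU; returns the 12-bit VLAN ID or -1 if the frame is untagged. */
static int extract_vid(unsigned int flags, uint32_t status, uint16_t vlan)
{
	if (!(status & RXD_STAT_VP))
		return -1;

	if ((flags & FLAG_RX_LB_VLAN_BSWAP) && (status & RXDEXT_STATERR_LB))
		return swap16(vlan) & RXD_SPC_VLAN_MASK;  /* big-endian tag */

	return vlan & RXD_SPC_VLAN_MASK;                  /* little-endian tag */
}

int main(void)
{
	/* VLAN 5 as it would appear in a looped-back (network order) tag */
	printf("loopback vid = %d\n",
	       extract_vid(FLAG_RX_LB_VLAN_BSWAP,
			   RXD_STAT_VP | RXDEXT_STATERR_LB, 0x0500));
	printf("normal   vid = %d\n",
	       extract_vid(FLAG_RX_LB_VLAN_BSWAP, RXD_STAT_VP, 0x0005));
	return 0;
}
```

Both calls report VLAN 5, which is the point of the flag: the same tag value reaches the stack regardless of whether the frame took the internal loopback path.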
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d99a2d51b948..ae96c10251be 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void __devexit ixgb_remove(struct pci_dev *pdev);
+static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
@@ -125,7 +125,7 @@ static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
- .remove = __devexit_p(ixgb_remove),
+ .remove = ixgb_remove,
.err_handler = &ixgb_err_handler
};
@@ -391,7 +391,7 @@ static const struct net_device_ops ixgb_netdev_ops = {
* and a hardware reset occur.
**/
-static int __devinit
+static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
@@ -558,7 +558,7 @@ err_dma_mask:
* memory.
**/
-static void __devexit
+static void
ixgb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -584,7 +584,7 @@ ixgb_remove(struct pci_dev *pdev)
* OS network device settings (MTU size).
**/
-static int __devinit
+static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
struct ixgb_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 07d83ab46e21..04a60640ddda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -47,7 +47,7 @@
#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
#define IXGB_PARAM(X, desc) \
- static int __devinitdata X[IXGB_MAX_NIC+1] \
+ static int X[IXGB_MAX_NIC+1] \
= IXGB_PARAM_INIT; \
static unsigned int num_##X = 0; \
module_param_array_named(X, X, int, &num_##X, 0); \
@@ -199,7 +199,7 @@ struct ixgb_option {
} arg;
};
-static int __devinit
+static int
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{
if (*value == OPTION_UNSET) {
@@ -257,7 +257,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
* in a variable in the adapter structure.
**/
-void __devinit
+void
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e51fc13..687c83d1bdab 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,13 +32,13 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
-ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
+ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f0f47a..8e786764c60e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
-#ifdef CONFIG_IXGBE_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IXGBE_PTP */
#include "ixgbe_type.h"
#include "ixgbe_common.h"
@@ -135,6 +133,7 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
+ unsigned int vf_api;
};
struct vf_macvlans {
@@ -482,8 +481,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
/* Tx fast path data */
int num_tx_queues;
@@ -571,7 +571,6 @@ struct ixgbe_adapter {
u32 interrupt_event;
u32 led_reg;
-#ifdef CONFIG_IXGBE_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -580,8 +579,6 @@ struct ixgbe_adapter {
struct timecounter tc;
int rx_hwtstamp_filter;
u32 base_incval;
- u32 cycle_speed;
-#endif /* CONFIG_IXGBE_PTP */
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +597,8 @@ struct ixgbe_adapter {
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
+
+ u8 default_up;
};
struct ixgbe_fdir_filter {
@@ -691,6 +690,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
+extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +739,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-#ifdef CONFIG_IXGBE_PTP
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +750,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
-#endif /* CONFIG_IXGBE_PTP */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2b38db..1073aea5da40 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
- u32 reg_anlp1 = 0;
- u32 i = 0;
u16 list_offset, data_offset, data_value;
+ bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- usleep_range(4000, 8000);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
+ /* Need SW/FW semaphore around AUTOC writes if LESM on,
+ * likewise reset_pipeline requires lock as it also writes
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto setup_sfp_out;
+
+ got_lock = true;
+ }
+
+ /* Restart DSP and set SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = false;
}
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
- hw_dbg(hw, "sfp module setup not complete\n");
+
+ if (ret_val) {
+ hw_dbg(hw, " sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
-
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = 0;
+ bool got_lock = false;
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto out;
+
+ got_lock = true;
+ }
/* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
/* Add delay to filter out noises during initial link setup */
msleep(50);
+out:
return status;
}
@@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ bool got_lock = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires us to hold this lock as
+ * it also writes to AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != 0)
+ goto out;
+
+ got_lock = true;
+ }
+
/* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@ mac_reset_top:
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
+ if (autoc != hw->mac.orig_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is
+ * on, likewise reset_pipeline requires us to hold
+ * this lock as it also writes to AUTOC.
+ */
+ bool got_lock = false;
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto reset_hw_out;
+
+ got_lock = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ }
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1022,7 +1078,7 @@ mac_reset_top:
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
@@ -1983,7 +2039,7 @@ fw_version_out:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
+ * to AUTOC, so this function assumes the semaphore is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 i, autoc_reg, ret_val;
+ s32 anlp1_reg = 0;
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ usleep_range(4000, 8000);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = 0;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
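Several hunks above wrap AUTOC writes and ixgbe_reset_pipeline_82599() in the same "got_lock" pattern: take the SW/FW semaphore only when LESM firmware makes the extra serialization necessary, do the register work unconditionally, and release only if the lock was actually acquired. A userspace analogue of that pattern, with a pthread mutex standing in for the SW/FW semaphore and a plain variable for the register:

```c
/* Userspace analogue of the got_lock pattern around the AUTOC writes. */
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t swfw_sem = PTHREAD_MUTEX_INITIALIZER;
static unsigned int autoc_reg;                    /* pretend register */

static int write_autoc(bool lesm_enabled, unsigned int val)
{
	bool got_lock = false;
	int ret = 0;

	if (lesm_enabled) {
		ret = pthread_mutex_lock(&swfw_sem);  /* acquire_swfw_sync() */
		if (ret)
			return ret;
		got_lock = true;
	}

	autoc_reg = val;                              /* IXGBE_WRITE_REG(AUTOC) */
	/* ...pipeline reset work would happen here, still under the lock... */

	if (got_lock)
		pthread_mutex_unlock(&swfw_sem);      /* release_swfw_sync() */

	return ret;
}

int main(void)
{
	write_autoc(true, 0x1234);
	write_autoc(false, 0x5678);
	printf("autoc_reg = 0x%x\n", autoc_reg);
	return 0;
}
```

Build with -pthread. The design point the sketch keeps is that the release is gated on got_lock, so error paths that bail out before acquiring never unlock a semaphore they do not hold.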
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4a45fd..5e68afdd502a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,13 +65,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
- return 0;
case IXGBE_DEV_ID_82599_T3_LOM:
return 0;
default:
@@ -90,6 +89,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
+ bool got_lock = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -210,8 +210,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+	 * LESM is on, likewise reset_pipeline requires the lock as
+ * it also writes AUTOC.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw) == 0)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1762,30 +1783,6 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address.
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-s32 ixgbe_validate_mac_addr(u8 *mac_addr)
-{
- s32 status = 0;
-
- /* Make sure it is not a multicast address */
- if (IXGBE_IS_MULTICAST(mac_addr))
- status = IXGBE_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- else if (IXGBE_IS_BROADCAST(mac_addr))
- status = IXGBE_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
- status = IXGBE_ERR_INVALID_MAC_ADDR;
-
- return status;
-}
-
-/**
* ixgbe_set_rar_generic - Set Rx address register
* @hw: pointer to hardware structure
* @index: Receive address register to write
@@ -1889,8 +1886,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* to the permanent address.
* Otherwise, use the permanent address from the eeprom.
*/
- if (ixgbe_validate_mac_addr(hw->mac.addr) ==
- IXGBE_ERR_INVALID_MAC_ADDR) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
@@ -2617,6 +2613,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
/*
* Link must be up to auto-blink the LEDs;
@@ -2625,10 +2622,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ bool got_lock = false;
+
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2637,7 +2652,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
@@ -2649,18 +2665,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
+ bool got_lock = false;
+
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
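The ixgbe_common.c hunks above drop the driver-private ixgbe_validate_mac_addr() in favour of is_valid_ether_addr(). The sketch below mirrors the semantics of that check in plain C (reject multicast, broadcast and all-zero addresses); it is an illustration of the check, not the kernel helper itself.

```c
/* Plain-C equivalent of the "valid unicast MAC" check. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool mac_is_valid_unicast(const uint8_t mac[6])
{
	bool all_zero = true;
	int i;

	if (mac[0] & 0x01)              /* multicast (includes broadcast) */
		return false;

	for (i = 0; i < 6; i++)
		if (mac[i] != 0x00)
			all_zero = false;

	return !all_zero;               /* reject the all-zero address */
}

int main(void)
{
	const uint8_t good[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	const uint8_t zero[6] = { 0 };

	printf("good: %d, zero: %d\n",
	       mac_is_valid_unicast(good), mac_is_valid_unicast(zero));
	return 0;
}
```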
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d1188c36..f7a0970a251c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -78,9 +78,9 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 8d3a21889099..3504686d3af5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -24,9 +24,6 @@
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -37,20 +34,6 @@ static struct dentry *ixgbe_dbg_root;
static char ixgbe_dbg_reg_ops_buf[256] = "";
/**
- * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
- * @inode: inode that was opened
- * @filp: file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer where
- * we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/**
* ixgbe_dbg_reg_ops_read - read for reg_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
@@ -61,23 +44,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- char buf[256];
- int bytes_not_copied;
+ char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
- len = snprintf(buf, sizeof(buf), "%s: %s\n",
- adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
- if (count < len)
+ buf = kasprintf(GFP_KERNEL, "%s: %s\n",
+ adapter->netdev->name,
+ ixgbe_dbg_reg_ops_buf);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
return -ENOSPC;
- bytes_not_copied = copy_to_user(buffer, buf, len);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- *ppos = len;
+ kfree(buf);
return len;
}
@@ -93,7 +80,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- int bytes_not_copied;
+ int len;
/* don't allow partial writes */
if (*ppos != 0)
@@ -101,14 +88,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
return -ENOSPC;
- bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- else if (bytes_not_copied < count)
- count -= bytes_not_copied;
- else
- return -ENOSPC;
- ixgbe_dbg_reg_ops_buf[count] = '\0';
+ len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf,
+ sizeof(ixgbe_dbg_reg_ops_buf)-1,
+ ppos,
+ buffer,
+ count);
+ if (len < 0)
+ return len;
+
+ ixgbe_dbg_reg_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
u32 reg, value;
@@ -142,7 +130,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
static const struct file_operations ixgbe_dbg_reg_ops_fops = {
.owner = THIS_MODULE,
- .open = ixgbe_dbg_reg_ops_open,
+ .open = simple_open,
.read = ixgbe_dbg_reg_ops_read,
.write = ixgbe_dbg_reg_ops_write,
};
@@ -150,20 +138,6 @@ static const struct file_operations ixgbe_dbg_reg_ops_fops = {
static char ixgbe_dbg_netdev_ops_buf[256] = "";
/**
- * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
- * @inode: inode that was opened
- * @filp: file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer
- * where we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/**
* ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
@@ -175,23 +149,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- char buf[256];
- int bytes_not_copied;
+ char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
- len = snprintf(buf, sizeof(buf), "%s: %s\n",
- adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
- if (count < len)
+ buf = kasprintf(GFP_KERNEL, "%s: %s\n",
+ adapter->netdev->name,
+ ixgbe_dbg_netdev_ops_buf);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
return -ENOSPC;
- bytes_not_copied = copy_to_user(buffer, buf, len);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- *ppos = len;
+ kfree(buf);
return len;
}
@@ -207,7 +185,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- int bytes_not_copied;
+ int len;
/* don't allow partial writes */
if (*ppos != 0)
@@ -215,15 +193,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
return -ENOSPC;
- bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
- buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- else if (bytes_not_copied < count)
- count -= bytes_not_copied;
- else
- return -ENOSPC;
- ixgbe_dbg_netdev_ops_buf[count] = '\0';
+ len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf,
+ sizeof(ixgbe_dbg_netdev_ops_buf)-1,
+ ppos,
+ buffer,
+ count);
+ if (len < 0)
+ return len;
+
+ ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
@@ -238,7 +216,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
- .open = ixgbe_dbg_netdev_ops_open,
+ .open = simple_open,
.read = ixgbe_dbg_netdev_ops_read,
.write = ixgbe_dbg_netdev_ops_write,
};
@@ -296,5 +274,3 @@ void ixgbe_dbg_exit(void)
{
debugfs_remove_recursive(ixgbe_dbg_root);
}
-
-#endif /* CONFIG_DEBUG_FS */
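The debugfs rework above replaces the hand-rolled copy_to_user()/copy_from_user() handling with kasprintf() plus simple_read_from_buffer()/simple_write_to_buffer(), and the open helpers with simple_open(). The following is a userspace sketch of what the bounded-read helper does, assuming a plain memcpy in place of copy_to_user(); the kernel helper additionally has to handle a partial user-copy failure.

```c
/* Userspace sketch of a simple_read_from_buffer()-style helper: copy at
 * most 'count' bytes starting at *ppos, advance *ppos, return bytes copied
 * (0 at end of buffer). */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_buffer(void *to, size_t count, long long *ppos,
				const void *from, size_t available)
{
	long long pos = *ppos;
	size_t n;

	if (pos < 0)
		return -1;
	if ((size_t)pos >= available || !count)
		return 0;
	n = available - (size_t)pos;
	if (n > count)
		n = count;
	memcpy(to, (const char *)from + pos, n);
	*ppos = pos + n;
	return (ssize_t)n;
}

int main(void)
{
	const char src[] = "eth0: read 0x42\n";
	char dst[8];
	long long pos = 0;
	ssize_t n;

	while ((n = read_from_buffer(dst, sizeof(dst), &pos, src,
				     strlen(src))) > 0)
		fwrite(dst, 1, (size_t)n, stdout);
	return 0;
}
```

This is why the "don't allow partial reads" checks in the driver become simpler: the offset bookkeeping lives in the helper, and the read path only has to build the string and free it.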
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e901bee..326858424345 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -383,6 +383,11 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return -EINVAL;
+ /* some devices do not support autoneg of link flow control */
+ if ((pause->autoneg == AUTONEG_ENABLE) &&
+ (ixgbe_device_supports_autoneg_fc(hw) != 0))
+ return -EINVAL;
+
fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
@@ -887,24 +892,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+ struct ixgbe_ring *temp_ring;
int i, err = 0;
u32 new_rx_count, new_tx_count;
- bool need_update = false;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
- new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
- new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ IXGBE_MIN_TXD, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring[0]->count) &&
- (new_rx_count == adapter->rx_ring[0]->count)) {
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */
return 0;
}
@@ -922,81 +926,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
- if (!temp_tx_ring) {
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+ temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+ if (!temp_ring) {
err = -ENOMEM;
goto clear_reset;
}
+ ixgbe_down(adapter);
+
+ /*
+ * Setup new Tx resources and free the old Tx resources in that order.
+ * We can then assign the new resources to the rings via a memcpy.
+ * The advantage to this approach is that we are guaranteed to still
+ * have resources even in the case of an allocation failure.
+ */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
- temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+ temp_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(&temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_ring[i]);
}
- goto clear_reset;
+ goto err_setup;
}
}
- need_update = true;
- }
- temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
- if (!temp_rx_ring) {
- err = -ENOMEM;
- goto err_setup;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
}
+ /* Repeat the process for the Rx rings if needed */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
- temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+
+ temp_ring[i].count = new_rx_count;
+ err = ixgbe_setup_rx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(&temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_ring[i]);
}
goto err_setup;
}
+
}
- need_update = true;
- }
- /* if rings need to be updated, here's the place to do it in one shot */
- if (need_update) {
- ixgbe_down(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
- /* tx */
- if (new_tx_count != adapter->tx_ring_count) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter->tx_ring[i]);
- memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->tx_ring_count = new_tx_count;
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
}
- /* rx */
- if (new_rx_count != adapter->rx_ring_count) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter->rx_ring[i]);
- memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->rx_ring_count = new_rx_count;
- }
- ixgbe_up(adapter);
+ adapter->rx_ring_count = new_rx_count;
}
- vfree(temp_rx_ring);
err_setup:
- vfree(temp_tx_ring);
+ ixgbe_up(adapter);
+ vfree(temp_ring);
clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
@@ -2669,7 +2672,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IXGBE_PTP
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -2695,7 +2697,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
-#endif /* CONFIG_IXGBE_PTP */
default:
return ethtool_op_get_ts_info(dev, info);
break;
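The ixgbe_set_ringparam() hunks above replace separate max/min clamping with clamp_t() followed by ALIGN() on the requested descriptor counts, and swap both rings through one temporary buffer. A small sketch of the count handling, with locally re-implemented macros; the limits used here match ixgbe's usual 64/4096/8 values but should be read as illustrative.

```c
/* Sketch of the clamp + align handling for a requested Tx descriptor count. */
#include <stdio.h>
#include <stdint.h>

#define MIN_TXD	64
#define MAX_TXD	4096
#define REQ_TX_DESCRIPTOR_MULTIPLE 8

#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

static uint32_t new_tx_count(uint32_t requested)
{
	uint32_t count = CLAMP(requested, MIN_TXD, MAX_TXD);

	return ALIGN_UP(count, REQ_TX_DESCRIPTOR_MULTIPLE);
}

int main(void)
{
	printf("%u %u %u\n",
	       new_tx_count(100), new_tx_count(3), new_tx_count(9000));
	return 0;
}
```

Requests of 100, 3 and 9000 come out as 104, 64 and 4096: in range and a multiple of the descriptor granularity, which is exactly the invariant the ethtool path needs before allocating the temporary rings.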
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef14fdf3..252850d9a3e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbcedd548..8c74f739011d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
- else
- cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
+#ifdef CONFIG_IXGBE_DCA
+ /* initialize CPU for DCA */
+ q_vector->cpu = -1;
+
+#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
@@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
+	/* initialize ITR */
+ if (txr_count && !rxr_count) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+ }
+
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552e1f4a..20a5af6d87d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -62,11 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define MAJ 3
-#define MIN 9
-#define BUILD 15
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD) "-k"
+#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2012 Intel Corporation.";
@@ -335,11 +332,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ pr_info(" %s %s %s %s\n",
+ "Queue [NTU] [NTC] [bi(ntc)->dma ]",
+ "leng", "ntw", "timestamp");
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
@@ -355,13 +354,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Transmit Descriptor Formats
*
- * Advanced Transmit Descriptor
+ * 82598 Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
- * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * 82598 Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | NXTSEQ |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
+ *
+ * 82599+ Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
+ *
+ * 82599+ Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | RSV |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -369,40 +392,43 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] "
- "[PlPOIdStDDt Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("%s%s %s %s %s %s\n",
+ "T [desc] [address 63:0 ] ",
+ "[PlPOIdStDDt Ln] [bi->dma ] ",
+ "leng", "ntw", "timestamp", "bi->skb");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_desc = IXGBE_TX_DESC(tx_ring, i);
tx_buffer = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- tx_buffer->next_to_watch,
- (u64)tx_buffer->time_stamp,
- tx_buffer->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- pr_cont(" NTC/U\n");
- else if (i == tx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == tx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
-
- if (netif_msg_pktdata(adapter) &&
- tx_buffer->skb)
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- tx_buffer->skb->data,
+ if (dma_unmap_len(tx_buffer, len) > 0) {
+ pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
+ i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
- true);
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp,
+ tx_buffer->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ pr_cont(" NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ tx_buffer->skb->data,
+ dma_unmap_len(tx_buffer, len),
+ true);
+ }
}
}
@@ -422,7 +448,9 @@ rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
- /* Advanced Receive Descriptor (Read) Format
+ /* Receive Descriptor Formats
+ *
+ * 82598 Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
@@ -431,27 +459,52 @@ rx_ring_summary:
* +-----------------------------------------------------+
*
*
- * Advanced Receive Descriptor (Write-Back) Format
+ * 82598 Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 16 15 4 3 0
* +------------------------------------------------------+
- * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
- * | Checksum Ident | | | | Type | Type |
+ * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
+ * | Packet | IP | | | | Type | Type |
+ * | Checksum | Ident | | | | | |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
+ *
+ * 82599+ Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * 82599+ Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
+ * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
+ * |/ Flow Dir Flt ID | | | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
*/
+
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ pr_info("%s%s%s",
+ "R [desc] [ PktBuf A0] ",
+ "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
"<-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
+ pr_info("%s%s%s",
+ "RWB[desc] [PcsmIpSHl PtRs] ",
+ "[vl er S cks ln] ---------------- [bi->skb ] ",
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
@@ -646,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
u32 xoff[8] = {0};
+ u8 tc;
int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
@@ -659,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
/* update stats for each tc, only valid with PFC enabled */
for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ u32 pxoffrxc;
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
- hwstats->pxoffrxc[i] += xoff[i];
+ hwstats->pxoffrxc[i] += pxoffrxc;
+ /* Get the TC for given UP */
+ tc = netdev_get_prio_tc_map(adapter->netdev, i);
+ xoff[tc] += pxoffrxc;
}
/* disarm tx queues that have received xoff frames */
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- u8 tc = tx_ring->dcb_tc;
+ tc = tx_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
@@ -791,10 +850,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -967,7 +1024,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
* which will cause the DCA tag to be cleared.
*/
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
- IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1244,6 +1300,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
struct vlan_hdr *vlan;
/* l3 headers */
struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
} hdr;
__be16 protocol;
u8 nexthdr = 0; /* default to not TCP */
@@ -1281,20 +1338,30 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
if (hlen < sizeof(struct iphdr))
return hdr.network - data;
+ /* record next protocol if header is present */
+ if (!hdr.ipv4->frag_off)
+ nexthdr = hdr.ipv4->protocol;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
/* record next protocol */
- nexthdr = hdr.ipv4->protocol;
- hdr.network += hlen;
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
} else if (protocol == __constant_htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
- hdr.network += FCOE_HEADER_LEN;
+ hlen = FCOE_HEADER_LEN;
#endif
} else {
return hdr.network - data;
}
- /* finally sort out TCP */
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP/UDP */
if (nexthdr == IPPROTO_TCP) {
if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
return max_len;
@@ -1307,6 +1374,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return hdr.network - data;
hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
}
/*
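The ixgbe_get_headlen() hunk above extends the header walk to IPv6 and UDP and relocates the "advance to L4" step so all L3 branches share it. Below is a simplified, self-contained sketch of that walk using raw byte offsets instead of the kernel header structs; it skips the fragment and FCoE cases and is an assumption-laden illustration, not the driver routine.

```c
/* Simplified header-length walk: Ethernet -> IPv4/IPv6 -> TCP/UDP,
 * never advancing past max_len. */
#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN	14
#define IPPROTO_TCP_N	6
#define IPPROTO_UDP_N	17

static unsigned int get_headlen(const uint8_t *data, unsigned int max_len)
{
	unsigned int off = ETH_HLEN;
	uint8_t nexthdr = 0;
	uint16_t proto;

	if (max_len < ETH_HLEN)
		return max_len;

	proto = (uint16_t)((data[12] << 8) | data[13]);     /* EtherType */

	if (proto == 0x0800) {                              /* IPv4 */
		unsigned int hlen = (data[off] & 0x0f) * 4;

		if (off + 20 > max_len || hlen < 20)
			return off;
		nexthdr = data[off + 9];                    /* protocol field */
		off += hlen;
	} else if (proto == 0x86dd) {                       /* IPv6 */
		if (off + 40 > max_len)
			return max_len;
		nexthdr = data[off + 6];                    /* next header */
		off += 40;
	} else {
		return off;                                 /* unknown L3 */
	}

	if (nexthdr == IPPROTO_TCP_N && off + 20 <= max_len)
		off += ((data[off + 12] >> 4) & 0x0f) * 4;  /* TCP data offset */
	else if (nexthdr == IPPROTO_UDP_N && off + 8 <= max_len)
		off += 8;                                   /* fixed UDP header */

	return off < max_len ? off : max_len;
}

int main(void)
{
	uint8_t frame[128] = { 0 };

	frame[12] = 0x08; frame[13] = 0x00;   /* IPv4 */
	frame[14] = 0x45;                     /* version 4, IHL 5 (20 bytes) */
	frame[14 + 9] = IPPROTO_UDP_N;        /* protocol = UDP */
	printf("headlen = %u\n", get_headlen(frame, sizeof(frame)));
	return 0;
}
```

The sample frame reports 42 bytes (14 + 20 + 8), which is the amount the driver would pull into the skb header area for a UDP/IPv4 packet.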
@@ -1369,9 +1441,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1851,7 @@ dma_sync:
**/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
- int budget)
+ const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
@@ -1832,7 +1902,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
/* populate checksum, timestamp, VLAN, and protocol */
ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1934,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
- budget--;
- } while (likely(budget));
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1947,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
- return !!budget;
+ return (total_rx_packets < budget);
}
/**
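The clean_rx_irq hunks above change the budget accounting from decrementing the budget to counting processed packets and comparing against it, so the return value now reads as "still under budget". A tiny model of that control flow, with a counter standing in for the descriptor ring:

```c
/* Model of the NAPI budget accounting: count packets as they are processed
 * and report whether the budget was left unexhausted. */
#include <stdio.h>
#include <stdbool.h>

static bool clean_rx(int packets_pending, int budget, int *processed)
{
	int total_rx_packets = 0;

	while (packets_pending > 0 && total_rx_packets < budget) {
		packets_pending--;          /* "process" one descriptor */
		total_rx_packets++;         /* update budget accounting */
	}

	*processed = total_rx_packets;
	return total_rx_packets < budget;   /* true => ring fully cleaned */
}

int main(void)
{
	int done;

	printf("drained=%d processed=%d\n", clean_rx(10, 64, &done), done);
	printf("drained=%d processed=%d\n", clean_rx(100, 64, &done), done);
	return 0;
}
```

Counting up instead of down keeps the packet total available for the stats update at the end of the loop without a second variable, which is the readability win the patch is after.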
@@ -1914,20 +1983,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
- if (q_vector->tx.ring && !q_vector->rx.ring) {
- /* tx only vector */
- if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
- else
- q_vector->itr = adapter->tx_itr_setting;
- } else {
- /* rx or rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
- }
-
ixgbe_write_eitr(q_vector);
}
@@ -2324,10 +2379,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
-#ifdef CONFIG_IXGBE_PTP
if (adapter->hw.mac.type == ixgbe_mac_X540)
mask |= IXGBE_EIMS_TIMESYNC;
-#endif
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2446,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2639,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* would disable interrupts here but EIAM disabled it */
napi_schedule(&q_vector->napi);
@@ -2699,12 +2748,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- /* rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
-
ixgbe_write_eitr(q_vector);
ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3132,14 +3175,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
- /* If operating in IOV mode set RLPML for X540 */
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
- hw->mac.type == ixgbe_mac_X540) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
- ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
- }
-
if (hw->mac.type == ixgbe_mac_82598EB) {
/*
* enable cache line friendly hardware writes:
@@ -3211,7 +3246,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3270,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- /* enable Tx loopback for VF/PF communication */
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3297,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif /* IXGBE_FCOE */
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
- /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
- max_frame += VLAN_HLEN;
-
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4072,11 +4108,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
- /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.enable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* enable the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4225,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_reset(adapter);
}
/**
@@ -4393,11 +4429,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
ixgbe_clean_all_tx_rings(adapter);
@@ -4429,11 +4462,12 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned int rss;
+ u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
struct tc_configuration *tc;
@@ -4457,7 +4491,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if (fwsm & IXGBE_FWSM_TS_ENABLED)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
@@ -4533,7 +4569,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg = false;
+ hw->fc.disable_fc_autoneg =
+ (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
#ifdef CONFIG_PCI_IOV
/* assign number of SR-IOV VFs */
@@ -4828,14 +4865,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
/*
- * For 82599EB we cannot allow PF to change MTU greater than 1500
- * in SR-IOV mode as it may cause buffer overruns in guest VFs that
- * don't allocate and chain buffers correctly.
+ * For 82599EB we cannot allow legacy VFs to enable their receive
+ * paths when MTU greater than 1500 is configured. So display a
+ * warning that legacy VFs will be disabled.
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
- return -EINVAL;
+ e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -4901,6 +4938,8 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_set_queues;
+ ixgbe_ptp_init(adapter);
+
ixgbe_up_complete(adapter);
return 0;
@@ -4932,6 +4971,8 @@ static int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ ixgbe_ptp_stop(adapter);
+
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
@@ -5022,14 +5063,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
ixgbe_set_rx_mode(netdev);
- /*
- * enable the optics for both mult-speed fiber and
- * 82599 SFP+ fiber as we can WoL.
- */
- if (hw->mac.ops.enable_tx_laser &&
- (hw->phy.multispeed_fiber ||
- (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
- hw->mac.type == ixgbe_mac_82599EB)))
+ /* enable the optics for 82599 SFP+ fiber as we can WoL */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
/* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5477,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
adapter->link_speed = link_speed;
}
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ struct net_device *netdev = adapter->netdev;
+ struct dcb_app app = {
+ .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+ .protocol = 0,
+ };
+ u8 up = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+ up = dcb_ieee_getapp_mask(netdev, &app);
+
+ adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -5482,9 +5534,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5552,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* update the default user priority for VFs */
+ ixgbe_update_default_up(adapter);
+
/* ping all the active vfs to let them know link has changed */
ixgbe_ping_all_vfs(adapter);
}
@@ -5526,9 +5580,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
@@ -5833,9 +5886,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_overflow_check(adapter);
-#endif
ixgbe_service_event_complete(adapter);
}
@@ -5988,10 +6039,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-#ifdef CONFIG_IXGBE_PTP
if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
@@ -6393,12 +6442,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
}
-#endif
#ifdef CONFIG_PCI_IOV
/*
@@ -6485,6 +6532,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6595,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
case SIOCSHWTSTAMP:
return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -6910,13 +6956,16 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return -EOPNOTSUPP;
- if (ndm->ndm_state & NUD_PERMANENT) {
+	/* Hardware does not support aging addresses, so if an
+	 * ndm_state is given, only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n",
ixgbe_driver_name);
return -EINVAL;
}
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7023,61 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
return idx;
}
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+ u32 reg = 0;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ reg = 0;
+ adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
+ } else if (mode == BRIDGE_MODE_VEB) {
+ reg = IXGBE_PFDTXGSWC_VT_LBEN;
+ adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+ } else
+ return -EINVAL;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+ e_info(drv, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
+
+ return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return 0;
+
+ if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -7013,6 +7117,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_fdb_del = ixgbe_ndo_fdb_del,
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
+ .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
+ .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
/**
@@ -7042,6 +7148,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
+ case IXGBE_SUBDEV_ID_82599_ECNA_DP:
is_wol_supported = 1;
break;
}
@@ -7079,8 +7186,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit ixgbe_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbe_adapter *adapter = NULL;
@@ -7340,7 +7446,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
- if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
@@ -7364,10 +7470,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
/* save off EEPROM version number */
hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7522,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
/* carrier off reporting is important to ethtool even BEFORE open */
@@ -7493,7 +7592,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit ixgbe_remove(struct pci_dev *pdev)
+static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -7505,9 +7604,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_stop(adapter);
-#endif
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7736,7 +7832,7 @@ static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
- .remove = __devexit_p(ixgbe_remove),
+ .remove = ixgbe_remove,
#ifdef CONFIG_PM
.suspend = ixgbe_suspend,
.resume = ixgbe_resume,
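
The ndo_bridge_setlink/ndo_bridge_getlink handlers added above let the PF toggle the embedded switch between VEB (VF-to-VF traffic looped back inside the adapter, the default chosen in ixgbe_enable_sriov) and VEPA (all traffic forced out to the adjacent switch). Userspace would typically request this with iproute2's bridge tool, e.g. bridge link set dev eth0 hwmode vepa, assuming an iproute2 build recent enough to carry hwmode. On the hardware side the choice reduces to one bit in PFDTXGSWC; the following is a minimal standalone sketch of that mapping, using a local stand-in constant and a hypothetical helper rather than the driver's definitions:

/* Illustrative sketch only: maps a requested bridge mode to the
 * loopback-enable bit the handler above writes into IXGBE_PFDTXGSWC.
 * PFDTXGSWC_VT_LBEN is a local stand-in for the driver's constant.
 */
#include <stdio.h>

#define PFDTXGSWC_VT_LBEN 0x1

enum bridge_mode { MODE_VEB, MODE_VEPA };

static unsigned int pfdtxgswc_for_mode(enum bridge_mode mode)
{
	/* VEB: loop VF-to-VF traffic back inside the adapter.
	 * VEPA: force all traffic out to the external switch.
	 */
	return (mode == MODE_VEB) ? PFDTXGSWC_VT_LBEN : 0;
}

int main(void)
{
	printf("VEB  -> PFDTXGSWC = 0x%x\n", pfdtxgswc_for_mode(MODE_VEB));
	printf("VEPA -> PFDTXGSWC = 0x%x\n", pfdtxgswc_for_mode(MODE_VEPA));
	return 0;
}
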
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd961075..42dd65e6ac97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
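
The opcodes above define a small version-negotiation protocol between VF and PF: the VF places IXGBE_VF_API_NEGOTIATE in word 0 and the requested ixgbe_pfvf_api_rev in word 1, and once API 1.1 is agreed it may send IXGBE_VF_GET_QUEUES, whose reply carries queue counts and the default queue at the word indices defined here. The sketch below only lays out those message words in a plain C program; the constants are copied from this hunk, the reply values are invented, and the real mailbox transport plus the ACK/NACK/CTS bits the PF ORs into word 0 are omitted:

#include <stdio.h>
#include <stdint.h>

/* copied from the hunk above */
#define IXGBE_VF_API_NEGOTIATE 0x08
#define IXGBE_VF_GET_QUEUES    0x09
#define IXGBE_VF_TX_QUEUES     1
#define IXGBE_VF_RX_QUEUES     2
#define IXGBE_VF_TRANS_VLAN    3
#define IXGBE_VF_DEF_QUEUE     4

enum ixgbe_pfvf_api_rev {
	ixgbe_mbox_api_10,
	ixgbe_mbox_api_20,
	ixgbe_mbox_api_11,
	ixgbe_mbox_api_unknown,
};

int main(void)
{
	uint32_t msg[5] = { 0 };
	uint32_t reply[5] = { IXGBE_VF_GET_QUEUES, 0, 0, 0, 0 };

	/* VF -> PF: request API 1.1 (word 0 = opcode, word 1 = version) */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = ixgbe_mbox_api_11;
	printf("negotiate: opcode=0x%x version=%u\n",
	       (unsigned)msg[0], (unsigned)msg[1]);

	/* PF -> VF: pretend reply to IXGBE_VF_GET_QUEUES (values made up) */
	reply[IXGBE_VF_TX_QUEUES] = 4;
	reply[IXGBE_VF_RX_QUEUES] = 4;
	reply[IXGBE_VF_TRANS_VLAN] = 1;	/* VLAN tag stripping needed */
	reply[IXGBE_VF_DEF_QUEUE] = 0;
	printf("queues: tx=%u rx=%u vlan=%u default=%u\n",
	       (unsigned)reply[IXGBE_VF_TX_QUEUES],
	       (unsigned)reply[IXGBE_VF_RX_QUEUES],
	       (unsigned)reply[IXGBE_VF_TRANS_VLAN],
	       (unsigned)reply[IXGBE_VF_DEF_QUEUE]);
	return 0;
}
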
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d9291316ee9f..bb9256a1b0a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
struct ptp_clock_event event;
+ event.type = PTP_CLOCK_PPS;
+
+ /* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (e.g. debugfs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!adapter->ptp_clock)
+ return;
+
switch (hw->mac.type) {
case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+ if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
(elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
+ if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
/* Check if we have a valid timestamp and make sure the skb should
* have been timestamped */
- if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
- !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
/*
@@ -622,8 +633,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_mtrl = 0;
- bool is_l4 = false;
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
@@ -646,16 +656,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
- is_l4 = true;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- is_l4 = true;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -668,7 +677,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -693,42 +701,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/* Store filter value for later use */
adapter->rx_hwtstamp_filter = config.rx_filter;
- /* define ethertype filter for timestamped packets */
+ /* define ethertype filter for timestamping L2 packets */
if (is_l2)
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
(IXGBE_ETQF_FILTER_EN | /* enable filter */
IXGBE_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
-
-#define PTP_PORT 319
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
- | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
- | IXGBE_FTQF_QUEUE_ENABLE);
-
- ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
- & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
- & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
- << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
- (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
- IXGBE_IMIR_SIZE_BP_82599));
-
- /* enable port check */
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
- (htons(PTP_PORT) |
- htons(PTP_PORT) << 16));
-
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
-
- tsync_rx_mtrl |= PTP_PORT << 16;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
- }
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
@@ -759,58 +740,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
* @adapter: pointer to the adapter structure
*
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
*/
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
- u32 timinca = 0;
u32 shift = 0;
- u32 cycle_speed;
unsigned long flags;
/**
- * Determine what speed we need to set the cyclecounter
- * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
- * unknown speeds as 10Gb. (Hence why we can't just copy the
- * link_speed.
- */
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- case IXGBE_LINK_SPEED_1GB_FULL:
- case IXGBE_LINK_SPEED_10GB_FULL:
- cycle_speed = adapter->link_speed;
- break;
- default:
- /* cycle speed should be 10Gb when there is no link */
- cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- }
-
- /*
- * grab the current TIMINCA value from the register so that it can be
- * double checked. If the register value has been cleared, it must be
- * reset to the correct value for generating a cyclecounter. If
- * TIMINCA is zero, the SYSTIME registers do not increment at all.
- */
- timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
- /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
- if (adapter->cycle_speed == cycle_speed && timinca)
- return;
-
- /**
* Scale the NIC cycle counter by a large factor so that
* relatively small corrections to the frequency can be added
* or subtracted. The drawbacks of a large factor include
@@ -819,8 +762,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* to nanoseconds using only a multiplier and a right-shift,
* and (c) the value must fit within the timinca register space
* => math based on internal DMA clock rate and available bits
+ *
+	 * Note that when there is no link, the internal DMA clock is the same
+	 * as when the link speed is 10Gb. Set the registers correctly even
+	 * when the link is down, to preserve the clock setting
*/
- switch (cycle_speed) {
+ switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +777,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
+ default:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
break;
@@ -857,18 +805,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
return;
}
- /* reset the system time registers */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
- IXGBE_WRITE_FLUSH(hw);
-
- /* store the new cycle speed */
- adapter->cycle_speed = cycle_speed;
-
+ /* update the base incval used to calculate frequency adjustment */
ACCESS_ONCE(adapter->base_incval) = incval;
smp_mb();
- /* grab the ptp lock */
+ /* need lock to prevent incorrect read while modifying cyclecounter */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +818,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
adapter->cc.shift = shift;
adapter->cc.mult = 1;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* set SYSTIME registers to 0 just in case */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ptp_start_cyclecounter(adapter);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
/* reset the ns time counter */
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
@@ -904,7 +870,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +884,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +908,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->tmreg_lock);
- ixgbe_ptp_start_cyclecounter(adapter);
-
- /* (Re)start the overflow check */
- adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +916,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ ixgbe_ptp_reset(adapter);
+
+ /* set the flag that PTP has been enabled */
+ adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+
return;
}
@@ -967,7 +933,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
/* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+ adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
IXGBE_FLAG2_PTP_PPS_ENABLED);
ixgbe_ptp_setup_sdp(adapter);
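
The cyclecounter rework above picks a per-link-speed increment so that raw SYSTIME ticks can be converted to nanoseconds with just a multiply and a right shift, while leaving enough fixed-point headroom for fine frequency corrections from the PTP stack. A rough standalone illustration of that conversion follows; the mult/shift numbers are invented for the example and are not the driver's IXGBE_INCVAL_*/IXGBE_INCVAL_SHIFT_* values:

#include <stdio.h>
#include <stdint.h>

/* Example only: convert raw cycle counts to nanoseconds the way a
 * cyclecounter does, ns = (cycles * mult) >> shift.  Values are made up.
 */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 24;
	uint32_t mult = 1u << shift;	/* nominal: 1 ns per cycle */
	uint64_t cycles = 1000000;

	printf("nominal : %llu ns\n",
	       (unsigned long long)cycles_to_ns(cycles, mult, shift));

	/* a small tweak to mult changes the effective frequency finely,
	 * which is why such a large scale factor is used (here +100 ppm)
	 */
	printf("+100 ppm: %llu ns\n",
	       (unsigned long long)cycles_to_ns(cycles, mult + mult / 10000,
						shift));
	return 0;
}
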
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf64d96..85cddac673ef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
}
}
+ /* Initialize default switching mode VEB */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
@@ -150,16 +154,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
-#ifdef IXGBE_FCOE
- /*
- * When SR-IOV is enabled 82599 cannot support jumbo frames
- * so we must disable FCoE because we cannot support FCoE MTU.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
- IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +259,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf)
+ u32 *msgbuf, u32 vf)
{
+ int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
int i;
@@ -353,31 +350,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int new_mtu = msgbuf[1];
+ int max_frame = msgbuf[1];
u32 max_frs;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- /* Only X540 supports jumbo frames in IOV mode */
- if (adapter->hw.mac.type != ixgbe_mac_X540)
- return;
+ /*
+ * For 82599EB we have to keep all PFs and VFs operating with
+ * the same max_frame value in order to avoid sending an oversize
+ * frame to a VF. In order to guarantee this is handled correctly
+ * for all cases we have several special exceptions to take into
+ * account before we can enable the VF for receive
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+ u32 reg_offset, vf_shift, vfre;
+ s32 err = 0;
+
+#ifdef CONFIG_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_11:
+ /*
+ * Version 1.1 supports jumbo frames on VFs if PF has
+ * jumbo frames enabled which means legacy VFs are
+ * disabled
+ */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ break;
+ default:
+ /*
+ * If the PF or VF are running w/ jumbo frames enabled
+ * we need to shut down the VF Rx path as we cannot
+ * support jumbo frames on legacy VFs
+ */
+ if ((pf_max_frame > ETH_FRAME_LEN) ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ err = -EINVAL;
+ break;
+ }
+
+ /* determine VF receive enable location */
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable or disable receive depending on error */
+ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ if (err)
+ vfre &= ~(1 << vf_shift);
+ else
+ vfre |= 1 << vf_shift;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+ if (err) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return err;
+ }
+ }
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
- e_err(drv, "VF mtu %d out of range\n", new_mtu);
- return;
+ if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
}
- max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
- IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
- if (max_frs < new_mtu) {
- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ /* pull current max frame size from hardware */
+ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ max_frs &= IXGBE_MHADD_MFS_MASK;
+ max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+ if (max_frs < max_frame) {
+ max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
- e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+ e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+ return 0;
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -392,35 +447,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
- if (vid)
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
- (vid | IXGBE_VMVIR_VLANA_DEFAULT));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
}
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* add PF assigned VLAN or VLAN 0 */
+ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
/* reset offloads to defaults */
- if (adapter->vfinfo[vf].pf_vlan) {
- ixgbe_set_vf_vlan(adapter, true,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter,
- (adapter->vfinfo[vf].pf_vlan |
- (adapter->vfinfo[vf].pf_qos <<
- VLAN_PRIO_SHIFT)), vf);
- ixgbe_set_vmolr(hw, vf, false);
+ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ ixgbe_clear_vmvir(adapter, vf);
} else {
- ixgbe_set_vf_vlan(adapter, true, 0, vf);
- ixgbe_set_vmvir(adapter, 0, vf);
- ixgbe_set_vmolr(hw, vf, true);
+ if (vfinfo->pf_qos || !num_tcs)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+ if (vfinfo->spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
}
/* reset multicast table array for vf */
@@ -430,6 +497,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_rx_mode(adapter->netdev);
hw->mac.ops.clear_rar(hw, rar_entry);
+
+ /* reset VF api back to unknown */
+ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +591,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u32 reg, msgbuf[4];
u32 reg_offset, vf_shift;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+ /* reset the filters for the device */
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* set vf mac address */
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
+ /*
+ * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef CONFIG_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg &= ~(1 << vf_shift);
+ }
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ /* enable VF mailbox for further messages */
+ adapter->vfinfo[vf].clear_to_send = true;
+
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
- ixgbe_vf_reset_event(adapter, vf);
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ if (adapter->vfinfo[vf].pf_set_mac &&
+ memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+ ETH_ALEN)) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set MAC address\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ int err;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->vfinfo[vf].pf_vlan || tcs) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ if (add)
+ adapter->vfinfo[vf].vlan_count++;
+ else if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+
+ err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+ return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ int err;
+
+ if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+ e_warn(drv,
+ "VF %d requested MACVLAN filter but is administratively denied\n",
+ vf);
+ return -1;
+ }
+
+	/* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ }
+
+ err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+ if (err == -ENOSPC)
+ e_warn(drv,
+ "VF %d has requested a MACVLAN filter but there is no space for it\n",
+ vf);
+
+ return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case ixgbe_mbox_api_10:
+ case ixgbe_mbox_api_11:
+ adapter->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ break;
+ }
+
+ e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+ return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ /* verify the PF is supporting the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_20:
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return -1;
+ }
+
+ /* only allow 1 Tx queue for bandwidth limiting */
+ msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ /* if TCs > 1 determine which TC belongs to default user priority */
+ if (num_tcs > 1)
+ default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+ /* notify VF of need for VLAN tag stripping, and correct queue */
+ if (num_tcs)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+ else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
}
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +814,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
- int entries;
- u16 *hash_list;
- int add, vid, index;
- u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +829,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
+ if (msgbuf[0] == IXGBE_VF_RESET)
+ return ixgbe_vf_reset_msg(adapter, vf);
+
/*
* until the vf completes a virtual function reset it should not be
* allowed to start any configuration.
*/
-
- if (msgbuf[0] == IXGBE_VF_RESET) {
- unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- new_mac = (u8 *)(&msgbuf[1]);
- e_info(probe, "VF Reset msg received from vf %d\n", vf);
- adapter->vfinfo[vf].clear_to_send = false;
- ixgbe_vf_reset_msg(adapter, vf);
- adapter->vfinfo[vf].clear_to_send = true;
-
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac)
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
- else
- ixgbe_set_vf_mac(adapter,
- vf, adapter->vfinfo[vf].vf_mac_addresses);
-
- /* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, ETH_ALEN);
- /*
- * Piggyback the multicast filter type so VF can compute the
- * correct vectors
- */
- msgbuf[3] = hw->mac.mc_filter_type;
- ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
- return retval;
- }
-
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +844,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
- new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac) {
- ixgbe_set_vf_mac(adapter, vf, new_mac);
- } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
- new_mac, ETH_ALEN)) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set MAC address\nReload "
- "the VF driver to resume operations\n", vf);
- retval = -1;
- }
+ retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MULTICAST:
- entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- hash_list = (u16 *)&msgbuf[1];
- retval = ixgbe_set_vf_multicasts(adapter, entries,
- hash_list, vf);
- break;
- case IXGBE_VF_SET_LPE:
- ixgbe_set_vf_lpe(adapter, msgbuf);
+ retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_VLAN:
- add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- if (adapter->vfinfo[vf].pf_vlan) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set VLAN configuration\n"
- "Reload the VF driver to resume operations\n",
- vf);
- retval = -1;
- } else {
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
- retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- }
+ retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MACVLAN:
- index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
- if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
- e_warn(drv, "VF %d requested MACVLAN filter but is "
- "administratively denied\n", vf);
- retval = -1;
- break;
- }
- /*
- * If the VF is allowed to set MAC filters then turn off
- * anti-spoofing to avoid false positives. An index
- * greater than 0 will indicate the VF is setting a
- * macvlan MAC filter.
- */
- if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
- retval = ixgbe_set_vf_macvlan(adapter, vf, index,
- (unsigned char *)(&msgbuf[1]));
- if (retval == -ENOSPC)
- e_warn(drv, "VF %d has requested a MACVLAN filter "
- "but there is no space for it\n", vf);
+ retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_API_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_GET_QUEUES:
+ retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +878,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
return retval;
}
@@ -783,7 +969,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
- ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
if (adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +989,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
} else {
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
if (adapter->vfinfo[vf].vlan_count)
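
The reworked ixgbe_set_vf_lpe() above encodes one rule for 82599: a VF keeps its receive path only if it negotiated mailbox API 1.1 while the PF runs jumbo frames, or if neither PF nor VF exceeds a standard frame. A compact restatement of that rule as a standalone program, with the standard sizes ETH_FRAME_LEN = 1514 and ETH_FCS_LEN = 4 redefined locally for the example:

#include <stdio.h>
#include <stdbool.h>

#define ETH_FRAME_LEN 1514
#define ETH_FCS_LEN   4

/* Sketch of the 82599 rule from ixgbe_set_vf_lpe(): returns true if the
 * VF may keep its Rx path enabled for the given frame sizes and API.
 */
static bool vf_rx_allowed(int pf_max_frame, int vf_max_frame, bool api_11)
{
	if (api_11 && pf_max_frame > ETH_FRAME_LEN)
		return true;	/* 1.1 VFs may follow the PF into jumbo mode */

	return pf_max_frame <= ETH_FRAME_LEN &&
	       vf_max_frame <= ETH_FRAME_LEN + ETH_FCS_LEN;
}

int main(void)
{
	printf("PF 1500 MTU, legacy VF 1518 : %d\n",
	       vf_rx_allowed(1514, 1518, false));
	printf("PF 9000 MTU, legacy VF 1518 : %d\n",
	       vf_rx_allowed(9014, 1518, false));
	printf("PF 9000 MTU, api 1.1 VF 9018: %d\n",
	       vf_rx_allowed(9014, 9018, true));
	return 0;
}
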
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f3368092..9cd8a13711d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -1833,15 +1834,6 @@ enum {
/* Number of 100 microseconds we wait for PCI Express master disable */
#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define IXGBE_IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && \
- (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
#define IXGBE_RAH_VIND_SHIFT 18
@@ -1962,6 +1954,8 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+#define IXGBE_FWSM_TS_ENABLED 0x1
+
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE 0x00000001
#define IXGBE_QDE_IDX_MASK 0x00007F00
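
The two macros removed above duplicated checks that generic kernel helpers already provide (is_multicast_ether_addr() and is_broadcast_ether_addr() in <linux/etherdevice.h>), in line with the switch to is_valid_ether_addr() elsewhere in this series; note the old IXGBE_IS_BROADCAST only compared the first two octets. A standalone restatement of the full checks, for reference:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Restatement of the generic checks outside the driver. */
static bool addr_is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;	/* I/G bit of the first octet */
}

static bool addr_is_broadcast(const uint8_t *addr)
{
	for (int i = 0; i < 6; i++)
		if (addr[i] != 0xff)
			return false;
	return true;
}

int main(void)
{
	uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

	printf("bcast: mcast=%d bcast=%d\n",
	       addr_is_multicast(bcast), addr_is_broadcast(bcast));
	printf("ucast: mcast=%d bcast=%d\n",
	       addr_is_multicast(ucast), addr_is_broadcast(ucast));
	return 0;
}
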
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de4da5219b71..c73b92993391 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -152,7 +152,7 @@ mac_reset_top:
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf5c09d..3147795bd135 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_VF_IRQ_CLEAR_MASK 7
-#define IXGBE_VF_MAX_TX_QUEUES 1
-#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c285685..fc0af9a3bb35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
struct device *dev;
- struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
u64 total_bytes;
u64 total_packets;
struct u64_stats_sync syncp;
+ u64 hw_csum_rx_error;
+ u64 hw_csum_rx_good;
u16 head;
u16 tail;
@@ -89,8 +90,8 @@ struct ixgbevf_ring {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define MAX_RX_QUEUES 1
-#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -101,10 +102,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_3K 3072
-#define IXGBEVF_RXBUFFER_7K 7168
-#define IXGBEVF_RXBUFFER_15K 15360
-#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define IXGBEVF_RXBUFFER_2K 2048
+#define IXGBEVF_RXBUFFER_4K 4096
+#define IXGBEVF_RXBUFFER_8K 8192
+#define IXGBEVF_RXBUFFER_10K 10240
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -229,6 +230,7 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d..257357ae66c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
* @msix_vector: the vector to map to the corresponding queue
- *
*/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- napi_gro_receive(&q_vector->napi, skb);
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(&q_vector->napi, skb);
+ else
+ netif_rx(skb);
}
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
* @skb: skb currently being received and modified
**/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
@@ -317,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
+ ring->hw_csum_rx_good++;
}
/**
@@ -337,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
- struct sk_buff *skb;
unsigned int i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- skb = bi->skb;
- if (!skb) {
+
+ if (!bi->skb) {
+ struct sk_buff *skb;
+
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
@@ -353,11 +356,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
goto no_buffers;
}
bi->skb = skb;
- }
- if (!bi->dma) {
+
bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ dev_kfree_skb(skb);
+ bi->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
@@ -370,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
-
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
@@ -454,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+ ixgbevf_rx_checksum(rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* Workaround hardware that can't do proper VEPA multicast
+ * source pruning.
+ */
+ if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+ !(compare_ether_addr(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source))) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc:
@@ -533,9 +550,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
+ adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
+ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
@@ -743,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
return IRQ_HANDLED;
}
-
/**
* ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
* @irq: unused
@@ -1065,20 +1083,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
max_frame += VLAN_HLEN;
/*
- * Make best use of allocation by using all but 1K of a
- * power of 2 allocation that will be used for skb->head.
+ * Allocate buffer sizes that fit well into 32K and
+ * take into account max frame size of 9.5K
*/
if ((hw->mac.type == ixgbe_mac_X540_vf) &&
(max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else if (max_frame <= IXGBEVF_RXBUFFER_3K)
- rx_buf_len = IXGBEVF_RXBUFFER_3K;
- else if (max_frame <= IXGBEVF_RXBUFFER_7K)
- rx_buf_len = IXGBEVF_RXBUFFER_7K;
- else if (max_frame <= IXGBEVF_RXBUFFER_15K)
- rx_buf_len = IXGBEVF_RXBUFFER_15K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+ rx_buf_len = IXGBEVF_RXBUFFER_2K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+ rx_buf_len = IXGBEVF_RXBUFFER_4K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+ rx_buf_len = IXGBEVF_RXBUFFER_8K;
else
- rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+ rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
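A rough worked example of the bucket selection above: with the usual 14-byte Ethernet header, 4-byte FCS and 4-byte VLAN tag, a default 1500-byte MTU gives max_frame = 1522, which fits the IXGBEVF_RXBUFFER_2K bucket (or MAXIMUM_ETHERNET_VLAN_SIZE on an X540 VF), while a 9000-byte jumbo MTU gives max_frame = 9022 and falls through to IXGBEVF_RXBUFFER_10K.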
@@ -1128,15 +1146,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err;
- if (!hw->mac.ops.set_vfta)
- return -EOPNOTSUPP;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* add VID to filter table */
err = hw->mac.ops.set_vfta(hw, vid, 0, true);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
/* translate error return types so error makes sense */
if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1171,12 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err = -EOPNOTSUPP;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* remove VID from filter table */
- if (hw->mac.ops.set_vfta)
- err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+ err = hw->mac.ops.set_vfta(hw, vid, 0, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(vid, adapter->active_vlans);
@@ -1206,27 +1220,27 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
}
/**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
* @netdev: network interface device structure
*
* The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
**/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* reprogram multicast list */
- if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, netdev);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
"not set within the polling period\n", rxr);
}
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+ adapter->rx_ring[rxr].count - 1);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1349,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_10,
+ int api[] = { ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
int err = 0, idx = 0;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1363,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1404,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar) {
- if (is_valid_ether_addr(hw->mac.addr))
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- else
- hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- }
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1426,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+
+ /* allocate resources on the ring */
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ return err;
+ }
+ }
+
+ /* free the existing rings and queues */
+ ixgbevf_free_all_rx_resources(adapter);
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ /* reset ring to vector mapping */
+ ixgbevf_reset_q_vectors(adapter);
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ return 0;
+}
+
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
ixgbevf_negotiate_api(adapter);
+ ixgbevf_reset_queues(adapter);
+
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1497,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
return;
/* Free all the Tx ring sk_buffs */
-
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
- /*
- * Check if PF is up before re-init. If not then skip until
- * later when the PF is up and ready to service requests from
- * the VF via mailbox. If the VF is up and running then the
- * watchdog task will continue to schedule reset tasks until
- * the PF is up and running.
- */
ixgbevf_down(adapter);
ixgbevf_up(adapter);
@@ -1611,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- spin_lock(&adapter->mbx_lock);
-
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
- spin_unlock(&adapter->mbx_lock);
-
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1628,10 +1704,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
- int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
{
- int err, vector_threshold;
+ int err = 0;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1724,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
- if (!err) /* Success in acquiring all requested vectors. */
+ if (!err || err < 0) /* Success or a nasty failure. */
break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
- if (vectors < vector_threshold) {
- /* Can't allocate enough MSI-X interrupts? Oh well.
- * This just means we'll go with either a single MSI
- * vector or fall back to legacy interrupts.
- */
- hw_dbg(&adapter->hw,
- "Unable to allocate MSI-X interrupts\n");
+ if (vectors < vector_threshold)
+ err = -ENOMEM;
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
@@ -1672,6 +1746,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
*/
adapter->num_msix_vectors = vectors;
}
+
+ return err;
}
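To make the retry loop above concrete: pci_enable_msix() returns 0 on success, a negative errno on a hard failure, or, when the full request cannot be granted, the number of vectors that could have been allocated. Asking for 4 vectors on a system that can only spare 3 therefore returns 3; the loop retries with 3 and succeeds as long as that is still at least vector_threshold (2). A final count below the threshold is now reported to the caller as -ENOMEM instead of being silently swallowed.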
/**
@@ -1717,6 +1793,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
+ /* reg_idx may be remapped later by DCB config */
adapter->tx_ring[i].reg_idx = i;
adapter->tx_ring[i].dev = &adapter->pdev->dev;
adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1851,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ if (err)
+ goto out;
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
@@ -1834,18 +1913,13 @@ err_out:
**/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors;
- int napi_vectors;
-
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
+ int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
- if (q_idx < napi_vectors)
- netif_napi_del(&q_vector->napi);
+ netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
}
@@ -1935,7 +2009,7 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -1950,8 +2024,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
hw->mbx.ops.init_params(hw);
- hw->mac.max_tx_queues = MAX_TX_QUEUES;
- hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+ /* assume legacy case in which PF would only give VF 2 queues */
+ hw->mac.max_tx_queues = 2;
+ hw->mac.max_rx_queues = 2;
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -1966,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
goto out;
}
memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
- adapter->netdev->addr_len);
+ adapter->netdev->addr_len);
}
/* lock to protect mailbox accesses */
@@ -2016,6 +2093,7 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
@@ -2029,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfgotc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
adapter->stats.vfmprc);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->hw_csum_rx_error +=
+ adapter->rx_ring[i].hw_csum_rx_error;
+ adapter->hw_csum_rx_good +=
+ adapter->rx_ring[i].hw_csum_rx_good;
+ adapter->rx_ring[i].hw_csum_rx_error = 0;
+ adapter->rx_ring[i].hw_csum_rx_good = 0;
+ }
}
/**
@@ -2103,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = adapter->link_speed;
bool link_up = adapter->link_up;
+ s32 need_reset;
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2110,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* Always check the link on the watchdog because we have
* no LSC interrupt
*/
- if (hw->mac.ops.check_link) {
- s32 need_reset;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- need_reset = hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
+ need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
- if (need_reset) {
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- schedule_work(&adapter->reset_task);
- goto pf_has_reset;
- }
- } else {
- /* always assume link is up, if no check link
- * function */
- link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- link_up = true;
+ if (need_reset) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
}
adapter->link_up = link_up;
adapter->link_speed = link_speed;
@@ -2377,6 +2455,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
&adapter->rx_ring[i]);
}
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+ }
+
+ /* free the existing ring and queues */
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ return 0;
+}
+
/**
* ixgbevf_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -2413,6 +2548,11 @@ static int ixgbevf_open(struct net_device *netdev)
ixgbevf_negotiate_api(adapter);
+ /* setup queue reg_idx and Rx queue count */
+ err = ixgbevf_setup_queues(adapter);
+ if (err)
+ goto err_setup_queues;
+
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2451,6 +2591,7 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2562,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
-
-
-
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2678,10 +2816,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma =
skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
- tx_buffer_info->mapped_as_page = true;
if (dma_mapping_error(tx_ring->dev,
tx_buffer_info->dma))
goto dma_error;
+ tx_buffer_info->mapped_as_page = true;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2754,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
}
/*
@@ -2823,6 +2960,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+ if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
tx_ring = &adapter->tx_ring[r_idx];
@@ -2902,12 +3044,11 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar)
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
return 0;
}
@@ -2925,8 +3066,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
- if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_11:
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ default:
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ }
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > max_possible_frame))
@@ -3094,8 +3242,7 @@ static void ixgbevf_assign_netdev_ops(struct net_device *dev)
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit ixgbevf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbevf_adapter *adapter = NULL;
@@ -3223,10 +3370,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- /* pick up the PCI bus settings for reporting later */
- if (hw->mac.ops.get_bus_info)
- hw->mac.ops.get_bus_info(hw);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -3270,7 +3413,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+static void ixgbevf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3384,7 +3527,7 @@ static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
- .remove = __devexit_p(ixgbevf_remove),
+ .remove = ixgbevf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = ixgbevf_suspend,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86f337f..0bc30058ff82 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
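As a rough sketch of how a VF driver could pick apart a GET_QUEUE reply using the indices above (illustrative only; the authoritative parsing is ixgbevf_get_queues() in vf.c below, and the helper name here is hypothetical):

static void example_parse_get_queues_reply(const u32 msg[5])
{
	/* msg[0] carries IXGBE_VF_GET_QUEUE plus the mailbox ACK/NACK bits */
	unsigned int max_tx  = msg[IXGBE_VF_TX_QUEUES];  /* word 1 */
	unsigned int max_rx  = msg[IXGBE_VF_RX_QUEUES];  /* word 2 */
	unsigned int num_tcs = msg[IXGBE_VF_TRANS_VLAN]; /* word 3 */
	unsigned int def_q   = msg[IXGBE_VF_DEF_QUEUE];  /* word 4 */

	pr_info("VF queues: %u Tx, %u Rx, %u TCs, default queue %u\n",
		max_tx, max_rx, num_tcs, def_q);
}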
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e6fcc8..0c94557b53df 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
@@ -513,6 +516,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUE;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+	 * if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
+
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a584d8c..7b1f502d1716 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@ struct ixgbevf_info {
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 60ac46f4ac08..0519afa413d2 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2965,7 +2965,7 @@ static const struct net_device_ops jme_netdev_ops = {
#endif
};
-static int __devinit
+static int
jme_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3203,7 +3203,7 @@ err_out:
return rc;
}
-static void __devexit
+static void
jme_remove_one(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3318,7 +3318,7 @@ static struct pci_driver jme_driver = {
.name = DRV_NAME,
.id_table = jme_pci_tbl,
.probe = jme_init_one,
- .remove = __devexit_p(jme_remove_one),
+ .remove = jme_remove_one,
.shutdown = jme_shutdown,
.driver.pm = JME_PM_OPS,
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 003c5bc7189f..c124e67a1a1c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -774,7 +774,7 @@ err_out:
return err;
}
-static int __devexit
+static int
ltq_etop_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -789,7 +789,7 @@ ltq_etop_remove(struct platform_device *pdev)
}
static struct platform_driver ltq_mii_driver = {
- .remove = __devexit_p(ltq_etop_remove),
+ .remove = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 0029934748bc..edfba9370922 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -31,6 +31,30 @@ config MV643XX_ETH
Some boards that use the Discovery chipset are the Momenco
Ocelot C and Jaguar ATX and Pegasos II.
+config MVMDIO
+ tristate "Marvell MDIO interface support"
+ ---help---
+ This driver supports the MDIO interface found in the network
+ interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+ Dove, Armada 370 and Armada XP).
+
+ For now, this driver is only needed for the MVNETA driver
+ (used on Armada 370 and XP), but it could be used in the
+ future by the MV643XX_ETH driver.
+
+config MVNETA
+ tristate "Marvell Armada 370/XP network interface support"
+ depends on MACH_ARMADA_370_XP
+ select PHYLIB
+ select MVMDIO
+ ---help---
+ This driver supports the network interface units in the
+ Marvell ARMADA XP and ARMADA 370 SoC family.
+
+ Note that this driver is distinct from the mv643xx_eth
+ driver, which should be used for the older Marvell SoCs
+ (Dove, Orion, Discovery, Kirkwood).
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 57e3234a37ba..7f63b4aac434 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -3,6 +3,8 @@
#
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 000000000000..74f1c157a480
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,228 @@
+/*
+ * Driver for the MDIO interface of Marvell network interfaces.
+ *
+ * Since the MDIO interface of Marvell network interfaces is shared
+ * between all network interfaces, a single driver makes it possible to
+ * handle concurrent accesses properly (you may have four Ethernet
+ * ports, but they in fact share the same SMI interface to access the
+ * MDIO bus). Moreover, this MDIO interface code is similar between
+ * the mv643xx_eth driver and the mvneta driver. For now, it is only
+ * used by the mvneta driver, but it could later be used by the
+ * mv643xx_eth driver as well.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define MVMDIO_SMI_DATA_SHIFT 0
+#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
+#define MVMDIO_SMI_PHY_REG_SHIFT 21
+#define MVMDIO_SMI_READ_OPERATION BIT(26)
+#define MVMDIO_SMI_WRITE_OPERATION 0
+#define MVMDIO_SMI_READ_VALID BIT(27)
+#define MVMDIO_SMI_BUSY BIT(28)
+
+struct orion_mdio_dev {
+ struct mutex lock;
+ void __iomem *smireg;
+};
+
+/* Wait for the SMI unit to be ready for another operation
+ */
+static int orion_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (!(val & MVMDIO_SMI_BUSY))
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ return 0;
+}
+
+static int orion_mdio_read(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int count;
+ u32 val;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_READ_OPERATION),
+ dev->smireg);
+
+ /* Wait for the value to become available */
+ count = 0;
+ while (1) {
+ val = readl(dev->smireg);
+ if (val & MVMDIO_SMI_READ_VALID)
+ break;
+
+ if (count > 100) {
+ dev_err(bus->parent, "Timeout when reading PHY\n");
+ mutex_unlock(&dev->lock);
+ return -ETIMEDOUT;
+ }
+
+ udelay(10);
+ count++;
+ }
+
+ mutex_unlock(&dev->lock);
+
+ return val & 0xFFFF;
+}
+
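As a concrete illustration of the SMI command word built in orion_mdio_read() above: reading register 2 of the PHY at address 1 writes

	(1 << MVMDIO_SMI_PHY_ADDR_SHIFT) |	/* PHY address, bits 16-20 */
	(2 << MVMDIO_SMI_PHY_REG_SHIFT) |	/* register number, bits 21-25 */
	MVMDIO_SMI_READ_OPERATION		/* BIT(26) selects a read */

to the SMI register, after which the loop polls until MVMDIO_SMI_READ_VALID (bit 27) is set and the low 16 bits hold the value read from the PHY.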
+static int orion_mdio_write(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ ret = orion_mdio_wait_ready(bus);
+ if (ret < 0) {
+ mutex_unlock(&dev->lock);
+ return ret;
+ }
+
+ writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
+ (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
+ MVMDIO_SMI_WRITE_OPERATION |
+ (value << MVMDIO_SMI_DATA_SHIFT)),
+ dev->smireg);
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+static int orion_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static int orion_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct orion_mdio_dev *dev;
+ int i, ret;
+
+ bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
+ if (!bus) {
+ dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ bus->name = "orion_mdio_bus";
+ bus->read = orion_mdio_read;
+ bus->write = orion_mdio_write;
+ bus->reset = orion_mdio_reset;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
+ mdiobus_free(bus);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_POLL;
+
+ dev = bus->priv;
+ dev->smireg = of_iomap(pdev->dev.of_node, 0);
+ if (!dev->smireg) {
+ dev_err(&pdev->dev, "No SMI register address given in DT\n");
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return -ENODEV;
+ }
+
+ mutex_init(&dev->lock);
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
+ iounmap(dev->smireg);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int orion_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+ mdiobus_unregister(bus);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return 0;
+}
+
+static const struct of_device_id orion_mdio_match[] = {
+ { .compatible = "marvell,orion-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orion_mdio_match);
+
+static struct platform_driver orion_mdio_driver = {
+ .probe = orion_mdio_probe,
+ .remove = orion_mdio_remove,
+ .driver = {
+ .name = "orion-mdio",
+ .of_match_table = orion_mdio_match,
+ },
+};
+
+module_platform_driver(orion_mdio_driver);
+
+MODULE_DESCRIPTION("Marvell MDIO interface driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 000000000000..b6025c305e10
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,2847 @@
+/*
+ * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Rami Rosen <rosenr@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+/* Registers */
+#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
+#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
+#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
+#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
+#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
+#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
+#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
+#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
+#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
+#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
+#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
+#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
+#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
+#define MVNETA_PORT_RX_RESET 0x1cc0
+#define MVNETA_PORT_RX_DMA_RESET BIT(0)
+#define MVNETA_PHY_ADDR 0x2000
+#define MVNETA_PHY_ADDR_MASK 0x1f
+#define MVNETA_MBUS_RETRY 0x2010
+#define MVNETA_UNIT_INTR_CAUSE 0x2080
+#define MVNETA_UNIT_CONTROL 0x20B0
+#define MVNETA_PHY_POLLING_ENABLE BIT(1)
+#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
+#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
+#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
+#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_PORT_CONFIG 0x2400
+#define MVNETA_UNI_PROMISC_MODE BIT(0)
+#define MVNETA_DEF_RXQ(q) ((q) << 1)
+#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
+#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
+#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
+#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
+#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
+#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
+#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
+ MVNETA_DEF_RXQ_ARP(q) | \
+ MVNETA_DEF_RXQ_TCP(q) | \
+ MVNETA_DEF_RXQ_UDP(q) | \
+ MVNETA_DEF_RXQ_BPDU(q) | \
+ MVNETA_TX_UNSET_ERR_SUM | \
+ MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
+#define MVNETA_PORT_CONFIG_EXTEND 0x2404
+#define MVNETA_MAC_ADDR_LOW 0x2414
+#define MVNETA_MAC_ADDR_HIGH 0x2418
+#define MVNETA_SDMA_CONFIG 0x241c
+#define MVNETA_SDMA_BRST_SIZE_16 4
+#define MVNETA_NO_DESC_SWAP 0x0
+#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
+#define MVNETA_RX_NO_DATA_SWAP BIT(4)
+#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_PORT_STATUS 0x2444
+#define MVNETA_TX_IN_PRGRS BIT(1)
+#define MVNETA_TX_FIFO_EMPTY BIT(8)
+#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+#define MVNETA_TYPE_PRIO 0x24bc
+#define MVNETA_FORCE_UNI BIT(21)
+#define MVNETA_TXQ_CMD_1 0x24e4
+#define MVNETA_TXQ_CMD 0x2448
+#define MVNETA_TXQ_DISABLE_SHIFT 8
+#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_ACC_MODE 0x2500
+#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
+#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
+#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
+#define MVNETA_INTR_NEW_CAUSE 0x25a0
+#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
+#define MVNETA_INTR_NEW_MASK 0x25a4
+#define MVNETA_INTR_OLD_CAUSE 0x25a8
+#define MVNETA_INTR_OLD_MASK 0x25ac
+#define MVNETA_INTR_MISC_CAUSE 0x25b0
+#define MVNETA_INTR_MISC_MASK 0x25b4
+#define MVNETA_INTR_ENABLE 0x25b8
+#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_CMD 0x2680
+#define MVNETA_RXQ_DISABLE_SHIFT 8
+#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
+#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
+#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
+#define MVNETA_GMAC_CTRL_0 0x2c00
+#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
+#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
+#define MVNETA_GMAC_CTRL_2 0x2c08
+#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PORT_RGMII BIT(4)
+#define MVNETA_GMAC2_PORT_RESET BIT(6)
+#define MVNETA_GMAC_STATUS 0x2c10
+#define MVNETA_GMAC_LINK_UP BIT(0)
+#define MVNETA_GMAC_SPEED_1000 BIT(1)
+#define MVNETA_GMAC_SPEED_100 BIT(2)
+#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
+#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
+#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
+#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_LATE_COLLISION 0x7c
+#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
+#define MVNETA_DA_FILT_OTH_MCAST 0x3500
+#define MVNETA_DA_FILT_UCAST_BASE 0x3600
+#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
+#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
+#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
+#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
+#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
+#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
+#define MVNETA_TXQ_SENT_DESC_SHIFT 16
+#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
+#define MVNETA_PORT_TX_RESET 0x3cf0
+#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TX_MTU 0x3e0c
+#define MVNETA_TX_TOKEN_SIZE 0x3e14
+#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
+#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+
+#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Descriptor ring Macros */
+#define MVNETA_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVNETA_TXDONE_COAL_PKTS 16
+#define MVNETA_RX_COAL_PKTS 32
+#define MVNETA_RX_COAL_USEC 100
+
+/* Timer */
+#define MVNETA_TX_DONE_TIMER_PERIOD 10
+
+/* Napi polling weight */
+#define MVNETA_RX_POLL_WEIGHT 64
+
+/* The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Because those two bytes sit at the front of the
+ * Ethernet header, they leave the IP header aligned on a 4-byte
+ * boundary automatically: the hardware skips those two bytes on its
+ * own.
+ */
+#define MVNETA_MH_SIZE 2
+
+#define MVNETA_VLAN_TAG_LEN 4
+
+#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
+#define MVNETA_TX_CSUM_MAX_SIZE 9800
+#define MVNETA_ACC_MODE_EXT 1
+
+/* Timeout constants */
+#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
+#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
+
+#define MVNETA_TX_MTU_MAX 0x3ffff
+
+/* Max number of Rx descriptors */
+#define MVNETA_MAX_RXD 128
+
+/* Max number of Tx descriptors */
+#define MVNETA_MAX_TXD 532
+
+/* descriptor aligned size */
+#define MVNETA_DESC_ALIGNED_SIZE 32
+
+#define MVNETA_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, \
+ MVNETA_CPU_D_CACHE_LINE_SIZE)
+
+#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+
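A quick worked example of the sizing macros above, assuming the standard 14-byte Ethernet header and 4-byte FCS: for a 1500-byte MTU, MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536 bytes, and MVNETA_RX_BUF_SIZE() then adds NET_SKB_PAD of headroom on top of that.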
+struct mvneta_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct mvneta_port {
+ int pkt_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+ struct mvneta_tx_queue *txqs;
+ struct timer_list tx_done_timer;
+ struct net_device *dev;
+
+ u32 cause_rx_tx;
+ struct napi_struct napi;
+
+ /* Flags */
+ unsigned long flags;
+#define MVNETA_F_TX_DONE_TIMER_BIT 0
+
+ /* Napi weight */
+ int weight;
+
+ /* Core clock */
+ struct clk *clk;
+ u8 mcast_count[256];
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+ struct mvneta_stats tx_stats;
+ struct mvneta_stats rx_stats;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ phy_interface_t phy_interface;
+ struct device_node *phy_node;
+ unsigned int link;
+ unsigned int duplex;
+ unsigned int speed;
+};
+
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+#define MVNETA_TX_L3_OFF_SHIFT 0
+#define MVNETA_TX_IP_HLEN_SHIFT 8
+#define MVNETA_TX_L4_UDP BIT(16)
+#define MVNETA_TX_L3_IP6 BIT(17)
+#define MVNETA_TXD_IP_CSUM BIT(18)
+#define MVNETA_TXD_Z_PAD BIT(19)
+#define MVNETA_TXD_L_DESC BIT(20)
+#define MVNETA_TXD_F_DESC BIT(21)
+#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
+ MVNETA_TXD_L_DESC | \
+ MVNETA_TXD_F_DESC)
+#define MVNETA_TX_L4_CSUM_FULL BIT(30)
+#define MVNETA_TX_L4_CSUM_NOT BIT(31)
+
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
+#define MVNETA_RXD_ERR_CRC 0x0
+#define MVNETA_RXD_ERR_SUMMARY BIT(16)
+#define MVNETA_RXD_ERR_OVERRUN BIT(17)
+#define MVNETA_RXD_ERR_LEN BIT(18)
+#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
+#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
+#define MVNETA_RXD_L3_IP4 BIT(25)
+#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
+#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u16 data_size; /* Size of received packet in bytes */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+
+struct mvneta_tx_queue {
+ /* Number of this TX queue, in the range 0-7 */
+ u8 id;
+
+ /* Number of TX DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used TX DMA descriptor in the
+ * descriptor ring
+ */
+ int count;
+
+ /* Array of transmitted skb */
+ struct sk_buff **tx_skb;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+
+ u32 done_pkts_coal;
+
+ /* Virtual address of the TX DMA descriptors array */
+ struct mvneta_tx_desc *descs;
+
+ /* DMA address of the TX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last TX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next TX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mvneta_rx_queue {
+ /* rx queue number, in the range 0-7 */
+ u8 id;
+
+ /* num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ /* counter of times when mvneta_refill() failed */
+ int missed;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvneta_rx_desc *descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+static int rxq_number = 8;
+static int txq_number = 8;
+
+static int rxq_def;
+static int txq_def;
+
+#define MVNETA_DRIVER_NAME "mvneta"
+#define MVNETA_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write helper method */
+static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
+{
+ writel(data, pp->base + offset);
+}
+
+/* Read helper method */
+static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
+{
+ return readl(pp->base + offset);
+}
+
+/* Increment txq get counter */
+static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
+{
+ txq->txq_get_index++;
+ if (txq->txq_get_index == txq->size)
+ txq->txq_get_index = 0;
+}
+
+/* Increment txq put counter */
+static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
+{
+ txq->txq_put_index++;
+ if (txq->txq_put_index == txq->size)
+ txq->txq_put_index = 0;
+}
+
+
+/* Clear all MIB counters */
+static void mvneta_mib_counters_clear(struct mvneta_port *pp)
+{
+ int i;
+ u32 dummy;
+
+ /* Perform dummy reads from MIB counters */
+ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
+ dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+}
+
+/* Get System Network Statistics */
+struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ unsigned int start;
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
+ stats->rx_packets = pp->rx_stats.packets;
+ stats->rx_bytes = pp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
+
+
+ do {
+ start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
+ stats->tx_packets = pp->tx_stats.packets;
+ stats->tx_bytes = pp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+
+ return stats;
+}
+
+/* Rx descriptors helper methods */
+
+/* Checks whether the given RX descriptor is both the first and the
+ * last descriptor for the RX packet. Each RX packet is currently
+ * received through a single RX descriptor, so a descriptor without
+ * both its first and last bits set is an error
+ */
+static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
+{
+ return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
+ MVNETA_RXD_FIRST_LAST_DESC;
+}
+
+/* Add number of descriptors ready to receive new packets */
+static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int ndescs)
+{
+ /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
+ * be added at once
+ */
+ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
+ MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+ ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
+ (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
+ return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
+}
+
+/* Update num of rx desc called upon return from rx path or
+ * from mvneta_rxq_drop_pkts().
+ */
+static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int rx_done, int rx_filled)
+{
+ u32 val;
+
+ if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
+ val = rx_done |
+ (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ return;
+ }
+
+ /* Only 255 descriptors can be added at once */
+ while ((rx_done > 0) || (rx_filled > 0)) {
+ if (rx_done <= 0xff) {
+ val = rx_done;
+ rx_done = 0;
+ } else {
+ val = 0xff;
+ rx_done -= 0xff;
+ }
+ if (rx_filled <= 0xff) {
+ val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled = 0;
+ } else {
+ val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
+ rx_filled -= 0xff;
+ }
+ mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
+ }
+}
+
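For example, reporting rx_done = rx_filled = 300 cannot be done in a single write, since each field of the status-update register is capped at 255; the loop above issues two writes, first 255 processed / 255 refilled, then the remaining 45 / 45.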
+/* Get pointer to next RX descriptor to be processed by SW */
+static struct mvneta_rx_desc *
+mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
+ return rxq->descs + rx_desc;
+}
+
+/* Change maximum receive size of the port. */
+static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
+ val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
+ MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+
+/* Set rx queue offset */
+static void mvneta_rxq_offset_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int offset)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
+
+	/* Offset is programmed in units of 8 bytes */
+ val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+/* Tx descriptors helper methods */
+
+/* Update HW with number of TX descriptors to be sent */
+static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int pend_desc)
+{
+ u32 val;
+
+	/* Only 255 descriptors can be added at once; assume the caller
+	 * processes TX descriptors in quanta of less than 256
+ */
+ val = pend_desc;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static struct mvneta_tx_desc *
+mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
+{
+ int tx_desc = txq->next_desc_to_proc;
+
+ txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
+ return txq->descs + tx_desc;
+}
+
+/* Release the last allocated TX descriptor. Useful to handle DMA
+ * mapping failures in the TX path.
+ */
+static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Set rxq buf size */
+static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq,
+ int buf_size)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
+
+ val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
+ val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
+}
+
+/* Disable buffer management (BM) */
+static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+
+
+/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ if (enable)
+ val |= MVNETA_GMAC2_PORT_RGMII;
+ else
+ val &= ~MVNETA_GMAC2_PORT_RGMII;
+
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Config SGMII port */
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val |= MVNETA_GMAC2_PSC_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Start the Ethernet port RX and TX activity */
+static void mvneta_port_up(struct mvneta_port *pp)
+{
+ int queue;
+ u32 q_map;
+
+ /* Enable all initialized TXs. */
+ mvneta_mib_counters_clear(pp);
+ q_map = 0;
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ if (txq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
+
+ /* Enable all initialized RXQs. */
+ q_map = 0;
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ if (rxq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+
+ mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+}
+
+/* Stop the Ethernet port activity */
+static void mvneta_port_down(struct mvneta_port *pp)
+{
+ u32 val;
+ int count;
+
+ /* Stop Rx port activity. Check port Rx activity. */
+ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
+
+ /* Issue stop command for active channels only */
+ if (val != 0)
+ mvreg_write(pp, MVNETA_RXQ_CMD,
+ val << MVNETA_RXQ_DISABLE_SHIFT);
+
+ /* Wait for all Rx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+				    "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_RXQ_CMD);
+ } while (val & 0xff);
+
+ /* Stop Tx port activity. Check port Tx activity. Issue stop
+ * command for active channels only
+ */
+ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
+
+ if (val != 0)
+ mvreg_write(pp, MVNETA_TXQ_CMD,
+ (val << MVNETA_TXQ_DISABLE_SHIFT));
+
+ /* Wait for all Tx activity to terminate. */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
+ netdev_warn(pp->dev,
+ "TIMEOUT for TX stopped status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ /* Check TX Command reg that all Txqs are stopped */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD);
+
+ } while (val & 0xff);
+
+ /* Double check to verify that TX FIFO is empty */
+ count = 0;
+ do {
+ if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
+ netdev_warn(pp->dev,
+				    "TX FIFO empty timeout status=0x%08x\n",
+ val);
+ break;
+ }
+ mdelay(1);
+
+ val = mvreg_read(pp, MVNETA_PORT_STATUS);
+ } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
+ (val & MVNETA_TX_IN_PRGRS));
+
+ udelay(200);
+}
+
+/* Enable the port by setting the port enable bit of the MAC control register */
+static void mvneta_port_enable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Enable port */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val |= MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+}
+
+/* Disable the port and wait for about 200 usec before returning */
+static void mvneta_port_disable(struct mvneta_port *pp)
+{
+ u32 val;
+
+ /* Reset the Enable bit in the Serial Control Register */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ val &= ~MVNETA_GMAC0_PORT_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+
+ udelay(200);
+}
+
+/* Multicast tables methods */
+
+/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
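+ /* Each byte of the 32-bit table register describes one table
+ * entry: bit 0 is the "accept frame" flag and bits 3:1 hold the
+ * RX queue, so replicating the byte fills the four entries
+ * covered by this register.
+ */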
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
+}
+
+/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
+static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ val = 0;
+ } else {
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
+}
+
+/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
+static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
+{
+ int offset;
+ u32 val;
+
+ if (queue == -1) {
+ memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
+ val = 0;
+ } else {
+ memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
+ val = 0x1 | (queue << 1);
+ val |= (val << 24) | (val << 16) | (val << 8);
+ }
+
+ for (offset = 0; offset <= 0xfc; offset += 4)
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
+}
+
+/* This method sets default values for the NETA port:
+ * Clears interrupt Cause and Mask registers.
+ * Clears all MAC tables.
+ * Sets defaults to all registers.
+ * Resets RX and TX descriptor rings.
+ * Resets PHY.
+ * This method can be called after mvneta_port_down() to return the port
+ * settings to defaults.
+ */
+static void mvneta_defaults_set(struct mvneta_port *pp)
+{
+ int cpu;
+ int queue;
+ u32 val;
+
+ /* Clear all Cause registers */
+ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
+
+ /* Enable MBUS Retry bit16 */
+ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
+
+ /* Set CPU queue access map - all CPUs have access to all RX
+ * queues and to all TX queues
+ */
+ for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+ (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+ MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+
+ /* Reset RX and TX DMAs */
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+
+ /* Disable Legacy WRR, Disable EJP, Release from reset */
+ mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
+ for (queue = 0; queue < txq_number; queue++) {
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
+ }
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+
+ /* Set Port Acceleration Mode */
+ val = MVNETA_ACC_MODE_EXT;
+ mvreg_write(pp, MVNETA_ACC_MODE, val);
+
+ /* Update the portCfg register value according to all RxQueue types */
+ val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+ mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+ val = 0;
+ mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
+ mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
+
+ /* Build PORT_SDMA_CONFIG_REG */
+ val = 0;
+
+ /* Default burst size */
+ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+
+ val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
+ MVNETA_NO_DESC_SWAP);
+
+ /* Assign port SDMA configuration */
+ mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ /* Set port interrupt enable register - default enable all */
+ mvreg_write(pp, MVNETA_INTR_ENABLE,
+ (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
+ | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+}
+
+/* Set max sizes for tx queues */
+static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
+{
+ u32 val, size, mtu;
+ int queue;
+
+ mtu = max_tx_size * 8;
+ if (mtu > MVNETA_TX_MTU_MAX)
+ mtu = MVNETA_TX_MTU_MAX;
+
+ /* Set MTU */
+ val = mvreg_read(pp, MVNETA_TX_MTU);
+ val &= ~MVNETA_TX_MTU_MAX;
+ val |= mtu;
+ mvreg_write(pp, MVNETA_TX_MTU, val);
+
+ /* The TX token size and every TXQ's token size must be larger than the MTU */
+ val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
+
+ size = val & MVNETA_TX_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
+ }
+ for (queue = 0; queue < txq_number; queue++) {
+ val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
+
+ size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
+ }
+ }
+}
+
+/* Set unicast address */
+static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
+ int queue)
+{
+ unsigned int unicast_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Locate the Unicast table entry */
+ last_nibble = (0xf & last_nibble);
+
+ /* offset from unicast tbl base */
+ tbl_offset = (last_nibble / 4) * 4;
+
+ /* offset within the above reg */
+ reg_offset = last_nibble % 4;
+
+ unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified unicast DA tbl entry */
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ unicast_reg &= ~(0xff << (8 * reg_offset));
+ unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
+}
+
+/* Set mac address */
+static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
+ int queue)
+{
+ unsigned int mac_h;
+ unsigned int mac_l;
+
+ if (queue != -1) {
+ mac_l = (addr[4] << 8) | (addr[5]);
+ mac_h = (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | (addr[3] << 0);
+
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
+ }
+
+ /* Accept frames of this address */
+ mvneta_set_ucast_addr(pp, addr[5], queue);
+}
+
+/* Set the number of packets that will be received before the HW
+ * generates an RX interrupt.
+ */
+static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
+ value | MVNETA_RXQ_NON_OCCUPIED(0));
+ rxq->pkts_coal = value;
+}
+
+/* Set the time delay, in usec, before the HW generates an RX
+ * interrupt.
+ */
+static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, u32 value)
+{
+ u32 val;
+ unsigned long clk_rate;
+
+ clk_rate = clk_get_rate(pp->clk);
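+ /* The time-coalescing register counts port clock cycles, so
+ * convert the requested delay from usec to clock ticks.
+ */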
+ val = (clk_rate / 1000000) * value;
+
+ mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
+ rxq->time_coal = value;
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, u32 value)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
+
+ val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
+ val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
+
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
+
+ txq->done_pkts_coal = value;
+}
+
+/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
+static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
+{
+ if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
+ pp->tx_done_timer.expires = jiffies +
+ msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
+ add_timer(&pp->tx_done_timer);
+ }
+}
+
+/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
+static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
+ u32 phys_addr, u32 cookie)
+{
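+ /* The cookie is the skb pointer squeezed into the 32-bit
+ * descriptor field; it is read back on RX completion to recover
+ * the buffer.
+ */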
+ rx_desc->buf_cookie = cookie;
+ rx_desc->buf_phys_addr = phys_addr;
+}
+
+/* Decrement sent descriptors counter */
+static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq,
+ int sent_desc)
+{
+ u32 val;
+
+ /* Only 255 TX descriptors can be updated at once */
+ while (sent_desc > 0xff) {
+ val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ sent_desc = sent_desc - 0xff;
+ }
+
+ val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ u32 val;
+ int sent_desc;
+
+ val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
+ sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
+ MVNETA_TXQ_SENT_DESC_SHIFT;
+
+ return sent_desc;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ */
+static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int sent_desc;
+
+ /* Get number of sent descriptors */
+ sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
+
+ /* Decrement sent descriptors counter */
+ if (sent_desc)
+ mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
+
+ return sent_desc;
+}
+
+/* Set the TXQ descriptor fields relevant to checksum calculation */
+static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type; required only for checksum
+ * calculation
+ */
+ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
+ command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
+ if (l3_proto == swab16(ETH_P_IP))
+ command |= MVNETA_TXD_IP_CSUM;
+ else
+ command |= MVNETA_TX_L3_IP6;
+
+ if (l4_proto == IPPROTO_TCP)
+ command |= MVNETA_TX_L4_CSUM_FULL;
+ else if (l4_proto == IPPROTO_UDP)
+ command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
+ else
+ command |= MVNETA_TX_L4_CSUM_NOT;
+
+ return command;
+}
+
+/* Display more error info */
+static void mvneta_rx_error(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ u32 status = rx_desc->status;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
+ netdev_err(pp->dev,
+ "bad rx status %08x (buffer oversize), size=%d\n",
+ rx_desc->status, rx_desc->data_size);
+ return;
+ }
+
+ switch (status & MVNETA_RXD_ERR_CODE_MASK) {
+ case MVNETA_RXD_ERR_CRC:
+ netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_OVERRUN:
+ netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_LEN:
+ netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVNETA_RXD_ERR_RESOURCE:
+ netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ }
+}
+
+/* Handle RX checksum offload */
+static void mvneta_rx_csum(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
+ (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
+ u32 cause)
+{
+ int queue = fls(cause) - 1;
+
+ return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+}
+
+/* Free tx queue skbuffs */
+static void mvneta_txq_bufs_free(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct mvneta_tx_desc *tx_desc = txq->descs +
+ txq->txq_get_index;
+ struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+
+ mvneta_txq_inc_get(txq);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Handle end of transmission */
+static int mvneta_txq_done(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+ int tx_done;
+
+ tx_done = mvneta_txq_sent_desc_proc(pp, txq);
+ if (tx_done == 0)
+ return tx_done;
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ txq->count -= tx_done;
+
+ if (netif_tx_queue_stopped(nq)) {
+ if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ netif_tx_wake_queue(nq);
+ }
+
+ return tx_done;
+}
+
+/* Refill processing */
+static int mvneta_rx_refill(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc)
+{
+ dma_addr_t phys_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
+ if (!skb)
+ return -ENOMEM;
+
+ phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+
+ return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int ip_hdr_len = 0;
+ u8 l4_proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ /* Calculate IPv4 checksum and L4 checksum */
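+ /* ihl is the IP header length in 32-bit words, which is the
+ * unit the TX descriptor expects (the IPv6 branch below converts
+ * bytes to words the same way).
+ */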
+ ip_hdr_len = ip4h->ihl;
+ l4_proto = ip4h->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ /* Read l4_protocol from one of IPv6 extra headers */
+ if (skb_network_header_len(skb) > 0)
+ ip_hdr_len = (skb_network_header_len(skb) >> 2);
+ l4_proto = ip6h->nexthdr;
+ } else
+ return MVNETA_TX_L4_CSUM_NOT;
+
+ return mvneta_txq_desc_csum(skb_network_offset(skb),
+ skb->protocol, ip_hdr_len, l4_proto);
+ }
+
+ return MVNETA_TX_L4_CSUM_NOT;
+}
+
+/* Returns rx queue pointer (find last set bit) according to causeRxTx
+ * value
+ */
+static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
+ u32 cause)
+{
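+ /* RX queues are reported in bits 8-15 of the causeRxTx value,
+ * so shift them down before picking the highest-numbered
+ * pending queue.
+ */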
+ int queue = fls(cause >> 8) - 1;
+
+ return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
+}
+
+/* Drop packets received by the RXQ and free buffers */
+static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ int rx_done, i;
+
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ for (i = 0; i < rxq->size; i++) {
+ struct mvneta_rx_desc *rx_desc = rxq->descs + i;
+ struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ dev_kfree_skb_any(skb);
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+ }
+
+ if (rx_done)
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+}
+
+/* Main rx processing */
+static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+{
+ struct net_device *dev = pp->dev;
+ int rx_done, rx_filled;
+
+ /* Get number of received packets */
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ if (rx_todo > rx_done)
+ rx_todo = rx_done;
+
+ rx_done = 0;
+ rx_filled = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct sk_buff *skb;
+ u32 rx_status;
+ int rx_bytes, err;
+
+ prefetch(rx_desc);
+ rx_done++;
+ rx_filled++;
+ rx_status = rx_desc->status;
+ skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
+ (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ dev->stats.rx_errors++;
+ mvneta_rx_error(pp, rx_desc);
+ mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
+ (u32)skb);
+ continue;
+ }
+
+ dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ rx_desc->data_size, DMA_FROM_DEVICE);
+
+ rx_bytes = rx_desc->data_size -
+ (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ u64_stats_update_begin(&pp->rx_stats.syncp);
+ pp->rx_stats.packets++;
+ pp->rx_stats.bytes += rx_bytes;
+ u64_stats_update_end(&pp->rx_stats.syncp);
+
+ /* Linux processing */
+ skb_reserve(skb, MVNETA_MH_SIZE);
+ skb_put(skb, rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ mvneta_rx_csum(pp, rx_desc, skb);
+
+ napi_gro_receive(&pp->napi, skb);
+
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(pp->dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ rx_filled--;
+ }
+ }
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+
+ return rx_done;
+}
+
+/* Handle tx fragmentation processing */
+static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
+ struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *addr = page_address(frag->page.p) + frag->page_offset;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = frag->size;
+
+ tx_desc->buf_phys_addr =
+ dma_map_single(pp->dev->dev.parent, addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto error;
+ }
+
+ if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ /* Last descriptor */
+ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->tx_skb[txq->txq_put_index] = skb;
+
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* Descriptor in the middle: Not First, Not Last */
+ tx_desc->command = 0;
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ }
+ }
+
+ return 0;
+
+error:
+ /* Release all descriptors that were used to map fragments of
+ * this packet, as well as the corresponding DMA mappings
+ */
+ for (i = i - 1; i >= 0; i--) {
+ tx_desc = txq->descs + i;
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+
+ return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+ struct mvneta_tx_desc *tx_desc;
+ struct netdev_queue *nq;
+ int frags = 0;
+ u32 tx_cmd;
+
+ if (!netif_running(dev))
+ goto out;
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+ nq = netdev_get_tx_queue(dev, txq_def);
+
+ /* Get a descriptor for the first part of the packet */
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ tx_cmd = mvneta_skb_tx_csum(pp, skb);
+
+ tx_desc->data_size = skb_headlen(skb);
+
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+
+ if (frags == 1) {
+ /* First and Last descriptor */
+ tx_cmd |= MVNETA_TXD_FLZ_DESC;
+ tx_desc->command = tx_cmd;
+ txq->tx_skb[txq->txq_put_index] = skb;
+ mvneta_txq_inc_put(txq);
+ } else {
+ /* First but not Last */
+ tx_cmd |= MVNETA_TXD_F_DESC;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ mvneta_txq_inc_put(txq);
+ tx_desc->command = tx_cmd;
+ /* Continue with other skb fragments */
+ if (mvneta_tx_frag_process(pp, skb, txq)) {
+ dma_unmap_single(dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ frags = 0;
+ goto out;
+ }
+ }
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+ netif_tx_stop_queue(nq);
+
+out:
+ if (frags > 0) {
+ u64_stats_update_begin(&pp->tx_stats.syncp);
+ pp->tx_stats.packets++;
+ pp->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&pp->tx_stats.syncp);
+
+ } else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
+ mvneta_txq_done(pp, txq);
+
+ /* If after calling mvneta_txq_done, count equals
+ * frags, we need to set the timer
+ */
+ if (txq->count == frags && frags > 0)
+ mvneta_add_tx_done_timer(pp);
+
+ return NETDEV_TX_OK;
+}
+
+/* Free tx resources, when resetting a port */
+static void mvneta_txq_done_force(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int tx_done = txq->count;
+
+ mvneta_txq_bufs_free(pp, txq, tx_done);
+
+ /* reset txq */
+ txq->count = 0;
+ txq->txq_put_index = 0;
+ txq->txq_get_index = 0;
+}
+
+/* handle tx done - called from tx done timer callback */
+static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
+ int *tx_todo)
+{
+ struct mvneta_tx_queue *txq;
+ u32 tx_done = 0;
+ struct netdev_queue *nq;
+
+ *tx_todo = 0;
+ while (cause_tx_done != 0) {
+ txq = mvneta_tx_done_policy(pp, cause_tx_done);
+ if (!txq)
+ break;
+
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
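+ /* Take the queue's xmit lock so that completion processing
+ * does not race with mvneta_tx() updating txq->count.
+ */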
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (txq->count) {
+ tx_done += mvneta_txq_done(pp, txq);
+ *tx_todo += txq->count;
+ }
+
+ __netif_tx_unlock(nq);
+ cause_tx_done &= ~((1 << txq->id));
+ }
+
+ return tx_done;
+}
+
+/* Compute the CRC-8 of the specified address, using the algorithm
+ * defined in the HW spec, which differs from the generic CRC-8 algorithm.
+ */
+static int mvneta_addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
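+ /* Bitwise CRC-8 over the six address bytes with polynomial
+ * 0x107 (x^8 + x^2 + x + 1): each byte is XORed into the
+ * remainder, shifted up, and then reduced bit by bit.
+ */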
+ for (i = 0; i < ETH_ALEN; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
+/* This method controls the net device's special MAC multicast support.
+ * The Special Multicast Table for MAC addresses supports MACs of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table. This method sets the appropriate
+ * entry in the Special Multicast Table.
+ */
+static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
+ unsigned char last_byte,
+ int queue)
+{
+ unsigned int smc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ /* Register offset from SMC table base */
+ tbl_offset = (last_byte / 4);
+ /* Entry offset within the above reg */
+ reg_offset = last_byte % 4;
+
+ smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
+ + tbl_offset * 4));
+
+ if (queue == -1)
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ else {
+ smc_table_reg &= ~(0xff << (8 * reg_offset));
+ smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
+ smc_table_reg);
+}
+
+/* This method controls the network device's Other MAC multicast support.
+ * The Other Multicast Table is used for multicast addresses of any other
+ * type. A CRC-8 value is used as an index to the Other Multicast Table
+ * entries in the DA-Filter table.
+ * The method gets the CRC-8 value from the calling routine and sets the
+ * appropriate entry in the Other Multicast Table according to that value.
+ */
+static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
+ unsigned char crc8,
+ int queue)
+{
+ unsigned int omc_table_reg;
+ unsigned int tbl_offset;
+ unsigned int reg_offset;
+
+ tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
+ reg_offset = crc8 % 4; /* Entry offset within the above reg */
+
+ omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
+
+ if (queue == -1) {
+ /* Clear accepts frame bit at specified Other DA table entry */
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ } else {
+ omc_table_reg &= ~(0xff << (8 * reg_offset));
+ omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
+ }
+
+ mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
+}
+
+/* The network device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ * Table entries in the DA-Filter table.
+ * 2) Other Multicast Table for multicast of another type. A CRC-8 value
+ * is used as an index to the Other Multicast Table entries in the
+ * DA-Filter table.
+ */
+static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
+ int queue)
+{
+ unsigned char crc_result = 0;
+
+ if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
+ return 0;
+ }
+
+ crc_result = mvneta_addr_crc(p_addr);
+ if (queue == -1) {
+ if (pp->mcast_count[crc_result] == 0) {
+ netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
+ crc_result);
+ return -EINVAL;
+ }
+
+ pp->mcast_count[crc_result]--;
+ if (pp->mcast_count[crc_result] != 0) {
+ netdev_info(pp->dev,
+ "After delete there are %d valid Mcast for crc8=0x%02x\n",
+ pp->mcast_count[crc_result], crc_result);
+ return -EINVAL;
+ }
+ } else
+ pp->mcast_count[crc_result]++;
+
+ mvneta_set_other_mcast_addr(pp, crc_result, queue);
+
+ return 0;
+}
+
+/* Configure the filtering mode of the Ethernet port */
+static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
+ int is_promisc)
+{
+ u32 port_cfg_reg, val;
+
+ port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
+
+ val = mvreg_read(pp, MVNETA_TYPE_PRIO);
+
+ /* Set / Clear UPM bit in port configuration register */
+ if (is_promisc) {
+ /* Accept all Unicast addresses */
+ port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
+ val |= MVNETA_FORCE_UNI;
+ mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
+ mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
+ } else {
+ /* Reject all Unicast addresses */
+ port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
+ val &= ~MVNETA_FORCE_UNI;
+ }
+
+ mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
+ mvreg_write(pp, MVNETA_TYPE_PRIO, val);
+}
+
+/* register unicast and multicast addresses */
+static void mvneta_set_rx_mode(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Accept all: Multicast + Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 1);
+ mvneta_set_ucast_table(pp, rxq_def);
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept single Unicast */
+ mvneta_rx_unicast_promisc_set(pp, 0);
+ mvneta_set_ucast_table(pp, -1);
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast */
+ mvneta_set_special_mcast_table(pp, rxq_def);
+ mvneta_set_other_mcast_table(pp, rxq_def);
+ } else {
+ /* Accept only initialized multicast */
+ mvneta_set_special_mcast_table(pp, -1);
+ mvneta_set_other_mcast_table(pp, -1);
+
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ mvneta_mcast_addr_set(pp, ha->addr,
+ rxq_def);
+ }
+ }
+ }
+ }
+}
+
+/* Interrupt handling - the callback for request_irq() */
+static irqreturn_t mvneta_isr(int irq, void *dev_id)
+{
+ struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+ /* Mask all interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+
+ napi_schedule(&pp->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* NAPI handler
+ * Bits 0-7 of the causeRxTx register indicate that packets were
+ * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
+ * Bits 8-15 of the causeRxTx register indicate that packets were
+ * received on the corresponding RXQ (bit 8 is for RX queue 0).
+ * Each CPU has its own causeRxTx register
+ */
+static int mvneta_poll(struct napi_struct *napi, int budget)
+{
+ int rx_done = 0;
+ u32 cause_rx_tx;
+ unsigned long flags;
+ struct mvneta_port *pp = netdev_priv(napi->dev);
+
+ if (!netif_running(pp->dev)) {
+ napi_complete(napi);
+ return rx_done;
+ }
+
+ /* Read cause register */
+ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
+ MVNETA_RX_INTR_MASK(rxq_number);
+
+ /* For the case where the last mvneta_poll did not process all
+ * RX packets
+ */
+ cause_rx_tx |= pp->cause_rx_tx;
+ if (rxq_number > 1) {
+ while ((cause_rx_tx != 0) && (budget > 0)) {
+ int count;
+ struct mvneta_rx_queue *rxq;
+ /* get rx queue number from cause_rx_tx */
+ rxq = mvneta_rx_policy(pp, cause_rx_tx);
+ if (!rxq)
+ break;
+
+ /* process the packet in that rx queue */
+ count = mvneta_rx(pp, budget, rxq);
+ rx_done += count;
+ budget -= count;
+ if (budget > 0) {
+ /* set off the rx bit of the
+ * corresponding bit in the cause rx
+ * tx register, so that next iteration
+ * will find the next rx queue where
+ * packets are received on
+ */
+ cause_rx_tx &= ~((1 << rxq->id) << 8);
+ }
+ }
+ } else {
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+ budget -= rx_done;
+ }
+
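+ /* If the budget was not exhausted, all pending RX work is done:
+ * complete NAPI and re-enable RX interrupts. Otherwise the
+ * remaining cause bits are kept in pp->cause_rx_tx for the next
+ * poll.
+ */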
+ if (budget > 0) {
+ cause_rx_tx = 0;
+ napi_complete(napi);
+ local_irq_save(flags);
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+ local_irq_restore(flags);
+ }
+
+ pp->cause_rx_tx = cause_rx_tx;
+ return rx_done;
+}
+
+/* tx done timer callback */
+static void mvneta_tx_done_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvneta_port *pp = netdev_priv(dev);
+ int tx_done = 0, tx_todo = 0;
+
+ if (!netif_running(dev))
+ return;
+
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ tx_done = mvneta_tx_done_gbe(pp,
+ (((1 << txq_number) - 1) &
+ MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
+ &tx_todo);
+ if (tx_todo > 0)
+ mvneta_add_tx_done_timer(pp);
+}
+
+/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
+static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ int num)
+{
+ struct net_device *dev = pp->dev;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct sk_buff *skb;
+ struct mvneta_rx_desc *rx_desc;
+ unsigned long phys_addr;
+
+ skb = dev_alloc_skb(pp->pkt_size);
+ if (!skb) {
+ netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
+ __func__, rxq->id, i, num);
+ break;
+ }
+
+ rx_desc = rxq->descs + i;
+ memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
+ phys_addr = dma_map_single(dev->dev.parent, skb->head,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
+ }
+
+ /* Add this number of RX descriptors as non occupied (ready to
+ * get packets)
+ */
+ mvneta_rxq_non_occup_desc_add(pp, rxq, i);
+
+ return i;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+static void mvneta_tx_reset(struct mvneta_port *pp)
+{
+ int queue;
+
+ /* Free the skbs queued in each tx ring */
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_done_force(pp, &pp->txqs[queue]);
+
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
+}
+
+static void mvneta_rx_reset(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
+ mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ rxq->size = pp->rx_ring_size;
+
+ /* Allocate memory for RX descriptors */
+ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &rxq->descs_phys, GFP_KERNEL);
+ if (rxq->descs == NULL) {
+ netdev_err(pp->dev,
+ "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
+ rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->size);
+ return -ENOMEM;
+ }
+
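+ /* Make sure descriptor address is cache line size aligned */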
+ BUG_ON(rxq->descs !=
+ PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ rxq->last_desc = rxq->size - 1;
+
+ /* Set Rx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
+ mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
+
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+
+ /* Set coalescing pkts and time */
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+ /* Fill RXQ with buffers from RX pool */
+ mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+
+ return 0;
+}
+
+/* Cleanup Rx queue */
+static void mvneta_rxq_deinit(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ mvneta_rxq_drop_pkts(pp, rxq);
+
+ if (rxq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ rxq->size * MVNETA_DESC_ALIGNED_SIZE,
+ rxq->descs,
+ rxq->descs_phys);
+
+ rxq->descs = NULL;
+ rxq->last_desc = 0;
+ rxq->next_desc_to_proc = 0;
+ rxq->descs_phys = 0;
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ txq->size = pp->tx_ring_size;
+
+ /* Allocate memory for TX descriptors */
+ txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ &txq->descs_phys, GFP_KERNEL);
+ if (txq->descs == NULL) {
+ netdev_err(pp->dev,
+ "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
+ txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->size);
+ return -ENOMEM;
+ }
+
+ /* Make sure descriptor address is cache line size aligned */
+ BUG_ON(txq->descs !=
+ PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
+
+ txq->last_desc = txq->size - 1;
+
+ /* Set maximum bandwidth for enabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+ /* Set Tx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+ txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
+ if (txq->tx_skb == NULL) {
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+
+ return 0;
+}
+
+/* Free the resources allocated for a tx queue */
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ kfree(txq->tx_skb);
+
+ if (txq->descs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ txq->descs = NULL;
+ txq->last_desc = 0;
+ txq->next_desc_to_proc = 0;
+ txq->descs_phys = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
+
+ /* Set Tx descriptors queue starting address and size */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
+}
+
+/* Cleanup all Tx queues */
+static void mvneta_cleanup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++)
+ mvneta_txq_deinit(pp, &pp->txqs[queue]);
+}
+
+/* Cleanup all Rx queues */
+static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++)
+ mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+}
+
+/* Init all Rx queues */
+static int mvneta_setup_rxqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_rxqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Init all tx queues */
+static int mvneta_setup_txqs(struct mvneta_port *pp)
+{
+ int queue;
+
+ for (queue = 0; queue < txq_number; queue++) {
+ int err = mvneta_txq_init(pp, &pp->txqs[queue]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create txq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_txqs(pp);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void mvneta_start_dev(struct mvneta_port *pp)
+{
+ mvneta_max_rx_size_set(pp, pp->pkt_size);
+ mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+
+ /* start the Rx/Tx activity */
+ mvneta_port_enable(pp);
+
+ /* Enable polling on the port */
+ napi_enable(&pp->napi);
+
+ /* Unmask interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number));
+
+ phy_start(pp->phy_dev);
+ netif_tx_start_all_queues(pp->dev);
+}
+
+static void mvneta_stop_dev(struct mvneta_port *pp)
+{
+ phy_stop(pp->phy_dev);
+
+ napi_disable(&pp->napi);
+
+ netif_carrier_off(pp->dev);
+
+ mvneta_port_down(pp);
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* Stop the port activity */
+ mvneta_port_disable(pp);
+
+ /* Clear all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+ mvneta_tx_reset(pp);
+ mvneta_rx_reset(pp);
+}
+
+/* tx timeout callback - display a message and stop/start the network device */
+static void mvneta_tx_timeout(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ netdev_info(dev, "tx timeout\n");
+ mvneta_stop_dev(pp);
+ mvneta_start_dev(pp);
+}
+
+/* Return a valid, possibly adjusted MTU, or a negative errno if it is too small */
+static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
+{
+ if (mtu < 68) {
+ netdev_err(dev, "cannot change mtu to less than 68\n");
+ return -EINVAL;
+ }
+
+ /* 9676 == 9700 - 20 and rounding to 8 */
+ if (mtu > 9676) {
+ netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
+ mtu = 9676;
+ }
+
+ if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
+ netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
+ mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
+ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
+ }
+
+ return mtu;
+}
+
+/* Change the device mtu */
+static int mvneta_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mtu = mvneta_check_mtu_valid(dev, mtu);
+ if (mtu < 0)
+ return -EINVAL;
+
+ dev->mtu = mtu;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* The interface is running, so we have to force a
+ * reallocation of the RXQs
+ */
+ mvneta_stop_dev(pp);
+
+ mvneta_cleanup_txqs(pp);
+ mvneta_cleanup_rxqs(pp);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret) {
+ netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ return ret;
+ }
+
+ mvneta_setup_txqs(pp);
+
+ mvneta_start_dev(pp);
+ mvneta_port_up(pp);
+
+ return 0;
+}
+
+/* Handle setting mac address */
+static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
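+ /* 'addr' is a struct sockaddr; skip the two-byte sa_family
+ * field to reach the MAC bytes in sa_data.
+ */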
+ u8 *mac = addr + 2;
+ int i;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Remove previous address table entry */
+ mvneta_mac_addr_set(pp, dev->dev_addr, -1);
+
+ /* Set new addr in hw */
+ mvneta_mac_addr_set(pp, mac, rxq_def);
+
+ /* Set addr in the device */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = mac[i];
+
+ return 0;
+}
+
+static void mvneta_adjust_link(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ struct phy_device *phydev = pp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((pp->speed != phydev->speed) ||
+ (pp->duplex != phydev->duplex)) {
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+
+ if (phydev->duplex)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (phydev->speed == SPEED_1000)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+
+ pp->duplex = phydev->duplex;
+ pp->speed = phydev->speed;
+ }
+ }
+
+ if (phydev->link != pp->link) {
+ if (!phydev->link) {
+ pp->duplex = -1;
+ pp->speed = 0;
+ }
+
+ pp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val |= (MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ mvneta_port_up(pp);
+ netdev_info(pp->dev, "link up\n");
+ } else {
+ mvneta_port_down(pp);
+ netdev_info(pp->dev, "link down\n");
+ }
+ }
+}
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct phy_device *phy_dev;
+
+ phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
+ pp->phy_interface);
+ if (!phy_dev) {
+ netdev_err(pp->dev, "could not find the PHY\n");
+ return -ENODEV;
+ }
+
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
+
+ pp->phy_dev = phy_dev;
+ pp->link = 0;
+ pp->duplex = 0;
+ pp->speed = 0;
+
+ return 0;
+}
+
+static void mvneta_mdio_remove(struct mvneta_port *pp)
+{
+ phy_disconnect(pp->phy_dev);
+ pp->phy_dev = NULL;
+}
+
+static int mvneta_open(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret)
+ return ret;
+
+ ret = mvneta_setup_txqs(pp);
+ if (ret)
+ goto err_cleanup_rxqs;
+
+ /* Connect to port interrupt line */
+ ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+ MVNETA_DRIVER_NAME, pp);
+ if (ret) {
+ netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
+ goto err_cleanup_txqs;
+ }
+
+ /* The link is down by default */
+ netif_carrier_off(pp->dev);
+
+ ret = mvneta_mdio_probe(pp);
+ if (ret < 0) {
+ netdev_err(dev, "cannot probe MDIO bus\n");
+ goto err_free_irq;
+ }
+
+ mvneta_start_dev(pp);
+
+ return 0;
+
+err_free_irq:
+ free_irq(pp->dev->irq, pp);
+err_cleanup_txqs:
+ mvneta_cleanup_txqs(pp);
+err_cleanup_rxqs:
+ mvneta_cleanup_rxqs(pp);
+ return ret;
+}
+
+/* Stop the port, free port interrupt line */
+static int mvneta_stop(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ mvneta_stop_dev(pp);
+ mvneta_mdio_remove(pp);
+ free_irq(dev->irq, pp);
+ mvneta_cleanup_rxqs(pp);
+ mvneta_cleanup_txqs(pp);
+ del_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ return 0;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtools */
+int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(pp->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtools */
+int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if (!pp->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(pp->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int mvneta_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+ mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
+ }
+
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+ }
+
+ return 0;
+}
+
+/* get coalescing for ethtools */
+static int mvneta_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
+ c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
+
+ c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
+ return 0;
+}
+
+static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(netdev);
+
+ ring->rx_max_pending = MVNETA_MAX_RXD;
+ ring->tx_max_pending = MVNETA_MAX_TXD;
+ ring->rx_pending = pp->rx_ring_size;
+ ring->tx_pending = pp->tx_ring_size;
+}
+
+static int mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+ return -EINVAL;
+ pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
+ ring->rx_pending : MVNETA_MAX_RXD;
+ pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
+ ring->tx_pending : MVNETA_MAX_TXD;
+
+ if (netif_running(dev)) {
+ mvneta_stop(dev);
+ if (mvneta_open(dev)) {
+ netdev_err(dev,
+ "error on opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops mvneta_netdev_ops = {
+ .ndo_open = mvneta_open,
+ .ndo_stop = mvneta_stop,
+ .ndo_start_xmit = mvneta_tx,
+ .ndo_set_rx_mode = mvneta_set_rx_mode,
+ .ndo_set_mac_address = mvneta_set_mac_addr,
+ .ndo_change_mtu = mvneta_change_mtu,
+ .ndo_tx_timeout = mvneta_tx_timeout,
+ .ndo_get_stats64 = mvneta_get_stats64,
+};
+
+const struct ethtool_ops mvneta_eth_tool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = mvneta_ethtool_get_settings,
+ .set_settings = mvneta_ethtool_set_settings,
+ .set_coalesce = mvneta_ethtool_set_coalesce,
+ .get_coalesce = mvneta_ethtool_get_coalesce,
+ .get_drvinfo = mvneta_ethtool_get_drvinfo,
+ .get_ringparam = mvneta_ethtool_get_ringparam,
+ .set_ringparam = mvneta_ethtool_set_ringparam,
+};
+
+/* Initialize hw */
+static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+{
+ int queue;
+
+ /* Disable port */
+ mvneta_port_disable(pp);
+
+ /* Set port default values */
+ mvneta_defaults_set(pp);
+
+ pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
+ if (!pp->txqs)
+ return -ENOMEM;
+
+ /* Initialize TX descriptor rings */
+ for (queue = 0; queue < txq_number; queue++) {
+ struct mvneta_tx_queue *txq = &pp->txqs[queue];
+ txq->id = queue;
+ txq->size = pp->tx_ring_size;
+ txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
+ }
+
+ pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs) {
+ kfree(pp->txqs);
+ return -ENOMEM;
+ }
+
+ /* Create Rx descriptor rings */
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+ rxq->id = queue;
+ rxq->size = pp->rx_ring_size;
+ rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
+ rxq->time_coal = MVNETA_RX_COAL_USEC;
+ }
+
+ return 0;
+}
+
+static void mvneta_deinit(struct mvneta_port *pp)
+{
+ kfree(pp->txqs);
+ kfree(pp->rxqs);
+}
+
+/* platform glue : initialize decoding windows */
+static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
+ const struct mbus_dram_target_info *dram)
+{
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
+
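+ /* In MVNETA_BASE_ADDR_ENABLE a set bit disables the
+ * corresponding window, so start with all six windows disabled
+ * and clear the bit for each DRAM chip select configured below.
+ */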
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+ mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i),
+ (cs->size - 1) & 0xffff0000);
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Power up the port */
+static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+{
+ u32 val;
+
+ /* MAC Cause register should be cleared */
+ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mvneta_port_sgmii_config(pp);
+
+ mvneta_gmac_rgmii_set(pp, 1);
+
+ /* Cancel Port Reset */
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ val &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
+}
+
+/* Device initialization routine */
+static int mvneta_probe(struct platform_device *pdev)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ struct device_node *dn = pdev->dev.of_node;
+ struct device_node *phy_node;
+ u32 phy_addr;
+ struct mvneta_port *pp;
+ struct net_device *dev;
+ const char *mac_addr;
+ int phy_mode;
+ int err;
+
+ /* Our multiqueue support is not complete, so for now, only
+ * allow the usage of the first RX queue
+ */
+ if (rxq_def != 0) {
+ dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
+ return -EINVAL;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0) {
+ err = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ phy_node = of_parse_phandle(dn, "phy", 0);
+ if (!phy_node) {
+ dev_err(&pdev->dev, "no associated PHY\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ phy_mode = of_get_phy_mode(dn);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "incorrect phy-mode\n");
+ err = -EINVAL;
+ goto err_free_irq;
+ }
+
+ mac_addr = of_get_mac_address(dn);
+
+ if (!mac_addr || !is_valid_ether_addr(mac_addr))
+ eth_hw_addr_random(dev);
+ else
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+
+ SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+
+ pp = netdev_priv(dev);
+
+ pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+ init_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
+
+ pp->weight = MVNETA_RX_POLL_WEIGHT;
+ pp->phy_node = phy_node;
+ pp->phy_interface = phy_mode;
+
+ pp->base = of_iomap(dn, 0);
+ if (pp->base == NULL) {
+ err = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_unmap;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->tx_done_timer.data = (unsigned long)dev;
+
+ pp->tx_ring_size = MVNETA_MAX_TXD;
+ pp->rx_ring_size = MVNETA_MAX_RXD;
+
+ pp->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = mvneta_init(pp, phy_addr);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't init eth hal\n");
+ goto err_clk;
+ }
+ mvneta_port_power_up(pp, phy_mode);
+
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvneta_conf_mbus_windows(pp, dram_target_info);
+
+ netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+
+ err = register_netdev(dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register\n");
+ goto err_deinit;
+ }
+
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ platform_set_drvdata(pdev, pp->dev);
+
+ return 0;
+
+err_deinit:
+ mvneta_deinit(pp);
+err_clk:
+ clk_disable_unprepare(pp->clk);
+err_unmap:
+ iounmap(pp->base);
+err_free_irq:
+ irq_dispose_mapping(dev->irq);
+err_free_netdev:
+ free_netdev(dev);
+ return err;
+}
+
+/* Device removal routine */
+static int mvneta_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ mvneta_deinit(pp);
+ clk_disable_unprepare(pp->clk);
+ iounmap(pp->base);
+ irq_dispose_mapping(dev->irq);
+ free_netdev(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id mvneta_match[] = {
+ { .compatible = "marvell,armada-370-neta" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_match);
+
+static struct platform_driver mvneta_driver = {
+ .probe = mvneta_probe,
+ .remove = mvneta_remove,
+ .driver = {
+ .name = MVNETA_DRIVER_NAME,
+ .of_match_table = mvneta_match,
+ },
+};
+
+module_platform_driver(mvneta_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, S_IRUGO);
+module_param(txq_number, int, S_IRUGO);
+
+module_param(rxq_def, int, S_IRUGO);
+module_param(txq_def, int, S_IRUGO);
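+
+/* The queue-count and default-queue knobs above can only be set at module
+ * load time (S_IRUGO makes them read-only in sysfs); a hypothetical
+ * invocation would be "modprobe mvneta rxq_def=0", keeping in mind that
+ * mvneta_probe() currently rejects any rxq_def other than 0.
+ */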
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 59489722e898..10d678d3dd01 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1131,7 +1131,7 @@ static int pxa168_eth_open(struct net_device *dev)
err = request_irq(dev->irq, pxa168_eth_int_handler,
IRQF_DISABLED, dev->name, dev);
if (err) {
- dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ dev_err(&dev->dev, "can't assign irq\n");
return -EAGAIN;
}
pep->rx_resource_err = 0;
@@ -1201,9 +1201,8 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
*/
pxa168_eth_stop(dev);
if (pxa168_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "MTU change\n");
+ dev_err(&dev->dev,
+ "fatal error on re-opening device after MTU change\n");
}
return 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index d19a143aa5a8..5544a1fe2f94 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3860,7 +3860,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
return dev;
}
-static void __devinit skge_show_addr(struct net_device *dev)
+static void skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
@@ -3869,8 +3869,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
static int only_32bit_dma;
-static int __devinit skge_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev, *dev1;
struct skge_hw *hw;
@@ -4012,7 +4011,7 @@ err_out:
return err;
}
-static void __devexit skge_remove(struct pci_dev *pdev)
+static void skge_remove(struct pci_dev *pdev)
{
struct skge_hw *hw = pci_get_drvdata(pdev);
struct net_device *dev0, *dev1;
@@ -4142,7 +4141,7 @@ static struct pci_driver skge_driver = {
.name = DRV_NAME,
.id_table = skge_id_table,
.probe = skge_probe,
- .remove = __devexit_p(skge_remove),
+ .remove = skge_remove,
.shutdown = skge_shutdown,
.driver.pm = SKGE_PM_OPS,
};
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 78946feab4a2..3269eb38cc57 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3140,7 +3140,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
}
-static int __devinit sky2_init(struct sky2_hw *hw)
+static int sky2_init(struct sky2_hw *hw)
{
u8 t8;
@@ -4741,9 +4741,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
};
/* Initialize network device */
-static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
- unsigned port,
- int highmem, int wol)
+static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
+ int highmem, int wol)
{
struct sky2_port *sky2;
struct net_device *dev = alloc_etherdev(sizeof(*sky2));
@@ -4807,7 +4806,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
return dev;
}
-static void __devinit sky2_show_addr(struct net_device *dev)
+static void sky2_show_addr(struct net_device *dev)
{
const struct sky2_port *sky2 = netdev_priv(dev);
@@ -4815,7 +4814,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
}
/* Handle software interrupt used during MSI test */
-static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
+static irqreturn_t sky2_test_intr(int irq, void *dev_id)
{
struct sky2_hw *hw = dev_id;
u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
@@ -4834,7 +4833,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
}
/* Test interrupt path by forcing a a software IRQ */
-static int __devinit sky2_test_msi(struct sky2_hw *hw)
+static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
int err;
@@ -4896,8 +4895,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
return buf;
}
-static int __devinit sky2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev, *dev1;
struct sky2_hw *hw;
@@ -4919,13 +4917,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
if (err) {
dev_err(&pdev->dev, "PCI read config failed\n");
- goto err_out;
+ goto err_out_disable;
}
if (~reg == 0) {
dev_err(&pdev->dev, "PCI configuration read error\n");
err = -EIO;
- goto err_out;
+ goto err_out_disable;
}
err = pci_request_regions(pdev, DRV_NAME);
@@ -5012,10 +5010,11 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
if (!disable_msi && pci_enable_msi(pdev) == 0) {
err = sky2_test_msi(hw);
- if (err == -EOPNOTSUPP)
+ if (err) {
pci_disable_msi(pdev);
- else if (err)
- goto err_out_free_netdev;
+ if (err != -EOPNOTSUPP)
+ goto err_out_free_netdev;
+ }
}
err = register_netdev(dev);
@@ -5063,10 +5062,10 @@ err_out_unregister_dev1:
err_out_free_dev1:
free_netdev(dev1);
err_out_unregister:
- if (hw->flags & SKY2_HW_USE_MSI)
- pci_disable_msi(pdev);
unregister_netdev(dev);
err_out_free_netdev:
+ if (hw->flags & SKY2_HW_USE_MSI)
+ pci_disable_msi(pdev);
free_netdev(dev);
err_out_free_pci:
pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
@@ -5086,7 +5085,7 @@ err_out:
return err;
}
-static void __devexit sky2_remove(struct pci_dev *pdev)
+static void sky2_remove(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
int i;
@@ -5207,7 +5206,7 @@ static struct pci_driver sky2_driver = {
.name = DRV_NAME,
.id_table = sky2_id_table,
.probe = sky2_probe,
- .remove = __devexit_p(sky2_remove),
+ .remove = sky2_remove,
.shutdown = sky2_shutdown,
.driver.pm = SKY2_PM_OPS,
};
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d8099a7903d3..bcdbc14aeff0 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_MELLANOX
bool "Mellanox devices"
default y
- depends on PCI && INET
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5f027f95cc84..eb520ab64014 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,9 +4,8 @@
config MLX4_EN
tristate "Mellanox Technologies 10Gbit Ethernet support"
- depends on PCI && INET
+ depends on PCI
select MLX4_CORE
- select INET_LRO
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 3d1899ff1076..fdc5f23d8e9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u32 reply;
u8 is_going_down = 0;
int i;
+ unsigned long flags;
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
goto reset_slave;
}
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = cmd;
else
is_going_down = 1;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d)"
" executing from slave:%d\n",
@@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
reset_slave:
/* cleanup any slave resources */
mlx4_delete_all_resources_for_slave(dev, slave);
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
memset(&slave_state[slave].event_eq, 0,
@@ -1755,7 +1756,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
spin_lock_init(&s_state->lock);
}
- memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index aa9c2f6cf3c0..b8d0854a7ad1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,7 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
cq->is_tx = mode;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 9d0b88eea02b..03447dad07e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -43,6 +43,34 @@
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
+static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ priv->tx_cq[i].moder_cnt = priv->tx_frames;
+ priv->tx_cq[i].moder_time = priv->tx_usecs;
+ err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ if (err)
+ return err;
+ }
+
+ if (priv->adaptive_rx_coal)
+ return 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_cq[i].moder_cnt = priv->rx_frames;
+ priv->rx_cq[i].moder_time = priv->rx_usecs;
+ priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
@@ -381,7 +409,6 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int err, i;
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
@@ -397,14 +424,6 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames != priv->tx_frames) {
priv->tx_usecs = coal->tx_coalesce_usecs;
priv->tx_frames = coal->tx_max_coalesced_frames;
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i].moder_cnt = priv->tx_frames;
- priv->tx_cq[i].moder_time = priv->tx_usecs;
- if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
- en_warn(priv, "Failed changing moderation "
- "for TX cq %d\n", i);
- }
- }
}
/* Set adaptive coalescing params */
@@ -414,18 +433,8 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
- if (priv->adaptive_rx_coal)
- return 0;
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i].moder_cnt = priv->rx_frames;
- priv->rx_cq[i].moder_time = priv->rx_usecs;
- priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
- if (err)
- return err;
- }
- return 0;
+ return mlx4_en_moderation_update(priv);
}
static int mlx4_en_set_pauseparam(struct net_device *dev,
@@ -466,7 +475,6 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
- int i;
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
@@ -505,14 +513,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
en_err(priv, "Failed starting port\n");
}
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i].moder_cnt = priv->rx_frames;
- priv->rx_cq[i].moder_time = priv->rx_usecs;
- priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
- if (err)
- goto out;
- }
+ err = mlx4_en_moderation_update(priv);
out:
mutex_unlock(&mdev->state_lock);
@@ -612,13 +613,17 @@ static int mlx4_en_validate_flow(struct net_device *dev,
struct ethtool_usrip4_spec *l3_mask;
struct ethtool_tcpip4_spec *l4_mask;
struct ethhdr *eth_mask;
- u64 full_mac = ~0ull;
- u64 zero_mac = 0;
if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
- switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+ /* dest mac mask must be ff:ff:ff:ff:ff:ff */
+ if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
+ return -EINVAL;
+ }
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
if (cmd->fs.m_u.tcp_ip4_spec.tos)
@@ -643,11 +648,11 @@ static int mlx4_en_validate_flow(struct net_device *dev,
case ETHER_FLOW:
eth_mask = &cmd->fs.m_u.ether_spec;
/* source mac mask must not be set */
- if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
+ if (!is_zero_ether_addr(eth_mask->h_source))
return -EINVAL;
/* dest mac mask must be ff:ff:ff:ff:ff:ff */
- if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
+ if (!is_broadcast_ether_addr(eth_mask->h_dest))
return -EINVAL;
if (!all_zeros_or_all_ones(eth_mask->h_proto))
@@ -746,7 +751,6 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
struct list_head *rule_list_h)
{
int err;
- u64 mac;
__be64 be_mac;
struct ethhdr *eth_spec;
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -761,12 +765,16 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
if (!spec_l2)
return -ENOMEM;
- mac = priv->mac & MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
+ if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+ memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+ } else {
+ u64 mac = priv->mac & MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+ }
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
- if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
@@ -776,7 +784,7 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
list_add_tail(&spec_l2->list, rule_list_h);
- switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
eth_spec = &cmd->fs.h_u.ether_spec;
memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
@@ -998,6 +1006,73 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+static void mlx4_en_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RX_RINGS;
+ channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+
+ channel->rx_count = priv->rx_ring_num;
+ channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+}
+
+static int mlx4_en_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up;
+ int err = 0;
+
+ if (channel->other_count || channel->combined_count ||
+ channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
+ channel->rx_count > MAX_RX_RINGS ||
+ !channel->tx_count || !channel->rx_count)
+ return -EINVAL;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ priv->num_tx_rings_p_up = channel->tx_count;
+ priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ priv->rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+ mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+
+ en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+ en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+ err = mlx4_en_moderation_update(priv);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -1022,6 +1097,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
+ .get_channels = mlx4_en_get_channels,
+ .set_channels = mlx4_en_set_channels,
};
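
The new get/set_channels hooks expose the ring counts through ethtool's standard channels interface: the requested tx_count is a per-UP (user priority) value, so the driver ends up with tx_count * MLX4_EN_NUM_UP TX rings, while rx_count maps one-to-one onto RX rings. A minimal sketch of that bookkeeping follows (plain userspace C, not driver code; the MLX4_EN_MAX_TX_RING_P_UP limit is an assumed value here, MLX4_EN_NUM_UP and MAX_RX_RINGS are taken from mlx4_en.h in this series).

#include <stdio.h>

#define MLX4_EN_NUM_UP			8	/* from mlx4_en.h in this series */
#define MLX4_EN_MAX_TX_RING_P_UP	32	/* assumed per-UP limit */
#define MAX_RX_RINGS			128	/* from mlx4_en.h in this series */

static int apply_channels(unsigned int tx_count, unsigned int rx_count)
{
	if (!tx_count || !rx_count ||
	    tx_count > MLX4_EN_MAX_TX_RING_P_UP || rx_count > MAX_RX_RINGS)
		return -1;	/* mirrors the -EINVAL validation above */

	printf("tx_count %u -> %u TX rings (%u per UP), rx_count %u -> %u RX rings\n",
	       tx_count, tx_count * MLX4_EN_NUM_UP, tx_count,
	       rx_count, rx_count);
	return 0;
}

int main(void)
{
	apply_channels(4, 16);	/* e.g. "ethtool -L ethX tx 4 rx 16" */
	apply_channels(0, 16);	/* rejected, as in mlx4_en_set_channels() */
	return 0;
}
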
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a52922ed85c1..3a2b8c65642d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -250,7 +250,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
min_t(int,
dev->caps.num_comp_vectors,
- MAX_RX_RINGS)));
+ DEF_RX_RINGS)));
} else {
mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
min_t(int, dev->caps.comp_pool/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index edd9cb8d3e1d..75a3f467bb5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -47,11 +47,11 @@
#include "mlx4_en.h"
#include "en_port.h"
-static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
- unsigned int q, offset = 0;
+ unsigned int offset = 0;
if (up && up != MLX4_EN_NUM_UP)
return -EINVAL;
@@ -59,10 +59,9 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
netdev_set_num_tc(dev, up);
/* Partition Tx queues evenly amongst UP's */
- q = priv->tx_ring_num / up;
for (i = 0; i < up; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
+ netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
+ offset += priv->num_tx_rings_p_up;
}
return 0;
@@ -870,7 +869,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
* - moder_cnt is set to the number of mtu sized packets to
- * satisfy our coelsing target.
+ * satisfy our coalescing target.
* - moder_time is set to a fixed value.
*/
priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
@@ -1114,7 +1113,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure ring */
tx_ring = &priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
- i / priv->mdev->profile.num_tx_rings_p_up);
+ i / priv->num_tx_rings_p_up);
if (err) {
en_err(priv, "Failed allocating Tx ring\n");
mlx4_en_deactivate_cq(priv, cq);
@@ -1564,10 +1563,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int err;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
- prof->tx_ring_num, prof->rx_ring_num);
+ MAX_TX_RINGS, MAX_RX_RINGS);
if (dev == NULL)
return -ENOMEM;
+ netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
+
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
dev->dev_id = port - 1;
@@ -1586,20 +1588,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags = prof->flags;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
+ priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
- priv->tx_ring_num, GFP_KERNEL);
+
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+ GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
- GFP_KERNEL);
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS,
+ GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
goto out;
}
priv->rx_ring_num = prof->rx_ring_num;
+ priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5aba5ecdf1e2..fed26d867f4e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -566,6 +566,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct ethhdr *ethh;
dma_addr_t dma;
u64 s_mac;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return 0;
@@ -574,7 +575,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
- cqe = &cq->buf[index];
+ cqe = &cq->buf[(index << factor) + factor];
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -630,7 +631,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
ring->csum_ok++;
- /* This packet is eligible for LRO if it is:
+ /* This packet is eligible for GRO if it is:
* - DIX Ethernet (type interpretation)
* - TCP/IP (v4)
* - without IP options
@@ -667,7 +668,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
- /* LRO not possible, complete processing here */
+ /* GRO not possible, complete processing here */
ip_summed = CHECKSUM_UNNECESSARY;
} else {
ip_summed = CHECKSUM_NONE;
@@ -709,12 +710,9 @@ next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
- cqe = &cq->buf[index];
- if (++polled == budget) {
- /* We are here because we reached the NAPI budget -
- * flush only pending LRO sessions */
+ cqe = &cq->buf[(index << factor) + factor];
+ if (++polled == budget)
goto out;
- }
}
out:
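
The "(index << factor) + factor" indexing above is how the RX path finds the legacy 32-byte CQE layout once 64-byte CQEs are enabled: cq->buf is still addressed in 32-byte units, and with cqe_factor == 1 the legacy fields sit in the upper half of each 64-byte entry. A small standalone sketch (userspace C, not driver code) of the resulting byte offsets:

#include <stdio.h>

#define LEGACY_CQE_SIZE 32	/* size of the legacy CQE layout, in bytes */

static unsigned long cqe_byte_offset(unsigned int index, int factor)
{
	/* same expression as the driver, scaled to bytes */
	return ((unsigned long)(index << factor) + factor) * LEGACY_CQE_SIZE;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("index %u: 32-byte CQEs at %3lu, 64-byte CQEs at %3lu\n",
		       i, cqe_byte_offset(i, 0), cqe_byte_offset(i, 1));
	return 0;
}

With factor 1 the stride becomes 64 bytes and every lookup lands 32 bytes into the entry, which is exactly where the legacy CQE fields live.
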
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b35094c590ba..6771b69f40d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -315,12 +315,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_cqe *buf = cq->buf;
u32 packets = 0;
u32 bytes = 0;
+ int factor = priv->cqe_factor;
if (!priv->port_up)
return;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
/* Process all completed CQEs */
@@ -349,7 +350,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
++cons_index;
index = cons_index & size_mask;
- cqe = &buf[index];
+ cqe = &buf[(index << factor) + factor];
}
@@ -523,7 +524,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+ u16 rings_p_up = priv->num_tx_rings_p_up;
u8 up = 0;
if (dev->num_tc)
@@ -629,10 +630,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index b84a88bc44dc..251ae2f93116 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,15 +101,21 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
mb();
}
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
{
- unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
- return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+ /* (entry & (eq->nent - 1)) gives us a cyclic array */
+ unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
+ /* CX3 is capable of extending the EQE from 32 to 64 bytes.
+ * When this feature is enabled, the first (in the lower addresses)
+ * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
+ * contain the legacy EQE information.
+ */
+ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
{
- struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
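
The reworked get_eqe() makes the same 32-vs-64-byte distinction for event queue entries: consecutive EQEs are (MLX4_EQ_ENTRY_SIZE << eqe_factor) bytes apart, and when the 64-byte format is enabled the legacy EQE occupies the second 32-byte half, hence the extra MLX4_EQ_ENTRY_SIZE added before the in-page offset is taken. A standalone sketch of that address computation (plain C, 4 KiB pages assumed):

#include <stdio.h>

#define MLX4_EQ_ENTRY_SIZE 32
#define EQ_PAGE_SIZE 4096	/* assumed page size for the sketch */

static void locate_eqe(unsigned int cons_index, unsigned int nent, int eqe_factor)
{
	/* (cons_index & (nent - 1)) walks the EQ as a cyclic array */
	unsigned long off = (cons_index & (nent - 1)) *
			    (MLX4_EQ_ENTRY_SIZE << eqe_factor);
	unsigned long page = off / EQ_PAGE_SIZE;
	unsigned long in_page = (off + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) %
				EQ_PAGE_SIZE;

	printf("cons_index %u -> page %lu, byte offset %lu\n",
	       cons_index, page, in_page);
}

int main(void)
{
	unsigned int idx;

	/* with 64-byte EQEs, 64 entries fit in a 4 KiB page: watch the page flip */
	for (idx = 62; idx <= 65; idx++)
		locate_eqe(idx, 512, 1);
	return 0;
}
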
@@ -177,7 +183,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownership bit */
wmb();
@@ -401,6 +407,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int i;
int err;
+ unsigned long flags;
mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
@@ -412,10 +419,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
slave_state[i].is_slave_going_down = 0;
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*notify the FW:*/
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
@@ -440,8 +447,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
u8 update_slave_state;
int i;
enum slave_port_gen_event gen_event;
+ unsigned long flags;
- while ((eqe = next_eqe_sw(eq))) {
+ while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -647,13 +655,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
} else
update_slave_state = 1;
- spin_lock(&priv->mfunc.master.slave_state_lock);
+ spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (update_slave_state) {
priv->mfunc.master.slave_state[flr_slave].active = false;
priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
- spin_unlock(&priv->mfunc.master.slave_state_lock);
+ spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
@@ -864,7 +872,8 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
- npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -966,8 +975,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int err;
- int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
int i;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+ int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -1267,7 +1277,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Temporary use polling for command completions */
mlx4_cmd_use_polling(dev);
- /* Map the new eq to handle all asyncronous events */
+ /* Map the new eq to handle all asynchronous events */
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[i].eqn);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 4f30b99324cf..8b3d0512a46b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -110,6 +110,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[42] = "Multicast VEP steering support",
[48] = "Counters support",
[59] = "Port management change event support",
+ [61] = "64 byte EQE support",
+ [62] = "64 byte CQE support",
};
int i;
@@ -235,7 +237,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- size = 0; /* no PF behaviour is set for now */
+ size = dev->caps.function_caps; /* set PF behaviours */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
field = 0; /* protected FMR support not available as yet */
@@ -1237,6 +1239,24 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
+ *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1318,7 +1338,9 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u32 dword_field;
int err;
+ u8 byte_field;
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
@@ -1351,10 +1373,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
+ if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
+ param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ } else {
+ MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
+ if (byte_field & 0x8)
+ param->steering_mode = MLX4_STEERING_MODE_B0;
+ else
+ param->steering_mode = MLX4_STEERING_MODE_A0;
+ }
/* steering attributes */
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
-
+ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
@@ -1370,6 +1400,13 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
}
+ /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
+ MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
+ if (byte_field & 0x20) /* 64-bytes eqe enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
+ if (byte_field & 0x40) /* 64-bytes cqe enabled */
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 85abe9c11a22..dbf2f69cc59f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -172,6 +172,8 @@ struct mlx4_init_hca_param {
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 fs_hash_enable_bits;
+ u8 steering_mode; /* for QUERY_HCA */
+ u64 dev_cap_enabled;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2aa80afd98d2..5163af314990 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -85,20 +85,26 @@ static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
-int mlx4_log_num_mgm_entry_size = 10;
+int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
- " 10 gives 248.range: 9<="
+ " 10 gives 248.range: 7 <="
" log_num_mgm_entry_size <= 12."
- " Not in use with device managed"
- " flow steering");
+ " To activate device managed"
+ " flow steering when available, set to -1");
+
+static bool enable_64b_cqe_eqe;
+module_param(enable_64b_cqe_eqe, bool, 0444);
+MODULE_PARM_DESC(enable_64b_cqe_eqe,
+ "Enable 64 byte CQEs/EQEs when the the FW supports this");
#define HCA_GLOBAL_CAP_MASK 0
-#define PF_CONTEXT_BEHAVIOUR_MASK 0
-static char mlx4_version[] __devinitdata =
+#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
+
+static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -275,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
- if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
- dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
- dev->caps.fs_log_max_ucast_qp_range_size =
- dev_cap->fs_log_max_ucast_qp_range_size;
- } else {
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
- dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
- } else {
- dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
- }
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
- }
- mlx4_dbg(dev, "Steering mode is: %s\n",
- mlx4_steering_mode_str(dev->caps.steering_mode));
-
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -386,6 +370,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
+
+ if (!enable_64b_cqe_eqe) {
+ if (dev_cap->flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
+ mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
+ }
+ }
+
+ if ((dev->caps.flags &
+ (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
+ mlx4_is_master(dev))
+ dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
+
return 0;
}
/* The function checks whether there are live VFs and returns their number */
@@ -472,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
}
EXPORT_SYMBOL(mlx4_is_slave_active);
+static void slave_adjust_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *hca_param)
+{
+ dev->caps.steering_mode = hca_param->steering_mode;
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else
+ dev->caps.num_qp_per_mgm =
+ 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
+
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+}
+
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
int err;
@@ -599,6 +615,23 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
goto err_mem;
}
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
+ dev->caps.eqe_size = 64;
+ dev->caps.eqe_factor = 1;
+ } else {
+ dev->caps.eqe_size = 32;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
+ dev->caps.cqe_size = 64;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ } else {
+ dev->caps.cqe_size = 32;
+ }
+
+ slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
+
return 0;
err_mem:
@@ -1285,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
}
}
+static int choose_log_fs_mgm_entry_size(int qp_per_entry)
+{
+ int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
+
+ for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
+ i++) {
+ if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
+ break;
+ }
+
+ return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
+}
+
+static void choose_steering_mode(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ if (mlx4_log_num_mgm_entry_size == -1 &&
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
+ (!mlx4_is_mfunc(dev) ||
+ (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
+ dev->oper_log_mgm_entry_size =
+ choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->oper_log_mgm_entry_size =
+ mlx4_log_num_mgm_entry_size > 0 ?
+ mlx4_log_num_mgm_entry_size :
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
+ "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode),
+ dev->oper_log_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size);
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
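
choose_log_fs_mgm_entry_size() and the reworked sizing in mcg.c both rest on the same relation: an MGM entry of (1 << log_size) bytes holds 4 * ((1 << log_size) / 16 - 2) QPs, which is where the "10 gives 248" figure in the module parameter description comes from. A small self-contained check of that arithmetic (ordinary C, not driver code):

#include <stdio.h>

#define MLX4_MIN_MGM_LOG_ENTRY_SIZE 7
#define MLX4_MAX_MGM_LOG_ENTRY_SIZE 12

static int qp_per_mgm(int log_entry_size)
{
	return 4 * ((1 << log_entry_size) / 16 - 2);
}

/* same search the driver performs, minus the redundant initializer */
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++)
		if (qp_per_entry <= qp_per_mgm(i))
			return i;
	return -1;	/* no supported entry size is large enough */
}

int main(void)
{
	int log_size;

	for (log_size = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
	     log_size <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; log_size++)
		printf("log_size %2d -> %4d QPs per MGM entry\n",
		       log_size, qp_per_mgm(log_size));

	printf("choose_log_fs_mgm_entry_size(65) = %d\n",
	       choose_log_fs_mgm_entry_size(65));
	return 0;
}

This prints 24 QPs for log_size 7 up to 1016 for log_size 12, and picks log_size 9 (120 QPs) as the smallest entry size able to hold 65 QPs, matching the smallest-fit search in choose_steering_mode() above.
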
@@ -1324,6 +1410,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ choose_steering_mode(dev, &dev_cap);
+
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
@@ -1702,15 +1790,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -2224,8 +2305,7 @@ err_disable_pdev:
return err;
}
-static int __devinit mlx4_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
printk_once(KERN_INFO "%s", mlx4_version);
@@ -2391,7 +2471,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .remove = __devexit_p(mlx4_remove_one),
+ .remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
@@ -2417,6 +2497,17 @@ static int __init mlx4_verify_params(void)
port_type_array[0] = true;
}
+ if (mlx4_log_num_mgm_entry_size != -1 &&
+ (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
+ mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
+ pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
+ "in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ return -1;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index e151c21baf2b..1ee4db3c6400 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,12 +54,7 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- if (dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED)
- return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
- else
- return min((1 << mlx4_log_num_mgm_entry_size),
- MLX4_MAX_MGM_ENTRY_SIZE);
+ return 1 << dev->oper_log_mgm_entry_size;
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1cf42036d7bb..116c5c29d2d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -94,8 +94,10 @@ enum {
};
enum {
- MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
- MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
+ MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
MLX4_MTT_ENTRY_PER_SEG = 8,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42264e2..8d54412ada63 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -67,7 +67,8 @@
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_RX_RINGS 16
+#define DEF_RX_RINGS 16
+#define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -95,8 +96,6 @@
#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
-#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
-
/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
* and 4K allocations) */
enum {
@@ -120,13 +119,15 @@ enum {
#define MLX4_EN_NUM_UP 8
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
+#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
+ MLX4_EN_NUM_UP)
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
#define MLX4_EN_RX_COAL_TIME 0x10
#define MLX4_EN_TX_COAL_PKTS 16
-#define MLX4_EN_TX_COAL_TIME 0x80
+#define MLX4_EN_TX_COAL_TIME 0x10
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
@@ -290,21 +291,6 @@ struct mlx4_en_rx_ring {
unsigned long csum_none;
};
-
-static inline int mlx4_en_can_lro(__be16 status)
-{
- return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV4F |
- MLX4_CQE_STATUS_IPV6 |
- MLX4_CQE_STATUS_IPV4OPT |
- MLX4_CQE_STATUS_TCP |
- MLX4_CQE_STATUS_UDP |
- MLX4_CQE_STATUS_IPOK)) ==
- cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPOK |
- MLX4_CQE_STATUS_TCP);
-}
-
struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
@@ -487,12 +473,14 @@ struct mlx4_en_priv {
int mac_index;
unsigned max_mtu;
int base_qpn;
+ int cqe_factor;
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
+ u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
@@ -613,6 +601,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
#endif
+int mlx4_en_setup_tc(struct net_device *dev, u8 up);
+
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *rx_ring);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b05705f50f0f..561ed2a22a17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
int err;
+ int qpn;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, NULL);
+ if (err) {
+ pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ return err;
+ }
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
- if (validate_eth_header_mac(slave, rule_header, rlist))
- return -EINVAL;
+ if (validate_eth_header_mac(slave, rule_header, rlist)) {
+ err = -EINVAL;
+ goto err_put;
+ }
break;
case MLX4_NET_TRANS_RULE_ID_IB:
break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
- if (add_eth_header(dev, slave, inbox, rlist, header_id))
- return -EINVAL;
+ if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+ err = -EINVAL;
+ goto err_put;
+ }
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
pr_err("Corrupted mailbox.\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put;
}
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- return err;
+ goto err_put;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources.\n ");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
- MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
+err_put:
+ put_res(dev, slave, qpn, RES_QP);
return err;
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index dccae1d1743a..07a6ebc47c92 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1249,9 +1249,6 @@ ks8695_open(struct net_device *ndev)
struct ks8695_priv *ksp = netdev_priv(ndev);
int ret;
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
ks8695_reset(ksp);
ks8695_update_mac(ksp);
@@ -1277,7 +1274,7 @@ ks8695_open(struct net_device *ndev)
* This initialises the LAN switch in the KS8695 to a known-good
* set of defaults.
*/
-static void __devinit
+static void
ks8695_init_switch(struct ks8695_priv *ksp)
{
u32 ctrl;
@@ -1305,7 +1302,7 @@ ks8695_init_switch(struct ks8695_priv *ksp)
* This initialises a KS8695's WAN phy to sensible values for
* autonegotiation etc.
*/
-static void __devinit
+static void
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
u32 ctrl;
@@ -1349,7 +1346,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
* wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
* port.
*/
-static int __devinit
+static int
ks8695_probe(struct platform_device *pdev)
{
struct ks8695_priv *ksp;
@@ -1597,7 +1594,7 @@ ks8695_drv_resume(struct platform_device *pdev)
*
* This unregisters and releases a KS8695 ethernet device.
*/
-static int __devexit
+static int
ks8695_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1620,7 +1617,7 @@ static struct platform_driver ks8695_driver = {
.owner = THIS_MODULE,
},
.probe = ks8695_probe,
- .remove = __devexit_p(ks8695_drv_remove),
+ .remove = ks8695_drv_remove,
.suspend = ks8695_drv_suspend,
.resume = ks8695_drv_resume,
};
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 24fb049ac2f2..b71eb39ab448 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1141,7 +1141,7 @@ static const struct ethtool_ops ks8842_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int __devinit ks8842_probe(struct platform_device *pdev)
+static int ks8842_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
struct resource *iomem;
@@ -1240,7 +1240,7 @@ err_mem_region:
return err;
}
-static int __devexit ks8842_remove(struct platform_device *pdev)
+static int ks8842_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ks8842_probe,
- .remove = __devexit_p(ks8842_remove),
+ .remove = ks8842_remove,
};
module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 1540ebeb8669..286816a4e783 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1415,7 +1415,7 @@ static int ks8851_resume(struct spi_device *spi)
#define ks8851_resume NULL
#endif
-static int __devinit ks8851_probe(struct spi_device *spi)
+static int ks8851_probe(struct spi_device *spi)
{
struct net_device *ndev;
struct ks8851_net *ks;
@@ -1534,7 +1534,7 @@ err_irq:
return ret;
}
-static int __devexit ks8851_remove(struct spi_device *spi)
+static int ks8851_remove(struct spi_device *spi)
{
struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
@@ -1554,7 +1554,7 @@ static struct spi_driver ks8851_driver = {
.owner = THIS_MODULE,
},
.probe = ks8851_probe,
- .remove = __devexit_p(ks8851_remove),
+ .remove = ks8851_remove,
.suspend = ks8851_suspend,
.resume = ks8851_resume,
};
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 38529edfe350..ef8f9f92e547 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1506,7 +1506,7 @@ static int ks_hw_init(struct ks_net *ks)
}
-static int __devinit ks8851_probe(struct platform_device *pdev)
+static int ks8851_probe(struct platform_device *pdev)
{
int err = -ENOMEM;
struct resource *io_d, *io_c;
@@ -1641,7 +1641,7 @@ err_mem_region:
return err;
}
-static int __devexit ks8851_remove(struct platform_device *pdev)
+static int ks8851_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ks_net *ks = netdev_priv(netdev);
@@ -1663,7 +1663,7 @@ static struct platform_driver ks8851_platform_driver = {
.owner = THIS_MODULE,
},
.probe = ks8851_probe,
- .remove = __devexit_p(ks8851_remove),
+ .remove = ks8851_remove,
};
module_platform_driver(ks8851_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 69e01977a1dd..8ebc352bcbe6 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1487,7 +1487,7 @@ struct dev_priv {
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"
-static char version[] __devinitdata =
+static char version[] =
"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
@@ -4761,7 +4761,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
struct ksz_dma_buf *dma_buf;
struct net_device *dev = NULL;
- spin_lock(&hw_priv->hwlock);
+ spin_lock_irq(&hw_priv->hwlock);
last = info->last;
while (info->avail < info->alloc) {
@@ -4795,7 +4795,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
info->avail++;
}
info->last = last;
- spin_unlock(&hw_priv->hwlock);
+ spin_unlock_irq(&hw_priv->hwlock);
/* Notify the network subsystem that the packet has been sent. */
if (dev)
@@ -5259,11 +5259,15 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
+ spin_lock(&hw_priv->hwlock);
+
hw_read_intr(hw, &int_enable);
/* Not our interrupt! */
- if (!int_enable)
+ if (!int_enable) {
+ spin_unlock(&hw_priv->hwlock);
return IRQ_NONE;
+ }
do {
hw_ack_intr(hw, int_enable);
@@ -5310,6 +5314,8 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
hw_ena_intr(hw);
+ spin_unlock(&hw_priv->hwlock);
+
return IRQ_HANDLED;
}
@@ -6769,7 +6775,7 @@ static int stp;
/*
* This enables fast aging in the KSZ8842 switch. Not sure what situation
* needs that. However, fast aging is used to flush the dynamic MAC table when
- * STP suport is enabled.
+ * STP support is enabled.
*/
static int fast_aging;
@@ -6919,8 +6925,7 @@ static void read_other_addr(struct ksz_hw *hw)
#define PCI_VENDOR_ID_MICREL_KS 0x16c6
#endif
-static int __devinit pcidev_init(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct net_device *dev;
struct dev_priv *priv;
@@ -7243,18 +7248,7 @@ static struct pci_driver pci_device_driver = {
.remove = pcidev_exit
};
-static int __init ksz884x_init_module(void)
-{
- return pci_register_driver(&pci_device_driver);
-}
-
-static void __exit ksz884x_cleanup_module(void)
-{
- pci_unregister_driver(&pci_device_driver);
-}
-
-module_init(ksz884x_init_module);
-module_exit(ksz884x_cleanup_module);
+module_pci_driver(pci_device_driver);
MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 6118bdad244f..a99456c3dd87 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1541,7 +1541,7 @@ static const struct net_device_ops enc28j60_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit enc28j60_probe(struct spi_device *spi)
+static int enc28j60_probe(struct spi_device *spi)
{
struct net_device *dev;
struct enc28j60_net *priv;
@@ -1617,7 +1617,7 @@ error_alloc:
return ret;
}
-static int __devexit enc28j60_remove(struct spi_device *spi)
+static int enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = dev_get_drvdata(&spi->dev);
@@ -1637,7 +1637,7 @@ static struct spi_driver enc28j60_driver = {
.owner = THIS_MODULE,
},
.probe = enc28j60_probe,
- .remove = __devexit_p(enc28j60_remove),
+ .remove = enc28j60_remove,
};
static int __init enc28j60_init(void)
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
index 540f0c6fc160..3932d081fa21 100644
--- a/drivers/net/ethernet/myricom/Kconfig
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -23,7 +23,6 @@ config MYRI10GE
depends on PCI && INET
select FW_LOADER
select CRC32
- select INET_LRO
---help---
This driver supports Myricom Myri-10G Dual Protocol interface in
Ethernet mode. If the eeprom on your board is not recent enough,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 83516e3369c9..f8408d6e961c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -50,7 +50,6 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
@@ -96,8 +95,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
-#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
-#define MYRI10GE_LRO_MAX_PKTS 64
#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
@@ -165,8 +162,6 @@ struct myri10ge_rx_done {
dma_addr_t bus;
int cnt;
int idx;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};
struct myri10ge_slice_netstats {
@@ -338,11 +333,6 @@ static int myri10ge_debug = -1; /* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
-static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
-module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro_max_pkts,
- "Number of LRO packets to be aggregated");
-
static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
@@ -1197,36 +1187,6 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
}
}
-static inline void
-myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
- struct skb_frag_struct *rx_frags, int len, int hlen)
-{
- struct skb_frag_struct *skb_frags;
-
- skb->len = skb->data_len = len;
- /* attach the page(s) */
-
- skb_frags = skb_shinfo(skb)->frags;
- while (len > 0) {
- memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
- len -= skb_frag_size(rx_frags);
- skb_frags++;
- rx_frags++;
- skb_shinfo(skb)->nr_frags++;
- }
-
- /* pskb_may_pull is not available in irq context, but
- * skb_pull() (for ether_pad and eth_type_trans()) requires
- * the beginning of the packet in skb_headlen(), move it
- * manually */
- skb_copy_to_linear_data(skb, va, hlen);
- skb_shinfo(skb)->frags[0].page_offset += hlen;
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
- skb->data_len -= hlen;
- skb->tail += hlen;
- skb_pull(skb, MXGEFW_PAD);
-}
-
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
@@ -1304,18 +1264,50 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
}
}
-#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
- * page into an skb */
+/*
+ * GRO does not support acceleration of tagged vlan frames, and
+ * this NIC does not support vlan tag offload, so we must pop
+ * the tag ourselves to be able to achieve GRO performance that
+ * is comparable to LRO.
+ */
+
+static inline void
+myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
+{
+ u8 *va;
+ struct vlan_ethhdr *veh;
+ struct skb_frag_struct *frag;
+ __wsum vsum;
+
+ va = addr;
+ va += MXGEFW_PAD;
+ veh = (struct vlan_ethhdr *)va;
+ if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX &&
+ veh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ /* fixup csum if needed */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
+ skb->csum = csum_sub(skb->csum, vsum);
+ }
+ /* pop tag */
+ __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI));
+ memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
+ skb->len -= VLAN_HLEN;
+ skb->data_len -= VLAN_HLEN;
+ frag = skb_shinfo(skb)->frags;
+ frag->page_offset += VLAN_HLEN;
+ skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+ }
+}
static inline int
-myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
- bool lro_enabled)
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
- struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
+ struct skb_frag_struct *rx_frags;
struct myri10ge_rx_buf *rx;
- int i, idx, hlen, remainder, bytes;
+ int i, idx, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
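
The checksum fixup in the hunk above works because the 16-bit one's-complement sum reported via CHECKSUM_COMPLETE is additive: the hardware sum starts at the end of the Ethernet header and therefore covers the four 802.1Q tag bytes, so after popping the tag the sum over those bytes has to be subtracted back out. A minimal sketch of that identity using the generic helpers (the wrapper name is illustrative only):

#include <net/checksum.h>

/* Sketch: adjust a CHECKSUM_COMPLETE value after removing "len" bytes
 * starting at "span" from the checksummed region. */
static __wsum csum_without_span(__wsum total, const void *span, int len)
{
	/* one's-complement sums add up piecewise, so subtracting the
	 * partial sum over the removed bytes leaves the sum of the rest */
	return csum_sub(total, csum_partial(span, len, 0));
}
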
@@ -1332,67 +1324,48 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
idx = rx->cnt & rx->mask;
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
+
+ skb = napi_get_frags(&ss->napi);
+ if (unlikely(skb == NULL)) {
+ ss->stats.rx_dropped++;
+ for (i = 0, remainder = len; remainder > 0; i++) {
+ myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
+ put_page(rx->info[idx].page);
+ rx->cnt++;
+ idx = rx->cnt & rx->mask;
+ remainder -= MYRI10GE_ALLOC_SIZE;
+ }
+ return 0;
+ }
+ rx_frags = skb_shinfo(skb)->frags;
/* Fill skb_frag_struct(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
- __skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
- rx_frags[i].page_offset = rx->info[idx].page_offset;
- if (remainder < MYRI10GE_ALLOC_SIZE)
- skb_frag_size_set(&rx_frags[i], remainder);
- else
- skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
+ skb_fill_page_desc(skb, i, rx->info[idx].page,
+ rx->info[idx].page_offset,
+ remainder < MYRI10GE_ALLOC_SIZE ?
+ remainder : MYRI10GE_ALLOC_SIZE);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
}
- if (lro_enabled) {
- rx_frags[0].page_offset += MXGEFW_PAD;
- skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
- len -= MXGEFW_PAD;
- lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
- /* opaque, will come back in get_frag_header */
- len, len,
- (void *)(__force unsigned long)csum, csum);
-
- return 1;
- }
-
- hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
-
- /* allocate an skb to attach the page(s) to. This is done
- * after trying LRO, so as to avoid skb allocation overheads */
-
- skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
- if (unlikely(skb == NULL)) {
- ss->stats.rx_dropped++;
- do {
- i--;
- __skb_frag_unref(&rx_frags[i]);
- } while (i != 0);
- return 0;
- }
+ /* remove padding */
+ rx_frags[0].page_offset += MXGEFW_PAD;
+ rx_frags[0].size -= MXGEFW_PAD;
+ len -= MXGEFW_PAD;
- /* Attach the pages to the skb, and trim off any padding */
- myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
- if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) {
- skb_frag_unref(skb, 0);
- skb_shinfo(skb)->nr_frags = 0;
- } else {
- skb->truesize += bytes * skb_shinfo(skb)->nr_frags;
+ skb->len = len;
+ skb->data_len = len;
+ skb->truesize += len;
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum;
}
- skb->protocol = eth_type_trans(skb, dev);
+ myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
- if (dev->features & NETIF_F_RXCSUM) {
- if ((skb->protocol == htons(ETH_P_IP)) ||
- (skb->protocol == htons(ETH_P_IPV6))) {
- skb->csum = csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- } else
- myri10ge_vlan_ip_csum(skb, csum);
- }
- netif_receive_skb(skb);
+ napi_gro_frags(&ss->napi);
return 1;
}
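
The rewritten receive path above is the standard page-based GRO sequence: borrow the per-NAPI skb with napi_get_frags(), attach the receive pages as fragments, fix up len/data_len/truesize, then hand the skb back with napi_gro_frags(). A minimal sketch of that sequence for a single page per packet, assuming poll context (placeholder parameter names, error handling trimmed):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_gro_rx(struct napi_struct *napi, struct page *page,
			  unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return -ENOMEM;		/* caller recycles or drops the page */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);		/* consumes the skb */
	return 0;
}
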
@@ -1480,18 +1453,11 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
u16 length;
__wsum checksum;
- /*
- * Prevent compiler from generating more than one ->features memory
- * access to avoid theoretical race condition with functions that
- * change NETIF_F_LRO flag at runtime.
- */
- bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
-
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
rx_done->entry[idx].length = 0;
checksum = csum_unfold(rx_done->entry[idx].checksum);
- rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
+ rx_ok = myri10ge_rx_done(ss, length, checksum);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
cnt++;
@@ -1503,9 +1469,6 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;
- if (lro_enabled)
- lro_flush_all(&rx_done->lro_mgr);
-
/* restock receive rings if needed */
if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
@@ -1779,7 +1742,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
- "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc",
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -1880,14 +1842,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
- data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
- data[i++] = ss->rx_done.lro_mgr.stats.flushed;
- if (ss->rx_done.lro_mgr.stats.flushed)
- data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
- ss->rx_done.lro_mgr.stats.flushed;
- else
- data[i++] = 0;
- data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
}
}
@@ -1931,7 +1885,7 @@ static int myri10ge_led(struct myri10ge_priv *mgp, int on)
}
if (!on)
pattern = swab32(readl(mgp->sram + pattern_off + 4));
- writel(htonl(pattern), mgp->sram + pattern_off);
+ writel(swab32(pattern), mgp->sram + pattern_off);
return 0;
}
@@ -2271,67 +2225,6 @@ static void myri10ge_free_irq(struct myri10ge_priv *mgp)
pci_disable_msix(pdev);
}
-static int
-myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr,
- u64 * hdr_flags, void *priv)
-{
- struct ethhdr *eh;
- struct vlan_ethhdr *veh;
- struct iphdr *iph;
- u8 *va = skb_frag_address(frag);
- unsigned long ll_hlen;
- /* passed opaque through lro_receive_frags() */
- __wsum csum = (__force __wsum) (unsigned long)priv;
-
- /* find the mac header, aborting if not IPv4 */
-
- eh = (struct ethhdr *)va;
- *mac_hdr = eh;
- ll_hlen = ETH_HLEN;
- if (eh->h_proto != htons(ETH_P_IP)) {
- if (eh->h_proto == htons(ETH_P_8021Q)) {
- veh = (struct vlan_ethhdr *)va;
- if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -1;
-
- ll_hlen += VLAN_HLEN;
-
- /*
- * HW checksum starts ETH_HLEN bytes into
- * frame, so we must subtract off the VLAN
- * header's checksum before csum can be used
- */
- csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
- VLAN_HLEN, 0));
- } else {
- return -1;
- }
- }
- *hdr_flags = LRO_IPV4;
-
- iph = (struct iphdr *)(va + ll_hlen);
- *ip_hdr = iph;
- if (iph->protocol != IPPROTO_TCP)
- return -1;
- if (ip_is_fragment(iph))
- return -1;
- *hdr_flags |= LRO_TCP;
- *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
-
- /* verify the IP checksum */
- if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
- return -1;
-
- /* verify the checksum */
- if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
- ntohs(iph->tot_len) - (iph->ihl << 2),
- IPPROTO_TCP, csum)))
- return -1;
-
- return 0;
-}
-
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
struct myri10ge_cmd cmd;
@@ -2401,8 +2294,7 @@ static int myri10ge_open(struct net_device *dev)
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int i, status, big_pow2, slice;
- u8 *itable;
- struct net_lro_mgr *lro_mgr;
+ u8 __iomem *itable;
if (mgp->running != MYRI10GE_ETH_STOPPED)
return -EBUSY;
@@ -2513,19 +2405,6 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
- lro_mgr = &ss->rx_done.lro_mgr;
- lro_mgr->dev = dev;
- lro_mgr->features = LRO_F_NAPI;
- lro_mgr->ip_summed = CHECKSUM_COMPLETE;
- lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
- lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
- lro_mgr->lro_arr = ss->rx_done.lro_desc;
- lro_mgr->get_frag_header = myri10ge_get_frag_header;
- lro_mgr->max_aggr = myri10ge_lro_max_pkts;
- lro_mgr->frag_align_pad = 2;
- if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
- lro_mgr->max_aggr = MAX_SKB_FRAGS;
-
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
@@ -2878,7 +2757,7 @@ again:
flags_next |= next_is_first *
MXGEFW_FLAGS_FIRST;
rdma_count |= -(chop | next_is_first);
- rdma_count += chop & !next_is_first;
+ rdma_count += chop & ~next_is_first;
} else if (likely(cum_len_next >= 0)) { /* header ends */
int small;
@@ -3143,15 +3022,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static netdev_features_t myri10ge_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- if (!(features & NETIF_F_RXCSUM))
- features &= ~NETIF_F_LRO;
-
- return features;
-}
-
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
@@ -3878,7 +3748,6 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_get_stats64 = myri10ge_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
- .ndo_fix_features = myri10ge_fix_features,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
};
@@ -3967,9 +3836,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_mtrr;
}
hdr_offset =
- ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
+ swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
- mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
+ mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
if (mgp->sram_size > mgp->board_span ||
mgp->sram_size <= MYRI10GE_FW_OFFSET) {
dev_err(&pdev->dev,
@@ -4018,7 +3887,11 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->mtu = myri10ge_initial_mtu;
- netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
+ netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
+
+ /* fake NETIF_F_HW_VLAN_RX for good GRO performance */
+ netdev->hw_features |= NETIF_F_HW_VLAN_RX;
+
netdev->features = netdev->hw_features;
if (dac_enabled)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
index 7ec4b864a550..75ec5e7cf50d 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
@@ -27,7 +27,7 @@ struct mcp_gen_header {
*
* Fields below this comment are extensions added in later versions
* of this struct, drivers should compare the header_length against
- * offsetof(field) to check wether a given MCP implements them.
+ * offsetof(field) to check whether a given MCP implements them.
*
* Never remove any field. Keep everything naturally align.
*/
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
index 3f94ddbf4dc0..923e640d604c 100644
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ b/drivers/net/ethernet/natsemi/ibmlana.c
@@ -900,7 +900,7 @@ static short ibmlana_adapter_ids[] __initdata = {
0x0000
};
-static char *ibmlana_adapter_names[] __devinitdata = {
+static char *ibmlana_adapter_names[] = {
"IBM LAN Adapter/A",
NULL
};
@@ -916,7 +916,7 @@ static const struct net_device_ops ibmlana_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit ibmlana_init_one(struct device *kdev)
+static int ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
struct net_device *dev;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 95dd39ffb230..b0b361546365 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -117,7 +117,7 @@ static const struct net_device_ops sonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
{
static unsigned version_printed;
unsigned int silicon_revision;
@@ -220,7 +220,7 @@ out:
* Probe for a SONIC ethernet controller on a Mips Jazz board.
* Actually probing is superfluous but we're paranoid.
*/
-static int __devinit jazz_sonic_probe(struct platform_device *pdev)
+static int jazz_sonic_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sonic_local *lp;
@@ -270,7 +270,7 @@ MODULE_ALIAS("platform:jazzsonic");
#include "sonic.c"
-static int __devexit jazz_sonic_device_remove (struct platform_device *pdev)
+static int jazz_sonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -286,7 +286,7 @@ static int __devexit jazz_sonic_device_remove (struct platform_device *pdev)
static struct platform_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
- .remove = __devexit_p(jazz_sonic_device_remove),
+ .remove = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
.owner = THIS_MODULE,
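
Most of the remaining hunks in this series are mechanical: they drop the __devinit/__devexit section markers and the __devexit_p() wrapper, which went away tree-wide once CONFIG_HOTPLUG stopped being optional, leaving probe and remove as ordinary functions. A minimal sketch of the resulting shape for a platform driver (all names are placeholders, not taken from any file above):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* allocate resources and register the device */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* undo everything done in probe */
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* no __devexit_p() wrapper needed */
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_driver);
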
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index b9680ba5a325..0ffde69c8d01 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -196,7 +196,7 @@ static const struct net_device_ops macsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit macsonic_init(struct net_device *dev)
+static int macsonic_init(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
@@ -245,7 +245,7 @@ static int __devinit macsonic_init(struct net_device *dev)
memcmp(mac, "\x00\x80\x19", 3) && \
memcmp(mac, "\x00\x05\x02", 3))
-static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
+static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -309,7 +309,7 @@ static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
eth_hw_addr_random(dev);
}
-static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
+static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
@@ -420,9 +420,8 @@ static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
return macsonic_init(dev);
}
-static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
- unsigned long prom_addr,
- int id)
+static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
+ unsigned long prom_addr, int id)
{
int i;
for(i = 0; i < 6; i++)
@@ -435,7 +434,7 @@ static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int __devinit macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_dev *ndev)
{
if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -460,7 +459,7 @@ static int __devinit macsonic_ident(struct nubus_dev *ndev)
return -1;
}
-static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
+static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
struct nubus_dev* ndev = NULL;
@@ -573,7 +572,7 @@ static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
return macsonic_init(dev);
}
-static int __devinit mac_sonic_probe(struct platform_device *pdev)
+static int mac_sonic_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sonic_local *lp;
@@ -619,7 +618,7 @@ MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
-static int __devexit mac_sonic_device_remove (struct platform_device *pdev)
+static int mac_sonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -634,7 +633,7 @@ static int __devexit mac_sonic_device_remove (struct platform_device *pdev)
static struct platform_driver mac_sonic_driver = {
.probe = mac_sonic_probe,
- .remove = __devexit_p(mac_sonic_device_remove),
+ .remove = mac_sonic_device_remove,
.driver = {
.name = mac_sonic_string,
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index dbaaa99a0d43..f4ad60c97eae 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -127,7 +127,7 @@ static int full_duplex[MAX_UNITS];
#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME " dp8381x driver, version "
DRV_VERSION ", " DRV_RELDATE "\n"
" originally by Donald Becker <becker@scyld.com>\n"
@@ -242,7 +242,7 @@ static struct {
const char *name;
unsigned long flags;
unsigned int eeprom_size;
-} natsemi_pci_info[] __devinitdata = {
+} natsemi_pci_info[] = {
{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
{ "NatSemi DP8381[56]", 0, 24 },
};
@@ -742,7 +742,7 @@ static void move_int_phy(struct net_device *dev, int addr)
udelay(1);
}
-static void __devinit natsemi_init_media (struct net_device *dev)
+static void natsemi_init_media(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
u32 tmp;
@@ -797,8 +797,7 @@ static const struct net_device_ops natsemi_netdev_ops = {
#endif
};
-static int __devinit natsemi_probe1 (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct netdev_private *np;
@@ -3214,7 +3213,7 @@ static int netdev_close(struct net_device *dev)
}
-static void __devexit natsemi_remove1 (struct pci_dev *pdev)
+static void natsemi_remove1(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
void __iomem * ioaddr = ns_ioaddr(dev);
@@ -3353,7 +3352,7 @@ static struct pci_driver natsemi_driver = {
.name = DRV_NAME,
.id_table = natsemi_pci_tbl,
.probe = natsemi_probe1,
- .remove = __devexit_p(natsemi_remove1),
+ .remove = natsemi_remove1,
#ifdef CONFIG_PM
.suspend = natsemi_suspend,
.resume = natsemi_resume,
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d52728b3c436..77c070de621e 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1941,8 +1941,8 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = ns83820_tx_timeout,
};
-static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+static int ns83820_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
struct net_device *ndev;
struct ns83820 *dev;
@@ -2241,7 +2241,7 @@ out:
return err;
}
-static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
+static void ns83820_remove_one(struct pci_dev *pci_dev)
{
struct net_device *ndev = pci_get_drvdata(pci_dev);
struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */
@@ -2272,7 +2272,7 @@ static struct pci_driver driver = {
.name = "ns83820",
.id_table = ns83820_pci_tbl,
.probe = ns83820_init_one,
- .remove = __devexit_p(ns83820_remove_one),
+ .remove = ns83820_remove_one,
#if 0 /* FIXME: implement */
.suspend = ,
.resume = ,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 7dfe88398d7d..5e4748e855f6 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -249,7 +249,7 @@ out:
* Actually probing is superfluous but we're paranoid.
*/
-int __devinit xtsonic_probe(struct platform_device *pdev)
+int xtsonic_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sonic_local *lp;
@@ -297,7 +297,7 @@ MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
#include "sonic.c"
-static int __devexit xtsonic_device_remove (struct platform_device *pdev)
+static int xtsonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local *lp = netdev_priv(dev);
@@ -314,7 +314,7 @@ static int __devexit xtsonic_device_remove (struct platform_device *pdev)
static struct platform_driver xtsonic_driver = {
.probe = xtsonic_probe,
- .remove = __devexit_p(xtsonic_device_remove),
+ .remove = xtsonic_device_remove,
.driver = {
.name = xtsonic_string,
},
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index ff26b54bd3fb..87abb4f10c43 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,7 +32,7 @@ config S2IO
config VXGE
tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
- depends on PCI && INET
+ depends on PCI
---help---
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
I/O Virtualized Server Adapter.
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index de50547c187d..7c94c089212f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -494,7 +494,7 @@ static struct pci_driver s2io_driver = {
.name = "S2IO",
.id_table = s2io_tbl,
.probe = s2io_init_nic,
- .remove = __devexit_p(s2io_rem_nic),
+ .remove = s2io_rem_nic,
.err_handler = &s2io_err_handler,
};
@@ -1040,7 +1040,7 @@ static int s2io_verify_pci_mode(struct s2io_nic *nic)
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
struct pci_dev *tdev = NULL;
- while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+ for_each_pci_dev(tdev) {
if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
if (tdev->bus == s2io_pdev->bus->parent) {
pci_dev_put(tdev);
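
for_each_pci_dev() in the hunk above is shorthand for the pci_get_device() walk it replaces; each iteration drops the reference on the previous device and takes one on the next, so leaving the loop early still requires an explicit pci_dev_put(), exactly as the surrounding code does. An open-coded equivalent as a sketch:

#include <linux/pci.h>

static bool example_device_present(unsigned short vendor, unsigned short device)
{
	struct pci_dev *tdev = NULL;

	/* walks every PCI device; tdev's refcount is managed by the loop */
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == vendor && tdev->device == device) {
			pci_dev_put(tdev);	/* drop the ref we still hold */
			return true;
		}
	}
	return false;
}
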
@@ -7702,7 +7702,7 @@ static const struct net_device_ops s2io_netdev_ops = {
* returns 0 on success and negative on failure.
*/
-static int __devinit
+static int
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
struct s2io_nic *sp;
@@ -8200,7 +8200,7 @@ mem_alloc_failed:
* from memory.
*/
-static void __devexit s2io_rem_nic(struct pci_dev *pdev)
+static void s2io_rem_nic(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct s2io_nic *sp;
@@ -8239,7 +8239,8 @@ static int __init s2io_starter(void)
/**
* s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
+ * Description: This function is the cleanup routine for the driver. It
+ * unregisters the driver.
*/
static __exit void s2io_closer(void)
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d5596926a1ef..d89b6ed82c51 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1075,9 +1075,8 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
/*
* Prototype declaration.
*/
-static int __devinit s2io_init_nic(struct pci_dev *pdev,
- const struct pci_device_id *pre);
-static void __devexit s2io_rem_nic(struct pci_dev *pdev);
+static int s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre);
+static void s2io_rem_nic(struct pci_dev *pdev);
static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index c2e420a84d22..fbe5363cb89c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -993,7 +993,7 @@ exit:
* for the driver, FW version information, and the first mac address for
* each vpath
*/
-enum vxge_hw_status __devinit
+enum vxge_hw_status
vxge_hw_device_hw_info_get(void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info)
{
@@ -1310,7 +1310,7 @@ __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
* When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
* to enable the latter to perform Titan hardware initialization.
*/
-enum vxge_hw_status __devinit
+enum vxge_hw_status
vxge_hw_device_initialize(
struct __vxge_hw_device **devh,
struct vxge_hw_device_attr *attr,
@@ -2917,7 +2917,7 @@ exit:
* vxge_hw_device_config_default_get - Initialize device config with defaults.
* Initialize Titan device config with default values.
*/
-enum vxge_hw_status __devinit
+enum vxge_hw_status
vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
{
u32 i;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 9e0c1eed5dc5..6ce4412fcc1a 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1846,11 +1846,11 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
-enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
+enum vxge_hw_status vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
-enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
+enum vxge_hw_status vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
/**
@@ -1877,7 +1877,7 @@ u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
-enum vxge_hw_status __devinit vxge_hw_device_initialize(
+enum vxge_hw_status vxge_hw_device_initialize(
struct __vxge_hw_device **devh,
struct vxge_hw_device_attr *attr,
struct vxge_hw_device_config *device_config);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 3e5b7509502c..7c87105ca049 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3371,10 +3371,9 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
-static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev_out)
+static int vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config, int high_dma,
+ int no_of_vpath, struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3672,9 +3671,8 @@ static void verify_bandwidth(void)
/*
* Vpath configuration
*/
-static int __devinit vxge_config_vpaths(
- struct vxge_hw_device_config *device_config,
- u64 vpath_mask, struct vxge_config *config_param)
+static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
+ u64 vpath_mask, struct vxge_config *config_param)
{
int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
u32 txdl_size, txdl_per_memblock;
@@ -3859,9 +3857,8 @@ static int __devinit vxge_config_vpaths(
}
/* initialize device configuratrions */
-static void __devinit vxge_device_config_init(
- struct vxge_hw_device_config *device_config,
- int *intr_type)
+static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
+ int *intr_type)
{
/* Used for CQRQ/SRQ. */
device_config->dma_blockpool_initial =
@@ -3912,7 +3909,7 @@ static void __devinit vxge_device_config_init(
device_config->rth_it_type);
}
-static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
+static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
int i;
@@ -4269,7 +4266,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
return ret;
}
-static int __devinit is_sriov_initialized(struct pci_dev *pdev)
+static int is_sriov_initialized(struct pci_dev *pdev)
{
int pos;
u16 ctrl;
@@ -4300,7 +4297,7 @@ static const struct vxge_hw_uld_cbs vxge_callbacks = {
* returns 0 on success and negative on failure.
*
*/
-static int __devinit
+static int
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
struct __vxge_hw_device *hldev;
@@ -4764,7 +4761,7 @@ _exit0:
* Description: This function is called by the Pci subsystem to release a
* PCI device and free up all resource held up by the device.
*/
-static void __devexit vxge_remove(struct pci_dev *pdev)
+static void vxge_remove(struct pci_dev *pdev)
{
struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
@@ -4809,7 +4806,7 @@ static struct pci_driver vxge_driver = {
.name = VXGE_DRIVER_NAME,
.id_table = vxge_id_table,
.probe = vxge_probe,
- .remove = __devexit_p(vxge_remove),
+ .remove = vxge_remove,
#ifdef CONFIG_PM
.suspend = vxge_pm_suspend,
.resume = vxge_pm_resume,
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 6893a65ae55f..cbd6a529d0c0 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -978,7 +978,7 @@ static int w90p910_ether_setup(struct net_device *dev)
return 0;
}
-static int __devinit w90p910_ether_probe(struct platform_device *pdev)
+static int w90p910_ether_probe(struct platform_device *pdev)
{
struct w90p910_ether *ether;
struct net_device *dev;
@@ -1071,7 +1071,7 @@ failed_free:
return error;
}
-static int __devexit w90p910_ether_remove(struct platform_device *pdev)
+static int w90p910_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct w90p910_ether *ether = netdev_priv(dev);
@@ -1096,7 +1096,7 @@ static int __devexit w90p910_ether_remove(struct platform_device *pdev)
static struct platform_driver w90p910_ether_driver = {
.probe = w90p910_ether_probe,
- .remove = __devexit_p(w90p910_ether_remove),
+ .remove = w90p910_ether_remove,
.driver = {
.name = "nuc900-emc",
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 876beceaf2d7..87fa5919c455 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1821,6 +1821,11 @@ static int nv_alloc_rx(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
wmb();
@@ -1830,6 +1835,7 @@ static int nv_alloc_rx(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -1856,6 +1862,11 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
skb->data,
skb_tailroom(skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_rx_ctx->dma)) {
+ kfree_skb(skb);
+ goto packet_dropped;
+ }
np->put_rx_ctx->dma_len = skb_tailroom(skb);
np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
@@ -1866,6 +1877,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
} else {
+packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_dropped++;
u64_stats_update_end(&np->swstats_rx_syncp);
@@ -2217,6 +2229,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2337,6 +2358,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->put_tx_ctx->dma)) {
+ /* on DMA mapping error - drop the packet */
+ kfree_skb(skb);
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -5003,6 +5033,11 @@ static int nv_loopback_test(struct net_device *dev)
test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
skb_tailroom(tx_skb),
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ test_dma_addr)) {
+ dev_kfree_skb_any(tx_skb);
+ goto out;
+ }
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
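
The forcedeth hunks above all add the same guard: a pci_map_single() result must be checked with pci_dma_mapping_error() before the address reaches the hardware, and on failure the buffer is freed instead of being handed to the NIC. A minimal transmit-side sketch of the pattern (stats handling simplified; the real hunks wrap the counter in u64_stats_update_begin/end):

static netdev_tx_t example_map_or_drop(struct fe_priv *np, struct sk_buff *skb)
{
	unsigned int len = skb_headlen(skb);
	dma_addr_t mapping;

	mapping = pci_map_single(np->pci_dev, skb->data, len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev, mapping)) {
		/* never hand a failed mapping to the NIC */
		kfree_skb(skb);
		np->stat_tx_dropped++;
		return NETDEV_TX_OK;	/* packet is consumed either way */
	}

	/* ...fill the descriptor with "mapping" and kick the hardware... */
	return NETDEV_TX_OK;
}
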
@@ -5520,7 +5555,7 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
#endif
};
-static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
struct net_device *dev;
struct fe_priv *np;
@@ -5995,7 +6030,7 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
base + NvRegTransmitPoll);
}
-static void __devexit nv_remove(struct pci_dev *pci_dev)
+static void nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
@@ -6271,7 +6306,7 @@ static struct pci_driver driver = {
.name = DRV_NAME,
.id_table = pci_tbl,
.probe = nv_probe,
- .remove = __devexit_p(nv_remove),
+ .remove = nv_remove,
.shutdown = nv_shutdown,
.driver.pm = NV_PM_OPS,
};
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index af8b4142088c..3466ca1e8f6c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1219,9 +1219,6 @@ static int lpc_eth_open(struct net_device *ndev)
if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EADDRNOTAVAIL;
-
__lpc_eth_clock_enable(pldat, true);
/* Reset and initialize */
@@ -1301,6 +1298,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
.ndo_do_ioctl = lpc_eth_ioctl,
.ndo_set_mac_address = lpc_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
};
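
The lpc_eth change works because the networking core already calls ndo_validate_addr from __dev_open(); pointing it at eth_validate_addr makes the explicit check in the driver's open routine redundant. For reference, eth_validate_addr is essentially:

int eth_validate_addr(struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	return 0;
}
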
@@ -1597,7 +1595,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
- .remove = __devexit_p(lpc_eth_drv_remove),
+ .remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
.suspend = lpc_eth_drv_suspend,
.resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index f97719c48516..b5499198e029 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1419,7 +1419,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
#endif
};
-static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
+static int octeon_mgmt_probe(struct platform_device *pdev)
{
struct net_device *netdev;
struct octeon_mgmt *p;
@@ -1559,7 +1559,7 @@ err:
return result;
}
-static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
+static int octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
@@ -1583,7 +1583,7 @@ static struct platform_driver octeon_mgmt_driver = {
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
- .remove = __devexit_p(octeon_mgmt_remove),
+ .remove = octeon_mgmt_remove,
};
extern void octeon_mdiobus_force_mod_depencency(void);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 5296cc8d3cba..34d05bf72b2e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
depends on PCI
select NET_CORE
select MII
+ select PTP_1588_CLOCK_PCH
---help---
This is a gigabit ethernet driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
@@ -20,19 +21,3 @@ config PCH_GBE
purpose use.
ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-if PCH_GBE
-
-config PCH_PTP
- bool "PCH PTP clock support"
- default n
- depends on EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- select PTP_1588_CLOCK_PCH
- ---help---
- Say Y here if you want to use Precision Time Protocol (PTP) in the
- driver. PTP is a method to precisely synchronize distributed clocks
- over Ethernet networks.
-
-endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index b07311eaa693..7fb7e178c74e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -649,7 +649,6 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring);
extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-#ifdef CONFIG_PCH_PTP
extern u32 pch_ch_control_read(struct pci_dev *pdev);
extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@ extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
extern u64 pch_rx_snap_read(struct pci_dev *pdev);
extern u64 pch_tx_snap_read(struct pci_dev *pdev);
extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
-#endif
/* pch_gbe_param.c */
extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4c4fe5b1a29a..39ab4d09faaa 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -21,10 +21,8 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
#include <linux/module.h>
-#ifdef CONFIG_PCH_PTP
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
-#endif
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_DISABLE_ALL 0
-#ifdef CONFIG_PCH_PTP
/* Macros for ieee1588 */
/* 0x40 Time Synchronization Channel Control Register Bits */
#define MASTER_MODE (1<<0)
@@ -113,7 +110,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
-#endif
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -122,7 +118,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
static void pch_gbe_set_multi(struct net_device *netdev);
-#ifdef CONFIG_PCH_PTP
static struct sock_filter ptp_filter[] = {
PTP_FILTER
};
@@ -291,7 +286,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
-#endif
inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
{
@@ -1244,9 +1238,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
(int)sizeof(struct pch_gbe_tx_desc) * ring_num,
&hw->reg->TX_DSC_SW_P);
-#ifdef CONFIG_PCH_PTP
pch_tx_timestamp(adapter, skb);
-#endif
dev_kfree_skb_any(skb);
}
@@ -1730,9 +1722,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
/* Write meta date of skb */
skb_put(skb, length);
-#ifdef CONFIG_PCH_PTP
pch_rx_timestamp(adapter, skb);
-#endif
skb->protocol = eth_type_trans(skb, netdev);
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2334,10 +2324,8 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
pr_debug("cmd : 0x%04x\n", cmd);
-#ifdef CONFIG_PCH_PTP
if (cmd == SIOCSHWTSTAMP)
return hwtstamp_ioctl(netdev, ifr, cmd);
-#endif
return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
}
@@ -2623,14 +2611,12 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_netdev;
}
-#ifdef CONFIG_PCH_PTP
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
pr_err("Bad ptp filter\n");
return -EINVAL;
}
-#endif
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index c2367158350e..bf829ee30077 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -166,7 +166,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <asm/unaligned.h>
#include <asm/cache.h>
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
@@ -576,8 +576,8 @@ static const struct net_device_ops hamachi_netdev_ops = {
};
-static int __devinit hamachi_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int hamachi_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct hamachi_private *hmp;
int option, i, rx_int_var, tx_int_var, boguscnt;
@@ -791,7 +791,7 @@ err_out:
return ret;
}
-static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+static int read_eeprom(void __iomem *ioaddr, int location)
{
int bogus_cnt = 1000;
@@ -1894,7 +1894,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
-static void __devexit hamachi_remove_one (struct pci_dev *pdev)
+static void hamachi_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -1923,7 +1923,7 @@ static struct pci_driver hamachi_driver = {
.name = DRV_NAME,
.id_table = hamachi_pci_tbl,
.probe = hamachi_init_one,
- .remove = __devexit_p(hamachi_remove_one),
+ .remove = hamachi_remove_one,
};
static int __init hamachi_init (void)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 04e622fd468d..fbaed4fa72fa 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -106,7 +106,7 @@ static int gx_fix;
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
@@ -367,8 +367,8 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = yellowfin_tx_timeout,
};
-static int __devinit yellowfin_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int yellowfin_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev;
struct yellowfin_private *np;
@@ -522,7 +522,7 @@ err_out_free_netdev:
return -ENODEV;
}
-static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+static int read_eeprom(void __iomem *ioaddr, int location)
{
int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
@@ -1372,7 +1372,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
-static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
+static void yellowfin_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct yellowfin_private *np;
@@ -1399,7 +1399,7 @@ static struct pci_driver yellowfin_driver = {
.name = DRV_NAME,
.id_table = yellowfin_pci_tbl,
.probe = yellowfin_init_one,
- .remove = __devexit_p(yellowfin_remove_one),
+ .remove = yellowfin_remove_one,
};
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 6fa74d530e44..0be5844d6372 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1727,7 +1727,7 @@ static const struct net_device_ops pasemi_netdev_ops = {
#endif
};
-static int __devinit
+static int
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -1849,7 +1849,7 @@ out_disable_device:
}
-static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
+static void pasemi_mac_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct pasemi_mac *mac;
@@ -1884,7 +1884,7 @@ static struct pci_driver pasemi_mac_driver = {
.name = "pasemi_mac",
.id_table = pasemi_mac_pci_tbl,
.probe = pasemi_mac_probe,
- .remove = __devexit_p(pasemi_mac_remove),
+ .remove = pasemi_mac_remove,
};
static void __exit pasemi_mac_cleanup_module(void)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 10468e7932dd..4ca2c196c98a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -218,7 +218,7 @@ skip:
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
} else {
- ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
@@ -381,7 +381,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
static int
netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
- u8 * bytes)
+ u8 *bytes)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int offset;
@@ -488,6 +488,8 @@ netxen_nic_get_pauseparam(struct net_device *dev,
__u32 val;
int port = adapter->physical_port;
+ pause->autoneg = 0;
+
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
@@ -496,19 +498,19 @@ netxen_nic_get_pauseparam(struct net_device *dev,
pause->rx_pause = netxen_gb_get_rx_flowctl(val);
val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
- case 0:
- pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
- break;
- case 1:
- pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
- break;
- case 2:
- pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
- break;
- case 3:
- default:
- pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
- break;
+ case 0:
+ pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+ break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
@@ -532,6 +534,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
struct netxen_adapter *adapter = netdev_priv(dev);
__u32 val;
int port = adapter->physical_port;
+
+ /* not supported */
+ if (pause->autoneg)
+ return -EINVAL;
+
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
@@ -549,31 +556,31 @@ netxen_nic_set_pauseparam(struct net_device *dev,
/* set autoneg */
val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
switch (port) {
- case 0:
- if (pause->tx_pause)
- netxen_gb_unset_gb0_mask(val);
- else
- netxen_gb_set_gb0_mask(val);
- break;
- case 1:
- if (pause->tx_pause)
- netxen_gb_unset_gb1_mask(val);
- else
- netxen_gb_set_gb1_mask(val);
- break;
- case 2:
- if (pause->tx_pause)
- netxen_gb_unset_gb2_mask(val);
- else
- netxen_gb_set_gb2_mask(val);
- break;
- case 3:
- default:
- if (pause->tx_pause)
- netxen_gb_unset_gb3_mask(val);
- else
- netxen_gb_set_gb3_mask(val);
- break;
+ case 0:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb0_mask(val);
+ else
+ netxen_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb1_mask(val);
+ else
+ netxen_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb2_mask(val);
+ else
+ netxen_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb3_mask(val);
+ else
+ netxen_gb_set_gb3_mask(val);
+ break;
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
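
The two netxen ethtool hunks above make the pause handlers consistent about autonegotiation: get_pauseparam now reports autoneg as off and set_pauseparam rejects any attempt to enable it, since the driver only implements manually forced rx/tx pause. A minimal sketch of that contract (struct example_priv and its fields are placeholders):

struct example_priv {
	u8 rx_pause_en;
	u8 tx_pause_en;
};

static void example_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct example_priv *priv = netdev_priv(dev);

	pause->autoneg	= 0;	/* pause autoneg not supported */
	pause->rx_pause = priv->rx_pause_en;
	pause->tx_pause = priv->tx_pause_en;
}

static int example_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct example_priv *priv = netdev_priv(dev);

	if (pause->autoneg)	/* cannot negotiate, only force */
		return -EINVAL;

	priv->rx_pause_en = pause->rx_pause;
	priv->tx_pause_en = pause->tx_pause;
	/* program the MAC flow-control registers here */
	return 0;
}
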
@@ -636,7 +643,7 @@ static int netxen_get_sset_count(struct net_device *dev, int sset)
static void
netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
- u64 * data)
+ u64 *data)
{
memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
if ((data[0] = netxen_nic_reg_test(dev)))
@@ -647,7 +654,7 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
}
static void
-netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int index;
@@ -668,7 +675,7 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
static void
netxen_nic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+ struct ethtool_stats *stats, u64 *data)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int index;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4d0f65..695667d471a1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
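
The loop index change above avoids releasing the head mapping twice: frag_array[0] carries the pci_map_single() mapping of the linear skb data, which the code just before the loop has already unmapped, so the pci_unmap_page() walk over the page fragments must start at index 1. A sketch of the intended teardown order; the struct and field names mirror the driver but are used here purely for illustration:

static void example_release_tx_mappings(struct pci_dev *pdev,
					struct netxen_cmd_buffer *cmd_buf)
{
	struct netxen_skb_frag *frag = cmd_buf->frag_array;
	int j;

	/* index 0: linear data, mapped with pci_map_single() */
	if (frag->dma) {
		pci_unmap_single(pdev, frag->dma, frag->length,
				 PCI_DMA_TODEVICE);
		frag->dma = 0ULL;
	}

	/* indices 1..frag_count-1: page fragments, mapped with pci_map_page() */
	for (j = 1; j < cmd_buf->frag_count; j++) {
		frag++;
		if (frag->dma) {
			pci_unmap_page(pdev, frag->dma, frag->length,
				       PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
		}
	}
}
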
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index df450616ab37..69e321a65077 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -60,9 +60,9 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled");
-static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+static int netxen_nic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static void netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
@@ -1397,7 +1397,7 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
}
#endif
-static int __devinit
+static int
netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
@@ -1569,7 +1569,7 @@ void netxen_cleanup_minidump(struct netxen_adapter *adapter)
}
}
-static void __devexit netxen_nic_remove(struct pci_dev *pdev)
+static void netxen_nic_remove(struct pci_dev *pdev)
{
struct netxen_adapter *adapter;
struct net_device *netdev;
@@ -1963,10 +1963,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
out_err:
return -ENOMEM;
@@ -3350,7 +3352,7 @@ static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
.probe = netxen_nic_probe,
- .remove = __devexit_p(netxen_nic_remove),
+ .remove = netxen_nic_remove,
#ifdef CONFIG_PM
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 6407d0d77e81..67a679aaf29a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1920,7 +1920,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
{
struct ql_tx_buf_cb *tx_cb;
int i;
- int retval = 0;
if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
netdev_warn(qdev->ndev,
@@ -1935,7 +1934,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
"Frame too short to be legal, frame not sent\n");
qdev->ndev->stats.tx_errors++;
- retval = -EIO;
goto frame_not_sent;
}
@@ -1944,7 +1942,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
mac_rsp->transaction_id);
qdev->ndev->stats.tx_errors++;
- retval = -EIO;
goto invalid_seg_count;
}
@@ -3772,8 +3769,8 @@ static const struct net_device_ops ql3xxx_netdev_ops = {
.ndo_tx_timeout = ql3xxx_tx_timeout,
};
-static int __devinit ql3xxx_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_entry)
+static int ql3xxx_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
{
struct net_device *ndev = NULL;
struct ql3_adapter *qdev = NULL;
@@ -3928,7 +3925,7 @@ err_out:
return err;
}
-static void __devexit ql3xxx_remove(struct pci_dev *pdev)
+static void ql3xxx_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -3955,18 +3952,7 @@ static struct pci_driver ql3xxx_driver = {
.name = DRV_NAME,
.id_table = ql3xxx_pci_tbl,
.probe = ql3xxx_probe,
- .remove = __devexit_p(ql3xxx_remove),
+ .remove = ql3xxx_remove,
};
-static int __init ql3xxx_init_module(void)
-{
- return pci_register_driver(&ql3xxx_driver);
-}
-
-static void __exit ql3xxx_exit(void)
-{
- pci_unregister_driver(&ql3xxx_driver);
-}
-
-module_init(ql3xxx_init_module);
-module_exit(ql3xxx_exit);
+module_pci_driver(ql3xxx_driver);
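
module_pci_driver() above replaces the hand-rolled init/exit pair; the macro emits module_init/module_exit stubs that simply register and unregister the named pci_driver. Roughly, the expansion looks like this:

/* Approximate expansion of module_pci_driver(ql3xxx_driver) */
static int __init ql3xxx_driver_init(void)
{
	return pci_register_driver(&ql3xxx_driver);
}
module_init(ql3xxx_driver_init);

static void __exit ql3xxx_driver_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}
module_exit(ql3xxx_driver_exit);
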
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index ddba83ef3f44..c4b8ced83829 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_QLCNIC) := qlcnic.o
qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
- qlcnic_ethtool.o qlcnic_ctx.o
+ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+ qlcnic_sysfs.o qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index eaa1db9fec32..bc7ec64e9c7a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 29
-#define QLCNIC_LINUX_VERSIONID "5.0.29"
+#define _QLCNIC_LINUX_SUBVERSION 30
+#define QLCNIC_LINUX_VERSIONID "5.0.30"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -89,16 +89,6 @@
#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
#define QLCNIC_LRO_BUFFER_EXTRA 2048
-/* Opcodes to be used with the commands */
-#define TX_ETHER_PKT 0x01
-#define TX_TCP_PKT 0x02
-#define TX_UDP_PKT 0x03
-#define TX_IP_PKT 0x04
-#define TX_TCP_LSO 0x05
-#define TX_TCP_LSO6 0x06
-#define TX_TCPV6_PKT 0x0b
-#define TX_UDPV6_PKT 0x0c
-
/* Tx defines */
#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
@@ -147,28 +137,6 @@
* Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
* we are doing LSO (above the 1500 size packet) only.
*/
-
-#define FLAGS_VLAN_TAGGED 0x10
-#define FLAGS_VLAN_OOB 0x40
-
-#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
- (cmd_desc)->vlan_TCI = cpu_to_le16(v);
-#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
- ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
-#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
- ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
-
-#define qlcnic_set_tx_port(_desc, _port) \
- ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
-
-#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
- ((_desc)->flags_opcode |= \
- cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
-
-#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
- ((_desc)->nfrags__length = \
- cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
-
struct cmd_desc_type0 {
u8 tcp_hdr_offset; /* For LSO only */
u8 ip_hdr_offset; /* For LSO only */
@@ -203,65 +171,6 @@ struct rcv_desc {
__le64 addr_buffer;
} __packed;
-/* opcode field in status_desc */
-#define QLCNIC_SYN_OFFLOAD 0x03
-#define QLCNIC_RXPKT_DESC 0x04
-#define QLCNIC_OLD_RXPKT_DESC 0x3f
-#define QLCNIC_RESPONSE_DESC 0x05
-#define QLCNIC_LRO_DESC 0x12
-
-/* for status field in status_desc */
-#define STATUS_CKSUM_LOOP 0
-#define STATUS_CKSUM_OK 2
-
-/* owner bits of status_desc */
-#define STATUS_OWNER_HOST (0x1ULL << 56)
-#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
-
-/* Status descriptor:
- 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
- 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
- 53-55 desc_cnt, 56-57 owner, 58-63 opcode
- */
-#define qlcnic_get_sts_port(sts_data) \
- ((sts_data) & 0x0F)
-#define qlcnic_get_sts_status(sts_data) \
- (((sts_data) >> 4) & 0x0F)
-#define qlcnic_get_sts_type(sts_data) \
- (((sts_data) >> 8) & 0x0F)
-#define qlcnic_get_sts_totallength(sts_data) \
- (((sts_data) >> 12) & 0xFFFF)
-#define qlcnic_get_sts_refhandle(sts_data) \
- (((sts_data) >> 28) & 0xFFFF)
-#define qlcnic_get_sts_prot(sts_data) \
- (((sts_data) >> 44) & 0x0F)
-#define qlcnic_get_sts_pkt_offset(sts_data) \
- (((sts_data) >> 48) & 0x1F)
-#define qlcnic_get_sts_desc_cnt(sts_data) \
- (((sts_data) >> 53) & 0x7)
-#define qlcnic_get_sts_opcode(sts_data) \
- (((sts_data) >> 58) & 0x03F)
-
-#define qlcnic_get_lro_sts_refhandle(sts_data) \
- ((sts_data) & 0x0FFFF)
-#define qlcnic_get_lro_sts_length(sts_data) \
- (((sts_data) >> 16) & 0x0FFFF)
-#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
- (((sts_data) >> 32) & 0x0FF)
-#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
- (((sts_data) >> 40) & 0x0FF)
-#define qlcnic_get_lro_sts_timestamp(sts_data) \
- (((sts_data) >> 48) & 0x1)
-#define qlcnic_get_lro_sts_type(sts_data) \
- (((sts_data) >> 49) & 0x7)
-#define qlcnic_get_lro_sts_push_flag(sts_data) \
- (((sts_data) >> 52) & 0x1)
-#define qlcnic_get_lro_sts_seq_number(sts_data) \
- ((sts_data) & 0x0FFFFFFFF)
-#define qlcnic_get_lro_sts_mss(sts_data1) \
- ((sts_data1 >> 32) & 0x0FFFF)
-
-
struct status_desc {
__le64 status_desc_data[2];
} __attribute__ ((aligned(16)));
@@ -280,16 +189,16 @@ struct status_desc {
#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
struct uni_table_desc{
- u32 findex;
- u32 num_entries;
- u32 entry_size;
- u32 reserved[5];
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
};
struct uni_data_desc{
- u32 findex;
- u32 size;
- u32 reserved[5];
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
};
/* Flash Defines and Structures */
@@ -416,19 +325,19 @@ struct qlcnic_nic_intr_coalesce {
};
struct qlcnic_dump_template_hdr {
- __le32 type;
- __le32 offset;
- __le32 size;
- __le32 cap_mask;
- __le32 num_entries;
- __le32 version;
- __le32 timestamp;
- __le32 checksum;
- __le32 drv_cap_mask;
- __le32 sys_info[3];
- __le32 saved_state[16];
- __le32 cap_sizes[8];
- __le32 rsvd[0];
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[0];
};
struct qlcnic_fw_dump {
@@ -456,11 +365,41 @@ struct qlcnic_hardware_context {
u8 pci_func;
u8 linkup;
u8 loopback_state;
+ u8 beacon_state;
+ u8 has_link_events;
+ u8 fw_type;
+ u8 physical_port;
+ u8 reset_context;
+ u8 msix_supported;
+ u8 max_mac_filters;
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 diag_test;
+ u8 num_msix;
+ u8 nic_mode;
+ char diag_cnt;
+
u16 port_type;
u16 board_type;
- u8 beacon_state;
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+ u16 op_mode;
+ u16 switch_mode;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 max_mtu;
+ u32 msg_enable;
+ u16 act_pci_func;
+
+ u32 capabilities;
+ u32 temp;
+ u32 int_vec_bit;
+ u32 fw_hal_version;
+ struct qlcnic_hardware_ops *hw_ops;
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
};
@@ -521,6 +460,7 @@ struct qlcnic_host_sds_ring {
} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
+ u16 ctx_id;
u32 producer;
u32 sw_consumer;
u32 num_desc;
@@ -985,6 +925,7 @@ struct qlcnic_adapter {
unsigned long state;
u32 flags;
+ int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
@@ -993,57 +934,28 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
- u8 msix_supported;
u8 portnum;
- u8 physical_port;
- u8 reset_context;
- u8 mc_enabled;
- u8 max_mc_count;
u8 fw_wait_cnt;
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
u8 need_fw_reset;
- u8 has_link_events;
- u8 fw_type;
- u16 tx_context_id;
u16 is_up;
-
- u16 link_speed;
- u16 link_duplex;
- u16 link_autoneg;
- u16 module_type;
-
- u16 op_mode;
- u16 switch_mode;
- u16 max_tx_ques;
- u16 max_rx_ques;
- u16 max_mtu;
u16 pvid;
- u32 fw_hal_version;
- u32 capabilities;
u32 irq;
- u32 temp;
-
- u32 int_vec_bit;
u32 heartbeat;
- u8 max_mac_filters;
u8 dev_state;
- u8 diag_test;
- char diag_cnt;
u8 reset_ack_timeo;
u8 dev_init_timeo;
- u16 msg_enable;
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
u8 mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -1057,24 +969,22 @@ struct qlcnic_adapter {
void __iomem *isr_int_vec;
struct msix_entry *msix_entries;
-
struct delayed_work fw_work;
-
struct qlcnic_filter_hash fhash;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
- __le32 file_prd_off; /*File fw product offset*/
+ u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
};
-struct qlcnic_info {
+struct qlcnic_info_le {
__le16 pci_func;
- __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+ __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
__le16 phys_port;
- __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+ __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
__le32 capabilities;
u8 max_mac_filters;
@@ -1088,13 +998,28 @@ struct qlcnic_info {
u8 reserved2[104];
} __packed;
-struct qlcnic_pci_info {
- __le16 id; /* pci function id */
- __le16 active; /* 1 = Enabled */
- __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
- __le16 default_port; /* default port number */
+struct qlcnic_info {
+ u16 pci_func;
+ u16 op_mode;
+ u16 phys_port;
+ u16 switch_mode;
+ u32 capabilities;
+ u8 max_mac_filters;
+ u8 reserved1;
+ u16 max_mtu;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 min_tx_bw;
+ u16 max_tx_bw;
+};
+
+struct qlcnic_pci_info_le {
+ __le16 id; /* pci function id */
+ __le16 active; /* 1 = Enabled */
+ __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+ __le16 default_port; /* default port number */
- __le16 tx_min_bw; /* Multiple of 100 Mbps */
+ __le16 tx_min_bw; /* Multiple of 100 Mbps */
__le16 tx_max_bw;
__le16 reserved1[2];
@@ -1102,6 +1027,16 @@ struct qlcnic_pci_info {
u8 reserved2[106];
} __packed;
+struct qlcnic_pci_info {
+ u16 id;
+ u16 active;
+ u16 type;
+ u16 default_port;
+ u16 tx_min_bw;
+ u16 tx_max_bw;
+ u8 mac[ETH_ALEN];
+};
+
struct qlcnic_npar_info {
u16 pvid;
u16 min_bw;
@@ -1116,6 +1051,7 @@ struct qlcnic_npar_info {
u8 mac_anti_spoof;
u8 promisc_mode;
u8 offload_flags;
+ u8 pci_func;
};
struct qlcnic_eswitch {
@@ -1208,7 +1144,7 @@ do { \
(VAL1) += (VAL2); \
} while (0)
-struct qlcnic_mac_statistics{
+struct qlcnic_mac_statistics_le {
__le64 mac_tx_frames;
__le64 mac_tx_bytes;
__le64 mac_tx_mcast_pkts;
@@ -1248,7 +1184,45 @@ struct qlcnic_mac_statistics{
__le64 mac_align_error;
} __packed;
-struct __qlcnic_esw_statistics {
+struct qlcnic_mac_statistics {
+ u64 mac_tx_frames;
+ u64 mac_tx_bytes;
+ u64 mac_tx_mcast_pkts;
+ u64 mac_tx_bcast_pkts;
+ u64 mac_tx_pause_cnt;
+ u64 mac_tx_ctrl_pkt;
+ u64 mac_tx_lt_64b_pkts;
+ u64 mac_tx_lt_127b_pkts;
+ u64 mac_tx_lt_255b_pkts;
+ u64 mac_tx_lt_511b_pkts;
+ u64 mac_tx_lt_1023b_pkts;
+ u64 mac_tx_lt_1518b_pkts;
+ u64 mac_tx_gt_1518b_pkts;
+ u64 rsvd1[3];
+ u64 mac_rx_frames;
+ u64 mac_rx_bytes;
+ u64 mac_rx_mcast_pkts;
+ u64 mac_rx_bcast_pkts;
+ u64 mac_rx_pause_cnt;
+ u64 mac_rx_ctrl_pkt;
+ u64 mac_rx_lt_64b_pkts;
+ u64 mac_rx_lt_127b_pkts;
+ u64 mac_rx_lt_255b_pkts;
+ u64 mac_rx_lt_511b_pkts;
+ u64 mac_rx_lt_1023b_pkts;
+ u64 mac_rx_lt_1518b_pkts;
+ u64 mac_rx_gt_1518b_pkts;
+ u64 rsvd2[3];
+ u64 mac_rx_length_error;
+ u64 mac_rx_length_small;
+ u64 mac_rx_length_large;
+ u64 mac_rx_jabber;
+ u64 mac_rx_dropped;
+ u64 mac_rx_crc_error;
+ u64 mac_align_error;
+};
+
+struct qlcnic_esw_stats_le {
__le16 context_id;
__le16 version;
__le16 size;
@@ -1263,147 +1237,27 @@ struct __qlcnic_esw_statistics {
__le64 rsvd[3];
} __packed;
+struct __qlcnic_esw_statistics {
+ u16 context_id;
+ u16 version;
+ u16 size;
+ u16 unused;
+ u64 unicast_frames;
+ u64 multicast_frames;
+ u64 broadcast_frames;
+ u64 dropped_frames;
+ u64 errors;
+ u64 local_frames;
+ u64 numbytes;
+ u64 rsvd[3];
+};
+
struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics rx;
struct __qlcnic_esw_statistics tx;
};
-struct qlcnic_common_entry_hdr {
- __le32 type;
- __le32 offset;
- __le32 cap_size;
- u8 mask;
- u8 rsvd[2];
- u8 flags;
-} __packed;
-
-struct __crb {
- __le32 addr;
- u8 stride;
- u8 rsvd1[3];
- __le32 data_size;
- __le32 no_ops;
- __le32 rsvd2[4];
-} __packed;
-
-struct __ctrl {
- __le32 addr;
- u8 stride;
- u8 index_a;
- __le16 timeout;
- __le32 data_size;
- __le32 no_ops;
- u8 opcode;
- u8 index_v;
- u8 shl_val;
- u8 shr_val;
- __le32 val1;
- __le32 val2;
- __le32 val3;
-} __packed;
-
-struct __cache {
- __le32 addr;
- __le16 stride;
- __le16 init_tag_val;
- __le32 size;
- __le32 no_ops;
- __le32 ctrl_addr;
- __le32 ctrl_val;
- __le32 read_addr;
- u8 read_addr_stride;
- u8 read_addr_num;
- u8 rsvd1[2];
-} __packed;
-
-struct __ocm {
- u8 rsvd[8];
- __le32 size;
- __le32 no_ops;
- u8 rsvd1[8];
- __le32 read_addr;
- __le32 read_addr_stride;
-} __packed;
-
-struct __mem {
- u8 rsvd[24];
- __le32 addr;
- __le32 size;
-} __packed;
-
-struct __mux {
- __le32 addr;
- u8 rsvd[4];
- __le32 size;
- __le32 no_ops;
- __le32 val;
- __le32 val_stride;
- __le32 read_addr;
- u8 rsvd2[4];
-} __packed;
-
-struct __queue {
- __le32 sel_addr;
- __le16 stride;
- u8 rsvd[2];
- __le32 size;
- __le32 no_ops;
- u8 rsvd2[8];
- __le32 read_addr;
- u8 read_addr_stride;
- u8 read_addr_cnt;
- u8 rsvd3[2];
-} __packed;
-
-struct qlcnic_dump_entry {
- struct qlcnic_common_entry_hdr hdr;
- union {
- struct __crb crb;
- struct __cache cache;
- struct __ocm ocm;
- struct __mem mem;
- struct __mux mux;
- struct __queue que;
- struct __ctrl ctrl;
- } region;
-} __packed;
-
-enum op_codes {
- QLCNIC_DUMP_NOP = 0,
- QLCNIC_DUMP_READ_CRB = 1,
- QLCNIC_DUMP_READ_MUX = 2,
- QLCNIC_DUMP_QUEUE = 3,
- QLCNIC_DUMP_BRD_CONFIG = 4,
- QLCNIC_DUMP_READ_OCM = 6,
- QLCNIC_DUMP_PEG_REG = 7,
- QLCNIC_DUMP_L1_DTAG = 8,
- QLCNIC_DUMP_L1_ITAG = 9,
- QLCNIC_DUMP_L1_DATA = 11,
- QLCNIC_DUMP_L1_INST = 12,
- QLCNIC_DUMP_L2_DTAG = 21,
- QLCNIC_DUMP_L2_ITAG = 22,
- QLCNIC_DUMP_L2_DATA = 23,
- QLCNIC_DUMP_L2_INST = 24,
- QLCNIC_DUMP_READ_ROM = 71,
- QLCNIC_DUMP_READ_MEM = 72,
- QLCNIC_DUMP_READ_CTRL = 98,
- QLCNIC_DUMP_TLHDR = 99,
- QLCNIC_DUMP_RDEND = 255
-};
-
-#define QLCNIC_DUMP_WCRB BIT_0
-#define QLCNIC_DUMP_RWCRB BIT_1
-#define QLCNIC_DUMP_ANDCRB BIT_2
-#define QLCNIC_DUMP_ORCRB BIT_3
-#define QLCNIC_DUMP_POLLCRB BIT_4
-#define QLCNIC_DUMP_RD_SAVE BIT_5
-#define QLCNIC_DUMP_WRT_SAVED BIT_6
-#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
-#define QLCNIC_DUMP_SKIP BIT_7
-
-#define QLCNIC_DUMP_MASK_MIN 3
#define QLCNIC_DUMP_MASK_DEF 0x1f
-#define QLCNIC_DUMP_MASK_MAX 0xff
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1411,12 +1265,6 @@ enum op_codes {
#define QLCNIC_SET_QUIESCENT 0xadd00010
#define QLCNIC_RESET_QUIESCENT 0xadd00020
-struct qlcnic_dump_operations {
- enum op_codes opcode;
- u32 (*handler)(struct qlcnic_adapter *,
- struct qlcnic_dump_entry *, u32 *);
-};
-
struct _cdrp_cmd {
u32 cmd;
u32 arg1;
@@ -1432,7 +1280,7 @@ struct qlcnic_cmd_args {
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
-u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
@@ -1474,6 +1322,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define __QLCNIC_MAX_LED_RATE 0xf
#define __QLCNIC_MAX_LED_STATE 0x2
+#define MAX_CTL_CHECK 1000
+
int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
@@ -1496,7 +1346,7 @@ int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
-void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
@@ -1530,9 +1380,8 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
-void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring);
-void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
+void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
+void qlcnic_fetch_mac(u32, u32, u8, u8 *);
void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
@@ -1571,12 +1420,32 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
extern int qlcnic_config_tso;
+int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_napi_del(struct qlcnic_adapter *adapter);
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+
/*
* QLOGIC Board information
*/
#define QLCNIC_MAX_BOARD_NAME_LEN 100
-struct qlcnic_brdinfo {
+struct qlcnic_board_info {
unsigned short vendor;
unsigned short device;
unsigned short sub_vendor;
@@ -1584,30 +1453,6 @@ struct qlcnic_brdinfo {
char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
};
-static const struct qlcnic_brdinfo qlcnic_boards[] = {
- {0x1077, 0x8020, 0x1077, 0x203,
- "8200 Series Single Port 10GbE Converged Network Adapter "
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x207,
- "8200 Series Dual Port 10GbE Converged Network Adapter "
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x20b,
- "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20c,
- "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20f,
- "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3733,
- "NC523SFP 10Gb 2-port Server Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3346,
- "CN1000Q Dual Port Converged Network Adapter"},
- {0x1077, 0x8020, 0x1077, 0x210,
- "QME8242-k 10GbE Dual Port Mezzanine Card"},
- {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
-};
-
-#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-
static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
{
if (likely(tx_ring->producer < tx_ring->sw_consumer))
@@ -1617,6 +1462,21 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
@@ -1627,10 +1487,17 @@ struct qlcnic_nic_template {
};
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
- if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
dev_name(&adapter->pdev->dev), \
__func__, ##_args); \
} while (0)
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
+}
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 2a179d087207..b14b8f0787ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -7,6 +7,18 @@
#include "qlcnic.h"
+static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -1;
+}
+
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
@@ -35,7 +47,7 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
struct qlcnic_hardware_context *ahw = adapter->ahw;
signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
- adapter->fw_hal_version);
+ adapter->ahw->fw_hal_version);
/* Acquire semaphore before accessing CRB */
if (qlcnic_api_lock(adapter)) {
@@ -103,7 +115,7 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
}
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
{
uint64_t sum = 0;
int count = temp_size / sizeof(uint32_t);
@@ -117,12 +129,12 @@ static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
int err, i;
- u16 temp_size;
void *tmp_addr;
- u32 version, csum, *template, *tmp_buf;
+ u32 temp_size, version, csum, *template;
+ __le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
dma_addr_t tmp_addr_t = 0;
ahw = adapter->ahw;
@@ -138,6 +150,8 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
}
temp_size = cmd.rsp.arg2;
version = cmd.rsp.arg3;
+ dev_info(&adapter->pdev->dev,
+ "minidump template version = 0x%x", version);
if (!temp_size)
return -EIO;
@@ -162,14 +176,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
err = -EIO;
goto error;
}
- tmp_tmpl = tmp_addr;
- csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
- if (csum) {
- dev_err(&adapter->pdev->dev,
- "Template header checksum validation failed\n");
- err = -EIO;
- goto error;
- }
ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
if (!ahw->fw_dump.tmpl_hdr) {
err = -EIO;
@@ -180,6 +186,14 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
for (i = 0; i < temp_size/sizeof(u32); i++)
*template++ = __le32_to_cpu(*tmp_buf++);
+ csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ err = -EIO;
+ goto error;
+ }
+
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
ahw->fw_dump.enable = 1;
@@ -231,6 +245,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
int err;
+ u16 temp;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -267,8 +282,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
cap |= QLCNIC_CAP0_LRO_MSS;
- prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
- msix_handler);
+ temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp);
prq->txrx_sds_binding = nsds_rings - 1;
prq->capabilities[0] = cpu_to_le32(cap);
@@ -453,8 +468,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
- adapter->tx_context_id =
- le16_to_cpu(prsp->context_id);
+ adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
} else {
dev_err(&adapter->pdev->dev,
"Failed to create tx ctx in firmware%d\n", err);
@@ -476,7 +490,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
struct qlcnic_cmd_args cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg1 = adapter->tx_ring->ctx_id;
cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
cmd.req.arg3 = 0;
cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
@@ -671,7 +685,7 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
err = cmd.rsp.cmd;
if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
+ qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
@@ -687,10 +701,10 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
{
int err;
dma_addr_t nic_dma_t;
- struct qlcnic_info *nic_info;
+ struct qlcnic_info_le *nic_info;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- size_t nic_size = sizeof(struct qlcnic_info);
+ size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL);
@@ -745,10 +759,10 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
dma_addr_t nic_dma_t;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- struct qlcnic_info *nic_info;
- size_t nic_size = sizeof(struct qlcnic_info);
+ struct qlcnic_info_le *nic_info;
+ size_t nic_size = sizeof(struct qlcnic_info_le);
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
@@ -796,9 +810,9 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
int err = 0, i;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info *npar;
+ struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info);
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
@@ -816,11 +830,14 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
qlcnic_issue_cmd(adapter, &cmd);
err = cmd.rsp.cmd;
+ adapter->ahw->act_pci_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
pci_info->type = le16_to_cpu(npar->type);
+ if (pci_info->type == QLCNIC_TYPE_NIC)
+ adapter->ahw->act_pci_func++;
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -848,8 +865,8 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
u32 arg1;
struct qlcnic_cmd_args cmd;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
return err;
arg1 = id | (enable_mirroring ? BIT_4 : 0);
@@ -877,8 +894,8 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
- size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
- struct __qlcnic_esw_statistics *stats;
+ size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
+ struct qlcnic_esw_stats_le *stats;
dma_addr_t stats_dma_t;
void *stats_addr;
u32 arg1;
@@ -888,8 +905,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
if (esw_stats == NULL)
return -ENOMEM;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
- func != adapter->ahw->pci_func) {
+ if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
+ (func != adapter->ahw->pci_func)) {
dev_err(&adapter->pdev->dev,
"Not privilege to query stats for func=%d", func);
return -EIO;
@@ -939,9 +956,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
struct qlcnic_mac_statistics *mac_stats)
{
- struct qlcnic_mac_statistics *stats;
+ struct qlcnic_mac_statistics_le *stats;
struct qlcnic_cmd_args cmd;
- size_t stats_size = sizeof(struct qlcnic_mac_statistics);
+ size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
dma_addr_t stats_dma_t;
void *stats_addr;
int err;
@@ -1000,7 +1017,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
if (esw_stats == NULL)
return -ENOMEM;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (adapter->npars == NULL)
return -EIO;
@@ -1015,12 +1032,13 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
- if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
+ if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
+ rx_tx, &port_stats))
continue;
esw_stats->size = port_stats.size;
@@ -1051,7 +1069,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
u32 arg1;
struct qlcnic_cmd_args cmd;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
@@ -1119,15 +1137,18 @@ op_type = 1 for port vlan_id
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
- int err = -EIO;
+ int err = -EIO, index;
u32 arg1, arg2 = 0;
struct qlcnic_cmd_args cmd;
u8 pci_func;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
pci_func = esw_cfg->pci_func;
- arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return err;
+ arg1 = (adapter->npars[index].phy_port & BIT_0);
arg1 |= (pci_func << 8);
if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
@@ -1139,7 +1160,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
case QLCNIC_PORT_DEFAULTS:
arg1 |= (BIT_4 | BIT_6 | BIT_7);
arg2 |= (BIT_0 | BIT_1);
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
arg2 |= (BIT_2 | BIT_3);
if (!(esw_cfg->discard_tagged))
arg1 &= ~BIT_4;
@@ -1191,11 +1212,17 @@ qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
u32 arg1, arg2;
+ int index;
u8 phy_port;
- if (adapter->op_mode == QLCNIC_MGMT_FUNC)
- phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
- else
- phy_port = adapter->physical_port;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
+ if (index < 0)
+ return -EIO;
+ phy_port = adapter->npars[index].phy_port;
+ } else {
+ phy_port = adapter->ahw->physical_port;
+ }
arg1 = phy_port;
arg1 |= (esw_cfg->pci_func << 8);
if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9e9e78a5c4d7..74b98110c5b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -208,9 +208,9 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->duplex = adapter->link_duplex;
- ecmd->autoneg = adapter->link_autoneg;
+ ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
+ ecmd->duplex = adapter->ahw->link_duplex;
+ ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val;
@@ -224,10 +224,10 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->advertising = ADVERTISED_10000baseT_Full;
}
- if (netif_running(dev) && adapter->has_link_events) {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->autoneg = adapter->link_autoneg;
- ecmd->duplex = adapter->link_duplex;
+ if (netif_running(dev) && adapter->ahw->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
+ ecmd->autoneg = adapter->ahw->link_autoneg;
+ ecmd->duplex = adapter->ahw->link_duplex;
goto skip;
}
@@ -238,7 +238,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EIO;
skip:
- ecmd->phy_address = adapter->physical_port;
+ ecmd->phy_address = adapter->ahw->physical_port;
ecmd->transceiver = XCVR_EXTERNAL;
switch (adapter->ahw->board_type) {
@@ -254,7 +254,7 @@ skip:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- ecmd->autoneg = adapter->link_autoneg;
+ ecmd->autoneg = adapter->ahw->link_autoneg;
break;
case QLCNIC_BRDTYPE_P3P_IMEZ:
case QLCNIC_BRDTYPE_P3P_XG_LOM:
@@ -270,7 +270,7 @@ skip:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
check_sfp_module = netif_running(dev) &&
- adapter->has_link_events;
+ adapter->ahw->has_link_events;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
@@ -285,7 +285,7 @@ skip:
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
check_sfp_module = netif_running(dev) &&
- adapter->has_link_events;
+ adapter->ahw->has_link_events;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -301,7 +301,7 @@ skip:
}
if (check_sfp_module) {
- switch (adapter->module_type) {
+ switch (adapter->ahw->module_type) {
case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
case LINKEVENT_MODULE_OPTICAL_SRLR:
case LINKEVENT_MODULE_OPTICAL_LRM:
@@ -359,9 +359,9 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
else if (ret)
return -EIO;
- adapter->link_speed = ethtool_cmd_speed(ecmd);
- adapter->link_duplex = ecmd->duplex;
- adapter->link_autoneg = ecmd->autoneg;
+ adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->ahw->link_duplex = ecmd->duplex;
+ adapter->ahw->link_autoneg = ecmd->autoneg;
if (!netif_running(dev))
return 0;
@@ -508,14 +508,15 @@ qlcnic_set_ringparam(struct net_device *dev,
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
+ int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- channel->max_rx = rounddown_pow_of_two(min_t(int,
- adapter->max_rx_ques, num_online_cpus()));
- channel->max_tx = adapter->max_tx_ques;
+ min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
+ channel->max_rx = rounddown_pow_of_two(min);
+ channel->max_tx = adapter->ahw->max_tx_ques;
channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->max_tx_ques;
+ channel->tx_count = adapter->ahw->max_tx_ques;
}
static int qlcnic_set_channels(struct net_device *dev,
@@ -543,7 +544,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int port = adapter->physical_port;
+ int port = adapter->ahw->physical_port;
__u32 val;
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -588,7 +589,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int port = adapter->physical_port;
+ int port = adapter->ahw->physical_port;
__u32 val;
/* read mode */
@@ -703,7 +704,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
if (ret)
goto clear_it;
- adapter->diag_cnt = 0;
+ adapter->ahw->diag_cnt = 0;
memset(&cmd, 0, sizeof(cmd));
cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
cmd.req.arg1 = adapter->ahw->pci_func;
@@ -715,7 +716,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
msleep(10);
- ret = !adapter->diag_cnt;
+ ret = !adapter->ahw->diag_cnt;
done:
qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -761,7 +762,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
- adapter->diag_cnt = 0;
+ adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
loop = 0;
@@ -770,11 +771,11 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
break;
- } while (!adapter->diag_cnt);
+ } while (!adapter->ahw->diag_cnt);
dev_kfree_skb_any(skb);
- if (!adapter->diag_cnt)
+ if (!adapter->ahw->diag_cnt)
QLCDB(adapter, DRV,
"LB Test: packet #%d was not received\n", i + 1);
else
@@ -800,14 +801,15 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
int loop = 0;
int ret;
- if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ if (!(adapter->ahw->capabilities &
+ QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
netdev_info(netdev, "Firmware is not loopback test capable\n");
return -EOPNOTSUPP;
}
QLCDB(adapter, DRV, "%s loopback test in progress\n",
mode == QLCNIC_ILB_MODE ? "internal" : "external");
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev, "Loopback test not supported for non "
"privilege function\n");
return 0;
@@ -826,7 +828,7 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
if (ret)
goto free_res;
- adapter->diag_cnt = 0;
+ adapter->ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -835,8 +837,8 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
" configure request\n");
ret = -QLCNIC_FW_NOT_RESPOND;
goto free_res;
- } else if (adapter->diag_cnt) {
- ret = adapter->diag_cnt;
+ } else if (adapter->ahw->diag_cnt) {
+ ret = adapter->ahw->diag_cnt;
goto free_res;
}
} while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
@@ -1028,7 +1030,7 @@ static int qlcnic_set_led(struct net_device *dev,
int max_sds_rings = adapter->max_sds_rings;
int err = -EIO, active = 1;
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
return -EOPNOTSUPP;
@@ -1207,14 +1209,14 @@ static u32 qlcnic_get_msglevel(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
+ return adapter->ahw->msg_enable;
}
static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = msglvl;
+ adapter->ahw->msg_enable = msglvl;
}
static int
@@ -1247,7 +1249,8 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
int i, copy_sz;
- u32 *hdr_ptr, *data;
+ u32 *hdr_ptr;
+ __le32 *data;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 28a6b28192e3..49cc1ac4f057 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -792,22 +792,6 @@ static const u32 MIU_TEST_READ_DATA[] = {
#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
#define QLCNIC_FLASH_LOCK_ID 0x001B2100
-#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
- writel((addr & 0xFFFF0000), (void *) (bar0 + \
- QLCNIC_FW_DUMP_REG1)); \
- readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
- *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
- LSW(addr))); \
-} while (0)
-
-#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
- writel((addr & 0xFFFF0000), (void *) (bar0 + \
- QLCNIC_FW_DUMP_REG1)); \
- readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
- writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
- readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
-} while (0)
-
/* PCI function operational mode */
enum {
QLCNIC_MGMT_FUNC = 0,
@@ -832,55 +816,63 @@ enum {
#define LSD(x) ((uint32_t)((uint64_t)(x)))
#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+#define QLCNIC_MS_CTRL 0x41000090
+#define QLCNIC_MS_ADDR_LO 0x41000094
+#define QLCNIC_MS_ADDR_HI 0x41000098
+#define QLCNIC_MS_WRTDATA_LO 0x410000A0
+#define QLCNIC_MS_WRTDATA_HI 0x410000A4
+#define QLCNIC_MS_WRTDATA_ULO 0x410000B0
+#define QLCNIC_MS_WRTDATA_UHI 0x410000B4
+#define QLCNIC_MS_RDDATA_LO 0x410000A8
+#define QLCNIC_MS_RDDATA_HI 0x410000AC
+#define QLCNIC_MS_RDDATA_ULO 0x410000B8
+#define QLCNIC_MS_RDDATA_UHI 0x410000BC
+
+#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
+
#define QLCNIC_LEGACY_INTR_CONFIG \
{ \
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
.tgt_status_reg = ISR_INT_TARGET_STATUS, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \
\
{ \
.int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
.tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
- .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
- .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \
}
/* NIU REGS */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 2a0c9dc48eb3..7a6d5ebe4e0f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -6,6 +6,7 @@
*/
#include "qlcnic.h"
+#include "qlcnic_hdr.h"
#include <linux/slab.h>
#include <net/ip.h>
@@ -22,6 +23,15 @@
#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M (0x1e0000UL)
+struct qlcnic_ms_reg_ctrl {
+ u32 ocm_window;
+ u32 control;
+ u32 hi;
+ u32 low;
+ u32 rd[4];
+ u32 wd[4];
+ u64 off;
+};
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -266,10 +276,44 @@ static const unsigned crb_hub_agt[64] = {
0,
};
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
/* PCI Windowing for DDR regions. */
#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ *data = readl(val);
+}
+
+static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ writel(data, val);
+ readl(val);
+}
+
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
@@ -300,11 +344,28 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
+static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+{
+ u32 data;
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
+ else
+ return -EIO;
+ return data;
+}
+
+static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+{
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+}
+
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct qlcnic_cmd_buffer *pbuf;
struct cmd_desc_type0 *cmd_desc;
struct qlcnic_host_tx_ring *tx_ring;
@@ -318,7 +379,6 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -341,7 +401,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
pbuf->frag_count = 0;
memcpy(&tx_ring->desc_head[producer],
- &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+ cmd_desc, sizeof(struct cmd_desc_type0));
producer = get_next_index(producer, tx_ring->num_desc);
i++;
@@ -350,7 +410,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
tx_ring->producer = producer;
- qlcnic_update_cmd_producer(adapter, tx_ring);
+ qlcnic_update_cmd_producer(tx_ring);
__netif_tx_unlock_bh(tx_ring->txq);
@@ -434,7 +494,7 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
@@ -540,7 +600,7 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
}
}
-int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
{
struct qlcnic_nic_req req;
int rv;
@@ -863,9 +923,8 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
* 0 if no window access is needed. 'off' is set to 2M addr
* In: 'off' is offset from base in 128M pci map
*/
-static int
-qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
- ulong off, void __iomem **addr)
+static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
+ ulong off, void __iomem **addr)
{
const struct crb_128M_2M_sub_block_map *m;
@@ -880,7 +939,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
- *addr = adapter->ahw->pci_base0 + m->start_2M +
+ *addr = ahw->pci_base0 + m->start_2M +
(off - m->start_128M);
return 0;
}
@@ -888,7 +947,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
/*
* Not in direct map, use crb window
*/
- *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
return 1;
}
@@ -929,7 +988,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
int rv;
void __iomem *addr = NULL;
- rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
if (rv == 0) {
writel(data, addr);
@@ -954,15 +1013,14 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
return -EIO;
}
-u32
-qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
u32 data = -1;
void __iomem *addr = NULL;
- rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
if (rv == 0)
return readl(addr);
@@ -985,46 +1043,28 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
}
-void __iomem *
-qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
+ u32 offset)
{
void __iomem *addr = NULL;
- WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
return addr;
}
-
-static int
-qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
- u64 addr, u32 *start)
-{
- u32 window;
-
- window = OCM_WIN_P3P(addr);
-
- writel(window, adapter->ahw->ocm_win_crb);
- /* read back to flush */
- readl(adapter->ahw->ocm_win_crb);
-
- *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
- return 0;
-}
-
-static int
-qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
- u64 *data, int op)
+static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
+ u32 window, u64 off, u64 *data, int op)
{
void __iomem *addr;
- int ret;
u32 start;
mutex_lock(&adapter->ahw->mem_lock);
- ret = qlcnic_pci_set_window_2M(adapter, off, &start);
- if (ret != 0)
- goto unlock;
+ writel(window, adapter->ahw->ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw->ocm_win_crb);
+ start = QLCNIC_PCI_OCM0_2M + off;
addr = adapter->ahw->pci_base0 + start;
@@ -1033,10 +1073,12 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
else /* write */
writeq(*data, addr);
-unlock:
- mutex_unlock(&adapter->ahw->mem_lock);
+ /* Set window to 0 */
+ writel(0, adapter->ahw->ocm_win_crb);
+ readl(adapter->ahw->ocm_win_crb);
- return ret;
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return 0;
}
void
@@ -1061,54 +1103,74 @@ qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
mutex_unlock(&adapter->ahw->mem_lock);
}
-#define MAX_CTL_CHECK 1000
-int
-qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
- u64 off, u64 data)
+
+/* Set MS memory control data for different adapters */
+static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
+ struct qlcnic_ms_reg_ctrl *ms)
+{
+ ms->control = QLCNIC_MS_CTRL;
+ ms->low = QLCNIC_MS_ADDR_LO;
+ ms->hi = QLCNIC_MS_ADDR_HI;
+ if (off & 0xf) {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_LO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_HI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
+ } else {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_LO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_HI;
+ }
+
+ ms->ocm_window = OCM_WIN_P3P(off);
+ ms->off = GET_MEM_OFFS_2M(off);
+}
+
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
- int i, j, ret;
+ int j, ret = 0;
u32 temp, off8;
- void __iomem *mem_crb;
+ struct qlcnic_ms_reg_ctrl ms;
/* Only 64-bit aligned access */
if (off & 7)
return -EIO;
- /* P3 onward, test agent base for MIU and SIU is same */
- if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX)) {
- mem_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
- goto correct;
- }
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
- if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
- mem_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
- goto correct;
- }
+ qlcnic_set_ms_controls(adapter, off, &ms);
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
- return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
-
- return -EIO;
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, &data, 1);
-correct:
off8 = off & ~0xf;
mutex_lock(&adapter->ahw->mem_lock);
- writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
- writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
- i = 0;
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
+ temp = qlcnic_ind_rd(adapter, ms.control);
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@@ -1118,24 +1180,18 @@ correct:
goto done;
}
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
-
- writel(data & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel((data >> 32) & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ /* This is the modify part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
+ qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
+ /* This is the write part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
+ qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
- writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
- (mem_crb + TEST_AGT_CTRL));
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
+ temp = qlcnic_ind_rd(adapter, ms.control);
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@@ -1154,52 +1210,41 @@ done:
return ret;
}
-int
-qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
- u64 off, u64 *data)
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
int j, ret;
u32 temp, off8;
u64 val;
- void __iomem *mem_crb;
+ struct qlcnic_ms_reg_ctrl ms;
/* Only 64-bit aligned access */
if (off & 7)
return -EIO;
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
- /* P3 onward, test agent base for MIU and SIU is same */
- if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX)) {
- mem_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
- goto correct;
- }
-
- if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
- mem_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
- goto correct;
- }
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ qlcnic_set_ms_controls(adapter, off, &ms);
- if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
- return qlcnic_pci_mem_access_direct(adapter,
- off, data, 0);
- }
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, data, 0);
- return -EIO;
+ mutex_lock(&adapter->ahw->mem_lock);
-correct:
off8 = off & ~0xf;
- mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
- writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
- writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
+ temp = qlcnic_ind_rd(adapter, ms.control);
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@@ -1210,13 +1255,10 @@ correct:
"failed to read through agent\n");
ret = -EIO;
} else {
- off8 = MIU_TEST_AGT_RDDATA_LO;
- if (off & 0xf)
- off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
- temp = readl(mem_crb + off8 + 4);
+ temp = qlcnic_ind_rd(adapter, ms.rd[3]);
val = (u64)temp << 32;
- val |= readl(mem_crb + off8);
+ val |= qlcnic_ind_rd(adapter, ms.rd[2]);
*data = val;
ret = 0;
}
@@ -1320,469 +1362,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-
-/* FW dump related functions */
-static u32
-qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
- u32 *buffer)
-{
- int i;
- u32 addr, data;
- struct __crb *crb = &entry->region.crb;
- void __iomem *base = adapter->ahw->pci_base0;
-
- addr = crb->addr;
-
- for (i = 0; i < crb->no_ops; i++) {
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- *buffer++ = cpu_to_le32(addr);
- *buffer++ = cpu_to_le32(data);
- addr += crb->stride;
- }
- return crb->no_ops * 2 * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, u32 *buffer)
-{
- int i, k, timeout = 0;
- void __iomem *base = adapter->ahw->pci_base0;
- u32 addr, data;
- u8 opcode, no_ops;
- struct __ctrl *ctr = &entry->region.ctrl;
- struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-
- addr = ctr->addr;
- no_ops = ctr->no_ops;
-
- for (i = 0; i < no_ops; i++) {
- k = 0;
- opcode = 0;
- for (k = 0; k < 8; k++) {
- if (!(ctr->opcode & (1 << k)))
- continue;
- switch (1 << k) {
- case QLCNIC_DUMP_WCRB:
- QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
- break;
- case QLCNIC_DUMP_RWCRB:
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- QLCNIC_WR_DUMP_REG(addr, base, data);
- break;
- case QLCNIC_DUMP_ANDCRB:
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- QLCNIC_WR_DUMP_REG(addr, base,
- (data & ctr->val2));
- break;
- case QLCNIC_DUMP_ORCRB:
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- QLCNIC_WR_DUMP_REG(addr, base,
- (data | ctr->val3));
- break;
- case QLCNIC_DUMP_POLLCRB:
- while (timeout <= ctr->timeout) {
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- if ((data & ctr->val2) == ctr->val1)
- break;
- msleep(1);
- timeout++;
- }
- if (timeout > ctr->timeout) {
- dev_info(&adapter->pdev->dev,
- "Timed out, aborting poll CRB\n");
- return -EINVAL;
- }
- break;
- case QLCNIC_DUMP_RD_SAVE:
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- t_hdr->saved_state[ctr->index_v] = data;
- break;
- case QLCNIC_DUMP_WRT_SAVED:
- if (ctr->index_v)
- data = t_hdr->saved_state[ctr->index_v];
- else
- data = ctr->val1;
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
- QLCNIC_WR_DUMP_REG(addr, base, data);
- break;
- case QLCNIC_DUMP_MOD_SAVE_ST:
- data = t_hdr->saved_state[ctr->index_v];
- data <<= ctr->shl_val;
- data >>= ctr->shr_val;
- if (ctr->val2)
- data &= ctr->val2;
- data |= ctr->val3;
- data += ctr->val1;
- t_hdr->saved_state[ctr->index_v] = data;
- break;
- default:
- dev_info(&adapter->pdev->dev,
- "Unknown opcode\n");
- break;
- }
- }
- addr += ctr->stride;
- }
- return 0;
-}
-
-static u32
-qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
- u32 *buffer)
-{
- int loop;
- u32 val, data = 0;
- struct __mux *mux = &entry->region.mux;
- void __iomem *base = adapter->ahw->pci_base0;
-
- val = mux->val;
- for (loop = 0; loop < mux->no_ops; loop++) {
- QLCNIC_WR_DUMP_REG(mux->addr, base, val);
- QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
- *buffer++ = cpu_to_le32(val);
- *buffer++ = cpu_to_le32(data);
- val += mux->val_stride;
- }
- return 2 * mux->no_ops * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
- u32 *buffer)
-{
- int i, loop;
- u32 cnt, addr, data, que_id = 0;
- void __iomem *base = adapter->ahw->pci_base0;
- struct __queue *que = &entry->region.que;
-
- addr = que->read_addr;
- cnt = que->read_addr_cnt;
-
- for (loop = 0; loop < que->no_ops; loop++) {
- QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
- addr = que->read_addr;
- for (i = 0; i < cnt; i++) {
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- *buffer++ = cpu_to_le32(data);
- addr += que->read_addr_stride;
- }
- que_id += que->stride;
- }
- return que->no_ops * cnt * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
- u32 *buffer)
-{
- int i;
- u32 data;
- void __iomem *addr;
- struct __ocm *ocm = &entry->region.ocm;
-
- addr = adapter->ahw->pci_base0 + ocm->read_addr;
- for (i = 0; i < ocm->no_ops; i++) {
- data = readl(addr);
- *buffer++ = cpu_to_le32(data);
- addr += ocm->read_addr_stride;
- }
- return ocm->no_ops * sizeof(u32);
-}
-
-static u32
-qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
- u32 *buffer)
-{
- int i, count = 0;
- u32 fl_addr, size, val, lck_val, addr;
- struct __mem *rom = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
-
- fl_addr = rom->addr;
- size = rom->size/4;
-lock_try:
- lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
- if (!lck_val && count < MAX_CTL_CHECK) {
- msleep(10);
- count++;
- goto lock_try;
- }
- writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
- for (i = 0; i < size; i++) {
- addr = fl_addr & 0xFFFF0000;
- QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
- addr = LSW(fl_addr) + FLASH_ROM_DATA;
- QLCNIC_RD_DUMP_REG(addr, base, &val);
- fl_addr += 4;
- *buffer++ = cpu_to_le32(val);
- }
- readl(base + QLCNIC_FLASH_SEM2_ULK);
- return rom->size;
-}
-
-static u32
-qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, u32 *buffer)
-{
- int i;
- u32 cnt, val, data, addr;
- void __iomem *base = adapter->ahw->pci_base0;
- struct __cache *l1 = &entry->region.cache;
-
- val = l1->init_tag_val;
-
- for (i = 0; i < l1->no_ops; i++) {
- QLCNIC_WR_DUMP_REG(l1->addr, base, val);
- QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
- addr = l1->read_addr;
- cnt = l1->read_addr_num;
- while (cnt) {
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- *buffer++ = cpu_to_le32(data);
- addr += l1->read_addr_stride;
- cnt--;
- }
- val += l1->stride;
- }
- return l1->no_ops * l1->read_addr_num * sizeof(u32);
-}
-
-static u32
-qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, u32 *buffer)
-{
- int i;
- u32 cnt, val, data, addr;
- u8 poll_mask, poll_to, time_out = 0;
- void __iomem *base = adapter->ahw->pci_base0;
- struct __cache *l2 = &entry->region.cache;
-
- val = l2->init_tag_val;
- poll_mask = LSB(MSW(l2->ctrl_val));
- poll_to = MSB(MSW(l2->ctrl_val));
-
- for (i = 0; i < l2->no_ops; i++) {
- QLCNIC_WR_DUMP_REG(l2->addr, base, val);
- if (LSW(l2->ctrl_val))
- QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
- LSW(l2->ctrl_val));
- if (!poll_mask)
- goto skip_poll;
- do {
- QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
- if (!(data & poll_mask))
- break;
- msleep(1);
- time_out++;
- } while (time_out <= poll_to);
-
- if (time_out > poll_to) {
- dev_err(&adapter->pdev->dev,
- "Timeout exceeded in %s, aborting dump\n",
- __func__);
- return -EINVAL;
- }
-skip_poll:
- addr = l2->read_addr;
- cnt = l2->read_addr_num;
- while (cnt) {
- QLCNIC_RD_DUMP_REG(addr, base, &data);
- *buffer++ = cpu_to_le32(data);
- addr += l2->read_addr_stride;
- cnt--;
- }
- val += l2->stride;
- }
- return l2->no_ops * l2->read_addr_num * sizeof(u32);
-}
-
-static u32
-qlcnic_read_memory(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, u32 *buffer)
-{
- u32 addr, data, test, ret = 0;
- int i, reg_read;
- struct __mem *mem = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
-
- reg_read = mem->size;
- addr = mem->addr;
- /* check for data size of multiple of 16 and 16 byte alignment */
- if ((addr & 0xf) || (reg_read%16)) {
- dev_info(&adapter->pdev->dev,
- "Unaligned memory addr:0x%x size:0x%x\n",
- addr, reg_read);
- return -EINVAL;
- }
-
- mutex_lock(&adapter->ahw->mem_lock);
-
- while (reg_read != 0) {
- QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
- QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
- QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
- TA_CTL_ENABLE | TA_CTL_START);
-
- for (i = 0; i < MAX_CTL_CHECK; i++) {
- QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
- if (!(test & TA_CTL_BUSY))
- break;
- }
- if (i == MAX_CTL_CHECK) {
- if (printk_ratelimit()) {
- dev_err(&adapter->pdev->dev,
- "failed to read through agent\n");
- ret = -EINVAL;
- goto out;
- }
- }
- for (i = 0; i < 4; i++) {
- QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
- *buffer++ = cpu_to_le32(data);
- }
- addr += 16;
- reg_read -= 16;
- ret += 16;
- }
-out:
- mutex_unlock(&adapter->ahw->mem_lock);
- return mem->size;
-}
-
-static u32
-qlcnic_dump_nop(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, u32 *buffer)
-{
- entry->hdr.flags |= QLCNIC_DUMP_SKIP;
- return 0;
-}
-
-struct qlcnic_dump_operations fw_dump_ops[] = {
- { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
- { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
- { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
- { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
- { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
- { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
- { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
- { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
- u32 size)
-{
- int ret = 1;
- if (size != entry->hdr.cap_size) {
- dev_info(dev,
- "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
- entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
- dev_info(dev, "Aborting further dump capture\n");
- ret = 0;
- }
- return ret;
-}
-
-int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
-{
- u32 *buffer;
- char mesg[64];
- char *msg[] = {mesg, NULL};
- int i, k, ops_cnt, ops_index, dump_size = 0;
- u32 entry_offset, dump, no_entries, buf_offset = 0;
- struct qlcnic_dump_entry *entry;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
-
- if (fw_dump->clr) {
- dev_info(&adapter->pdev->dev,
- "Previous dump not cleared, not capturing dump\n");
- return -EIO;
- }
- /* Calculate the size for dump data area only */
- for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
- if (i & tmpl_hdr->drv_cap_mask)
- dump_size += tmpl_hdr->cap_sizes[k];
- if (!dump_size)
- return -EIO;
-
- fw_dump->data = vzalloc(dump_size);
- if (!fw_dump->data) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate (%d KB) for fw dump\n",
- dump_size/1024);
- return -ENOMEM;
- }
- buffer = fw_dump->data;
- fw_dump->size = dump_size;
- no_entries = tmpl_hdr->num_entries;
- ops_cnt = ARRAY_SIZE(fw_dump_ops);
- entry_offset = tmpl_hdr->offset;
- tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
- tmpl_hdr->sys_info[1] = adapter->fw_version;
-
- for (i = 0; i < no_entries; i++) {
- entry = (void *)tmpl_hdr + entry_offset;
- if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
- entry->hdr.flags |= QLCNIC_DUMP_SKIP;
- entry_offset += entry->hdr.offset;
- continue;
- }
- /* Find the handler for this entry */
- ops_index = 0;
- while (ops_index < ops_cnt) {
- if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
- break;
- ops_index++;
- }
- if (ops_index == ops_cnt) {
- dev_info(&adapter->pdev->dev,
- "Invalid entry type %d, exiting dump\n",
- entry->hdr.type);
- goto error;
- }
- /* Collect dump for this entry */
- dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
- dump))
- entry->hdr.flags |= QLCNIC_DUMP_SKIP;
- buf_offset += entry->hdr.cap_size;
- entry_offset += entry->hdr.offset;
- buffer = fw_dump->data + buf_offset;
- }
- if (dump_size != buf_offset) {
- dev_info(&adapter->pdev->dev,
- "Captured(%d) and expected size(%d) do not match\n",
- buf_offset, dump_size);
- goto error;
- } else {
- fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
- adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
- fw_dump->size);
- /* Send a udev event to notify availability of FW dump */
- kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
- return 0;
- }
-error:
- vfree(fw_dump->data);
- return -EINVAL;
-}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 0bcda9c51e9b..de79cde233de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -25,10 +25,6 @@ static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
#define QLCNIC_ADDR_ERROR (0xffffffff)
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring);
-
static int
qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
@@ -250,7 +246,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->dma_size =
QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ if (adapter->ahw->capabilities &
+ QLCNIC_FW_CAPABILITY_HW_LRO)
rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
rds_ring->skb_size =
@@ -659,7 +656,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
"Not an Ethernet NIC func=%u\n", val);
return -EIO;
}
- adapter->physical_port = (val >> 2);
+ adapter->ahw->physical_port = (val >> 2);
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
timeo = QLCNIC_INIT_TIMEOUT_SECS;
@@ -778,15 +775,15 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
static
struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
{
- u32 i;
+ u32 i, entries;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
- __le32 entries = cpu_to_le32(directory->num_entries);
+ entries = le32_to_cpu(directory->num_entries);
for (i = 0; i < entries; i++) {
- __le32 offs = cpu_to_le32(directory->findex) +
- (i * cpu_to_le32(directory->entry_size));
- __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+ u32 offs = le32_to_cpu(directory->findex) +
+ i * le32_to_cpu(directory->entry_size);
+ u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
if (tab_type == section)
return (struct uni_table_desc *) &unirom[offs];
@@ -802,17 +799,16 @@ qlcnic_validate_header(struct qlcnic_adapter *adapter)
{
const u8 *unirom = adapter->fw->data;
struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
- __le32 fw_file_size = adapter->fw->size;
- __le32 entries;
- __le32 entry_size;
- __le32 tab_size;
+ u32 entries, entry_size, tab_size, fw_file_size;
+
+ fw_file_size = adapter->fw->size;
if (fw_file_size < FILEHEADER_SIZE)
return -EINVAL;
- entries = cpu_to_le32(directory->num_entries);
- entry_size = cpu_to_le32(directory->entry_size);
- tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+ entries = le32_to_cpu(directory->num_entries);
+ entry_size = le32_to_cpu(directory->entry_size);
+ tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
if (fw_file_size < tab_size)
return -EINVAL;
@@ -825,29 +821,29 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
{
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
+ u32 offs, tab_size, data_size, idx;
const u8 *unirom = adapter->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
- QLCNIC_UNI_BOOTLD_IDX_OFF));
- __le32 offs;
- __le32 tab_size;
- __le32 data_size;
+ __le32 temp;
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+ idx = le32_to_cpu(temp);
tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
if (!tab_desc)
return -EINVAL;
- tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
if (adapter->fw->size < tab_size)
return -EINVAL;
- offs = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * (idx));
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -861,27 +857,27 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
struct uni_table_desc *tab_desc;
struct uni_data_desc *descr;
const u8 *unirom = adapter->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
- QLCNIC_UNI_FIRMWARE_IDX_OFF));
- __le32 offs;
- __le32 tab_size;
- __le32 data_size;
+ u32 offs, tab_size, data_size, idx;
+ __le32 temp;
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ idx = le32_to_cpu(temp);
tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
if (!tab_desc)
return -EINVAL;
- tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
if (adapter->fw->size < tab_size)
return -EINVAL;
- offs = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * (idx));
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -895,19 +891,17 @@ qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
struct uni_table_desc *ptab_descr;
const u8 *unirom = adapter->fw->data;
int mn_present = qlcnic_has_mn(adapter);
- __le32 entries;
- __le32 entry_size;
- __le32 tab_size;
- u32 i;
+ u32 entries, entry_size, tab_size, i;
+ __le32 temp;
ptab_descr = qlcnic_get_table_desc(unirom,
QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
if (!ptab_descr)
return -EINVAL;
- entries = cpu_to_le32(ptab_descr->num_entries);
- entry_size = cpu_to_le32(ptab_descr->entry_size);
- tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+ entries = le32_to_cpu(ptab_descr->num_entries);
+ entry_size = le32_to_cpu(ptab_descr->entry_size);
+ tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -915,16 +909,16 @@ qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
nomn:
for (i = 0; i < entries; i++) {
- __le32 flags, file_chiprev, offs;
+ u32 flags, file_chiprev, offs;
u8 chiprev = adapter->ahw->revision_id;
u32 flagbit;
- offs = cpu_to_le32(ptab_descr->findex) +
- (i * cpu_to_le32(ptab_descr->entry_size));
- flags = cpu_to_le32(*((int *)&unirom[offs] +
- QLCNIC_UNI_FLAGS_OFF));
- file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
- QLCNIC_UNI_CHIP_REV_OFF));
+ offs = le32_to_cpu(ptab_descr->findex) +
+ i * le32_to_cpu(ptab_descr->entry_size);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
+ flags = le32_to_cpu(temp);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
+ file_chiprev = le32_to_cpu(temp);
flagbit = mn_present ? 1 : 2;
@@ -976,18 +970,20 @@ struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
u32 section, u32 idx_offset)
{
const u8 *unirom = adapter->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
- idx_offset));
struct uni_table_desc *tab_desc;
- __le32 offs;
+ u32 offs, idx;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
+ idx = le32_to_cpu(temp);
tab_desc = qlcnic_get_table_desc(unirom, section);
if (tab_desc == NULL)
return NULL;
- offs = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * idx);
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
return (struct uni_data_desc *)&unirom[offs];
}
@@ -996,11 +992,13 @@ static u8 *
qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
{
u32 offs = QLCNIC_BOOTLD_START;
+ struct uni_data_desc *data_desc;
- if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
- offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
- QLCNIC_UNI_DIR_SECT_BOOTLD,
- QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
return (u8 *)&adapter->fw->data[offs];
}
@@ -1009,43 +1007,48 @@ static u8 *
qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
{
u32 offs = QLCNIC_IMAGE_START;
+ struct uni_data_desc *data_desc;
- if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
- offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
- QLCNIC_UNI_DIR_SECT_FW,
- QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
return (u8 *)&adapter->fw->data[offs];
}
-static __le32
-qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
{
- if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
- return cpu_to_le32((qlcnic_get_data_desc(adapter,
- QLCNIC_UNI_DIR_SECT_FW,
- QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ struct uni_data_desc *data_desc;
+ const u8 *unirom = adapter->fw->data;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return le32_to_cpu(data_desc->size);
else
- return cpu_to_le32(
- *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+ return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
}
-static __le32
-qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
{
struct uni_data_desc *fw_data_desc;
const struct firmware *fw = adapter->fw;
- __le32 major, minor, sub;
+ u32 major, minor, sub;
+ __le32 version_offset;
const u8 *ver_str;
int i, ret;
- if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
- return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
+ return le32_to_cpu(version_offset);
+ }
fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
QLCNIC_UNI_FIRMWARE_IDX_OFF);
- ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
- cpu_to_le32(fw_data_desc->size) - 17;
+ ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
+ le32_to_cpu(fw_data_desc->size) - 17;
for (i = 0; i < 12; i++) {
if (!strncmp(&ver_str[i], "REV=", 4)) {
@@ -1061,18 +1064,20 @@ qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
return 0;
}
-static __le32
-qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
{
const struct firmware *fw = adapter->fw;
- __le32 bios_ver, prd_off = adapter->file_prd_off;
+ u32 bios_ver, prd_off = adapter->file_prd_off;
+ u8 *version_offset;
+ __le32 temp;
- if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
- return cpu_to_le32(
- *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
+ return le32_to_cpu(*(__le32 *)version_offset);
+ }
- bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
- + QLCNIC_UNI_BIOS_VERSION_OFF));
+ temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
+ bios_ver = le32_to_cpu(temp);
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
}
@@ -1131,24 +1136,24 @@ static const char *fw_name[] = {
int
qlcnic_load_firmware(struct qlcnic_adapter *adapter)
{
- u64 *ptr64;
+ __le64 *ptr64;
u32 i, flashaddr, size;
const struct firmware *fw = adapter->fw;
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "loading firmware from %s\n",
- fw_name[adapter->fw_type]);
+ fw_name[adapter->ahw->fw_type]);
if (fw) {
- __le64 data;
+ u64 data;
size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
- ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
flashaddr = QLCNIC_BOOTLD_START;
for (i = 0; i < size; i++) {
- data = cpu_to_le64(ptr64[i]);
+ data = le64_to_cpu(ptr64[i]);
if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
return -EIO;
@@ -1156,13 +1161,13 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
- size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+ size = qlcnic_get_fw_size(adapter) / 8;
- ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
flashaddr = QLCNIC_IMAGE_START;
for (i = 0; i < size; i++) {
- data = cpu_to_le64(ptr64[i]);
+ data = le64_to_cpu(ptr64[i]);
if (qlcnic_pci_mem_write_2M(adapter,
flashaddr, data))
@@ -1171,9 +1176,9 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
- size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ size = qlcnic_get_fw_size(adapter) % 8;
if (size) {
- data = cpu_to_le64(ptr64[i]);
+ data = le64_to_cpu(ptr64[i]);
if (qlcnic_pci_mem_write_2M(adapter,
flashaddr, data))
@@ -1225,11 +1230,11 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
static int
qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
{
- __le32 val;
+ u32 val;
u32 ver, bios, min_size;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
- u8 fw_type = adapter->fw_type;
+ u8 fw_type = adapter->ahw->fw_type;
if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
if (qlcnic_validate_unified_romimage(adapter))
@@ -1237,8 +1242,8 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
min_size = QLCNIC_UNI_FW_MIN_SIZE;
} else {
- val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
- if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if (val != QLCNIC_BDINFO_MAGIC)
return -EINVAL;
min_size = QLCNIC_FW_MIN_SIZE;
@@ -1259,7 +1264,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
val = qlcnic_get_bios_version(adapter);
qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
- if ((__force u32)val != bios) {
+ if (val != bios) {
dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
fw_name[fw_type]);
return -EINVAL;
@@ -1274,7 +1279,7 @@ qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
{
u8 fw_type;
- switch (adapter->fw_type) {
+ switch (adapter->ahw->fw_type) {
case QLCNIC_UNKNOWN_ROMIMAGE:
fw_type = QLCNIC_UNIFIED_ROMIMAGE;
break;
@@ -1285,7 +1290,7 @@ qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
break;
}
- adapter->fw_type = fw_type;
+ adapter->ahw->fw_type = fw_type;
}
@@ -1295,16 +1300,17 @@ void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int rc;
- adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+ adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
next:
qlcnic_get_next_fwtype(adapter);
- if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
adapter->fw = NULL;
} else {
rc = request_firmware(&adapter->fw,
- fw_name[adapter->fw_type], &pdev->dev);
+ fw_name[adapter->ahw->fw_type],
+ &pdev->dev);
if (rc != 0)
goto next;
@@ -1324,633 +1330,3 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
release_firmware(adapter->fw);
adapter->fw = NULL;
}
-
-static void
-qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
- struct qlcnic_fw_msg *msg)
-{
- u32 cable_OUI;
- u16 cable_len;
- u16 link_speed;
- u8 link_status, module, duplex, autoneg;
- u8 lb_status = 0;
- struct net_device *netdev = adapter->netdev;
-
- adapter->has_link_events = 1;
-
- cable_OUI = msg->body[1] & 0xffffffff;
- cable_len = (msg->body[1] >> 32) & 0xffff;
- link_speed = (msg->body[1] >> 48) & 0xffff;
-
- link_status = msg->body[2] & 0xff;
- duplex = (msg->body[2] >> 16) & 0xff;
- autoneg = (msg->body[2] >> 24) & 0xff;
- lb_status = (msg->body[2] >> 32) & 0x3;
-
- module = (msg->body[2] >> 8) & 0xff;
- if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
- dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
- "length %d\n", cable_OUI, cable_len);
- else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
- dev_info(&netdev->dev, "unsupported cable length %d\n",
- cable_len);
-
- if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
- lb_status == QLCNIC_ELB_MODE))
- adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
-
- qlcnic_advert_link_change(adapter, link_status);
-
- if (duplex == LINKEVENT_FULL_DUPLEX)
- adapter->link_duplex = DUPLEX_FULL;
- else
- adapter->link_duplex = DUPLEX_HALF;
-
- adapter->module_type = module;
- adapter->link_autoneg = autoneg;
-
- if (link_status) {
- adapter->link_speed = link_speed;
- } else {
- adapter->link_speed = SPEED_UNKNOWN;
- adapter->link_duplex = DUPLEX_UNKNOWN;
- }
-}
-
-static void
-qlcnic_handle_fw_message(int desc_cnt, int index,
- struct qlcnic_host_sds_ring *sds_ring)
-{
- struct qlcnic_fw_msg msg;
- struct status_desc *desc;
- struct qlcnic_adapter *adapter;
- struct device *dev;
- int i = 0, opcode, ret;
-
- while (desc_cnt > 0 && i < 8) {
- desc = &sds_ring->desc_head[index];
- msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
- msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
-
- index = get_next_index(index, sds_ring->num_desc);
- desc_cnt--;
- }
-
- adapter = sds_ring->adapter;
- dev = &adapter->pdev->dev;
- opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
-
- switch (opcode) {
- case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
- qlcnic_handle_linkevent(adapter, &msg);
- break;
- case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
- ret = (u32)(msg.body[1]);
- switch (ret) {
- case 0:
- adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
- break;
- case 1:
- dev_info(dev, "loopback already in progress\n");
- adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
- break;
- case 2:
- dev_info(dev, "loopback cable is not connected\n");
- adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
- break;
- default:
- dev_info(dev, "loopback configure request failed,"
- " ret %x\n", ret);
- adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
- break;
- }
- break;
- default:
- break;
- }
-}
-
-static int
-qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring,
- struct qlcnic_rx_buffer *buffer)
-{
- struct sk_buff *skb;
- dma_addr_t dma;
- struct pci_dev *pdev = adapter->pdev;
-
- skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
- if (!skb) {
- adapter->stats.skb_alloc_failure++;
- return -ENOMEM;
- }
-
- skb_reserve(skb, NET_IP_ALIGN);
-
- dma = pci_map_single(pdev, skb->data,
- rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(pdev, dma)) {
- adapter->stats.rx_dma_map_error++;
- dev_kfree_skb_any(skb);
- return -ENOMEM;
- }
-
- buffer->skb = skb;
- buffer->dma = dma;
-
- return 0;
-}
-
-static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
-{
- struct qlcnic_rx_buffer *buffer;
- struct sk_buff *skb;
-
- buffer = &rds_ring->rx_buf_arr[index];
-
- if (unlikely(buffer->skb == NULL)) {
- WARN_ON(1);
- return NULL;
- }
-
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
-
- skb = buffer->skb;
-
- if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
- (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
- adapter->stats.csummed++;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb_checksum_none_assert(skb);
- }
-
- buffer->skb = NULL;
-
- return skb;
-}
-
-static inline int
-qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
- u16 *vlan_tag)
-{
- struct ethhdr *eth_hdr;
-
- if (!__vlan_get_tag(skb, vlan_tag)) {
- eth_hdr = (struct ethhdr *) skb->data;
- memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
- if (!adapter->pvid)
- return 0;
-
- if (*vlan_tag == adapter->pvid) {
- /* Outer vlan tag. Packet should follow non-vlan path */
- *vlan_tag = 0xffff;
- return 0;
- }
- if (adapter->flags & QLCNIC_TAGGING_ENABLED)
- return 0;
-
- return -EINVAL;
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring,
- int ring, u64 sts_data0)
-{
- struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- struct qlcnic_rx_buffer *buffer;
- struct sk_buff *skb;
- struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
- u16 vid = 0xffff;
-
- if (unlikely(ring >= adapter->max_rds_rings))
- return NULL;
-
- rds_ring = &recv_ctx->rds_rings[ring];
-
- index = qlcnic_get_sts_refhandle(sts_data0);
- if (unlikely(index >= rds_ring->num_desc))
- return NULL;
-
- buffer = &rds_ring->rx_buf_arr[index];
-
- length = qlcnic_get_sts_totallength(sts_data0);
- cksum = qlcnic_get_sts_status(sts_data0);
- pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
- skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
- if (!skb)
- return buffer;
-
- if (length > rds_ring->skb_size)
- skb_put(skb, rds_ring->skb_size);
- else
- skb_put(skb, length);
-
- if (pkt_offset)
- skb_pull(skb, pkt_offset);
-
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
- adapter->stats.rxdropped++;
- dev_kfree_skb(skb);
- return buffer;
- }
-
- skb->protocol = eth_type_trans(skb, netdev);
-
- if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
-
- napi_gro_receive(&sds_ring->napi, skb);
-
- adapter->stats.rx_pkts++;
- adapter->stats.rxbytes += length;
-
- return buffer;
-}
-
-#define QLC_TCP_HDR_SIZE 20
-#define QLC_TCP_TS_OPTION_SIZE 12
-#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_lro(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring,
- int ring, u64 sts_data0, u64 sts_data1)
-{
- struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- struct qlcnic_rx_buffer *buffer;
- struct sk_buff *skb;
- struct qlcnic_host_rds_ring *rds_ring;
- struct iphdr *iph;
- struct tcphdr *th;
- bool push, timestamp;
- int l2_hdr_offset, l4_hdr_offset;
- int index;
- u16 lro_length, length, data_offset;
- u32 seq_number;
- u16 vid = 0xffff;
-
- if (unlikely(ring > adapter->max_rds_rings))
- return NULL;
-
- rds_ring = &recv_ctx->rds_rings[ring];
-
- index = qlcnic_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
- return NULL;
-
- buffer = &rds_ring->rx_buf_arr[index];
-
- timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
- lro_length = qlcnic_get_lro_sts_length(sts_data0);
- l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
- l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
- push = qlcnic_get_lro_sts_push_flag(sts_data0);
- seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
-
- skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
- if (!skb)
- return buffer;
-
- if (timestamp)
- data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
- else
- data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
-
- skb_put(skb, lro_length + data_offset);
-
- skb_pull(skb, l2_hdr_offset);
-
- if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
- adapter->stats.rxdropped++;
- dev_kfree_skb(skb);
- return buffer;
- }
-
- skb->protocol = eth_type_trans(skb, netdev);
-
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
-
- length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
- iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- th->psh = push;
- th->seq = htonl(seq_number);
-
- length = skb->len;
-
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
- skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
-
- if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
- netif_receive_skb(skb);
-
- adapter->stats.lro_pkts++;
- adapter->stats.lrobytes += length;
-
- return buffer;
-}
-
-int
-qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
-{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
- struct list_head *cur;
- struct status_desc *desc;
- struct qlcnic_rx_buffer *rxbuf;
- u64 sts_data0, sts_data1;
-
- int count = 0;
- int opcode, ring, desc_cnt;
- u32 consumer = sds_ring->consumer;
-
- while (count < max) {
- desc = &sds_ring->desc_head[consumer];
- sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
- if (!(sts_data0 & STATUS_OWNER_HOST))
- break;
-
- desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
- opcode = qlcnic_get_sts_opcode(sts_data0);
-
- switch (opcode) {
- case QLCNIC_RXPKT_DESC:
- case QLCNIC_OLD_RXPKT_DESC:
- case QLCNIC_SYN_OFFLOAD:
- ring = qlcnic_get_sts_type(sts_data0);
- rxbuf = qlcnic_process_rcv(adapter, sds_ring,
- ring, sts_data0);
- break;
- case QLCNIC_LRO_DESC:
- ring = qlcnic_get_lro_sts_type(sts_data0);
- sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
- rxbuf = qlcnic_process_lro(adapter, sds_ring,
- ring, sts_data0, sts_data1);
- break;
- case QLCNIC_RESPONSE_DESC:
- qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
- default:
- goto skip;
- }
-
- WARN_ON(desc_cnt > 1);
-
- if (likely(rxbuf))
- list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
- else
- adapter->stats.null_rxbuf++;
-
-skip:
- for (; desc_cnt > 0; desc_cnt--) {
- desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] =
- cpu_to_le64(STATUS_OWNER_PHANTOM);
- consumer = get_next_index(consumer, sds_ring->num_desc);
- }
- count++;
- }
-
- for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- struct qlcnic_host_rds_ring *rds_ring =
- &adapter->recv_ctx->rds_rings[ring];
-
- if (!list_empty(&sds_ring->free_list[ring])) {
- list_for_each(cur, &sds_ring->free_list[ring]) {
- rxbuf = list_entry(cur,
- struct qlcnic_rx_buffer, list);
- qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
- }
- spin_lock(&rds_ring->lock);
- list_splice_tail_init(&sds_ring->free_list[ring],
- &rds_ring->free_list);
- spin_unlock(&rds_ring->lock);
- }
-
- qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
- }
-
- if (count) {
- sds_ring->consumer = consumer;
- writel(consumer, sds_ring->crb_sts_consumer);
- }
-
- return count;
-}
-
-void
-qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
-{
- struct rcv_desc *pdesc;
- struct qlcnic_rx_buffer *buffer;
- int count = 0;
- u32 producer;
- struct list_head *head;
-
- producer = rds_ring->producer;
-
- head = &rds_ring->free_list;
- while (!list_empty(head)) {
-
- buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
- if (!buffer->skb) {
- if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
- break;
- }
-
- count++;
- list_del(&buffer->list);
-
- /* make a rcv descriptor */
- pdesc = &rds_ring->desc_head[producer];
- pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
- pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-
- producer = get_next_index(producer, rds_ring->num_desc);
- }
-
- if (count) {
- rds_ring->producer = producer;
- writel((producer-1) & (rds_ring->num_desc-1),
- rds_ring->crb_rcv_producer);
- }
-}
-
-static void
-qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
-{
- struct rcv_desc *pdesc;
- struct qlcnic_rx_buffer *buffer;
- int count = 0;
- uint32_t producer;
- struct list_head *head;
-
- if (!spin_trylock(&rds_ring->lock))
- return;
-
- producer = rds_ring->producer;
-
- head = &rds_ring->free_list;
- while (!list_empty(head)) {
-
- buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
-
- if (!buffer->skb) {
- if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
- break;
- }
-
- count++;
- list_del(&buffer->list);
-
- /* make a rcv descriptor */
- pdesc = &rds_ring->desc_head[producer];
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
- pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
- pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-
- producer = get_next_index(producer, rds_ring->num_desc);
- }
-
- if (count) {
- rds_ring->producer = producer;
- writel((producer - 1) & (rds_ring->num_desc - 1),
- rds_ring->crb_rcv_producer);
- }
- spin_unlock(&rds_ring->lock);
-}
-
-static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
-{
- int i;
- unsigned char *data = skb->data;
-
- printk(KERN_INFO "\n");
- for (i = 0; i < skb->len; i++) {
- QLCDB(adapter, DRV, "%02x ", data[i]);
- if ((i & 0x0f) == 8)
- printk(KERN_INFO "\n");
- }
-}
-
-void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring,
- int ring, u64 sts_data0)
-{
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- struct sk_buff *skb;
- struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
-
- if (unlikely(ring >= adapter->max_rds_rings))
- return;
-
- rds_ring = &recv_ctx->rds_rings[ring];
-
- index = qlcnic_get_sts_refhandle(sts_data0);
- length = qlcnic_get_sts_totallength(sts_data0);
- if (unlikely(index >= rds_ring->num_desc))
- return;
-
- cksum = qlcnic_get_sts_status(sts_data0);
- pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
- skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
- if (!skb)
- return;
-
- if (length > rds_ring->skb_size)
- skb_put(skb, rds_ring->skb_size);
- else
- skb_put(skb, length);
-
- if (pkt_offset)
- skb_pull(skb, pkt_offset);
-
- if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
- adapter->diag_cnt++;
- else
- dump_skb(skb, adapter);
-
- dev_kfree_skb_any(skb);
- adapter->stats.rx_pkts++;
- adapter->stats.rxbytes += length;
-
- return;
-}
-
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
- struct status_desc *desc;
- u64 sts_data0;
- int ring, opcode, desc_cnt;
-
- u32 consumer = sds_ring->consumer;
-
- desc = &sds_ring->desc_head[consumer];
- sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
- if (!(sts_data0 & STATUS_OWNER_HOST))
- return;
-
- desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
- opcode = qlcnic_get_sts_opcode(sts_data0);
- switch (opcode) {
- case QLCNIC_RESPONSE_DESC:
- qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
- break;
- default:
- ring = qlcnic_get_sts_type(sts_data0);
- qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
- break;
- }
-
- for (; desc_cnt > 0; desc_cnt--) {
- desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
- consumer = get_next_index(consumer, sds_ring->num_desc);
- }
-
- sds_ring->consumer = consumer;
- writel(consumer, sds_ring->crb_sts_consumer);
-}
-
-void
-qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
- u8 alt_mac, u8 *mac)
-{
- u32 mac_low, mac_high;
- int i;
-
- mac_low = off1;
- mac_high = off2;
-
- if (alt_mac) {
- mac_low |= (mac_low >> 16) | (mac_high << 16);
- mac_high >>= 16;
- }
-
- for (i = 0; i < 2; i++)
- mac[i] = (u8)(mac_high >> ((1 - i) * 8));
- for (i = 2; i < 6; i++)
- mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 000000000000..6f82812d0fab
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,1309 @@
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+
+#include "qlcnic.h"
+
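+/* Fold selected bits of a MAC address into a small hash used to index the driver's filter table */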
+#define QLCNIC_MAC_HASH(MAC)\
+ ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode |= \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x07FFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
+
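+/* Queue a MAC/VLAN filter-add request on the Tx ring so the firmware learns this source address */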
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 uaddr, __le16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = vlan_id;
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+ smp_mb();
+}
+
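+/* Source MAC learning: refresh the matching filter entry if one exists, otherwise allocate a new one and queue a filter-add request */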
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ u64 src_addr = 0;
+ __le16 vlan_id = 0;
+ u8 hindex;
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ return;
+
+	/* Only NPAR-capable devices support VLAN-based learning */
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ vlan_id = first_desc->vlan_TCI;
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+
+ spin_lock(&adapter->mac_learn_lock);
+
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
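+/* Fill in VLAN, checksum-offload and LSO fields of the first Tx descriptor; for LSO the packet headers are also copied into following descriptors for the firmware */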
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+{
+ u8 l4proto, opcode = 0, hdr_len = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len, size;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u16 protocol = ntohs(skb->protocol);
+ u32 producer = tx_ring->producer;
+
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ flags = FLAGS_VLAN_TAGGED;
+ vlan_tci = ntohs(vh->h_vlan_TCI);
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = vlan_tx_tag_get(skb);
+ }
+ if (unlikely(adapter->pvid)) {
+ if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = adapter->pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (*(skb->data) & BIT_0) {
+ flags |= BIT_0;
+ memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+ }
+ opcode = TX_ETHER_PKT;
+ if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring */
+ copied = 0;
+ offset = 2;
+
+ if (flags & FLAGS_VLAN_OOB) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16,
+ copy_len - 16);
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+ size = (int)sizeof(struct cmd_desc_type0) - offset;
+ copy_len = min(size, (hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc +
+ offset, copy_len);
+ copied += copy_len;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (protocol == ETH_P_IP) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == ETH_P_IPV6) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
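+/* DMA-map the skb head and every page fragment; on failure unwind the mappings already created */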
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = skb_frag_size(frag);
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
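+/* Undo the DMA mappings created by qlcnic_map_tx_skb() */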
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ pbuf->skb = NULL;
+}
+
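+/* Zero the control words of a Tx command descriptor before it is reused */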
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+ desc[7] = 0ULL;
+}
+
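+/* Transmit entry point: map the skb, build the Tx command descriptors and update the producer index */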
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ struct ethhdr *phdr;
+ int i, k, frag_count, delta = 0;
+ u32 producer, num_txd;
+
+ num_txd = tx_ring->num_desc;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ goto drop_packet;
+ }
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_stop_queue(netdev);
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_start_queue(netdev);
+ } else {
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pdev = adapter->pdev;
+ first_desc = &tx_ring->desc_head[producer];
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
+ goto drop_packet;
+ }
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+			/* move to next desc. */
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
+
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ goto unwind_buff;
+
+ if (adapter->mac_learn)
+ qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ qlcnic_update_cmd_producer(tx_ring);
+
+ return NETDEV_TX_OK;
+
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
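+/* Propagate a link up/down change to the net stack (carrier state and Tx queue) */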
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw->linkup && !linkup) {
+ netdev_info(netdev, "NIC Link is down\n");
+ adapter->ahw->linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw->linkup && linkup) {
+ netdev_info(netdev, "NIC Link is up\n");
+ adapter->ahw->linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
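+/* Allocate and DMA-map a receive skb for the given RDS ring buffer */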
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+ if (!skb) {
+ adapter->stats.skb_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+
+ return 0;
+}
+
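+/* Replenish Rx buffers on the RDS ring; returns immediately if another context already holds the ring lock */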
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ uint32_t producer;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+
+ while (!list_empty(head)) {
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+
+ spin_unlock(&rds_ring->lock);
+}
+
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int i, done, count = 0;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->stats.xmit_on++;
+ }
+ }
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+	 * If everything is freed up to the consumer then check if the ring is
+	 * full. If the ring is full then check if more needs to be freed and
+	 * schedule the callback again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ int tx_complete, work_done;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len, link_speed;
+ u8 link_status, module, duplex, autoneg, lb_status = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->ahw->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+ lb_status = (msg->body[2] >> 32) & 0x3;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev,
+ "unsupported cable: OUI 0x%x, length %d\n",
+ cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+ lb_status == QLCNIC_ELB_MODE))
+ adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+
+ adapter->ahw->module_type = module;
+ adapter->ahw->link_autoneg = autoneg;
+
+ if (link_status) {
+ adapter->ahw->link_speed = link_speed;
+ } else {
+ adapter->ahw->link_speed = SPEED_UNKNOWN;
+ adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+}
+
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ struct qlcnic_adapter *adapter;
+ struct device *dev;
+ int i = 0, opcode, ret;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ adapter = sds_ring->adapter;
+ dev = &adapter->pdev->dev;
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(adapter, &msg);
+ break;
+ case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+ ret = (u32)(msg.body[1]);
+ switch (ret) {
+ case 0:
+ adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+ break;
+ case 1:
+ dev_info(dev, "loopback already in progress\n");
+ adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ break;
+ case 2:
+ dev_info(dev, "loopback cable is not connected\n");
+ adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ break;
+ default:
+ dev_info(dev,
+ "loopback configure request failed, err %x\n",
+ ret);
+ adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static struct sk_buff *
+qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index,
+ u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+ (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ buffer->skb = NULL;
+
+ return skb;
+}
+
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring, int ring,
+ u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+ u16 vid = 0xffff;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int index, l2_hdr_offset, l4_hdr_offset;
+ u16 lro_length, length, data_offset, vid = 0xffff;
+ u32 seq_number;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+
+ return buffer;
+}
+
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+ __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ int opcode, ring, desc_cnt, count = 0;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+ sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+ sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = owner_phantom;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ u32 producer;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer-1) & (rds_ring->num_desc-1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *data = skb->data;
+
+	pr_info("\n");
+ for (i = 0; i < skb->len; i++) {
+ QLCDB(adapter, DRV, "%02x ", data[i]);
+ if ((i & 0x0f) == 8)
+			pr_info("\n");
+ }
+}
+
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+ u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ length = qlcnic_get_sts_totallength(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return;
+}
+
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data0;
+ int ring, opcode, desc_cnt;
+
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ break;
+ default:
+ ring = qlcnic_get_sts_type(sts_data0);
+ qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+ break;
+ }
+
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
+{
+ u32 mac_low, mac_high;
+ int i;
+
+ mac_low = off1;
+ mac_high = off2;
+
+ if (alt_mac) {
+ mac_low |= (mac_low >> 16) | (mac_high << 16);
+ mac_high >>= 16;
+ }
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
+
+int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring, max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ max_sds_rings = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == max_sds_rings - 1)
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+ QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ else
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+ QLCNIC_NETDEV_WEIGHT*2);
+ }
+
+ return 0;
+}
+
+void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+}
+
+void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 24ad17ec7fcd..d833f5927891 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -34,29 +34,28 @@ static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
-static int use_msi = 1;
-module_param(use_msi, int, 0444);
+static int qlcnic_use_msi = 1;
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi, qlcnic_use_msi, int, 0444);
-static int use_msi_x = 1;
-module_param(use_msi_x, int, 0444);
+static int qlcnic_use_msi_x = 1;
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
-static int auto_fw_reset = 1;
-module_param(auto_fw_reset, int, 0644);
+static int qlcnic_auto_fw_reset = 1;
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
-static int load_fw_file;
-module_param(load_fw_file, int, 0444);
+static int qlcnic_load_fw_file;
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
-static int __devinit qlcnic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
@@ -66,17 +65,10 @@ static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
-static int qlcnic_poll(struct napi_struct *napi, int budget);
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
-static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
-
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -92,14 +84,15 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
-static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
-static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
static int qlcnic_vlan_rx_add(struct net_device *, u16);
static int qlcnic_vlan_rx_del(struct net_device *, u16);
+#define QLCNIC_IS_TSO_CAPABLE(adapter) \
+ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -115,9 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
-inline void
-qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
+inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
{
writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}
@@ -129,26 +120,34 @@ static const u32 msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
-static const
-struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
-
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
- writel(0, sds_ring->crb_intr_mask);
-}
-
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
-{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
+static const struct qlcnic_board_info qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter"
+	 " (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter"
+	 " (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
- writel(0x1, sds_ring->crb_intr_mask);
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
- if (!QLCNIC_IS_MSI_FAMILY(adapter))
- writel(0xfbff, adapter->tgt_mask_reg);
-}
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
-static int
-qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
int size = sizeof(struct qlcnic_host_sds_ring) * count;
@@ -157,8 +156,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
return recv_ctx->sds_rings == NULL;
}
-static void
-qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
if (recv_ctx->sds_rings != NULL)
kfree(recv_ctx->sds_rings);
@@ -166,80 +164,6 @@ qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static int
-qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
-{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
- return -ENOMEM;
-
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
-
- if (ring == adapter->max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
- else
- netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
- }
-
- return 0;
-}
-
-static void
-qlcnic_napi_del(struct qlcnic_adapter *adapter)
-{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_del(&sds_ring->napi);
- }
-
- qlcnic_free_sds_rings(adapter->recv_ctx);
-}
-
-static void
-qlcnic_napi_enable(struct qlcnic_adapter *adapter)
-{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
- return;
-
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- napi_enable(&sds_ring->napi);
- qlcnic_enable_int(sds_ring);
- }
-}
-
-static void
-qlcnic_napi_disable(struct qlcnic_adapter *adapter)
-{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
- return;
-
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
- napi_synchronize(&sds_ring->napi);
- napi_disable(&sds_ring->napi);
- }
-}
-
static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
@@ -363,7 +287,7 @@ static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
qlcnic_set_msix_bit(pdev, 0);
- if (adapter->msix_supported) {
+ if (adapter->ahw->msix_supported) {
enable_msix:
qlcnic_init_msix_entries(adapter, num_msix);
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
@@ -385,32 +309,31 @@ static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
return err;
}
-
static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
+ u32 offset, mask_reg;
const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct pci_dev *pdev = adapter->pdev;
- if (use_msi && !pci_enable_msi(pdev)) {
+ if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
adapter->flags |= QLCNIC_MSI_ENABLED;
- adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
- msi_tgt_status[adapter->ahw->pci_func]);
+ offset = msi_tgt_status[adapter->ahw->pci_func];
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
+ offset);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
return;
}
legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
-
- adapter->int_vec_bit = legacy_intrp->int_vec_bit;
- adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
- legacy_intrp->tgt_status_reg);
- adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
- legacy_intrp->tgt_mask_reg);
- adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
-
- adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
- ISR_INT_STATE_REG);
+ adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
+ offset = legacy_intrp->tgt_status_reg;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
+ mask_reg = legacy_intrp->tgt_mask_reg;
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
}
@@ -420,7 +343,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
int num_msix;
- if (adapter->msix_supported) {
+ if (adapter->ahw->msix_supported) {
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
QLCNIC_DEF_NUM_STS_DESC_RINGS));
} else
@@ -448,19 +371,25 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
iounmap(adapter->ahw->pci_base0);
}
-static int
-qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
- int i, ret = 0;
+ int i, ret = 0, j = 0;
+ u16 act_pci_func;
u8 pfn;
pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret)
+ goto err_pci_info;
+
+ act_pci_func = adapter->ahw->act_pci_func;
+
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
- QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ act_pci_func, GFP_KERNEL);
if (!adapter->npars) {
ret = -ENOMEM;
goto err_pci_info;
@@ -473,21 +402,25 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- ret = qlcnic_get_pci_info(adapter, pci_info);
- if (ret)
- goto err_eswitch;
-
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
+
if (pfn >= QLCNIC_MAX_PCI_FUNC) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
- adapter->npars[pfn].active = (u8)pci_info[i].active;
- adapter->npars[pfn].type = (u8)pci_info[i].type;
- adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
- adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
- adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
+
+ if (!pci_info[i].active ||
+ (pci_info[i].type != QLCNIC_TYPE_NIC))
+ continue;
+
+ adapter->npars[j].pci_func = pfn;
+ adapter->npars[j].active = (u8)pci_info[i].active;
+ adapter->npars[j].type = (u8)pci_info[i].type;
+ adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
+ adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+ j++;
}
for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
@@ -512,33 +445,29 @@ static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
u8 id;
- u32 ref_count;
int i, ret = 1;
u32 data = QLCNIC_MGMT_FUNC;
- void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* If other drivers are not in use set their privilege level */
- ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
ret = qlcnic_api_lock(adapter);
if (ret)
goto err_lock;
if (qlcnic_config_npars) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- id = i;
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
- id == adapter->ahw->pci_func)
+ for (i = 0; i < ahw->act_pci_func; i++) {
+ id = adapter->npars[i].pci_func;
+ if (id == ahw->pci_func)
continue;
data |= (qlcnic_config_npars &
QLC_DEV_SET_DRV(0xf, id));
}
} else {
- data = readl(priv_op);
- data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
+ data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
- adapter->ahw->pci_func));
+ ahw->pci_func));
}
- writel(data, priv_op);
+ QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
qlcnic_api_unlock(adapter);
err_lock:
return ret;
@@ -554,8 +483,8 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
- QLCNIC_FW_API);
+ adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
+ QLCNIC_FW_API);
/* Find PCI function number */
pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
@@ -573,29 +502,39 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (priv_level == QLCNIC_NON_PRIV_FUNC) {
- adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+ adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
dev_info(&adapter->pdev->dev,
"HAL Version: %d Non Privileged function\n",
- adapter->fw_hal_version);
+ adapter->ahw->fw_hal_version);
adapter->nic_ops = &qlcnic_vf_ops;
} else
adapter->nic_ops = &qlcnic_ops;
}
-static int
-qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
{
- void __iomem *mem_ptr0 = NULL;
- resource_size_t mem_base;
- unsigned long mem_len, pci_len0 = 0;
+ switch (dev_id) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ *bar = QLCNIC_82XX_BAR0_LENGTH;
+ break;
+ default:
+ *bar = 0;
+ }
+}
- struct pci_dev *pdev = adapter->pdev;
+static int qlcnic_setup_pci_map(struct pci_dev *pdev,
+ struct qlcnic_hardware_context *ahw)
+{
+ u32 offset;
+ void __iomem *mem_ptr0 = NULL;
+ unsigned long mem_len, pci_len0 = 0, bar0_len;
/* remap phys address */
- mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
- if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+ qlcnic_get_bar_length(pdev->device, &bar0_len);
+ if (mem_len >= bar0_len) {
mem_ptr0 = pci_ioremap_bar(pdev, 0);
if (mem_ptr0 == NULL) {
@@ -608,20 +547,15 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
}
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
-
- adapter->ahw->pci_base0 = mem_ptr0;
- adapter->ahw->pci_len0 = pci_len0;
-
- qlcnic_check_vf(adapter);
-
- adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
- adapter->ahw->pci_func)));
+ ahw->pci_base0 = mem_ptr0;
+ ahw->pci_len0 = pci_len0;
+ offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
+ qlcnic_get_ioaddr(ahw, offset);
return 0;
}
-static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
{
struct pci_dev *pdev = adapter->pdev;
int i, found = 0;
@@ -659,7 +593,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
if (fw_dump->tmpl_hdr)
@@ -691,7 +625,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
- adapter->msix_supported = !!use_msi_x;
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
adapter->num_txd = MAX_CMD_DESCRIPTORS;
@@ -704,19 +638,20 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
int err;
struct qlcnic_info nic_info;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
if (err)
return err;
- adapter->physical_port = (u8)nic_info.phys_port;
- adapter->switch_mode = nic_info.switch_mode;
- adapter->max_tx_ques = nic_info.max_tx_ques;
- adapter->max_rx_ques = nic_info.max_rx_ques;
- adapter->capabilities = nic_info.capabilities;
- adapter->max_mac_filters = nic_info.max_mac_filters;
- adapter->max_mtu = nic_info.max_mtu;
+ adapter->ahw->physical_port = (u8)nic_info.phys_port;
+ adapter->ahw->switch_mode = nic_info.switch_mode;
+ adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
+ adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
+ adapter->ahw->capabilities = nic_info.capabilities;
+ adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
+ adapter->ahw->max_mtu = nic_info.max_mtu;
- if (adapter->capabilities & BIT_6)
+ if (adapter->ahw->capabilities & BIT_6)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -724,9 +659,8 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
return err;
}
-static void
-qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
{
if (esw_cfg->discard_tagged)
adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
@@ -757,9 +691,8 @@ qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
return 0;
}
-static void
-qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
{
adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
QLCNIC_PROMISC_DISABLED);
@@ -776,8 +709,7 @@ qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
qlcnic_set_netdev_features(adapter, esw_cfg);
}
-static int
-qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
@@ -805,7 +737,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
@@ -851,7 +783,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (priv_level == QLCNIC_MGMT_FUNC) {
- adapter->op_mode = QLCNIC_MGMT_FUNC;
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
err = qlcnic_init_pci_info(adapter);
if (err)
return err;
@@ -859,12 +791,12 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
qlcnic_set_function_modes(adapter);
dev_info(&adapter->pdev->dev,
"HAL Version: %d, Management function\n",
- adapter->fw_hal_version);
+ adapter->ahw->fw_hal_version);
} else if (priv_level == QLCNIC_PRIV_FUNC) {
- adapter->op_mode = QLCNIC_PRIV_FUNC;
+ adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
dev_info(&adapter->pdev->dev,
"HAL Version: %d, Privileged function\n",
- adapter->fw_hal_version);
+ adapter->ahw->fw_hal_version);
}
}
@@ -873,8 +805,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
struct qlcnic_npar_info *npar;
@@ -883,16 +814,16 @@ qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
- esw_cfg.pci_func = i;
- esw_cfg.offload_flags = BIT_0;
+ esw_cfg.pci_func = adapter->npars[i].pci_func;
esw_cfg.mac_override = BIT_0;
esw_cfg.promisc_mode = BIT_0;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
- esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ if (qlcnic_82xx_check(adapter)) {
+ esw_cfg.offload_flags = BIT_0;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ }
if (qlcnic_config_switch_port(adapter, &esw_cfg))
return -EIO;
npar = &adapter->npars[i];
@@ -930,22 +861,24 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
return 0;
}
-static int
-qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
int i, err;
struct qlcnic_npar_info *npar;
struct qlcnic_info nic_info;
+ u8 pci_func;
- if (!adapter->need_fw_reset)
- return 0;
+ if (qlcnic_82xx_check(adapter))
+ if (!adapter->need_fw_reset)
+ return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
npar = &adapter->npars[i];
- if (npar->type != QLCNIC_TYPE_NIC)
- continue;
- err = qlcnic_get_nic_info(adapter, &nic_info, i);
+ pci_func = npar->pci_func;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter,
+ &nic_info, pci_func);
if (err)
return err;
nic_info.min_tx_bw = npar->min_bw;
@@ -956,11 +889,12 @@ qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
if (npar->enable_pm) {
err = qlcnic_config_port_mirroring(adapter,
- npar->dest_npar, 1, i);
+ npar->dest_npar, 1,
+ pci_func);
if (err)
return err;
}
- err = qlcnic_reset_eswitch_config(adapter, npar, i);
+ err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
if (err)
return err;
}
@@ -972,7 +906,7 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
u32 npar_state;
- if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
@@ -994,7 +928,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
int err;
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode != QLCNIC_MGMT_FUNC)
+ adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return 0;
err = qlcnic_set_default_offload_settings(adapter);
@@ -1021,14 +955,14 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
else if (!err)
goto check_fw_status;
- if (load_fw_file)
+ if (qlcnic_load_fw_file)
qlcnic_request_firmware(adapter);
else {
err = qlcnic_check_flash_fw_ver(adapter);
if (err)
goto err_out;
- adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
}
err = qlcnic_need_fw_reset(adapter);
@@ -1089,7 +1023,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
handler = qlcnic_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1148,7 +1082,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
@@ -1179,7 +1113,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
- adapter->reset_context = 0;
+ adapter->ahw->reset_context = 0;
set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -1312,7 +1246,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
- if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
@@ -1323,7 +1257,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
qlcnic_detach(adapter);
- adapter->diag_test = 0;
+ adapter->ahw->diag_test = 0;
adapter->max_sds_rings = max_sds_rings;
if (qlcnic_attach(adapter))
@@ -1393,7 +1327,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
adapter->max_sds_rings = 1;
- adapter->diag_test = test;
+ adapter->ahw->diag_test = test;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1413,14 +1347,14 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_post_rx_buffers(adapter, rds_ring);
}
- if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
}
- if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
adapter->ahw->loopback_state = 0;
qlcnic_linkevent_request(adapter, 1);
}
@@ -1485,14 +1419,14 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
}
static int
-qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
- struct net_device *netdev, u8 pci_using_dac)
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
+ int pci_using_dac)
{
int err;
struct pci_dev *pdev = adapter->pdev;
- adapter->mc_enabled = 0;
- adapter->max_mc_count = 38;
+ adapter->ahw->mc_enabled = 0;
+ adapter->ahw->max_mc_count = 38;
netdev->netdev_ops = &qlcnic_netdev_ops;
netdev->watchdog_timeo = 5*HZ;
@@ -1504,16 +1438,16 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (pci_using_dac)
+ if (pci_using_dac == 1)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->hw_features;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
netdev->hw_features |= NETIF_F_HW_VLAN_TX;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->hw_features |= NETIF_F_LRO;
netdev->features |= netdev->hw_features |
@@ -1530,7 +1464,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
{
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
@@ -1559,15 +1493,14 @@ qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
return -ENOMEM;
}
-static int __devinit
+static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
- int err;
+ int err, pci_using_dac = -1;
uint8_t revision_id;
- uint8_t pci_using_dac;
- char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
err = pci_enable_device(pdev);
if (err)
@@ -1616,9 +1549,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- err = qlcnic_setup_pci_map(adapter);
+ err = qlcnic_setup_pci_map(pdev, adapter->ahw);
if (err)
goto err_out_free_hw;
+ qlcnic_check_vf(adapter);
/* This will be reset for mezz cards */
adapter->portnum = adapter->ahw->pci_func;
@@ -1646,16 +1580,15 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "failed to read mac addr\n");
if (adapter->portnum == 0) {
- get_brd_name(adapter, brd_name);
-
+ qlcnic_get_board_name(adapter, board_name);
pr_info("%s: %s Board Chip rev 0x%x\n",
- module_name(THIS_MODULE),
- brd_name, adapter->ahw->revision_id);
+ module_name(THIS_MODULE),
+ board_name, adapter->ahw->revision_id);
}
qlcnic_clear_stats(adapter);
- err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
+ err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
if (err)
goto err_out_decr_ref;
@@ -1667,7 +1600,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
switch (adapter->ahw->port_type) {
case QLCNIC_GBE:
@@ -1724,7 +1659,7 @@ err_out_maintenance_mode:
return 0;
}
-static void __devexit qlcnic_remove(struct pci_dev *pdev)
+static void qlcnic_remove(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter;
struct net_device *netdev;
@@ -1746,7 +1681,8 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
if (adapter->eswitch != NULL)
kfree(adapter->eswitch);
- qlcnic_clr_all_drv_state(adapter, 0);
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1782,7 +1718,8 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- qlcnic_clr_all_drv_state(adapter, 0);
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1790,9 +1727,11 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (retval)
return retval;
- if (qlcnic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
}
return 0;
@@ -1927,435 +1866,14 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
adapter->fhash.fmax = 0;
}
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
-{
- struct cmd_desc_type0 *hwdesc;
- struct qlcnic_nic_req *req;
- struct qlcnic_mac_req *mac_req;
- struct qlcnic_vlan_req *vlan_req;
- u32 producer;
- u64 word;
-
- producer = tx_ring->producer;
- hwdesc = &tx_ring->desc_head[tx_ring->producer];
-
- req = (struct qlcnic_nic_req *)hwdesc;
- memset(req, 0, sizeof(struct qlcnic_nic_req));
- req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
-
- word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
- req->req_hdr = cpu_to_le64(word);
-
- mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
- mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
-
- vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
- vlan_req->vlan_id = vlan_id;
-
- tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
- smp_mb();
-}
-
-#define QLCNIC_MAC_HASH(MAC)\
- ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
-static void
-qlcnic_send_filter(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring,
- struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
-{
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
- struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
- struct hlist_head *head;
- u64 src_addr = 0;
- __le16 vlan_id = 0;
- u8 hindex;
-
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
-
- if (adapter->fhash.fnum >= adapter->fhash.fmax)
- return;
-
- /* Only NPAR capable devices support vlan based learning*/
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- vlan_id = first_desc->vlan_TCI;
- memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
- head = &(adapter->fhash.fhead[hindex]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
-
- if (jiffies >
- (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
- qlcnic_change_filter(adapter, src_addr, vlan_id,
- tx_ring);
- tmp_fil->ftime = jiffies;
- return;
- }
- }
-
- fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
- if (!fil)
- return;
-
- qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
- fil->ftime = jiffies;
- fil->vlan_id = vlan_id;
- memcpy(fil->faddr, &src_addr, ETH_ALEN);
- spin_lock(&adapter->mac_learn_lock);
- hlist_add_head(&(fil->fnode), head);
- adapter->fhash.fnum++;
- spin_unlock(&adapter->mac_learn_lock);
-}
-
-static int
-qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
-{
- u8 opcode = 0, hdr_len = 0;
- u16 flags = 0, vlan_tci = 0;
- int copied, offset, copy_len;
- struct cmd_desc_type0 *hwdesc;
- struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
- u16 protocol = ntohs(skb->protocol);
- u32 producer = tx_ring->producer;
-
- if (protocol == ETH_P_8021Q) {
- vh = (struct vlan_ethhdr *)skb->data;
- flags = FLAGS_VLAN_TAGGED;
- vlan_tci = vh->h_vlan_TCI;
- protocol = ntohs(vh->h_vlan_encapsulated_proto);
- } else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
- vlan_tci = vlan_tx_tag_get(skb);
- }
- if (unlikely(adapter->pvid)) {
- if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
- return -EIO;
- if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
- goto set_flags;
-
- flags = FLAGS_VLAN_OOB;
- vlan_tci = adapter->pvid;
- }
-set_flags:
- qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
- qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
- if (*(skb->data) & BIT_0) {
- flags |= BIT_0;
- memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
- }
- opcode = TX_ETHER_PKT;
- if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
- skb_shinfo(skb)->gso_size > 0) {
-
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-
- first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_desc->total_hdr_length = hdr_len;
-
- opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
-
- /* For LSO, we need to copy the MAC/IP/TCP headers into
- * the descriptor ring */
- copied = 0;
- offset = 2;
-
- if (flags & FLAGS_VLAN_OOB) {
- first_desc->total_hdr_length += VLAN_HLEN;
- first_desc->tcp_hdr_offset = VLAN_HLEN;
- first_desc->ip_hdr_offset = VLAN_HLEN;
- /* Only in case of TSO on vlan device */
- flags |= FLAGS_VLAN_TAGGED;
-
- /* Create a TSO vlan header template for firmware */
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) -
- offset, hdr_len + VLAN_HLEN);
-
- vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
- skb_copy_from_linear_data(skb, vh, 12);
- vh->h_vlan_proto = htons(ETH_P_8021Q);
- vh->h_vlan_TCI = htons(vlan_tci);
-
- skb_copy_from_linear_data_offset(skb, 12,
- (char *)vh + 16, copy_len - 16);
-
- copied = copy_len - VLAN_HLEN;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- while (copied < hdr_len) {
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) -
- offset, (hdr_len - copied));
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- skb_copy_from_linear_data_offset(skb, copied,
- (char *) hwdesc + offset, copy_len);
-
- copied += copy_len;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- tx_ring->producer = producer;
- smp_mb();
- adapter->stats.lso_frames++;
-
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 l4proto;
-
- if (protocol == ETH_P_IP) {
- l4proto = ip_hdr(skb)->protocol;
-
- if (l4proto == IPPROTO_TCP)
- opcode = TX_TCP_PKT;
- else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDP_PKT;
- } else if (protocol == ETH_P_IPV6) {
- l4proto = ipv6_hdr(skb)->nexthdr;
-
- if (l4proto == IPPROTO_TCP)
- opcode = TX_TCPV6_PKT;
- else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDPV6_PKT;
- }
- }
- first_desc->tcp_hdr_offset += skb_transport_offset(skb);
- first_desc->ip_hdr_offset += skb_network_offset(skb);
- qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-
- return 0;
-}
-
-static int
-qlcnic_map_tx_skb(struct pci_dev *pdev,
- struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
-{
- struct qlcnic_skb_frag *nf;
- struct skb_frag_struct *frag;
- int i, nr_frags;
- dma_addr_t map;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- nf = &pbuf->frag_array[0];
-
- map = pci_map_single(pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
- goto out_err;
-
- nf->dma = map;
- nf->length = skb_headlen(skb);
-
- for (i = 0; i < nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nf = &pbuf->frag_array[i+1];
-
- map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, map))
- goto unwind;
-
- nf->dma = map;
- nf->length = skb_frag_size(frag);
- }
-
- return 0;
-
-unwind:
- while (--i >= 0) {
- nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
- }
-
- nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
-
-out_err:
- return -ENOMEM;
-}
-
-static void
-qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
- struct qlcnic_cmd_buffer *pbuf)
-{
- struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
- int nr_frags = skb_shinfo(skb)->nr_frags;
- int i;
-
- for (i = 0; i < nr_frags; i++) {
- nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
- }
-
- nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
- pbuf->skb = NULL;
-}
-
-static inline void
-qlcnic_clear_cmddesc(u64 *desc)
-{
- desc[0] = 0ULL;
- desc[2] = 0ULL;
- desc[7] = 0ULL;
-}
-
-netdev_tx_t
-qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
- struct qlcnic_cmd_buffer *pbuf;
- struct qlcnic_skb_frag *buffrag;
- struct cmd_desc_type0 *hwdesc, *first_desc;
- struct pci_dev *pdev;
- struct ethhdr *phdr;
- int delta = 0;
- int i, k;
-
- u32 producer;
- int frag_count;
- u32 num_txd = tx_ring->num_desc;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- if (adapter->flags & QLCNIC_MACSPOOF) {
- phdr = (struct ethhdr *)skb->data;
- if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
- goto drop_packet;
- }
-
- frag_count = skb_shinfo(skb)->nr_frags + 1;
- /* 14 frags supported for normal packet and
- * 32 frags supported for TSO packet
- */
- if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
-
- for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
- delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
-
- if (!__pskb_pull_tail(skb, delta))
- goto drop_packet;
-
- frag_count = 1 + skb_shinfo(skb)->nr_frags;
- }
-
- if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
- if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
- netif_start_queue(netdev);
- else {
- adapter->stats.xmit_off++;
- return NETDEV_TX_BUSY;
- }
- }
-
- producer = tx_ring->producer;
- pbuf = &tx_ring->cmd_buf_arr[producer];
-
- pdev = adapter->pdev;
-
- first_desc = hwdesc = &tx_ring->desc_head[producer];
- qlcnic_clear_cmddesc((u64 *)hwdesc);
-
- if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
- adapter->stats.tx_dma_map_error++;
- goto drop_packet;
- }
-
- pbuf->skb = skb;
- pbuf->frag_count = frag_count;
-
- qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
- qlcnic_set_tx_port(first_desc, adapter->portnum);
-
- for (i = 0; i < frag_count; i++) {
-
- k = i % 4;
-
- if ((k == 0) && (i > 0)) {
- /* move to next desc.*/
- producer = get_next_index(producer, num_txd);
- hwdesc = &tx_ring->desc_head[producer];
- qlcnic_clear_cmddesc((u64 *)hwdesc);
- tx_ring->cmd_buf_arr[producer].skb = NULL;
- }
-
- buffrag = &pbuf->frag_array[i];
-
- hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
- switch (k) {
- case 0:
- hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
- break;
- case 1:
- hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
- break;
- case 2:
- hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
- break;
- case 3:
- hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
- break;
- }
- }
-
- tx_ring->producer = get_next_index(producer, num_txd);
- smp_mb();
-
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
- goto unwind_buff;
-
- if (adapter->mac_learn)
- qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
-
- adapter->stats.txbytes += skb->len;
- adapter->stats.xmitcalled++;
-
- qlcnic_update_cmd_producer(adapter, tx_ring);
-
- return NETDEV_TX_OK;
-
-unwind_buff:
- qlcnic_unmap_buffers(pdev, skb, pbuf);
-drop_packet:
- adapter->stats.txdropped++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 temp, temp_state, temp_val;
+ u32 temp_state, temp_val, temp = 0;
int rv = 0;
- temp = QLCRD32(adapter, CRB_TEMP_STATE);
+ if (qlcnic_82xx_check(adapter))
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
temp_state = qlcnic_get_temp_state(temp);
temp_val = qlcnic_get_temp_val(temp);
@@ -2367,7 +1885,7 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
temp_val);
rv = 1;
} else if (temp_state == QLCNIC_TEMP_WARN) {
- if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
dev_err(&netdev->dev,
"Device temperature %d degrees C "
"exceeds operating range."
@@ -2375,37 +1893,16 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
temp_val);
}
} else {
- if (adapter->temp == QLCNIC_TEMP_WARN) {
+ if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
dev_info(&netdev->dev,
"Device temperature is now %d degrees C"
" in normal range.\n", temp_val);
}
}
- adapter->temp = temp_state;
+ adapter->ahw->temp = temp_state;
return rv;
}
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
-{
- struct net_device *netdev = adapter->netdev;
-
- if (adapter->ahw->linkup && !linkup) {
- netdev_info(netdev, "NIC Link is down\n");
- adapter->ahw->linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
- } else if (!adapter->ahw->linkup && linkup) {
- netdev_info(netdev, "NIC Link is up\n");
- adapter->ahw->linkup = 1;
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
- }
-}
-
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -2418,7 +1915,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
adapter->need_fw_reset = 1;
else
- adapter->reset_context = 1;
+ adapter->ahw->reset_context = 1;
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -2442,7 +1939,7 @@ static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
status = readl(adapter->isr_int_vec);
- if (!(status & adapter->int_vec_bit))
+ if (!(status & adapter->ahw->int_vec_bit))
return IRQ_NONE;
/* check interrupt state machine, to be sure */
@@ -2474,7 +1971,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
return IRQ_NONE;
done:
- adapter->diag_cnt++;
+ adapter->ahw->diag_cnt++;
qlcnic_enable_int(sds_ring);
return IRQ_HANDLED;
}
@@ -2512,122 +2009,6 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
-{
- u32 sw_consumer, hw_consumer;
- int count = 0, i;
- struct qlcnic_cmd_buffer *buffer;
- struct pci_dev *pdev = adapter->pdev;
- struct net_device *netdev = adapter->netdev;
- struct qlcnic_skb_frag *frag;
- int done;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
-
- if (!spin_trylock(&adapter->tx_clean_lock))
- return 1;
-
- sw_consumer = tx_ring->sw_consumer;
- hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
-
- while (sw_consumer != hw_consumer) {
- buffer = &tx_ring->cmd_buf_arr[sw_consumer];
- if (buffer->skb) {
- frag = &buffer->frag_array[0];
- pci_unmap_single(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
- frag->dma = 0ULL;
- for (i = 1; i < buffer->frag_count; i++) {
- frag++;
- pci_unmap_page(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
- frag->dma = 0ULL;
- }
-
- adapter->stats.xmitfinished++;
- dev_kfree_skb_any(buffer->skb);
- buffer->skb = NULL;
- }
-
- sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
- if (++count >= MAX_STATUS_HANDLE)
- break;
- }
-
- if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
-
- smp_mb();
-
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
- if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
- adapter->stats.xmit_on++;
- }
- }
- adapter->tx_timeo_cnt = 0;
- }
- /*
- * If everything is freed up to consumer then check if the ring is full
- * If the ring is full then check if more needs to be freed and
- * schedule the call back again.
- *
- * This happens when there are 2 CPUs. One could be freeing and the
- * other filling it. If the ring is full when we get out of here and
- * the card has already interrupted the host then the host can miss the
- * interrupt.
- *
- * There is still a possible race condition and the host could miss an
- * interrupt. The card has to take care of this.
- */
- hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
- done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
-
- return done;
-}
-
-static int qlcnic_poll(struct napi_struct *napi, int budget)
-{
- struct qlcnic_host_sds_ring *sds_ring =
- container_of(napi, struct qlcnic_host_sds_ring, napi);
-
- struct qlcnic_adapter *adapter = sds_ring->adapter;
-
- int tx_complete;
- int work_done;
-
- tx_complete = qlcnic_process_cmd_ring(adapter);
-
- work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
- if ((work_done < budget) && tx_complete) {
- napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
- }
-
- return work_done;
-}
-
-static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
-{
- struct qlcnic_host_sds_ring *sds_ring =
- container_of(napi, struct qlcnic_host_sds_ring, napi);
-
- struct qlcnic_adapter *adapter = sds_ring->adapter;
- int work_done;
-
- work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
- if (work_done < budget) {
- napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
- }
-
- return work_done;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2871,7 +2252,7 @@ qlcnic_fwinit_work(struct work_struct *work)
return;
}
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
qlcnic_api_unlock(adapter);
goto wait_npar;
}
@@ -2987,9 +2368,9 @@ qlcnic_detach_work(struct work_struct *work)
goto err_ret;
}
- if (adapter->temp == QLCNIC_TEMP_PANIC) {
+ if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
- adapter->temp);
+ adapter->ahw->temp);
goto err_ret;
}
@@ -3114,7 +2495,7 @@ qlcnic_attach_work(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 npar_state;
- if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
@@ -3171,7 +2552,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->reset_context && auto_fw_reset) {
+ if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
qlcnic_reset_hw_context(adapter);
adapter->netdev->trans_start = jiffies;
}
@@ -3186,7 +2567,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
qlcnic_dev_request_reset(adapter);
- if (auto_fw_reset)
+ if (qlcnic_auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_err(&adapter->pdev->dev, "firmware hang detected\n");
@@ -3211,8 +2592,8 @@ detach:
adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
QLCNIC_DEV_NEED_RESET;
- if (auto_fw_reset &&
- !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
+ &adapter->state)) {
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
@@ -3283,7 +2664,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (qlcnic_api_lock(adapter))
return -EINVAL;
- if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+ if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -3395,96 +2776,9 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
-{
- return -EOPNOTSUPP;
-}
-
-static int
-qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
-{
- return -EOPNOTSUPP;
-}
-
-static ssize_t
-qlcnic_store_bridged_mode(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- unsigned long new;
- int ret = -EINVAL;
-
- if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
- goto err_out;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- goto err_out;
-
- if (strict_strtoul(buf, 2, &new))
- goto err_out;
-
- if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
- ret = len;
-
-err_out:
- return ret;
-}
-
-static ssize_t
-qlcnic_show_bridged_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int bridged_mode = 0;
-
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
- bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
-
- return sprintf(buf, "%d\n", bridged_mode);
-}
-
-static struct device_attribute dev_attr_bridged_mode = {
- .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
- .show = qlcnic_show_bridged_mode,
- .store = qlcnic_store_bridged_mode,
-};
-
-static ssize_t
-qlcnic_store_diag_mode(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- unsigned long new;
-
- if (strict_strtoul(buf, 2, &new))
- return -EINVAL;
-
- if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
- adapter->flags ^= QLCNIC_DIAG_ENABLED;
-
- return len;
-}
-
-static ssize_t
-qlcnic_show_diag_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
-}
-
-static struct device_attribute dev_attr_diag_mode = {
- .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
- .show = qlcnic_show_diag_mode,
- .store = qlcnic_store_diag_mode,
-};
-
int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
{
- if (!use_msi_x && !use_msi) {
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
netdev_info(netdev, "no msix or msi support, hence no rss\n");
return -EINVAL;
}
@@ -3532,859 +2826,6 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
return err;
}
-static int
-qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
- u8 *rate)
-{
- *rate = LSB(beacon);
- *state = MSB(beacon);
-
- QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
-
- if (!*state) {
- *rate = __QLCNIC_MAX_LED_RATE;
- return 0;
- } else if (*state > __QLCNIC_MAX_LED_STATE)
- return -EINVAL;
-
- if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
- return -EINVAL;
-
- return 0;
-}
-
-static ssize_t
-qlcnic_store_beacon(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int max_sds_rings = adapter->max_sds_rings;
- u16 beacon;
- u8 b_state, b_rate;
- int err;
-
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
- dev_warn(dev, "LED test not supported for non "
- "privilege function\n");
- return -EOPNOTSUPP;
- }
-
- if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
-
- memcpy(&beacon, buf, sizeof(u16));
- err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
- if (err)
- return err;
-
- if (adapter->ahw->beacon_state == b_state)
- return len;
-
- rtnl_lock();
-
- if (!adapter->ahw->beacon_state)
- if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
- rtnl_unlock();
- return -EBUSY;
- }
-
- if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
- err = -EIO;
- goto out;
- }
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
- if (err)
- goto out;
- set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
- }
-
- err = qlcnic_config_led(adapter, b_state, b_rate);
-
- if (!err) {
- err = len;
- adapter->ahw->beacon_state = b_state;
- }
-
- if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
-
- out:
- if (!adapter->ahw->beacon_state)
- clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
- rtnl_unlock();
-
- return err;
-}
-
-static ssize_t
-qlcnic_show_beacon(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
-}
-
-static struct device_attribute dev_attr_beacon = {
- .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
- .show = qlcnic_show_beacon,
- .store = qlcnic_store_beacon,
-};
-
-static int
-qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
- loff_t offset, size_t size)
-{
- size_t crb_size = 4;
-
- if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
- return -EIO;
-
- if (offset < QLCNIC_PCI_CRBSPACE) {
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
- QLCNIC_PCI_CAMQM_END))
- crb_size = 8;
- else
- return -EINVAL;
- }
-
- if ((size != crb_size) || (offset & (crb_size-1)))
- return -EINVAL;
-
- return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
- int ret;
-
- ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
- int ret;
-
- ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
- return size;
-}
-
-static int
-qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
- loff_t offset, size_t size)
-{
- if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
- return -EIO;
-
- if ((size != 8) || (offset & 0x7))
- return -EIO;
-
- return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u64 data;
- int ret;
-
- ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
- return -EIO;
-
- memcpy(buf, &data, size);
-
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u64 data;
- int ret;
-
- ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
- if (ret != 0)
- return ret;
-
- memcpy(&data, buf, size);
-
- if (qlcnic_pci_mem_write_2M(adapter, offset, data))
- return -EIO;
-
- return size;
-}
-
-static struct bin_attribute bin_attr_crb = {
- .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_crb,
- .write = qlcnic_sysfs_write_crb,
-};
-
-static struct bin_attribute bin_attr_mem = {
- .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_mem,
- .write = qlcnic_sysfs_write_mem,
-};
-
-static int
-validate_pm_config(struct qlcnic_adapter *adapter,
- struct qlcnic_pm_func_cfg *pm_cfg, int count)
-{
-
- u8 src_pci_func, s_esw_id, d_esw_id;
- u8 dest_pci_func;
- int i;
-
- for (i = 0; i < count; i++) {
- src_pci_func = pm_cfg[i].pci_func;
- dest_pci_func = pm_cfg[i].dest_npar;
- if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
- || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
-
- s_esw_id = adapter->npars[src_pci_func].phy_port;
- d_esw_id = adapter->npars[dest_pci_func].phy_port;
-
- if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
-
- }
- return 0;
-
-}
-
-static ssize_t
-qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg *pm_cfg;
- u32 id, action, pci_func;
- int count, rem, i, ret;
-
- count = size / sizeof(struct qlcnic_pm_func_cfg);
- rem = size % sizeof(struct qlcnic_pm_func_cfg);
- if (rem)
- return QL_STATUS_INVALID_PARAM;
-
- pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
-
- ret = validate_pm_config(adapter, pm_cfg, count);
- if (ret)
- return ret;
- for (i = 0; i < count; i++) {
- pci_func = pm_cfg[i].pci_func;
- action = !!pm_cfg[i].action;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_port_mirroring(adapter, id,
- action, pci_func);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < count; i++) {
- pci_func = pm_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
- adapter->npars[pci_func].dest_npar = id;
- }
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
-
- if (size != sizeof(pm_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- pm_cfg[i].action = adapter->npars[i].enable_pm;
- pm_cfg[i].dest_npar = 0;
- pm_cfg[i].pci_func = i;
- }
- memcpy(buf, &pm_cfg, size);
-
- return size;
-}
-
-static int
-validate_esw_config(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg, int count)
-{
- u32 op_mode;
- u8 pci_func;
- int i;
-
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
-
- for (i = 0; i < count; i++) {
- pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->op_mode == QLCNIC_MGMT_FUNC)
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
-
- switch (esw_cfg[i].op_mode) {
- case QLCNIC_PORT_DEFAULTS:
- if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
- QLCNIC_NON_PRIV_FUNC) {
- if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
- if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
- if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
- }
- break;
- case QLCNIC_ADD_VLAN:
- if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
- if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
- break;
- case QLCNIC_DEL_VLAN:
- if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
- break;
- default:
- return QL_STATUS_INVALID_PARAM;
- }
- }
- return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg *esw_cfg;
- struct qlcnic_npar_info *npar;
- int count, rem, i, ret;
- u8 pci_func, op_mode = 0;
-
- count = size / sizeof(struct qlcnic_esw_func_cfg);
- rem = size % sizeof(struct qlcnic_esw_func_cfg);
- if (rem)
- return QL_STATUS_INVALID_PARAM;
-
- esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
- ret = validate_esw_config(adapter, esw_cfg, count);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- if (adapter->op_mode == QLCNIC_MGMT_FUNC)
- if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
- continue;
-
- op_mode = esw_cfg[i].op_mode;
- qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
- esw_cfg[i].op_mode = op_mode;
- esw_cfg[i].pci_func = adapter->ahw->pci_func;
-
- switch (esw_cfg[i].op_mode) {
- case QLCNIC_PORT_DEFAULTS:
- qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
- break;
- case QLCNIC_ADD_VLAN:
- qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
- break;
- case QLCNIC_DEL_VLAN:
- esw_cfg[i].vlan_id = 0;
- qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
- break;
- }
- }
-
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- goto out;
-
- for (i = 0; i < count; i++) {
- pci_func = esw_cfg[i].pci_func;
- npar = &adapter->npars[pci_func];
- switch (esw_cfg[i].op_mode) {
- case QLCNIC_PORT_DEFAULTS:
- npar->promisc_mode = esw_cfg[i].promisc_mode;
- npar->mac_override = esw_cfg[i].mac_override;
- npar->offload_flags = esw_cfg[i].offload_flags;
- npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
- npar->discard_tagged = esw_cfg[i].discard_tagged;
- break;
- case QLCNIC_ADD_VLAN:
- npar->pvid = esw_cfg[i].vlan_id;
- break;
- case QLCNIC_DEL_VLAN:
- npar->pvid = 0;
- break;
- }
- }
-out:
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- u8 i;
-
- if (size != sizeof(esw_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- esw_cfg[i].pci_func = i;
- if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
- }
- memcpy(buf, &esw_cfg, size);
-
- return size;
-}
-
-static int
-validate_npar_config(struct qlcnic_adapter *adapter,
- struct qlcnic_npar_func_cfg *np_cfg, int count)
-{
- u8 pci_func, i;
-
- for (i = 0; i < count; i++) {
- pci_func = np_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
-
- if (!IS_VALID_BW(np_cfg[i].min_bw) ||
- !IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
- }
- return 0;
-}
-
-static ssize_t
-qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg *np_cfg;
- int i, count, rem, ret;
- u8 pci_func;
-
- count = size / sizeof(struct qlcnic_npar_func_cfg);
- rem = size % sizeof(struct qlcnic_npar_func_cfg);
- if (rem)
- return QL_STATUS_INVALID_PARAM;
-
- np_cfg = (struct qlcnic_npar_func_cfg *) buf;
- ret = validate_npar_config(adapter, np_cfg, count);
- if (ret)
- return ret;
-
- for (i = 0; i < count ; i++) {
- pci_func = np_cfg[i].pci_func;
- ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
- if (ret)
- return ret;
- nic_info.pci_func = pci_func;
- nic_info.min_tx_bw = np_cfg[i].min_bw;
- nic_info.max_tx_bw = np_cfg[i].max_bw;
- ret = qlcnic_set_nic_info(adapter, &nic_info);
- if (ret)
- return ret;
- adapter->npars[i].min_bw = nic_info.min_tx_bw;
- adapter->npars[i].max_bw = nic_info.max_tx_bw;
- }
-
- return size;
-
-}
-static ssize_t
-qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
- int i, ret;
-
- if (size != sizeof(np_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- ret = qlcnic_get_nic_info(adapter, &nic_info, i);
- if (ret)
- return ret;
-
- np_cfg[i].pci_func = i;
- np_cfg[i].op_mode = (u8)nic_info.op_mode;
- np_cfg[i].port_num = nic_info.phys_port;
- np_cfg[i].fw_capab = nic_info.capabilities;
- np_cfg[i].min_bw = nic_info.min_tx_bw ;
- np_cfg[i].max_bw = nic_info.max_tx_bw;
- np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
- np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
- }
- memcpy(buf, &np_cfg, size);
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_statistics port_stats;
- int ret;
-
- if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
-
- if (offset >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- memset(&port_stats, 0, size);
- ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
- &port_stats.rx);
- if (ret)
- return ret;
-
- ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
- &port_stats.tx);
- if (ret)
- return ret;
-
- memcpy(buf, &port_stats, size);
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_statistics esw_stats;
- int ret;
-
- if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
-
- if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
-
- memset(&esw_stats, 0, size);
- ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
- &esw_stats.rx);
- if (ret)
- return ret;
-
- ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
- &esw_stats.tx);
- if (ret)
- return ret;
-
- memcpy(buf, &esw_stats, size);
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int ret;
-
- if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
-
- ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
- QLCNIC_QUERY_RX_COUNTER);
- if (ret)
- return ret;
-
- ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
- QLCNIC_QUERY_TX_COUNTER);
- if (ret)
- return ret;
-
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
-
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int ret;
-
- if (offset >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
- QLCNIC_QUERY_RX_COUNTER);
- if (ret)
- return ret;
-
- ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
- QLCNIC_QUERY_TX_COUNTER);
- if (ret)
- return ret;
-
- return size;
-}
-
-static ssize_t
-qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
- struct qlcnic_pci_info *pci_info;
- int i, ret;
-
- if (size != sizeof(pci_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
- if (!pci_info)
- return -ENOMEM;
-
- ret = qlcnic_get_pci_info(adapter, pci_info);
- if (ret) {
- kfree(pci_info);
- return ret;
- }
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
- pci_cfg[i].pci_func = pci_info[i].id;
- pci_cfg[i].func_type = pci_info[i].type;
- pci_cfg[i].port_num = pci_info[i].default_port;
- pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
- pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
- memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
- }
- memcpy(buf, &pci_cfg, size);
- kfree(pci_info);
- return size;
-}
-static struct bin_attribute bin_attr_npar_config = {
- .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_npar_config,
- .write = qlcnic_sysfs_write_npar_config,
-};
-
-static struct bin_attribute bin_attr_pci_config = {
- .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
-};
-
-static struct bin_attribute bin_attr_port_stats = {
- .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_get_port_stats,
- .write = qlcnic_sysfs_clear_port_stats,
-};
-
-static struct bin_attribute bin_attr_esw_stats = {
- .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_get_esw_stats,
- .write = qlcnic_sysfs_clear_esw_stats,
-};
-
-static struct bin_attribute bin_attr_esw_config = {
- .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_esw_config,
- .write = qlcnic_sysfs_write_esw_config,
-};
-
-static struct bin_attribute bin_attr_pm_config = {
- .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
- .size = 0,
- .read = qlcnic_sysfs_read_pm_config,
- .write = qlcnic_sysfs_write_pm_config,
-};
-
-static void
-qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
-
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
- if (device_create_file(dev, &dev_attr_bridged_mode))
- dev_warn(dev,
- "failed to create bridged_mode sysfs entry\n");
-}
-
-static void
-qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
-
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
- device_remove_file(dev, &dev_attr_bridged_mode);
-}
-
-static void
-qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
- if (device_create_bin_file(dev, &bin_attr_port_stats))
- dev_info(dev, "failed to create port stats sysfs entry");
-
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
- return;
- if (device_create_file(dev, &dev_attr_diag_mode))
- dev_info(dev, "failed to create diag_mode sysfs entry\n");
- if (device_create_bin_file(dev, &bin_attr_crb))
- dev_info(dev, "failed to create crb sysfs entry\n");
- if (device_create_bin_file(dev, &bin_attr_mem))
- dev_info(dev, "failed to create mem sysfs entry\n");
-
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
-
- if (device_create_bin_file(dev, &bin_attr_pci_config))
- dev_info(dev, "failed to create pci config sysfs entry");
- if (device_create_file(dev, &dev_attr_beacon))
- dev_info(dev, "failed to create beacon sysfs entry");
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return;
- if (device_create_bin_file(dev, &bin_attr_esw_config))
- dev_info(dev, "failed to create esw config sysfs entry");
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- return;
- if (device_create_bin_file(dev, &bin_attr_npar_config))
- dev_info(dev, "failed to create npar config sysfs entry");
- if (device_create_bin_file(dev, &bin_attr_pm_config))
- dev_info(dev, "failed to create pm config sysfs entry");
- if (device_create_bin_file(dev, &bin_attr_esw_stats))
- dev_info(dev, "failed to create eswitch stats sysfs entry");
-}
-
-static void
-qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
-{
- struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-
- device_remove_bin_file(dev, &bin_attr_port_stats);
-
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
- return;
- device_remove_file(dev, &dev_attr_diag_mode);
- device_remove_bin_file(dev, &bin_attr_crb);
- device_remove_bin_file(dev, &bin_attr_mem);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
- device_remove_bin_file(dev, &bin_attr_pci_config);
- device_remove_file(dev, &dev_attr_beacon);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return;
- device_remove_bin_file(dev, &bin_attr_esw_config);
- if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- return;
- device_remove_bin_file(dev, &bin_attr_npar_config);
- device_remove_bin_file(dev, &bin_attr_pm_config);
- device_remove_bin_file(dev, &bin_attr_esw_stats);
-}
-
#ifdef CONFIG_INET
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
@@ -4523,7 +2964,7 @@ static void
qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static const struct pci_error_handlers qlcnic_err_handler = {
+static struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
@@ -4533,7 +2974,7 @@ static struct pci_driver qlcnic_driver = {
.name = qlcnic_driver_name,
.id_table = qlcnic_pci_tbl,
.probe = qlcnic_probe,
- .remove = __devexit_p(qlcnic_remove),
+ .remove = qlcnic_remove,
#ifdef CONFIG_PM
.suspend = qlcnic_suspend,
.resume = qlcnic_resume,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644
index 000000000000..0b8d8625834c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -0,0 +1,628 @@
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#include <net/ip.h>
+
+#define QLCNIC_DUMP_WCRB BIT_0
+#define QLCNIC_DUMP_RWCRB BIT_1
+#define QLCNIC_DUMP_ANDCRB BIT_2
+#define QLCNIC_DUMP_ORCRB BIT_3
+#define QLCNIC_DUMP_POLLCRB BIT_4
+#define QLCNIC_DUMP_RD_SAVE BIT_5
+#define QLCNIC_DUMP_WRT_SAVED BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
+#define QLCNIC_DUMP_SKIP BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX 0xff
+
+struct qlcnic_common_entry_hdr {
+ u32 type;
+ u32 offset;
+ u32 cap_size;
+ u8 mask;
+ u8 rsvd[2];
+ u8 flags;
+} __packed;
+
+struct __crb {
+ u32 addr;
+ u8 stride;
+ u8 rsvd1[3];
+ u32 data_size;
+ u32 no_ops;
+ u32 rsvd2[4];
+} __packed;
+
+struct __ctrl {
+ u32 addr;
+ u8 stride;
+ u8 index_a;
+ u16 timeout;
+ u32 data_size;
+ u32 no_ops;
+ u8 opcode;
+ u8 index_v;
+ u8 shl_val;
+ u8 shr_val;
+ u32 val1;
+ u32 val2;
+ u32 val3;
+} __packed;
+
+struct __cache {
+ u32 addr;
+ u16 stride;
+ u16 init_tag_val;
+ u32 size;
+ u32 no_ops;
+ u32 ctrl_addr;
+ u32 ctrl_val;
+ u32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_num;
+ u8 rsvd1[2];
+} __packed;
+
+struct __ocm {
+ u8 rsvd[8];
+ u32 size;
+ u32 no_ops;
+ u8 rsvd1[8];
+ u32 read_addr;
+ u32 read_addr_stride;
+} __packed;
+
+struct __mem {
+ u8 rsvd[24];
+ u32 addr;
+ u32 size;
+} __packed;
+
+struct __mux {
+ u32 addr;
+ u8 rsvd[4];
+ u32 size;
+ u32 no_ops;
+ u32 val;
+ u32 val_stride;
+ u32 read_addr;
+ u8 rsvd2[4];
+} __packed;
+
+struct __queue {
+ u32 sel_addr;
+ u16 stride;
+ u8 rsvd[2];
+ u32 size;
+ u32 no_ops;
+ u8 rsvd2[8];
+ u32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u8 rsvd3[2];
+} __packed;
+
+struct qlcnic_dump_entry {
+ struct qlcnic_common_entry_hdr hdr;
+ union {
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ } region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+ QLCNIC_DUMP_NOP = 0,
+ QLCNIC_DUMP_READ_CRB = 1,
+ QLCNIC_DUMP_READ_MUX = 2,
+ QLCNIC_DUMP_QUEUE = 3,
+ QLCNIC_DUMP_BRD_CONFIG = 4,
+ QLCNIC_DUMP_READ_OCM = 6,
+ QLCNIC_DUMP_PEG_REG = 7,
+ QLCNIC_DUMP_L1_DTAG = 8,
+ QLCNIC_DUMP_L1_ITAG = 9,
+ QLCNIC_DUMP_L1_DATA = 11,
+ QLCNIC_DUMP_L1_INST = 12,
+ QLCNIC_DUMP_L2_DTAG = 21,
+ QLCNIC_DUMP_L2_ITAG = 22,
+ QLCNIC_DUMP_L2_DATA = 23,
+ QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_READ_ROM = 71,
+ QLCNIC_DUMP_READ_MEM = 72,
+ QLCNIC_DUMP_READ_CTRL = 98,
+ QLCNIC_DUMP_TLHDR = 99,
+ QLCNIC_DUMP_RDEND = 255
+};
+
+struct qlcnic_dump_operations {
+ enum qlcnic_minidump_opcode opcode;
+ u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+ __le32 *);
+};
+
+static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+ u32 dest;
+ void __iomem *window_reg;
+
+ dest = addr & 0xFFFF0000;
+ window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, window_reg);
+ readl(window_reg);
+ window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ *data = readl(window_reg);
+}
+
+static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+ u32 dest;
+ void __iomem *window_reg;
+
+ dest = addr & 0xFFFF0000;
+ window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, window_reg);
+ readl(window_reg);
+ window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ writel(data, window_reg);
+ readl(window_reg);
+}
+
+/* FW dump related functions */
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 addr, data;
+ struct __crb *crb = &entry->region.crb;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ addr = crb->addr;
+
+ for (i = 0; i < crb->no_ops; i++) {
+ qlcnic_read_dump_reg(addr, base, &data);
+ *buffer++ = cpu_to_le32(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += crb->stride;
+ }
+ return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, k, timeout = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ u32 addr, data;
+ u8 no_ops;
+ struct __ctrl *ctr = &entry->region.ctrl;
+ struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+
+ addr = ctr->addr;
+ no_ops = ctr->no_ops;
+
+ for (i = 0; i < no_ops; i++) {
+ k = 0;
+ for (k = 0; k < 8; k++) {
+ if (!(ctr->opcode & (1 << k)))
+ continue;
+ switch (1 << k) {
+ case QLCNIC_DUMP_WCRB:
+ qlcnic_write_dump_reg(addr, base, ctr->val1);
+ break;
+ case QLCNIC_DUMP_RWCRB:
+ qlcnic_read_dump_reg(addr, base, &data);
+ qlcnic_write_dump_reg(addr, base, data);
+ break;
+ case QLCNIC_DUMP_ANDCRB:
+ qlcnic_read_dump_reg(addr, base, &data);
+ qlcnic_write_dump_reg(addr, base,
+ data & ctr->val2);
+ break;
+ case QLCNIC_DUMP_ORCRB:
+ qlcnic_read_dump_reg(addr, base, &data);
+ qlcnic_write_dump_reg(addr, base,
+ data | ctr->val3);
+ break;
+ case QLCNIC_DUMP_POLLCRB:
+ while (timeout <= ctr->timeout) {
+ qlcnic_read_dump_reg(addr, base, &data);
+ if ((data & ctr->val2) == ctr->val1)
+ break;
+ msleep(1);
+ timeout++;
+ }
+ if (timeout > ctr->timeout) {
+ dev_info(&adapter->pdev->dev,
+ "Timed out, aborting poll CRB\n");
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_DUMP_RD_SAVE:
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ qlcnic_read_dump_reg(addr, base, &data);
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ case QLCNIC_DUMP_WRT_SAVED:
+ if (ctr->index_v)
+ data = t_hdr->saved_state[ctr->index_v];
+ else
+ data = ctr->val1;
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ qlcnic_write_dump_reg(addr, base, data);
+ break;
+ case QLCNIC_DUMP_MOD_SAVE_ST:
+ data = t_hdr->saved_state[ctr->index_v];
+ data <<= ctr->shl_val;
+ data >>= ctr->shr_val;
+ if (ctr->val2)
+ data &= ctr->val2;
+ data |= ctr->val3;
+ data += ctr->val1;
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode\n");
+ break;
+ }
+ }
+ addr += ctr->stride;
+ }
+ return 0;
+}
+
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int loop;
+ u32 val, data = 0;
+ struct __mux *mux = &entry->region.mux;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ val = mux->val;
+ for (loop = 0; loop < mux->no_ops; loop++) {
+ qlcnic_write_dump_reg(mux->addr, base, val);
+ qlcnic_read_dump_reg(mux->read_addr, base, &data);
+ *buffer++ = cpu_to_le32(val);
+ *buffer++ = cpu_to_le32(data);
+ val += mux->val_stride;
+ }
+ return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, loop;
+ u32 cnt, addr, data, que_id = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __queue *que = &entry->region.que;
+
+ addr = que->read_addr;
+ cnt = que->read_addr_cnt;
+
+ for (loop = 0; loop < que->no_ops; loop++) {
+ qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+ addr = que->read_addr;
+ for (i = 0; i < cnt; i++) {
+ qlcnic_read_dump_reg(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += que->read_addr_stride;
+ }
+ que_id += que->stride;
+ }
+ return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 data;
+ void __iomem *addr;
+ struct __ocm *ocm = &entry->region.ocm;
+
+ addr = adapter->ahw->pci_base0 + ocm->read_addr;
+ for (i = 0; i < ocm->no_ops; i++) {
+ data = readl(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += ocm->read_addr_stride;
+ }
+ return ocm->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, count = 0;
+ u32 fl_addr, size, val, lck_val, addr;
+ struct __mem *rom = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ fl_addr = rom->addr;
+ size = rom->size/4;
+lock_try:
+ lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(10);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ addr = fl_addr & 0xFFFF0000;
+ qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+ addr = LSW(fl_addr) + FLASH_ROM_DATA;
+ qlcnic_read_dump_reg(addr, base, &val);
+ fl_addr += 4;
+ *buffer++ = cpu_to_le32(val);
+ }
+ readl(base + QLCNIC_FLASH_SEM2_ULK);
+ return rom->size;
+}
+
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l1 = &entry->region.cache;
+
+ val = l1->init_tag_val;
+
+ for (i = 0; i < l1->no_ops; i++) {
+ qlcnic_write_dump_reg(l1->addr, base, val);
+ qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ addr = l1->read_addr;
+ cnt = l1->read_addr_num;
+ while (cnt) {
+ qlcnic_read_dump_reg(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l1->read_addr_stride;
+ cnt--;
+ }
+ val += l1->stride;
+ }
+ return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ u8 poll_mask, poll_to, time_out = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l2 = &entry->region.cache;
+
+ val = l2->init_tag_val;
+ poll_mask = LSB(MSW(l2->ctrl_val));
+ poll_to = MSB(MSW(l2->ctrl_val));
+
+ for (i = 0; i < l2->no_ops; i++) {
+ qlcnic_write_dump_reg(l2->addr, base, val);
+ if (LSW(l2->ctrl_val))
+ qlcnic_write_dump_reg(l2->ctrl_addr, base,
+ LSW(l2->ctrl_val));
+ if (!poll_mask)
+ goto skip_poll;
+ do {
+ qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+ if (!(data & poll_mask))
+ break;
+ msleep(1);
+ time_out++;
+ } while (time_out <= poll_to);
+
+ if (time_out > poll_to) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return -EINVAL;
+ }
+skip_poll:
+ addr = l2->read_addr;
+ cnt = l2->read_addr_num;
+ while (cnt) {
+ qlcnic_read_dump_reg(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l2->read_addr_stride;
+ cnt--;
+ }
+ val += l2->stride;
+ }
+ return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 addr, data, test, ret = 0;
+ int i, reg_read;
+ struct __mem *mem = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ reg_read = mem->size;
+ addr = mem->addr;
+ /* check for data size of multiple of 16 and 16 byte alignment */
+ if ((addr & 0xf) || (reg_read%16)) {
+ dev_info(&adapter->pdev->dev,
+ "Unaligned memory addr:0x%x size:0x%x\n",
+ addr, reg_read);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ while (reg_read != 0) {
+ qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
+ qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
+ qlcnic_write_dump_reg(MIU_TEST_CTR, base,
+ TA_CTL_ENABLE | TA_CTL_START);
+
+ for (i = 0; i < MAX_CTL_CHECK; i++) {
+ qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+ if (!(test & TA_CTL_BUSY))
+ break;
+ }
+ if (i == MAX_CTL_CHECK) {
+ if (printk_ratelimit()) {
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
+ &data);
+ *buffer++ = cpu_to_le32(data);
+ }
+ addr += 16;
+ reg_read -= 16;
+ ret += 16;
+ }
+out:
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return mem->size;
+}
+
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ return 0;
+}
+
+static const struct qlcnic_dump_operations fw_dump_ops[] = {
+ { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
+ { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
+ { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
+ { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
+ { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
+ { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
+ { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
+ { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
+};
+
+/* Walk the template and collect dump for each entry in the dump template */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+ u32 size)
+{
+ int ret = 1;
+ if (size != entry->hdr.cap_size) {
+ dev_info(dev,
+ "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+ dev_info(dev, "Aborting further dump capture\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ struct qlcnic_dump_entry *entry;
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not capturing dump\n");
+ return -EIO;
+ }
+ /* Calculate the size for dump data area only */
+ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+ if (i & tmpl_hdr->drv_cap_mask)
+ dump_size += tmpl_hdr->cap_sizes[k];
+ if (!dump_size)
+ return -EIO;
+
+ fw_dump->data = vzalloc(dump_size);
+ if (!fw_dump->data) {
+ dev_info(&adapter->pdev->dev,
+ "Unable to allocate (%d KB) for fw dump\n",
+ dump_size / 1024);
+ return -ENOMEM;
+ }
+ buffer = fw_dump->data;
+ fw_dump->size = dump_size;
+ no_entries = tmpl_hdr->num_entries;
+ ops_cnt = ARRAY_SIZE(fw_dump_ops);
+ entry_offset = tmpl_hdr->offset;
+ tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+ tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+ for (i = 0; i < no_entries; i++) {
+ entry = (void *)tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+ /* Find the handler for this entry */
+ ops_index = 0;
+ while (ops_index < ops_cnt) {
+ if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+ break;
+ ops_index++;
+ }
+ if (ops_index == ops_cnt) {
+ dev_info(&adapter->pdev->dev,
+ "Invalid entry type %d, exiting dump\n",
+ entry->hdr.type);
+ goto error;
+ }
+ /* Collect dump for this entry */
+ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+ if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+ dump))
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ buf_offset += entry->hdr.cap_size;
+ entry_offset += entry->hdr.offset;
+ buffer = fw_dump->data + buf_offset;
+ }
+ if (dump_size != buf_offset) {
+ dev_info(&adapter->pdev->dev,
+ "Captured(%d) and expected size(%d) do not match\n",
+ buf_offset, dump_size);
+ goto error;
+ } else {
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+ adapter->netdev->name);
+ dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+ fw_dump->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+ return 0;
+ }
+error:
+ vfree(fw_dump->data);
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644
index 000000000000..341d37c867ff
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -0,0 +1,960 @@
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+ u8 *state, u8 *rate)
+{
+ *rate = LSB(beacon);
+ *state = MSB(beacon);
+
+ QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+ if (!*state) {
+ *rate = __QLCNIC_MAX_LED_RATE;
+ return 0;
+ } else if (*state > __QLCNIC_MAX_LED_STATE) {
+ return -EINVAL;
+ }
+
+ if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+ u16 beacon;
+ u8 b_state, b_rate;
+ int err;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(dev,
+ "LED test not supported in non privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (len != sizeof(u16))
+ return QL_STATUS_INVALID_PARAM;
+
+ memcpy(&beacon, buf, sizeof(u16));
+ err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+ if (err)
+ return err;
+
+ if (adapter->ahw->beacon_state == b_state)
+ return len;
+
+ rtnl_lock();
+
+ if (!adapter->ahw->beacon_state)
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+ if (err)
+ goto out;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ err = qlcnic_config_led(adapter, b_state, b_rate);
+
+ if (!err) {
+ err = len;
+ adapter->ahw->beacon_state = b_state;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+ qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+
+ out:
+ if (!adapter->ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+ rtnl_unlock();
+
+ return err;
+}
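A hypothetical user-space sketch of driving this attribute: the store handler above accepts exactly sizeof(u16), with the blink rate in the low byte and the LED state in the high byte. The sysfs path and the state/rate values below are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t state = 1, rate = 1;	/* assumed to be within the driver's limits */
	uint16_t beacon = (uint16_t)((state << 8) | rate);
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/beacon", "wb");	/* illustrative path */

	if (!f)
		return 1;
	fwrite(&beacon, sizeof(beacon), 1, f);	/* must be exactly two bytes */
	fclose(f);
	return 0;
}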
+
+static ssize_t qlcnic_show_beacon(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+ return size;
+}
+
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+ u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ src_pci_func = pm_cfg[i].pci_func;
+ dest_pci_func = pm_cfg[i].dest_npar;
+ if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
+ dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ s_esw_id = adapter->npars[src_pci_func].phy_port;
+ d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+ if (s_esw_id != d_esw_id)
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+
+}
+
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u32 id, action, pci_func;
+ int count, rem, i, ret;
+
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ rem = size % sizeof(struct qlcnic_pm_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+
+ ret = validate_pm_config(adapter, pm_cfg, count);
+ if (ret)
+ return ret;
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ action = !!pm_cfg[i].action;
+ id = adapter->npars[pci_func].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id, action,
+ pci_func);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ id = adapter->npars[pci_func].phy_port;
+ adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[pci_func].dest_npar = id;
+ }
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i;
+
+ if (size != sizeof(pm_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ pm_cfg[i].action = adapter->npars[i].enable_pm;
+ pm_cfg[i].dest_npar = 0;
+ pm_cfg[i].pci_func = i;
+ }
+ memcpy(buf, &pm_cfg, size);
+
+ return size;
+}
+
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+ u32 op_mode;
+ u8 pci_func;
+ int i;
+
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+ }
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+ QLCNIC_NON_PRIV_FUNC) {
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].mac_override != 1)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].promisc_mode != 1)
+ return QL_STATUS_INVALID_PARAM;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
+ if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+ return QL_STATUS_INVALID_PARAM;
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ default:
+ return QL_STATUS_INVALID_PARAM;
+ }
+ }
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
+ int count, rem, i, ret;
+ u8 pci_func, op_mode = 0;
+
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ rem = size % sizeof(struct qlcnic_esw_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ ret = validate_esw_config(adapter, esw_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+ }
+
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
+ }
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ npar = &adapter->npars[pci_func];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
+ }
+out:
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u8 i;
+
+ if (size != sizeof(esw_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ esw_cfg[i].pci_func = i;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ memcpy(buf, &esw_cfg, size);
+
+ return size;
+}
+
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_func_cfg *np_cfg,
+ int count)
+{
+ u8 pci_func, i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg *np_cfg;
+ int i, count, rem, ret;
+ u8 pci_func;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ rem = size % sizeof(struct qlcnic_npar_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+ ret = validate_npar_config(adapter, np_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count ; i++) {
+ pci_func = np_cfg[i].pci_func;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+ nic_info.pci_func = pci_func;
+ nic_info.min_tx_bw = np_cfg[i].min_bw;
+ nic_info.max_tx_bw = np_cfg[i].max_bw;
+ ret = qlcnic_set_nic_info(adapter, &nic_info);
+ if (ret)
+ return ret;
+ adapter->npars[i].min_bw = nic_info.min_tx_bw;
+ adapter->npars[i].max_bw = nic_info.max_tx_bw;
+ }
+
+ return size;
+
+}
+
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i, ret;
+
+ if (size != sizeof(np_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (ret)
+ return ret;
+
+ np_cfg[i].pci_func = i;
+ np_cfg[i].op_mode = (u8)nic_info.op_mode;
+ np_cfg[i].port_num = nic_info.phys_port;
+ np_cfg[i].fw_capab = nic_info.capabilities;
+ np_cfg[i].min_bw = nic_info.min_tx_bw;
+ np_cfg[i].max_bw = nic_info.max_tx_bw;
+ np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ }
+ memcpy(buf, &np_cfg, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
+ int i, ret;
+
+ if (size != sizeof(pci_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret) {
+ kfree(pci_info);
+ return ret;
+ }
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ pci_cfg[i].pci_func = pci_info[i].id;
+ pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].port_num = pci_info[i].default_port;
+ pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+ pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+ }
+ memcpy(buf, &pci_cfg, size);
+ kfree(pci_info);
+ return size;
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static struct device_attribute dev_attr_beacon = {
+ .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_beacon,
+ .store = qlcnic_store_beacon,
+};
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static struct bin_attribute bin_attr_npar_config = {
+ .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+ .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pci_config,
+ .write = NULL,
+};
+
+static struct bin_attribute bin_attr_port_stats = {
+ .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+ .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+ .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+ .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
+};
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+ dev_info(dev, "failed to create port stats sysfs entry");
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+
+ if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+ return;
+
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry");
+ if (device_create_file(dev, &dev_attr_beacon))
+ dev_info(dev, "failed to create beacon sysfs entry");
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry");
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ if (device_create_bin_file(dev, &bin_attr_npar_config))
+ dev_info(dev, "failed to create npar config sysfs entry");
+ if (device_create_bin_file(dev, &bin_attr_pm_config))
+ dev_info(dev, "failed to create pm config sysfs entry");
+ if (device_create_bin_file(dev, &bin_attr_esw_stats))
+ dev_info(dev, "failed to create eswitch stats sysfs entry");
+}
+
+void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+ if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+ return;
+ device_remove_bin_file(dev, &bin_attr_pci_config);
+ device_remove_file(dev, &dev_attr_beacon);
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ device_remove_bin_file(dev, &bin_attr_npar_config);
+ device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
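All of the binary nodes in this file follow the same shape: a struct bin_attribute whose read/write handlers first validate the transfer size (and often a privilege level or the diag flag) and only then copy a fixed-size structure in or out. A minimal sketch of that pattern, with purely illustrative names:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

struct example_cfg {
	u32 value;
};

static ssize_t example_read(struct file *file, struct kobject *kobj,
			    struct bin_attribute *attr, char *buf,
			    loff_t offset, size_t size)
{
	struct example_cfg cfg = { .value = 42 };

	if (size != sizeof(cfg))	/* reject partial or oversized reads */
		return -EINVAL;
	memcpy(buf, &cfg, size);
	return size;
}

static struct bin_attribute bin_attr_example = {
	.attr = { .name = "example", .mode = S_IRUGO },
	.size = 0,
	.read = example_read,
};

Such a node would be registered with device_create_bin_file() and torn down with device_remove_bin_file(), exactly as qlcnic_create_diag_entries() and qlcnic_remove_diag_entries() do above.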
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 58185b604b72..10093f0c4c0f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -86,7 +86,7 @@ exit:
}
/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -364,7 +364,7 @@ exit:
/* Read the 400 xgmac control/statistics registers
* skipping unused locations.
*/
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
unsigned int other_function)
{
int status = 0;
@@ -405,7 +405,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
return status;
}
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
int status = 0;
int i;
@@ -423,7 +423,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
return status;
}
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
{
int i;
@@ -434,7 +434,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
}
}
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
{
int i, status;
u32 value[3];
@@ -471,7 +471,7 @@ err:
return status;
}
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
{
int status;
u32 value, i;
@@ -496,7 +496,7 @@ err:
}
/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
{
u32 i;
int status;
@@ -515,7 +515,7 @@ end:
}
/* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
u32 offset, u32 count)
{
int i, status = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b262d6156816..3e73742024b0 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4491,8 +4491,8 @@ static void ql_release_all(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-static int __devinit ql_init_device(struct pci_dev *pdev,
- struct net_device *ndev, int cards_found)
+static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
+ int cards_found)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
@@ -4656,8 +4656,8 @@ static void ql_timer(unsigned long data)
mod_timer(&qdev->timer, jiffies + (5*HZ));
}
-static int __devinit qlge_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_entry)
+static int qlge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
{
struct net_device *ndev = NULL;
struct ql_adapter *qdev = NULL;
@@ -4678,7 +4678,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
@@ -4729,7 +4729,7 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
return ql_clean_inbound_rx_ring(rx_ring, budget);
}
-static void __devexit qlge_remove(struct pci_dev *pdev)
+static void qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -4921,7 +4921,7 @@ static struct pci_driver qlge_driver = {
.name = DRV_NAME,
.id_table = qlge_pci_tbl,
.probe = qlge_probe,
- .remove = __devexit_p(qlge_remove),
+ .remove = qlge_remove,
#ifdef CONFIG_PM
.suspend = qlge_suspend,
.resume = qlge_resume,
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 557a26545d75..63c13125db6c 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -206,7 +206,7 @@ struct r6040_private {
int old_duplex;
};
-static char version[] __devinitdata = DRV_NAME
+static char version[] = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
@@ -1073,8 +1073,7 @@ static int r6040_mii_probe(struct net_device *dev)
return 0;
}
-static int __devinit r6040_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct r6040_private *lp;
@@ -1246,7 +1245,7 @@ err_out:
return err;
}
-static void __devexit r6040_remove_one(struct pci_dev *pdev)
+static void r6040_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r6040_private *lp = netdev_priv(dev);
@@ -1274,7 +1273,7 @@ static struct pci_driver r6040_driver = {
.name = DRV_NAME,
.id_table = r6040_pci_tbl,
.probe = r6040_init_one,
- .remove = __devexit_p(r6040_remove_one),
+ .remove = r6040_remove_one,
};
module_pci_driver(r6040_driver);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 609125a249d9..5ac93323a40c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -577,28 +577,30 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct cp_private *cp;
+ int handled = 0;
u16 status;
if (unlikely(dev == NULL))
return IRQ_NONE;
cp = netdev_priv(dev);
+ spin_lock(&cp->lock);
+
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
- return IRQ_NONE;
+ goto out_unlock;
+
+ handled = 1;
netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
- spin_lock(&cp->lock);
-
/* close possible race's with dev_close */
if (unlikely(!netif_running(dev))) {
cpw16(IntrMask, 0);
- spin_unlock(&cp->lock);
- return IRQ_HANDLED;
+ goto out_unlock;
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
@@ -612,7 +614,6 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
if (status & LinkChg)
mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
- spin_unlock(&cp->lock);
if (status & PciErr) {
u16 pci_status;
@@ -625,7 +626,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
/* TODO: reset hardware */
}
- return IRQ_HANDLED;
+out_unlock:
+ spin_unlock(&cp->lock);
+
+ return IRQ_RETVAL(handled);
}
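A minimal sketch (not 8139cp's code) of the interrupt-handler shape this hunk moves to: take the device lock before reading the status, remember whether anything was actually handled, and report that through IRQ_RETVAL() from a single unlock path so races with dev_close are covered by the same lock.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct toy_priv {
	spinlock_t lock;
	int (*read_status)(struct toy_priv *p);
	void (*service)(struct toy_priv *p, int status);
};

static irqreturn_t toy_isr(int irq, void *dev_id)
{
	struct toy_priv *p = dev_id;
	int handled = 0;
	int status;

	spin_lock(&p->lock);
	status = p->read_status(p);
	if (status) {
		handled = 1;
		p->service(p, status);	/* ack and process while holding the lock */
	}
	spin_unlock(&p->lock);

	return IRQ_RETVAL(handled);
}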
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -648,6 +652,7 @@ static void cp_tx (struct cp_private *cp)
{
unsigned tx_head = cp->tx_head;
unsigned tx_tail = cp->tx_tail;
+ unsigned bytes_compl = 0, pkts_compl = 0;
while (tx_tail != tx_head) {
struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +671,9 @@ static void cp_tx (struct cp_private *cp)
le32_to_cpu(txd->opts1) & 0xffff,
PCI_DMA_TODEVICE);
+ bytes_compl += skb->len;
+ pkts_compl++;
+
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +705,7 @@ static void cp_tx (struct cp_private *cp)
cp->tx_tail = tx_tail;
+ netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(cp->dev);
}
@@ -843,6 +852,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
wmb();
}
cp->tx_head = entry;
+
+ netdev_sent_queue(dev, skb->len);
netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +948,8 @@ static void cp_stop_hw (struct cp_private *cp)
cp->rx_tail = 0;
cp->tx_head = cp->tx_tail = 0;
+
+ netdev_reset_queue(cp->dev);
}
static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +970,38 @@ static void cp_reset_hw (struct cp_private *cp)
static inline void cp_start_hw (struct cp_private *cp)
{
+ dma_addr_t ring_dma;
+
cpw16(CpCmd, cp->cpcmd);
+
+ /*
+ * These (at least TxRingAddr) need to be configured after the
+ * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+ * (C+ Command Register) recommends that these and more be configured
+ * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+ * it's been observed that the TxRingAddr is actually reset to garbage
+ * when C+ mode Tx is enabled in CpCmd.
+ */
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ /*
+ * Strictly speaking, the datasheet says this should be enabled
+ * *before* setting the descriptor addresses. But what, then, would
+ * prevent it from doing DMA to random unconfigured addresses?
+ * This variant appears to work fine.
+ */
cpw8(Cmd, RxOn | TxOn);
+
+ netdev_reset_queue(cp->dev);
}
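The double 16-bit shift used above for the ring address is the usual way to extract the high half of a dma_addr_t: a single shift by 32 would be undefined behaviour when dma_addr_t is only 32 bits wide. A tiny illustrative helper (not part of the patch):

#include <linux/types.h>

static inline void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = addr & 0xffffffff;
	*hi = (addr >> 16) >> 16;	/* safe even on 32-bit dma_addr_t */
}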
static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1012,6 @@ static void cp_enable_irq(struct cp_private *cp)
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
- dma_addr_t ring_dma;
cp_reset_hw(cp);
@@ -992,17 +1034,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config5, cpr8(Config5) & PMEStatus);
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1197,18 +1228,16 @@ static void cp_tx_timeout(struct net_device *dev)
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
+ cp_enable_irq(cp);
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
}
-#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
struct cp_private *cp = netdev_priv(dev);
- int rc;
- unsigned long flags;
/* check for invalid MTU, according to hardware limits */
if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1221,22 +1250,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- spin_lock_irqsave(&cp->lock, flags);
-
- cp_stop_hw(cp); /* stop h/w and free rings */
- cp_clean_rings(cp);
-
+ /* network IS up, close it, reset MTU, and come up again. */
+ cp_close(dev);
dev->mtu = new_mtu;
- cp_set_rxbufsize(cp); /* set new rx buf size */
-
- rc = cp_init_rings(cp); /* realloc and restart h/w */
- cp_start_hw(cp);
-
- spin_unlock_irqrestore(&cp->lock, flags);
-
- return rc;
+ cp_set_rxbufsize(cp);
+ return cp_open(dev);
}
-#endif /* BROKEN */
static const char mii_2_8139_map[8] = {
BasicModeCtrl,
@@ -1812,9 +1831,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
-#ifdef BROKEN
.ndo_change_mtu = cp_change_mtu,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,
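The netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added to 8139cp above are the standard byte-queue-limit (BQL) hooks: report bytes when a packet is handed to the hardware, report packets and bytes when Tx descriptors are reclaimed, and reset the accounting whenever the Tx ring is emptied. A minimal sketch of the pairing (the helper names are illustrative; the netdev_*_queue calls are the real API):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* At ndo_start_xmit time, after the descriptor is handed to the NIC: */
static void bql_on_xmit(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
}

/* In the Tx completion path, accumulate and report once per pass: */
static void bql_on_tx_clean(struct net_device *dev, struct sk_buff **done,
			    unsigned int n)
{
	unsigned int pkts = 0, bytes = 0, i;

	for (i = 0; i < n; i++) {
		bytes += done[i]->len;
		pkts++;
	}
	netdev_completed_queue(dev, pkts, bytes);
}

/* Whenever the ring is torn down or the hardware is stopped: */
static void bql_on_ring_reset(struct net_device *dev)
{
	netdev_reset_queue(dev);
}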
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ed7add23c12..5dc161630127 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -228,7 +228,7 @@ typedef enum {
static const struct {
const char *name;
u32 hw_flags;
-} board_info[] __devinitconst = {
+} board_info[] = {
{ "RealTek RTL8139", RTL8139_CAPS },
{ "RealTek RTL8129", RTL8129_CAPS },
};
@@ -748,7 +748,7 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
}
-static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
+static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
{
struct device *d = &pdev->dev;
void __iomem *ioaddr;
@@ -935,8 +935,8 @@ static const struct net_device_ops rtl8139_netdev_ops = {
.ndo_set_features = rtl8139_set_features,
};
-static int __devinit rtl8139_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int rtl8139_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct rtl8139_private *tp;
@@ -1103,7 +1103,7 @@ err_out:
}
-static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
+static void rtl8139_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *tp = netdev_priv(dev);
@@ -1141,7 +1141,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
-static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+static int read_eeprom(void __iomem *ioaddr, int location, int addr_len)
{
int i;
unsigned retval = 0;
@@ -2652,7 +2652,7 @@ static struct pci_driver rtl8139_pci_driver = {
.name = DRV_NAME,
.id_table = rtl8139_pci_tbl,
.probe = rtl8139_init_one,
- .remove = __devexit_p(rtl8139_remove_one),
+ .remove = rtl8139_remove_one,
#ifdef CONFIG_PM
.suspend = rtl8139_suspend,
.resume = rtl8139_resume,
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e02f04d7f3ad..9f2d416de750 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -175,8 +175,7 @@ struct net_local {
unsigned int tx_unit_busy:1;
unsigned char re_tx, /* Number of packet retransmissions. */
addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
- pac_cnt_in_tx_buf,
- chip_type;
+ pac_cnt_in_tx_buf;
};
/* This code, written by wwc@super.org, resets the adapter every
@@ -339,7 +338,6 @@ static int __init atp_probe1(long ioaddr)
write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
lp = netdev_priv(dev);
- lp->chip_type = RTL8002;
lp->addr_mode = CMR2h_Normal;
spin_lock_init(&lp->lock);
@@ -852,7 +850,7 @@ net_close(struct net_device *dev)
* Set or clear the multicast filter for this adapter.
*/
-static void set_rx_mode_8002(struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
@@ -864,58 +862,6 @@ static void set_rx_mode_8002(struct net_device *dev)
write_reg_high(ioaddr, CMR2, lp->addr_mode);
}
-static void set_rx_mode_8012(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
- int i;
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- new_mode = CMR2h_PROMISC;
- } else if ((netdev_mc_count(dev) > 1000) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- new_mode = CMR2h_Normal;
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
- }
- new_mode = CMR2h_Normal;
- }
- lp->addr_mode = new_mode;
- write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
- for (i = 0; i < 8; i++)
- write_reg_byte(ioaddr, i, mc_filter[i]);
- if (net_debug > 2 || 1) {
- lp->addr_mode = 1;
- printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
- dev->name, lp->addr_mode);
- for (i = 0; i < 8; i++)
- printk(" %2.2x", mc_filter[i]);
- printk(".\n");
- }
-
- write_reg_high(ioaddr, CMR2, lp->addr_mode);
- write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
-}
-
-static void set_rx_mode(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- if (lp->chip_type == RTL8002)
- return set_rx_mode_8002(dev);
- else
- return set_rx_mode_8012(dev);
-}
-
-
static int __init atp_init_module(void) {
if (debug) /* Emit version even if no cards detected. */
printk(KERN_INFO "%s", version);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642c2c2f..040b13739947 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -16,8 +16,6 @@ struct rx_header {
#define PAR_STATUS 1
#define PAR_CONTROL 2
-enum chip_type { RTL8002, RTL8012 };
-
#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
#define Ctrl_HNibRead 0
#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33d4349..11702324a071 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -78,24 +78,18 @@ static const int multicast_filter_limit = 32;
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
-#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
-#define RX_BUF_SIZE 1536 /* Rx Buffer size */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
#define RTL8169_PHY_TIMEOUT (10*HZ)
-#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
-#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
-#define RTL_EEPROM_SIG_ADDR 0x0000
-
/* write/read MMIO register */
#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -456,6 +450,7 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
+#define FORCE_CLK (1 << 15) /* force clock request */
};
enum rtl_register_content {
@@ -519,6 +514,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -539,6 +535,7 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+ ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -687,6 +684,7 @@ enum features {
RTL_FEATURE_WOL = (1 << 0),
RTL_FEATURE_MSI = (1 << 1),
RTL_FEATURE_GMII = (1 << 2),
+ RTL_FEATURE_FW_LOADED = (1 << 3),
};
struct rtl8169_counters {
@@ -1816,8 +1814,7 @@ static int rtl8169_set_features(struct net_device *dev,
}
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
- struct sk_buff *skb)
+static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
{
return (vlan_tx_tag_present(skb)) ?
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
@@ -1829,8 +1826,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
if (opts2 & RxVlanTag)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
-
- desc->opts2 = 0;
}
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2394,8 +2389,10 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw))
+ if (!IS_ERR_OR_NULL(rtl_fw)) {
rtl_phy_write_fw(tp, rtl_fw);
+ tp->features |= RTL_FEATURE_FW_LOADED;
+ }
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2406,6 +2403,31 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
+static void r810x_aldps_disable(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+}
+
+static void r810x_aldps_enable(struct rtl8169_private *tp)
+{
+ if (!(tp->features & RTL_FEATURE_FW_LOADED))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x8310);
+}
+
+static void r8168_aldps_enable_1(struct rtl8169_private *tp)
+{
+ if (!(tp->features & RTL_FEATURE_FW_LOADED))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3096,6 +3118,23 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, 0x0000);
}
+static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
+{
+ const u16 w[] = {
+ addr[0] | (addr[1] << 8),
+ addr[2] | (addr[3] << 8),
+ addr[4] | (addr[5] << 8)
+ };
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
+ };
+
+ rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
+}
+
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3178,6 +3217,11 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
+
+ /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
+ rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
@@ -3250,6 +3294,8 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3257,6 +3303,8 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3354,6 +3402,8 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_aldps_enable_1(tp);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3439,21 +3489,19 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ r810x_aldps_enable(tp);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(20);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
@@ -3463,6 +3511,8 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
+
+ r810x_aldps_enable(tp);
}
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3475,9 +3525,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
+ r810x_aldps_disable(tp);
rtl_apply_firmware(tp);
@@ -3485,6 +3533,8 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ r810x_aldps_enable(tp);
}
static void rtl_hw_phy_config(struct net_device *dev)
@@ -3708,33 +3758,19 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
void __iomem *ioaddr = tp->mmio_addr;
- u32 high;
- u32 low;
-
- low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
- high = addr[4] | (addr[5] << 8);
rtl_lock_work(tp);
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC4, high);
+ RTL_W32(MAC4, addr[4] | addr[5] << 8);
RTL_R32(MAC4);
- RTL_W32(MAC0, low);
+ RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
RTL_R32(MAC0);
- if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
- const struct exgmac_reg e[] = {
- { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
- { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
- { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
- { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
- low >> 16 },
- };
-
- rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
- }
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34)
+ rtl_rar_exgmac_set(tp, addr);
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3796,7 +3832,7 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
}
}
-static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -4048,7 +4084,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
rtl_generic_op(tp, tp->pll_power_ops.up);
}
-static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
+static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
struct pll_power_ops *ops = &tp->pll_power_ops;
@@ -4242,7 +4278,7 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
-static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
+static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
struct jumbo_ops *ops = &tp->jumbo_ops;
@@ -4683,7 +4719,7 @@ static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(CSIDR) : ~0;
}
-static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
+static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
struct csi_ops *ops = &tp->csi_ops;
@@ -5015,8 +5051,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev);
-
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
@@ -5025,7 +5059,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5050,13 +5085,12 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
- rtl_disable_clock_request(pdev);
-
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
+ RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5113,8 +5147,10 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5330,6 +5366,9 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -5355,6 +5394,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5376,7 +5418,10 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W32(MISC,
+ (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
+ RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
+ RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
@@ -5774,7 +5819,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
opts[0] = DescOwn;
rtl8169_tso_csum(tp, skb, opts);
@@ -6017,8 +6062,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
!(status & (RxRWT | RxFOVF)) &&
(dev->features & NETIF_F_RXALL))
goto process_pkt;
-
- rtl8169_mark_to_asic(desc, rx_buf_sz);
} else {
struct sk_buff *skb;
dma_addr_t addr;
@@ -6039,16 +6082,14 @@ process_pkt:
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- continue;
+ goto release_descriptor;
}
skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
tp, pkt_size, addr);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
if (!skb) {
dev->stats.rx_dropped++;
- continue;
+ goto release_descriptor;
}
rtl8169_rx_csum(skb, status);
@@ -6064,13 +6105,10 @@ process_pkt:
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
+release_descriptor:
+ desc->opts2 = 0;
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
}
count = cur_rx - tp->cur_rx;
@@ -6569,7 +6607,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
pm_runtime_put_noidle(d);
}
-static void __devexit rtl_remove_one(struct pci_dev *pdev)
+static void rtl_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
@@ -6689,7 +6727,7 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
-static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
+static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
u32 data;
@@ -6723,7 +6761,7 @@ static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
return;
}
-static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
+static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_40:
@@ -6736,7 +6774,7 @@ static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
}
}
-static int __devinit
+static int
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -6987,20 +7025,9 @@ static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
.probe = rtl_init_one,
- .remove = __devexit_p(rtl_remove_one),
+ .remove = rtl_remove_one,
.shutdown = rtl_shutdown,
.driver.pm = RTL8169_PM_OPS,
};
-static int __init rtl8169_init_module(void)
-{
- return pci_register_driver(&rtl8169_pci_driver);
-}
-
-static void __exit rtl8169_cleanup_module(void)
-{
- pci_unregister_driver(&rtl8169_pci_driver);
-}
-
-module_init(rtl8169_init_module);
-module_exit(rtl8169_cleanup_module);
+module_pci_driver(rtl8169_pci_driver);
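The r8169 hunk above drops the hand-written init/exit pair in favour of module_pci_driver(). For context, the macro (paraphrased from <linux/pci.h>, not part of this patch) simply feeds the driver struct to module_driver(), which emits the same module_init()/module_exit() boilerplate that was removed:

	/* Paraphrased from <linux/pci.h>; module_driver() generates the
	 * __init/__exit registration functions on the driver's behalf.
	 */
	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)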
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8bfea0524dd..3d705862bd7d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2286,7 +2286,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
for (i = 0; i < PHY_MAX_ADDR; i++)
mdp->mii_bus->irq[i] = PHY_POLL;
- /* regist mdio bus */
+ /* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
if (ret)
goto out_free_irq;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 2ed3ab4b3c2d..72fc57dd084d 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -954,7 +954,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev)
return st;
}
-static int __devinit s6gmac_probe(struct platform_device *pdev)
+static int s6gmac_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct s6gmac *pd;
@@ -1030,7 +1030,7 @@ errirq:
return res;
}
-static int __devexit s6gmac_remove(struct platform_device *pdev)
+static int s6gmac_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
if (dev) {
@@ -1046,7 +1046,7 @@ static int __devexit s6gmac_remove(struct platform_device *pdev)
static struct platform_driver s6gmac_driver = {
.probe = s6gmac_probe,
- .remove = __devexit_p(s6gmac_remove),
+ .remove = s6gmac_remove,
.driver = {
.name = "s6gmac",
.owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 6a40dd03a32f..3aca57853ed4 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -67,7 +67,7 @@
#include <asm/ecard.h>
#include <asm/io.h>
-static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
+static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
#include "ether3.h"
@@ -194,7 +194,7 @@ static inline void ether3_ledon(struct net_device *dev)
* Read the ethernet address string from the on board rom.
* This is an ascii string!!!
*/
-static int __devinit
+static int
ether3_addr(char *addr, struct expansion_card *ec)
{
struct in_chunk_dir cd;
@@ -219,7 +219,7 @@ ether3_addr(char *addr, struct expansion_card *ec)
/* --------------------------------------------------------------------------- */
-static int __devinit
+static int
ether3_ramtest(struct net_device *dev, unsigned char byte)
{
unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
@@ -268,7 +268,7 @@ ether3_ramtest(struct net_device *dev, unsigned char byte)
/* ------------------------------------------------------------------------------- */
-static int __devinit ether3_init_2(struct net_device *dev)
+static int ether3_init_2(struct net_device *dev)
{
int i;
@@ -399,12 +399,6 @@ ether3_probe_bus_16(struct net_device *dev, int val)
static int
ether3_open(struct net_device *dev)
{
- if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
- dev->name);
- return -EINVAL;
- }
-
if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
return -EAGAIN;
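The open-time MAC check removed above becomes redundant once a driver wires up the generic address validation hook, since dev_open() calls ndo_validate_addr before ndo_open ever runs. A minimal, hypothetical ops fragment (assuming the driver uses the stock helper, as most drivers touched in this series do):

	/* Hypothetical ops fragment, for illustration only: with
	 * .ndo_validate_addr set to the generic helper, the core rejects an
	 * invalid MAC before ndo_open() runs, so a driver-local
	 * is_valid_ether_addr() check in open() adds nothing.
	 */
	static const struct net_device_ops example_netdev_ops = {
		.ndo_validate_addr	= eth_validate_addr,
	};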
@@ -748,7 +742,7 @@ static void ether3_tx(struct net_device *dev)
}
}
-static void __devinit ether3_banner(void)
+static void ether3_banner(void)
{
static unsigned version_printed = 0;
@@ -767,7 +761,7 @@ static const struct net_device_ops ether3_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit
+static int
ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
{
const struct ether3_data *data = id->data;
@@ -864,7 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit ether3_remove(struct expansion_card *ec)
+static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
@@ -894,7 +888,7 @@ static const struct ecard_id ether3_ids[] = {
static struct ecard_driver ether3_driver = {
.probe = ether3_probe,
- .remove = __devexit_p(ether3_remove),
+ .remove = ether3_remove,
.id_table = ether3_ids,
.drv = {
.name = "ether3",
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 4d15bf413bdc..0fde9ca28269 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit sgiseeq_probe(struct platform_device *pdev)
+static int sgiseeq_probe(struct platform_device *pdev)
{
struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
struct hpc3_regs *hpcregs = pd->hpc;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 25906c1d1b15..435b4f1e3488 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,10 +1,11 @@
config SFC
tristate "Solarflare SFC4000/SFC9000-family support"
- depends on PCI && INET
+ depends on PCI
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
+ select PTP_1588_CLOCK
---help---
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC4000 and SFC9000-family controllers.
@@ -34,10 +35,3 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
-config SFC_PTP
- bool "Solarflare SFC9000-family PTP support"
- depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
- default y
- ---help---
- This enables support for the Precision Time Protocol (PTP)
- on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index e11f2ecf69d9..945bf06e69ef 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,9 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
falcon_xmac.o mcdi_mac.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o
+ mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
-sfc-$(CONFIG_SFC_PTP) += ptp.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4f86d0cd516a..bf57b3cb16ab 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,8 +106,8 @@ static struct workqueue_struct *reset_workqueue;
*
* This is only used in MSI-X interrupt mode
*/
-static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0444);
+static bool separate_tx_channels;
+module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
@@ -160,8 +160,8 @@ static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
-static int phy_flash_cfg;
-module_param(phy_flash_cfg, int, 0644);
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 8000;
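The sfc hunk above retypes separate_tx_channels and phy_flash_cfg as bool so they match module_param(..., bool, ...), which in current kernels expects a real bool variable rather than an int/uint. A minimal sketch of the pattern, with an invented parameter name:

	#include <linux/module.h>

	/* Hypothetical parameter, not part of the sfc driver. */
	static bool example_enable_foo;
	module_param(example_enable_foo, bool, 0444);
	MODULE_PARM_DESC(example_enable_foo, "Enable the hypothetical foo feature");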
@@ -2279,7 +2279,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
RESET_TYPE(method));
- netif_device_detach(efx->net_dev);
+ efx_device_detach_sync(efx);
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
@@ -2669,8 +2669,8 @@ static int efx_pci_probe_main(struct efx_nic *efx)
* transmission; this is left to the first time one of the network
* interfaces is brought up (i.e. efx_net_open).
*/
-static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *entry)
+static int efx_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
{
struct net_device *net_dev;
struct efx_nic *efx;
@@ -2758,7 +2758,7 @@ static int efx_pm_freeze(struct device *dev)
if (efx->state != STATE_DISABLED) {
efx->state = STATE_UNINIT;
- netif_device_detach(efx->net_dev);
+ efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index f11170bc48bf..50247dfe8f57 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -163,4 +163,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
extern void efx_link_set_advertising(struct efx_nic *efx, u32);
extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+ struct net_device *dev = efx->net_dev;
+
+ /* Lock/freeze all TX queues so that we can be sure the
+ * TX scheduler is stopped when we're done and before
+ * netif_device_present() becomes false.
+ */
+ netif_tx_lock(dev);
+ netif_device_detach(dev);
+ netif_tx_unlock(dev);
+}
+
#endif /* EFX_EFX_H */
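A rough usage sketch of the new helper (hypothetical caller, not taken from this patch): because netif_device_detach() is issued under the TX lock, no CPU can still be inside ndo_start_xmit() having seen the device as present once the helper returns.

	/* Hypothetical reset-style caller, for illustration only. */
	static void example_quiesce_and_reset(struct efx_nic *efx)
	{
		efx_device_detach_sync(efx);	/* TX stopped, in-flight xmit drained */
		/* ... reset hardware, reinitialise queues ... */
		netif_device_attach(efx->net_dev);	/* allow TX again */
	}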
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 90f078eff8e6..8e61cd06f66a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
/* MAC address mask including only MC flag */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
+#define PORT_FULL_MASK ((__force __be16)~0)
+
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
{
@@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
&ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
- ip_mask->ip4src = ~0;
- ip_mask->psrc = ~0;
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ ip_mask->psrc = PORT_FULL_MASK;
}
rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
- ip_mask->ip4dst = ~0;
- ip_mask->pdst = ~0;
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ ip_mask->pdst = PORT_FULL_MASK;
return rc;
}
@@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
/* Check for unsupported extensions */
if ((rule->flow_type & FLOW_EXT) &&
- (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+ (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
rule->m_ext.data[1]))
return -EINVAL;
@@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
- if ((__force u32)~ip_mask->ip4dst |
- (__force u16)~ip_mask->pdst)
+ if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
+ ip_mask->pdst == PORT_FULL_MASK))
return -EINVAL;
/* all or none of source, */
- if ((ip_mask->ip4src | ip_mask->psrc) &&
- ((__force u32)~ip_mask->ip4src |
- (__force u16)~ip_mask->psrc))
+ if ((ip_mask->ip4src || ip_mask->psrc) &&
+ !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
+ ip_mask->psrc == PORT_FULL_MASK))
return -EINVAL;
/* and nothing else */
- if (ip_mask->tos | rule->m_ext.vlan_tci)
+ if (ip_mask->tos || rule->m_ext.vlan_tci)
return -EINVAL;
if (ip_mask->ip4src)
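The intent of the rewritten checks is easier to see once the "fully masked" tests are given names. A small sketch under the same IP4_ADDR_FULL_MASK/PORT_FULL_MASK definitions (helper names are invented, struct ethtool_tcpip4_spec comes from <linux/ethtool.h>):

	/* Hypothetical helpers mirroring the rewritten rule checks. */
	static bool example_dst_fully_masked(const struct ethtool_tcpip4_spec *m)
	{
		return m->ip4dst == IP4_ADDR_FULL_MASK && m->pdst == PORT_FULL_MASK;
	}

	static bool example_src_all_or_none(const struct ethtool_tcpip4_spec *m)
	{
		return !(m->ip4src || m->psrc) ||
		       (m->ip4src == IP4_ADDR_FULL_MASK && m->psrc == PORT_FULL_MASK);
	}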
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 12b573a8e82b..49bcd196e10d 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1792,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
+ .finish_flush = efx_port_dummy_op_void,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
@@ -1834,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
+ .finish_flush = efx_port_dummy_op_void,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 751d1ec112cc..96759aee1c6c 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -22,22 +22,21 @@
*
* Notes on locking strategy:
*
- * Most CSRs are 128-bit (oword) and therefore cannot be read or
- * written atomically. Access from the host is buffered by the Bus
- * Interface Unit (BIU). Whenever the host reads from the lowest
- * address of such a register, or from the address of a different such
- * register, the BIU latches the register's value. Subsequent reads
- * from higher addresses of the same register will read the latched
- * value. Whenever the host writes part of such a register, the BIU
- * collects the written value and does not write to the underlying
- * register until all 4 dwords have been written. A similar buffering
- * scheme applies to host access to the NIC's 64-bit SRAM.
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits. Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written. A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
*
- * Access to different CSRs and 64-bit SRAM words must be serialised,
- * since interleaved access can result in lost writes or lost
- * information from read-to-clear fields. We use efx_nic::biu_lock
- * for this. (We could use separate locks for read and write, but
- * this is not normally a performance bottleneck.)
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes. We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock. This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
*
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* 128-bit but are special-cased in the BIU to avoid the need for
@@ -204,20 +203,6 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int index)
-{
- efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int index)
-{
- efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
-}
-
/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000
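The locking note rewritten above is about writes that the BIU assembles from four 32-bit stores. A condensed sketch of the serialised oword write path, loosely modelled on the sfc accessors (simplified; the real efx_writeo() also has a native 128-bit path and barriers):

	/* Simplified sketch only: four dword stores issued under biu_lock so
	 * another writer cannot interleave and corrupt the latched value.
	 */
	static void example_writeo(struct efx_nic *efx, const efx_oword_t *value,
				   unsigned int reg)
	{
		unsigned long flags;

		spin_lock_irqsave(&efx->biu_lock, flags);
		writel(le32_to_cpu(value->u32[0]), efx->membase + reg + 0);
		writel(le32_to_cpu(value->u32[1]), efx->membase + reg + 4);
		writel(le32_to_cpu(value->u32[2]), efx->membase + reg + 8);
		writel(le32_to_cpu(value->u32[3]), efx->membase + reg + 12);
		spin_unlock_irqrestore(&efx->biu_lock, flags);
	}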
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index aea43cbd0520..0095ce95150b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,7 +22,7 @@
**************************************************************************
*/
-#define MCDI_RPC_TIMEOUT 10 /*seconds */
+#define MCDI_RPC_TIMEOUT (10 * HZ)
#define MCDI_PDU(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int time, finish;
+ unsigned long time, finish;
unsigned int respseq, respcmd, error;
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int rc, spins;
@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
* and poll once a jiffy (approximately)
*/
spins = TICK_USEC;
- finish = get_seconds() + MCDI_RPC_TIMEOUT;
+ finish = jiffies + MCDI_RPC_TIMEOUT;
while (1) {
if (spins != 0) {
@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
schedule_timeout_uninterruptible(1);
}
- time = get_seconds();
+ time = jiffies;
rmb();
efx_readd(efx, &reg, pdu);
@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
break;
- if (time >= finish)
+ if (time_after(time, finish))
return -ETIMEDOUT;
}
@@ -207,7 +207,9 @@ out:
return 0;
}
-/* Test and clear MC-rebooted flag for this port/function */
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
@@ -223,6 +225,11 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
if (value == 0)
return 0;
+ /* MAC statistics have been cleared on the NIC; clear our copy
+ * so that efx_update_diff_stat() can continue to work.
+ */
+ memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
EFX_ZERO_DWORD(reg);
efx_writed(efx, &reg, addr);
@@ -250,7 +257,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
if (wait_event_timeout(
mcdi->wq,
atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
- msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+ MCDI_RPC_TIMEOUT) == 0)
return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -1216,7 +1223,7 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
count * sizeof(*qid), NULL, 0, NULL);
- WARN_ON(rc > 0);
+ WARN_ON(rc < 0);
kfree(qid);
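The MCDI changes above switch the timeout bookkeeping from wall-clock seconds to jiffies so that time_after() handles counter wrap correctly. A generic sketch of the idiom (all names invented, not sfc code):

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Hypothetical polling loop showing the jiffies + time_after() pattern. */
	static int example_poll_until_done(bool (*done)(void *ctx), void *ctx)
	{
		unsigned long timeout = jiffies + 10 * HZ;	/* ten seconds */

		while (!done(ctx)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			usleep_range(100, 200);
		}
		return 0;
	}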
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 576a31091165..2d756c1d7142 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -200,6 +200,7 @@ struct efx_tx_queue {
/* Members shared between paths and sometimes updated */
unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
+ atomic_t flush_outstanding;
};
/**
@@ -868,9 +869,7 @@ struct efx_nic {
struct work_struct peer_work;
#endif
-#ifdef CONFIG_SFC_PTP
struct efx_ptp_data *ptp_data;
-#endif
/* The following fields may be written more often */
@@ -909,6 +908,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * @finish_flush: Clean up after flushing the DMA queues
* @update_stats: Update statistics not provided by event handling
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
@@ -956,6 +956,7 @@ struct efx_nic_type {
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
void (*prepare_flush)(struct efx_nic *efx);
+ void (*finish_flush)(struct efx_nic *efx);
void (*update_stats)(struct efx_nic *efx);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aab7cacb2e34..0ad790cc473c 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -73,6 +73,8 @@
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
(_tx_queue)->queue)
+static void efx_magic_event(struct efx_channel *channel, u32 magic);
+
/**************************************************************************
*
* Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
buffer->entries = len / EFX_BUF_SIZE;
BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
- /* All zeros is a potentially valid event so memset to 0xff */
- memset(buffer->addr, 0xff, len);
-
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_flush_descq;
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
EFX_POPULATE_OWORD_2(tx_flush_descq,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
&& atomic_read(&efx->rxq_flush_pending) > 0));
}
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+ bool i = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment drain_pending as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
int rc = 0;
- efx->fc_disable++;
efx->type->prepare_flush(efx);
efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
timeout);
}
- if (atomic_read(&efx->drain_pending)) {
+ if (atomic_read(&efx->drain_pending) &&
+ !efx_check_tx_flush_complete(efx)) {
netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
"(rx %d+%d)\n", atomic_read(&efx->drain_pending),
atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
atomic_set(&efx->rxq_flush_outstanding, 0);
}
- efx->fc_disable--;
+ efx->type->finish_flush(efx);
return rc;
}
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
channel->eventq_read_ptr & channel->eventq_mask);
- efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
- channel->channel);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
-
- efx_magic_event(tx_queue->channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
}
}
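Both sites above (the flush-completion poll and the event handler) claim the outstanding flush exactly once via atomic_cmpxchg(), so the drain event is injected by whichever path wins and ignored by the other. A stripped-down sketch of the claim-once idiom (names invented):

	#include <linux/atomic.h>

	/* Hypothetical claim-once flag: whichever caller flips it from 1 to 0
	 * first gets to act; every later caller sees 0 and does nothing.
	 */
	static bool example_claim(atomic_t *outstanding)
	{
		return atomic_cmpxchg(outstanding, 1, 0) == 1;
	}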
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
efx->rx_indir_table[i]);
- efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
}
}
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
for (i = 0; i < table->rows; i++) {
switch (table->step) {
- case 4: /* 32-bit register or SRAM */
- efx_readd_table(efx, buf, table->offset, i);
+ case 4: /* 32-bit SRAM */
+ efx_readd(efx, buf, table->offset + 4 * i);
break;
case 8: /* 64-bit SRAM */
efx_sram_readq(efx,
efx->membase + table->offset,
buf, i);
break;
- case 16: /* 128-bit register */
+ case 16: /* 128-bit-readable register */
efx_reado_table(efx, buf, table->offset, i);
break;
case 32: /* 128-bit register, interleaved */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 438cef11f727..1b0003323498 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -252,7 +252,6 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
struct ethtool_ts_info;
-#ifdef CONFIG_SFC_PTP
extern void efx_ptp_probe(struct efx_nic *efx);
extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@ extern int efx_ptp_get_ts_info(struct net_device *net_dev,
extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-#else
-static inline void efx_ptp_probe(struct efx_nic *efx) {}
-static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
-{
- return -EOPNOTSUPP;
-}
-static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE);
- ts_info->phc_index = -1;
-
- return 0;
-}
-static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
- return false;
-}
-static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
- return NETDEV_TX_OK;
-}
-static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
-#endif
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
@@ -370,6 +344,8 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void siena_prepare_flush(struct efx_nic *efx);
+extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 9e0ad1b75c33..d780a0d096b4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -187,7 +187,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
- void *page_addr;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
@@ -207,12 +206,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
- page_addr = page_address(page);
- state = page_addr;
+ state = page_address(page);
state->refcnt = 0;
state->dma_addr = dma_addr;
- page_addr += sizeof(struct efx_rx_page_state);
dma_addr += sizeof(struct efx_rx_page_state);
split:
@@ -230,7 +227,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
- page_addr += (PAGE_SIZE >> 1);
++count;
goto split;
}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index ce72ae4f399f..2069f51b2aa9 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -373,7 +373,7 @@ static void efx_iterate_state(struct efx_nic *efx)
/* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5;
- payload->ip.check = htons(0xdead);
+ payload->ip.check = (__force __sum16) htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP;
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire.
*/
- netif_device_detach(efx->net_dev);
+ efx_device_detach_sync(efx);
if (efx->type->test_chip) {
rc_reset = efx->type->test_chip(efx, tests);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 84b41bf08a38..ba40f67e4f05 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -127,6 +127,18 @@ static void siena_remove_port(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
+void siena_prepare_flush(struct efx_nic *efx)
+{
+ if (efx->fc_disable++ == 0)
+ efx_mcdi_set_mac(efx);
+}
+
+void siena_finish_flush(struct efx_nic *efx)
+{
+ if (--efx->fc_disable == 0)
+ efx_mcdi_set_mac(efx);
+}
+
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
@@ -158,7 +170,7 @@ static const struct efx_nic_register_test siena_register_tests[] = {
static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- enum reset_type reset_method = reset_method;
+ enum reset_type reset_method = RESET_TYPE_ALL;
int rc, rc2;
efx_reset_down(efx, reset_method);
@@ -659,7 +671,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.reset = siena_reset_hw,
.probe_port = siena_probe_port,
.remove_port = siena_remove_port,
- .prepare_flush = efx_port_dummy_op_void,
+ .prepare_flush = siena_prepare_flush,
+ .finish_flush = siena_finish_flush,
.update_stats = siena_update_nic_stats,
.start_stats = siena_start_nic_stats,
.stop_stats = siena_stop_nic_stats,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index d49b53dc2a50..90f8d1604f5f 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -695,8 +695,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return VFDI_RC_ENOMEM;
rtnl_lock();
- if (efx->fc_disable++ == 0)
- efx_mcdi_set_mac(efx);
+ siena_prepare_flush(efx);
rtnl_unlock();
/* Flush all the initialized queues */
@@ -733,8 +732,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
}
rtnl_lock();
- if (--efx->fc_disable == 0)
- efx_mcdi_set_mac(efx);
+ siena_finish_flush(efx);
rtnl_unlock();
/* Irrespective of success/failure, fini the queues */
@@ -995,7 +993,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
- efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
+ efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
mutex_unlock(&vf->status_lock);
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 3e5519a0acc7..dc171b4961e4 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1143,7 +1143,7 @@ static int ioc3_is_menet(struct pci_dev *pdev)
* Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
* registered.
*/
-static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
+static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6
@@ -1169,7 +1169,7 @@ static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
serial8250_register_8250_port(&port);
}
-static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
+static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
/*
 * We need to recognise and treat the fourth MENET serial as it
@@ -1229,8 +1229,7 @@ static const struct net_device_ops ioc3_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit ioc3_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int sw_physid1, sw_physid2;
struct net_device *dev = NULL;
@@ -1368,7 +1367,7 @@ out:
return err;
}
-static void __devexit ioc3_remove_one (struct pci_dev *pdev)
+static void ioc3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
@@ -1396,7 +1395,7 @@ static struct pci_driver ioc3_driver = {
.name = "ioc3-eth",
.id_table = ioc3_pci_tbl,
.probe = ioc3_probe,
- .remove = __devexit_p(ioc3_remove_one),
+ .remove = ioc3_remove_one,
};
static int __init ioc3_init_module(void)
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 53efe7c7b1c0..79ad9c94a21b 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -825,7 +825,7 @@ static const struct net_device_ops meth_netdev_ops = {
/*
* The init function.
*/
-static int __devinit meth_probe(struct platform_device *pdev)
+static int meth_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct meth_private *priv;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 32e55664df6e..b2315324cc6d 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1395,8 +1395,7 @@ static const struct net_device_ops sc92031_netdev_ops = {
#endif
};
-static int __devinit sc92031_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
void __iomem* port_base;
@@ -1489,7 +1488,7 @@ out_enable_device:
return err;
}
-static void __devexit sc92031_remove(struct pci_dev *pdev)
+static void sc92031_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct sc92031_priv *priv = netdev_priv(dev);
@@ -1574,7 +1573,7 @@ static struct pci_driver sc92031_pci_driver = {
.name = SC92031_NAME,
.id_table = sc92031_pci_device_id_table,
.probe = sc92031_probe,
- .remove = __devexit_p(sc92031_remove),
+ .remove = sc92031_remove,
.suspend = sc92031_suspend,
.resume = sc92031_resume,
};
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index d8166012b7d4..9a9c379420d1 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -415,7 +415,7 @@ static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
return mdio_read(ioaddr, phy_id, reg);
}
-static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
+static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
u16 data = 0xffff;
unsigned int i;
@@ -1379,7 +1379,7 @@ static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
 * Identify and set the current phy if one is found,
 * return an error otherwise.
*/
-static int __devinit sis190_mii_probe(struct net_device *dev)
+static int sis190_mii_probe(struct net_device *dev)
{
struct sis190_private *tp = netdev_priv(dev);
struct mii_if_info *mii_if = &tp->mii_if;
@@ -1451,7 +1451,7 @@ static void sis190_release_board(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
+static struct net_device *sis190_init_board(struct pci_dev *pdev)
{
struct sis190_private *tp;
struct net_device *dev;
@@ -1573,8 +1573,8 @@ static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}
-static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
- struct net_device *dev)
+static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+ struct net_device *dev)
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -1615,10 +1615,10 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
* APC CMOS RAM is accessed through ISA bridge.
* MAC address is read into @net_dev->dev_addr.
*/
-static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
- struct net_device *dev)
+static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+ struct net_device *dev)
{
- static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
+ static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
struct sis190_private *tp = netdev_priv(dev);
struct pci_dev *isa_bridge;
u8 reg, tmp8;
@@ -1693,8 +1693,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
SIS_PCI_COMMIT();
}
-static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
- struct net_device *dev)
+static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -1845,8 +1844,8 @@ static const struct net_device_ops sis190_netdev_ops = {
#endif
};
-static int __devinit sis190_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sis190_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version = 0;
struct sis190_private *tp;
@@ -1916,7 +1915,7 @@ err_release_board:
goto out;
}
-static void __devexit sis190_remove_one(struct pci_dev *pdev)
+static void sis190_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct sis190_private *tp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ static struct pci_driver sis190_pci_driver = {
.name = DRV_NAME,
.id_table = sis190_pci_tbl,
.probe = sis190_init_one,
- .remove = __devexit_p(sis190_remove_one),
+ .remove = sis190_remove_one,
};
static int __init sis190_init_module(void)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index edf5edb13140..5bffd9749a58 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -81,7 +81,7 @@
#define SIS900_MODULE_NAME "sis900"
#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
-static const char version[] __devinitconst =
+static const char version[] =
KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
static int max_interrupt_work = 40;
@@ -251,7 +251,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev->perm_addr.
*/
-static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
+static int sis900_get_mac_addr(struct pci_dev *pci_dev,
+ struct net_device *net_dev)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
@@ -287,8 +288,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
* @net_dev->perm_addr.
*/
-static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
- struct net_device *net_dev)
+static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
+ struct net_device *net_dev)
{
struct pci_dev *isa_bridge = NULL;
u8 reg;
@@ -330,8 +331,8 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
* @net_dev->dev_addr and @net_dev->perm_addr.
*/
-static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
- struct net_device *net_dev)
+static int sis635_get_mac_addr(struct pci_dev *pci_dev,
+ struct net_device *net_dev)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
@@ -377,8 +378,8 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
* MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
-static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
- struct net_device *net_dev)
+static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
+ struct net_device *net_dev)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
@@ -433,8 +434,8 @@ static const struct net_device_ops sis900_netdev_ops = {
* ie: sis900_open(), sis900_start_xmit(), sis900_close(), etc.
*/
-static int __devinit sis900_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
+static int sis900_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
{
struct sis900_private *sis_priv;
struct net_device *net_dev;
@@ -605,7 +606,7 @@ err_out_cleardev:
 * return an error if none is found.
*/
-static int __devinit sis900_mii_probe(struct net_device * net_dev)
+static int sis900_mii_probe(struct net_device *net_dev)
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
const char *dev_name = pci_name(sis_priv->pci_dev);
@@ -824,7 +825,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
 * Note that the location is in word (16-bit) units
*/
-static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
+static u16 read_eeprom(void __iomem *ioaddr, int location)
{
u32 read_cmd = location | EEread;
int i;
@@ -2410,7 +2411,7 @@ static void sis900_reset(struct net_device *net_dev)
* remove and release SiS900 net device
*/
-static void __devexit sis900_remove(struct pci_dev *pci_dev)
+static void sis900_remove(struct pci_dev *pci_dev)
{
struct net_device *net_dev = pci_get_drvdata(pci_dev);
struct sis900_private *sis_priv = netdev_priv(net_dev);
@@ -2496,7 +2497,7 @@ static struct pci_driver sis900_pci_driver = {
.name = SIS900_MODULE_NAME,
.id_table = sis900_pci_tbl,
.probe = sis900_probe,
- .remove = __devexit_p(sis900_remove),
+ .remove = sis900_remove,
#ifdef CONFIG_PM
.suspend = sis900_suspend,
.resume = sis900_resume,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index d01e59c348ad..03b256af7ed5 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -90,9 +90,9 @@ static int rx_copybreak;
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
-static char version2[] __devinitdata =
+static char version2[] =
" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -318,8 +318,7 @@ static const struct net_device_ops epic_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit epic_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int card_idx = -1;
void __iomem *ioaddr;
@@ -569,7 +568,7 @@ static inline void epic_napi_irq_on(struct net_device *dev,
ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
-static int __devinit read_eeprom(struct epic_private *ep, int location)
+static int read_eeprom(struct epic_private *ep, int location)
{
void __iomem *ioaddr = ep->ioaddr;
int i;
@@ -1524,7 +1523,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
-static void __devexit epic_remove_one(struct pci_dev *pdev)
+static void epic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
@@ -1577,7 +1576,7 @@ static struct pci_driver epic_driver = {
.name = DRV_NAME,
.id_table = epic_pci_tbl,
.probe = epic_init_one,
- .remove = __devexit_p(epic_remove_one),
+ .remove = epic_remove_one,
#ifdef CONFIG_PM
.suspend = epic_suspend,
.resume = epic_resume,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d15f7a74b45..59a6f88da867 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1400,16 +1400,6 @@ smc911x_open(struct net_device *dev)
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
- /*
- * Check that the address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
- */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- PRINTK("%s: no valid ethernet hw addr\n", __func__);
- return -EINVAL;
- }
-
/* reset the hardware */
smc911x_reset(dev);
@@ -1722,7 +1712,7 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
* This routine has a simple purpose -- make the SMC chip generate an
* interrupt, so an auto-detect routine can detect it, and find the IRQ,
*/
-static int __devinit smc911x_findirq(struct net_device *dev)
+static int smc911x_findirq(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int timeout = 20;
@@ -1800,7 +1790,7 @@ static const struct net_device_ops smc911x_netdev_ops = {
* o actually GRAB the irq.
* o GRAB the region
*/
-static int __devinit smc911x_probe(struct net_device *dev)
+static int smc911x_probe(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int i, retval;
@@ -2040,7 +2030,7 @@ err_out:
* 0 --> there is a device
* anything else, error
*/
-static int __devinit smc911x_drv_probe(struct platform_device *pdev)
+static int smc911x_drv_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct resource *res;
@@ -2115,7 +2105,7 @@ out:
return ret;
}
-static int __devexit smc911x_drv_remove(struct platform_device *pdev)
+static int smc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc911x_local *lp = netdev_priv(ndev);
@@ -2186,7 +2176,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
static struct platform_driver smc911x_driver = {
.probe = smc911x_drv_probe,
- .remove = __devexit_p(smc911x_drv_remove),
+ .remove = smc911x_drv_remove,
.suspend = smc911x_drv_suspend,
.resume = smc911x_drv_resume,
.driver = {
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 3269292efecc..d51261ba4642 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -159,12 +159,12 @@ static inline void SMC_insl(struct smc911x_local *lp, int reg,
void __iomem *ioaddr = lp->base + reg;
if (lp->cfg.flags & SMC911X_USE_32BIT) {
- readsl(ioaddr, addr, count);
+ ioread32_rep(ioaddr, addr, count);
return;
}
if (lp->cfg.flags & SMC911X_USE_16BIT) {
- readsw(ioaddr, addr, count * 2);
+ ioread16_rep(ioaddr, addr, count * 2);
return;
}
@@ -177,12 +177,12 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
void __iomem *ioaddr = lp->base + reg;
if (lp->cfg.flags & SMC911X_USE_32BIT) {
- writesl(ioaddr, addr, count);
+ iowrite32_rep(ioaddr, addr, count);
return;
}
if (lp->cfg.flags & SMC911X_USE_16BIT) {
- writesw(ioaddr, addr, count * 2);
+ iowrite16_rep(ioaddr, addr, count * 2);
return;
}
@@ -196,14 +196,14 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
writew(v & 0xFFFF, (lp)->base + (r)); \
writew(v >> 16, (lp)->base + (r) + 2); \
} while (0)
-#define SMC_insl(lp, r, p, l) readsw((short*)((lp)->base + (r)), p, l*2)
-#define SMC_outsl(lp, r, p, l) writesw((short*)((lp)->base + (r)), p, l*2)
+#define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2)
+#define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2)
#elif SMC_USE_32BIT
#define SMC_inl(lp, r) readl((lp)->base + (r))
#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
-#define SMC_insl(lp, r, p, l) readsl((int*)((lp)->base + (r)), p, l)
-#define SMC_outsl(lp, r, p, l) writesl((int*)((lp)->base + (r)), p, l)
+#define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l)
+#define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l)
#endif /* SMC_USE_16BIT */
#endif /* SMC_DYNAMIC_BUS_CONFIG */
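The accessor macros above move from readsl()/writesl() to the ioread/iowrite *_rep helpers, which take the same (address, buffer, count) arguments but also cover ioport-mapped regions. A tiny sketch of a FIFO drain using the new helper (the register offset is invented):

	#define EXAMPLE_RX_FIFO	0x40	/* hypothetical FIFO offset */

	/* Hypothetical helper: read 'words' 32-bit values from a device FIFO. */
	static void example_drain_fifo(void __iomem *base, u32 *buf,
				       unsigned int words)
	{
		ioread32_rep(base + EXAMPLE_RX_FIFO, buf, words);
	}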
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 318adc935a53..a670d23d9340 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1474,16 +1474,6 @@ smc_open(struct net_device *dev)
DBG(2, "%s: %s\n", dev->name, __func__);
- /*
- * Check that the address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
- */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- PRINTK("%s: no valid ethernet hw addr\n", __func__);
- return -EINVAL;
- }
-
/* Setup the default Register Modes */
lp->tcr_cur_mode = TCR_DEFAULT;
lp->rcr_cur_mode = RCR_DEFAULT;
@@ -1789,7 +1779,7 @@ static const struct net_device_ops smc_netdev_ops = {
* I just deleted auto_irq.c, since it was never built...
* --jgarzik
*/
-static int __devinit smc_findirq(struct smc_local *lp)
+static int smc_findirq(struct smc_local *lp)
{
void __iomem *ioaddr = lp->base;
int timeout = 20;
@@ -1863,8 +1853,8 @@ static int __devinit smc_findirq(struct smc_local *lp)
* o actually GRAB the irq.
* o GRAB the region
*/
-static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
- unsigned long irq_flags)
+static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
+ unsigned long irq_flags)
{
struct smc_local *lp = netdev_priv(dev);
static int version_printed = 0;
@@ -2211,7 +2201,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
* 0 --> there is a device
* anything else, error
*/
-static int __devinit smc_drv_probe(struct platform_device *pdev)
+static int smc_drv_probe(struct platform_device *pdev)
{
struct smc91x_platdata *pd = pdev->dev.platform_data;
struct smc_local *lp;
@@ -2324,7 +2314,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit smc_drv_remove(struct platform_device *pdev)
+static int smc_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc_local *lp = netdev_priv(ndev);
@@ -2396,8 +2386,6 @@ static const struct of_device_id smc91x_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, smc91x_match);
-#else
-#define smc91x_match NULL
#endif
static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2407,12 +2395,12 @@ static struct dev_pm_ops smc_drv_pm_ops = {
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove = __devexit_p(smc_drv_remove),
+ .remove = smc_drv_remove,
.driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
- .of_match_table = smc91x_match,
+ .of_match_table = of_match_ptr(smc91x_match),
},
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5f53fbbf67be..370e13dde115 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -286,16 +286,16 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_IO_SHIFT (lp->io_shift)
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_inb(a, r) ioread8((a) + (r))
+#define SMC_inw(a, r) ioread16((a) + (r))
+#define SMC_inl(a, r) ioread32((a) + (r))
+#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
+#define SMC_outw(v, a, r) iowrite16(v, (a) + (r))
+#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
+#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c53c0f4e2ce3..e112877d15d3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -253,7 +253,7 @@ smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
- writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
+ iowrite32_rep(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
goto out;
}
@@ -285,7 +285,7 @@ smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
- writesl(pdata->ioaddr + __smsc_shift(pdata,
+ iowrite32_rep(pdata->ioaddr + __smsc_shift(pdata,
TX_DATA_FIFO), buf, wordcount);
goto out;
}
@@ -319,7 +319,7 @@ smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
- readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
+ ioread32_rep(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
goto out;
}
@@ -351,7 +351,7 @@ smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
}
if (pdata->config.flags & SMSC911X_USE_32BIT) {
- readsl(pdata->ioaddr + __smsc_shift(pdata,
+ ioread32_rep(pdata->ioaddr + __smsc_shift(pdata,
RX_DATA_FIFO), buf, wordcount);
goto out;
}
@@ -1031,8 +1031,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return 0;
}
-static int __devinit smsc911x_mii_init(struct platform_device *pdev,
- struct net_device *dev)
+static int smsc911x_mii_init(struct platform_device *pdev,
+ struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
int err = -ENXIO, i;
@@ -1463,11 +1463,6 @@ static int smsc911x_open(struct net_device *dev)
return -EAGAIN;
}
- if (!is_valid_ether_addr(dev->dev_addr)) {
- SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
- return -EADDRNOTAVAIL;
- }
-
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata)) {
SMSC_WARN(pdata, hw, "soft reset failed");
@@ -2092,7 +2087,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
};
/* copies the current mac address from hardware to dev->dev_addr */
-static void __devinit smsc911x_read_mac_address(struct net_device *dev)
+static void smsc911x_read_mac_address(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
@@ -2107,7 +2102,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
}
/* Initializing private device structures, only called from probe */
-static int __devinit smsc911x_init(struct net_device *dev)
+static int smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int byte_test, mask;
@@ -2244,7 +2239,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
return 0;
}
-static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
+static int smsc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct smsc911x_data *pdata;
@@ -2301,9 +2296,8 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
};
#ifdef CONFIG_OF
-static int __devinit smsc911x_probe_config_dt(
- struct smsc911x_platform_config *config,
- struct device_node *np)
+static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
+ struct device_node *np)
{
const char *mac;
u32 width = 0;
@@ -2351,7 +2345,7 @@ static inline int smsc911x_probe_config_dt(
}
#endif /* CONFIG_OF */
-static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
+static int smsc911x_drv_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
@@ -2581,20 +2575,22 @@ static const struct dev_pm_ops smsc911x_pm_ops = {
#define SMSC911X_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
static const struct of_device_id smsc911x_dt_ids[] = {
{ .compatible = "smsc,lan9115", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
+#endif
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove = __devexit_p(smsc911x_drv_remove),
+ .remove = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
.owner = THIS_MODULE,
.pm = SMSC911X_PM_OPS,
- .of_match_table = smsc911x_dt_ids,
+ .of_match_table = of_match_ptr(smsc911x_dt_ids),
},
};
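Wrapping the DT id table in of_match_ptr() pairs with the new #ifdef CONFIG_OF guard above: when CONFIG_OF is unset the table is not compiled in and the macro collapses to NULL, so the .of_match_table assignment still builds. Roughly, paraphrasing <linux/of.h>:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif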
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 1fcd914ec39b..3c586585e1b3 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1577,7 +1577,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
-static int __devinit
+static int
smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct net_device *dev;
@@ -1702,7 +1702,7 @@ out_0:
return -ENODEV;
}
-static void __devexit smsc9420_remove(struct pci_dev *pdev)
+static void smsc9420_remove(struct pci_dev *pdev)
{
struct net_device *dev;
struct smsc9420_pdata *pd;
@@ -1736,7 +1736,7 @@ static struct pci_driver smsc9420_driver = {
.name = DRV_NAME,
.id_table = smsc9420_id_table,
.probe = smsc9420_probe,
- .remove = __devexit_p(smsc9420_remove),
+ .remove = smsc9420_remove,
#ifdef CONFIG_PM
.suspend = smsc9420_suspend,
.resume = smsc9420_resume,
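A note on the __devinit/__devexit churn that recurs throughout this series: those annotations placed probe/remove code in discardable sections, and __devexit_p() compiled the remove pointer down to NULL on !CONFIG_HOTPLUG kernels. From memory, the old <linux/init.h> definitions were along these lines (simplified):

#define __devinit	__section(.devinit.text) __cold
#define __devinitdata	__section(.devinit.data)
#define __devexit	__section(.devexit.text) __cold
#ifdef CONFIG_HOTPLUG
#define __devexit_p(x)	x
#else
#define __devexit_p(x)	NULL
#endif

With CONFIG_HOTPLUG effectively always enabled these became no-ops, which is why the annotations can simply be dropped and the remove callbacks assigned directly.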
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9f448279e12a..1164930a40a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -54,31 +54,6 @@ config STMMAC_DA
By default, the DMA arbitration scheme is based on Round-robin
(rx:tx priority is 1:1).
-config STMMAC_TIMER
- bool "STMMAC Timer optimisation"
- default n
- depends on RTC_HCTOSYS_DEVICE
- ---help---
- Use an external timer for mitigating the number of network
- interrupts. Currently, for SH architectures, it is possible
- to use the TMU channel 2 and the SH-RTC device.
-
-choice
- prompt "Select Timer device"
- depends on STMMAC_TIMER
-
-config STMMAC_TMU_TIMER
- bool "TMU channel 2"
- depends on CPU_SH4
- ---help---
-
-config STMMAC_RTC_TIMER
- bool "Real time clock"
- depends on RTC_CLASS
- ---help---
-
-endchoice
-
choice
prompt "Select the DMA TX/RX descriptor operating modes"
depends on STMMAC_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index bc965ac9e025..c8e8ea60ac19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,5 +1,4 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 719be3912aa9..186d14806122 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -48,6 +48,10 @@
#define CHIP_DBG(fmt, args...) do { } while (0)
#endif
+/* Synopsys Core versions */
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
@@ -81,7 +85,7 @@ struct stmmac_extra_stats {
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
- /* Tx/Rx IRQ errors */
+ /* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
unsigned long tx_jabber_irq;
@@ -91,18 +95,23 @@ struct stmmac_extra_stats {
unsigned long rx_watchdog_irq;
unsigned long tx_early_irq;
unsigned long fatal_bus_error_irq;
- /* Extra info */
+ /* Tx/Rx IRQ Events */
+ unsigned long rx_early_irq;
unsigned long threshold;
unsigned long tx_pkt_n;
unsigned long rx_pkt_n;
- unsigned long poll_n;
- unsigned long sched_timer_n;
unsigned long normal_irq_n;
+ unsigned long rx_normal_irq_n;
+ unsigned long napi_poll;
+ unsigned long tx_normal_irq_n;
+ unsigned long tx_clean;
+ unsigned long tx_reset_ic_bit;
+ unsigned long irq_receive_pmt_irq_n;
+ /* MMC info */
unsigned long mmc_tx_irq_n;
unsigned long mmc_rx_irq_n;
unsigned long mmc_rx_csum_offload_irq_n;
/* EEE */
- unsigned long irq_receive_pmt_irq_n;
unsigned long irq_tx_path_in_lpi_mode_n;
unsigned long irq_tx_path_exit_lpi_mode_n;
unsigned long irq_rx_path_in_lpi_mode_n;
@@ -162,6 +171,15 @@ struct stmmac_extra_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
#define DEFAULT_DMA_PBL 8
+/* Max/Min RI Watchdog Timer count value */
+#define MAX_DMA_RIWT 0xff
+#define MIN_DMA_RIWT 0x20
+/* Tx coalesce parameters */
+#define STMMAC_COAL_TX_TIMER 40000
+#define STMMAC_MAX_COAL_TX_TICK 100000
+#define STMMAC_TX_MAX_FRAMES 256
+#define STMMAC_TX_FRAMES 64
+
enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
@@ -169,10 +187,11 @@ enum rx_frame_status { /* IPC status */
llc_snap = 4,
};
-enum tx_dma_irq_status {
- tx_hard_error = 1,
- tx_hard_error_bump_tc = 2,
- handle_tx_rx = 3,
+enum dma_irq_status {
+ tx_hard_error = 0x1,
+ tx_hard_error_bump_tc = 0x2,
+ handle_rx = 0x4,
+ handle_tx = 0x8,
};
enum core_specific_irq_mask {
@@ -296,6 +315,8 @@ struct stmmac_dma_ops {
struct stmmac_extra_stats *x);
/* If supported then get the optional core features */
unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
};
struct stmmac_ops {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 0e4cacedc1f0..7ad56afd6324 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -230,8 +230,5 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
-/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 0x34
-
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 033500090f55..bf83c03bfd06 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -174,6 +174,11 @@ static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
return readl(ioaddr + DMA_HW_FEATURE);
}
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+}
+
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.init = dwmac1000_dma_init,
.dump_regs = dwmac1000_dump_dma_regs,
@@ -187,4 +192,5 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
.get_hw_feature = dwmac1000_get_hw_feature,
+ .rx_watchdog = dwmac1000_rx_watchdog,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e49c9a0fd6ff..ab4896ecac1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -35,7 +35,10 @@
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */
+/* Rx watchdog register */
+#define DMA_RX_WATCHDOG 0x00001024
+/* AXI Bus Mode */
+#define DMA_AXI_BUS_MODE 0x00001028
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
@@ -77,8 +80,6 @@
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4e0e18a44fcc..491d7e930603 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -204,16 +204,28 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
}
}
/* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
+ if (likely(intr_status & DMA_STATUS_NIS)) {
x->normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- ret = handle_tx_rx;
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* Only schedule NAPI on a real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
}
/* Optional hardware blocks, interrupts should be disabled */
if (unlikely(intr_status &
(DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
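The switch from a single exclusive handle_tx_rx value to ORed handle_rx/handle_tx bits lets the caller react to each direction independently. A minimal sketch of a consumer, reusing names introduced elsewhere in this patch; it is illustrative only, not the driver's actual handler:

static void example_handle_dma_status(struct stmmac_priv *priv)
{
	int status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);

	if (status & (handle_rx | handle_tx))
		napi_schedule(&priv->napi);	/* the poll routine cleans both paths */
	if (unlikely(status & tx_hard_error))
		stmmac_tx_err(priv);		/* restart the TX engine */
}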
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 7d51a65ab099..b05df8983be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,16 +24,13 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "March_2012"
+#define DRV_MODULE_VERSION "Nov_2012"
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/pci.h>
#include "common.h"
-#ifdef CONFIG_STMMAC_TIMER
-#include "stmmac_timer.h"
-#endif
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
@@ -77,9 +74,6 @@ struct stmmac_priv {
spinlock_t tx_lock;
int wolopts;
int wol_irq;
-#ifdef CONFIG_STMMAC_TIMER
- struct stmmac_timer *tm;
-#endif
struct plat_stmmacenet_data *plat;
struct stmmac_counters mmc;
struct dma_features dma_cap;
@@ -93,6 +87,12 @@ struct stmmac_priv {
int eee_enabled;
int eee_active;
int tx_lpi_timer;
+ struct timer_list txtimer;
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+ int use_riwt;
+ u32 rx_riwt;
};
extern int phyaddr;
@@ -127,14 +127,14 @@ static inline int stmmac_register_platform(void)
}
static inline void stmmac_unregister_platform(void)
{
- platform_driver_register(&stmmac_pltfr_driver);
+ platform_driver_unregister(&stmmac_pltfr_driver);
}
#else
static inline int stmmac_register_platform(void)
{
pr_debug("stmmac: do not register the platf driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_platform(void)
{
@@ -162,7 +162,7 @@ static inline int stmmac_register_pci(void)
{
pr_debug("stmmac: do not register the PCI driver\n");
- return -EINVAL;
+ return 0;
}
static inline void stmmac_unregister_pci(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 76fd61aa005f..1372ce210b58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -76,7 +76,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
- /* Tx/Rx IRQ errors */
+ /* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
STMMAC_STAT(tx_jabber_irq),
@@ -86,18 +86,23 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_watchdog_irq),
STMMAC_STAT(tx_early_irq),
STMMAC_STAT(fatal_bus_error_irq),
- /* Extra info */
+ /* Tx/Rx IRQ Events */
+ STMMAC_STAT(rx_early_irq),
STMMAC_STAT(threshold),
STMMAC_STAT(tx_pkt_n),
STMMAC_STAT(rx_pkt_n),
- STMMAC_STAT(poll_n),
- STMMAC_STAT(sched_timer_n),
- STMMAC_STAT(normal_irq_n),
STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(rx_normal_irq_n),
+ STMMAC_STAT(napi_poll),
+ STMMAC_STAT(tx_normal_irq_n),
+ STMMAC_STAT(tx_clean),
+ STMMAC_STAT(tx_reset_ic_bit),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ /* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
STMMAC_STAT(mmc_rx_irq_n),
STMMAC_STAT(mmc_rx_csum_offload_irq_n),
- STMMAC_STAT(irq_receive_pmt_irq_n),
+ /* EEE */
STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
@@ -519,6 +524,87 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
return phy_ethtool_set_eee(priv->phydev, edata);
}
+static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+{
+ unsigned long clk = clk_get_rate(priv->stmmac_clk);
+
+ if (!clk)
+ return 0;
+
+ return (usec * (clk / 1000000)) / 256;
+}
+
+static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+{
+ unsigned long clk = clk_get_rate(priv->stmmac_clk);
+
+ if (!clk)
+ return 0;
+
+ return (riwt * 256) / (clk / 1000000);
+}
+
+static int stmmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ ec->tx_coalesce_usecs = priv->tx_coal_timer;
+ ec->tx_max_coalesced_frames = priv->tx_coal_frames;
+
+ if (priv->use_riwt)
+ ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+
+ return 0;
+}
+
+static int stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int rx_riwt;
+
+ /* Check not supported parameters */
+ if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+ (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_coalesce_usecs == 0)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs == 0) &&
+ (ec->tx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
+ (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
+ return -EINVAL;
+
+ rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
+ return -EINVAL;
+ else if (!priv->use_riwt)
+ return -EOPNOTSUPP;
+
+ /* Only copy relevant parameters, ignore all others. */
+ priv->tx_coal_frames = ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer = ec->tx_coalesce_usecs;
+ priv->rx_riwt = rx_riwt;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+ return 0;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -539,6 +625,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = stmmac_get_coalesce,
+ .set_coalesce = stmmac_set_coalesce,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
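To make the riwt conversion above concrete: the watchdog counts in units of 256 CSR clock cycles, so for an assumed 250 MHz CSR clock (clk / 1000000 = 250 cycles per microsecond) a 100 us request rounds as shown below. The clock rate is purely an example; the real value comes from clk_get_rate(priv->stmmac_clk).

/* Userspace-style worked example; the 250 MHz clock is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned long clk = 250000000UL;			/* hypothetical CSR clock */
	unsigned int usec = 100;				/* requested rx-usecs */
	unsigned int riwt = (usec * (clk / 1000000)) / 256;	/* = 97 */
	unsigned int back = (riwt * 256) / (clk / 1000000);	/* = 99 us */

	printf("riwt=%u -> ~%u us after rounding\n", riwt, back);
	return 0;
}

The resulting 97 also sits comfortably between MIN_DMA_RIWT (0x20) and MAX_DMA_RIWT (0xff), so stmmac_set_coalesce() would accept it.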
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c6cdbc4eb05e..f07c0612abf6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -115,16 +115,6 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-/* Pay attention to tune this parameter; take care of both
- * hardware capability and network stabitily/performance impact.
- * Many tests showed that ~4ms latency seems to be good enough. */
-#ifdef CONFIG_STMMAC_TIMER
-#define DEFAULT_PERIODIC_RATE 256
-static int tmrate = DEFAULT_PERIODIC_RATE;
-module_param(tmrate, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
-#endif
-
#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
@@ -147,6 +137,8 @@ static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif
+#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
@@ -536,12 +528,6 @@ static void init_dma_desc_rings(struct net_device *dev)
else
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-#ifdef CONFIG_STMMAC_TIMER
- /* Disable interrupts on completion for the reception if timer is on */
- if (likely(priv->tm->enable))
- dis_ic = 1;
-#endif
-
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
@@ -617,6 +603,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ if (priv->use_riwt)
+ dis_ic = 1;
/* Clear the Rx/Tx descriptors */
priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
@@ -704,16 +692,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_tx:
- * @priv: private driver structure
+ * stmmac_tx_clean:
+ * @priv: private data pointer
* Description: it reclaims resources after transmission completes.
*/
-static void stmmac_tx(struct stmmac_priv *priv)
+static void stmmac_tx_clean(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
spin_lock(&priv->tx_lock);
+ priv->xstats.tx_clean++;
+
while (priv->dirty_tx != priv->cur_tx) {
int last;
unsigned int entry = priv->dirty_tx % txsize;
@@ -773,69 +763,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
spin_unlock(&priv->tx_lock);
}
-static inline void stmmac_enable_irq(struct stmmac_priv *priv)
-{
-#ifdef CONFIG_STMMAC_TIMER
- if (likely(priv->tm->enable))
- priv->tm->timer_start(tmrate);
- else
-#endif
- priv->hw->dma->enable_dma_irq(priv->ioaddr);
-}
-
-static inline void stmmac_disable_irq(struct stmmac_priv *priv)
-{
-#ifdef CONFIG_STMMAC_TIMER
- if (likely(priv->tm->enable))
- priv->tm->timer_stop();
- else
-#endif
- priv->hw->dma->disable_dma_irq(priv->ioaddr);
-}
-
-static int stmmac_has_work(struct stmmac_priv *priv)
-{
- unsigned int has_work = 0;
- int rxret, tx_work = 0;
-
- rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
- (priv->cur_rx % priv->dma_rx_size));
-
- if (priv->dirty_tx != priv->cur_tx)
- tx_work = 1;
-
- if (likely(!rxret || tx_work))
- has_work = 1;
-
- return has_work;
-}
-
-static inline void _stmmac_schedule(struct stmmac_priv *priv)
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
- if (likely(stmmac_has_work(priv))) {
- stmmac_disable_irq(priv);
- napi_schedule(&priv->napi);
- }
+ priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
-#ifdef CONFIG_STMMAC_TIMER
-void stmmac_schedule(struct net_device *dev)
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- priv->xstats.sched_timer_n++;
-
- _stmmac_schedule(priv);
+ priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
-static void stmmac_no_timer_started(unsigned int x)
-{;
-};
-
-static void stmmac_no_timer_stopped(void)
-{;
-};
-#endif
/**
* stmmac_tx_err:
@@ -858,16 +795,18 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
netif_wake_queue(priv->dev);
}
-
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
int status;
status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
- if (likely(status == handle_tx_rx))
- _stmmac_schedule(priv);
-
- else if (unlikely(status == tx_hard_error_bump_tc)) {
+ if (likely((status & handle_rx)) || (status & handle_tx)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ stmmac_disable_dma_irq(priv);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
tc += 64;
@@ -983,7 +922,6 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
/* Alternate (enhanced) DESC mode*/
priv->dma_cap.enh_desc =
(hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
-
}
return hw_cap;
@@ -1025,6 +963,38 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/**
+ * stmmac_tx_timer:
+ * @data: data pointer
+ * Description:
+ * This is the timer handler that directly invokes stmmac_tx_clean().
+ */
+static void stmmac_tx_timer(unsigned long data)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)data;
+
+ stmmac_tx_clean(priv);
+}
+
+/**
+ * stmmac_init_tx_coalesce:
+ * @priv: private data structure
+ * Description:
+ * This inits the transmit coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+{
+ priv->tx_coal_frames = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+ init_timer(&priv->txtimer);
+ priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
+ priv->txtimer.data = (unsigned long)priv;
+ priv->txtimer.function = stmmac_tx_timer;
+ add_timer(&priv->txtimer);
+}
+
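Side note, not part of the patch: init_timer() with explicit .function/.data assignment was the idiom of this kernel generation; after the later timer API conversion the same hook would be spelled with timer_setup()/from_timer(). A sketch of that newer form, for comparison only:

static void stmmac_tx_timer_new(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, txtimer);

	stmmac_tx_clean(priv);
}

static void stmmac_init_tx_coalesce_new(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	timer_setup(&priv->txtimer, stmmac_tx_timer_new, 0);
	mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
}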
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -1038,23 +1008,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
-#ifdef CONFIG_STMMAC_TIMER
- priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
- if (unlikely(priv->tm == NULL))
- return -ENOMEM;
-
- priv->tm->freq = tmrate;
-
- /* Test if the external timer can be actually used.
- * In case of failure continue without timer. */
- if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
- pr_warning("stmmaceth: cannot attach the external timer.\n");
- priv->tm->freq = 0;
- priv->tm->timer_start = stmmac_no_timer_started;
- priv->tm->timer_stop = stmmac_no_timer_stopped;
- } else
- priv->tm->enable = 1;
-#endif
clk_prepare_enable(priv->stmmac_clk);
stmmac_check_ether_addr(priv);
@@ -1141,10 +1094,6 @@ static int stmmac_open(struct net_device *dev)
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
-#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_start(tmrate);
-#endif
-
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
priv->hw->mac->dump_regs(priv->ioaddr);
@@ -1157,6 +1106,13 @@ static int stmmac_open(struct net_device *dev)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_init_tx_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ }
+
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1170,9 +1126,6 @@ open_error_wolirq:
free_irq(dev->irq, dev);
open_error:
-#ifdef CONFIG_STMMAC_TIMER
- kfree(priv->tm);
-#endif
if (priv->phydev)
phy_disconnect(priv->phydev);
@@ -1203,14 +1156,10 @@ static int stmmac_release(struct net_device *dev)
netif_stop_queue(dev);
-#ifdef CONFIG_STMMAC_TIMER
- /* Stop and release the timer */
- stmmac_close_ext_timer();
- if (priv->tm != NULL)
- kfree(priv->tm);
-#endif
napi_disable(&priv->napi);
+ del_timer_sync(&priv->txtimer);
+
/* Free the IRQ lines */
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
@@ -1273,11 +1222,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if ((skb->len > ETH_FRAME_LEN) || nfrags)
- pr_info("stmmac xmit:\n"
- "\tskb addr %p - len: %d - nopaged_len: %d\n"
- "\tn_frags: %d - ip_summed: %d - %s gso\n",
- skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
- !skb_is_gso(skb) ? "isn't" : "is");
+ pr_debug("stmmac xmit: [entry %d]\n"
+ "\tskb addr %p - len: %d - nopaged_len: %d\n"
+ "\tn_frags: %d - ip_summed: %d - %s gso\n"
+ "\ttx_count_frames %d\n", entry,
+ skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
+ !skb_is_gso(skb) ? "isn't" : "is",
+ priv->tx_count_frames);
#endif
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -1287,9 +1238,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
- pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
- "\t\tn_frags: %d, ip_summed: %d\n",
- skb->len, nopaged_len, nfrags, skb->ip_summed);
+ pr_debug("\tskb len: %d, nopaged_len: %d,\n"
+ "\t\tn_frags: %d, ip_summed: %d\n",
+ skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
priv->tx_skbuff[entry] = skb;
@@ -1320,16 +1271,24 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
}
- /* Interrupt on completition only for the latest segment */
+ /* Finalize the latest segment. */
priv->hw->desc->close_tx_desc(desc);
-#ifdef CONFIG_STMMAC_TIMER
- /* Clean IC while using timer */
- if (likely(priv->tm->enable))
- priv->hw->desc->clear_tx_ic(desc);
-#endif
-
wmb();
+ /* According to the coalesce parameter, the IC bit for the latest
+ * segment can be reset and the timer re-armed to invoke the
+ * stmmac_tx_clean function. This approach also accounts for fragments.
+ */
+ priv->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames > priv->tx_count_frames) {
+ priv->hw->desc->clear_tx_ic(desc);
+ priv->xstats.tx_reset_ic_bit++;
+ TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
+ priv->tx_count_frames);
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else
+ priv->tx_count_frames = 0;
/* To avoid a race condition */
priv->hw->desc->set_tx_owner(first);
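With the defaults from stmmac_init_tx_coalesce() (STMMAC_TX_FRAMES = 64, STMMAC_COAL_TX_TIMER = 40000 us), the IC bit is requested roughly once per 64 frames and the 40 ms timer acts as a backstop for idle periods. A back-of-the-envelope figure, assuming a steady 10,000 transmitted frames per second:

	unsigned int pps = 10000;				/* assumed TX rate */
	unsigned int irqs_per_sec = pps / STMMAC_TX_FRAMES;	/* ~156 instead of 10000 */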
@@ -1471,14 +1430,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
#endif
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!priv->plat->rx_coe)) {
- /* No RX COE for old mac10/100 devices */
+ if (unlikely(!priv->plat->rx_coe))
skb_checksum_none_assert(skb);
- netif_receive_skb(skb);
- } else {
+ else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
- }
+
+ napi_gro_receive(&priv->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -1500,21 +1457,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
- * Also it runs the TX completion thread
+ * To look at the incoming frames and clear the tx resources.
*/
static int stmmac_poll(struct napi_struct *napi, int budget)
{
struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
int work_done = 0;
- priv->xstats.poll_n++;
- stmmac_tx(priv);
- work_done = stmmac_rx(priv, budget);
+ priv->xstats.napi_poll++;
+ stmmac_tx_clean(priv);
+ work_done = stmmac_rx(priv, budget);
if (work_done < budget) {
napi_complete(napi);
- stmmac_enable_irq(priv);
+ stmmac_enable_dma_irq(priv);
}
return work_done;
}
@@ -1523,7 +1479,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
* stmmac_tx_timeout
* @dev : Pointer to net device structure
* Description: this function is called when a packet transmission fails to
- * complete within a reasonable tmrate. The driver will mark the error in the
+ * complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
* in order to transmit a new packet.
*/
@@ -2050,6 +2006,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ /* The Rx watchdog is available in cores newer than 3.40.
+ * In some cases, for example on buggy HW, this feature
+ * has to be disabled; this can be done by setting the
+ * riwt_off field in the platform data.
+ */
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
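The riwt_off escape hatch referenced above is a field of struct plat_stmmacenet_data added alongside this work; a board with a misbehaving RX watchdog would opt out roughly like this (sketch only):

static struct plat_stmmacenet_data example_board_stmmac_pdata = {
	/* ... the usual bus/phy fields ... */
	.riwt_off = 1,	/* keep HW RX mitigation disabled on this board */
};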
@@ -2141,11 +2107,9 @@ int stmmac_suspend(struct net_device *ndev)
netif_device_detach(ndev);
netif_stop_queue(ndev);
-#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_stop();
- if (likely(priv->tm->enable))
+ if (priv->use_riwt)
dis_ic = 1;
-#endif
+
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
@@ -2196,10 +2160,6 @@ int stmmac_resume(struct net_device *ndev)
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
-#ifdef CONFIG_STMMAC_TIMER
- if (likely(priv->tm->enable))
- priv->tm->timer_start(tmrate);
-#endif
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -2234,18 +2194,20 @@ int stmmac_restore(struct net_device *ndev)
*/
static int __init stmmac_init(void)
{
- int err_plt = 0;
- int err_pci = 0;
-
- err_plt = stmmac_register_platform();
- err_pci = stmmac_register_pci();
-
- if ((err_pci) && (err_plt)) {
- pr_err("stmmac: driver registration failed\n");
- return -EINVAL;
- }
+ int ret;
+ ret = stmmac_register_platform();
+ if (ret)
+ goto err;
+ ret = stmmac_register_pci();
+ if (ret)
+ goto err_pci;
return 0;
+err_pci:
+ stmmac_unregister_platform();
+err:
+ pr_err("stmmac: driver registration failed\n");
+ return ret;
}
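Tying this together with the stmmac.h stub changes earlier in the patch: the new sequential registration only works because the disabled-bus stubs now return 0. For example, on a build with only the PCI glue enabled the flow is, schematically:

/* Schematic only; mirrors the code above for a CONFIG_STMMAC_PLATFORM=n build. */
ret = stmmac_register_platform();	/* inline stub -> 0 (was -EINVAL) */
if (ret)
	goto err;			/* no longer taken */
ret = stmmac_register_pci();		/* real registration */
if (ret)
	goto err_pci;			/* unregister the platform side and bail */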
static void __exit stmmac_exit(void)
@@ -2295,11 +2257,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "eee_timer:", 6)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
-#ifdef CONFIG_STMMAC_TIMER
- } else if (!strncmp(opt, "tmrate:", 7)) {
- if (kstrtoint(opt + 7, 0, &tmrate))
- goto err;
-#endif
}
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 1f069b0f6af5..064eaac9616f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -61,8 +61,8 @@ static void stmmac_default_data(void)
* matches the device. The probe functions returns zero when the driver choose
* to take "ownership" of the device or an error code(-ve no) otherwise.
*/
-static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
int ret = 0;
void __iomem *addr = NULL;
@@ -130,7 +130,7 @@ err_out_req_reg_failed:
* Description: this function calls the main to free the net resources
* and releases the PCI resources.
*/
-static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+static void stmmac_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -182,7 +182,7 @@ struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
- .remove = __devexit_p(stmmac_pci_remove),
+ .remove = stmmac_pci_remove,
#ifdef CONFIG_PM
.suspend = stmmac_pci_suspend,
.resume = stmmac_pci_resume,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ed112b55ae7f..b43d68b40e50 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -29,9 +29,9 @@
#include "stmmac.h"
#ifdef CONFIG_OF
-static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+static int stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
{
struct device_node *np = pdev->dev.of_node;
@@ -59,9 +59,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
return 0;
}
#else
-static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+static int stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
{
return -ENOSYS;
}
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary resources and invokes the main to init
* the net device, register the mdio bus etc.
*/
-static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
+static int stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
deleted file mode 100644
index 4ccd4e2977b7..000000000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*******************************************************************************
- STMMAC external timer support.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/etherdevice.h>
-#include "stmmac_timer.h"
-
-static void stmmac_timer_handler(void *data)
-{
- struct net_device *dev = (struct net_device *)data;
-
- stmmac_schedule(dev);
-}
-
-#define STMMAC_TIMER_MSG(timer, freq) \
-printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
-
-#if defined(CONFIG_STMMAC_RTC_TIMER)
-#include <linux/rtc.h>
-static struct rtc_device *stmmac_rtc;
-static rtc_task_t stmmac_task;
-
-static void stmmac_rtc_start(unsigned int new_freq)
-{
- rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
- rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
-}
-
-static void stmmac_rtc_stop(void)
-{
- rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
-}
-
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
-{
- stmmac_task.private_data = dev;
- stmmac_task.func = stmmac_timer_handler;
-
- stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
- if (stmmac_rtc == NULL) {
- pr_err("open rtc device failed\n");
- return -ENODEV;
- }
-
- rtc_irq_register(stmmac_rtc, &stmmac_task);
-
- /* Periodic mode is not supported */
- if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
- pr_err("set periodic failed\n");
- rtc_irq_unregister(stmmac_rtc, &stmmac_task);
- rtc_class_close(stmmac_rtc);
- return -1;
- }
-
- STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
-
- tm->timer_start = stmmac_rtc_start;
- tm->timer_stop = stmmac_rtc_stop;
-
- return 0;
-}
-
-int stmmac_close_ext_timer(void)
-{
- rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
- rtc_irq_unregister(stmmac_rtc, &stmmac_task);
- rtc_class_close(stmmac_rtc);
- return 0;
-}
-
-#elif defined(CONFIG_STMMAC_TMU_TIMER)
-#include <linux/clk.h>
-#define TMU_CHANNEL "tmu2_clk"
-static struct clk *timer_clock;
-
-static void stmmac_tmu_start(unsigned int new_freq)
-{
- clk_set_rate(timer_clock, new_freq);
- clk_prepare_enable(timer_clock);
-}
-
-static void stmmac_tmu_stop(void)
-{
- clk_disable_unprepare(timer_clock);
-}
-
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
-{
- timer_clock = clk_get(NULL, TMU_CHANNEL);
-
- if (IS_ERR(timer_clock))
- return -1;
-
- if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
- timer_clock = NULL;
- return -1;
- }
-
- STMMAC_TIMER_MSG("TMU2", tm->freq);
- tm->timer_start = stmmac_tmu_start;
- tm->timer_stop = stmmac_tmu_stop;
-
- return 0;
-}
-
-int stmmac_close_ext_timer(void)
-{
- clk_disable_unprepare(timer_clock);
- tmu2_unregister_user();
- clk_put(timer_clock);
- return 0;
-}
-#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
deleted file mode 100644
index aea9b14cdfbe..000000000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*******************************************************************************
- STMMAC external timer Header File.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-#ifndef __STMMAC_TIMER_H__
-#define __STMMAC_TIMER_H__
-
-struct stmmac_timer {
- void (*timer_start) (unsigned int new_freq);
- void (*timer_stop) (void);
- unsigned int freq;
- unsigned int enable;
-};
-
-/* Open the HW timer device and return 0 in case of success */
-int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
-/* Stop the timer and release it */
-int stmmac_close_ext_timer(void);
-/* Function used for scheduling task within the stmmac */
-void stmmac_schedule(struct net_device *dev);
-
-#if defined(CONFIG_STMMAC_TMU_TIMER)
-extern int tmu2_register_user(void *fnt, void *data);
-extern void tmu2_unregister_user(void);
-#endif
-
-#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index c8251be104d6..4c682a3d0424 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -185,7 +185,7 @@
#define CAS_RESET_SPARE 3
#endif
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
@@ -222,7 +222,7 @@ static int link_transition_timeout;
-static u16 link_modes[] __devinitdata = {
+static u16 link_modes[] = {
BMCR_ANENABLE, /* 0 : autoneg */
0, /* 1 : 10bt half duplex */
BMCR_SPEED100, /* 2 : 100bt half duplex */
@@ -4820,7 +4820,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
* only subordinate device and we can tweak the bridge settings to
* reflect that fact.
*/
-static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+static void cas_program_bridge(struct pci_dev *cas_pdev)
{
struct pci_dev *pdev = cas_pdev->bus->self;
u32 val;
@@ -4916,8 +4916,7 @@ static const struct net_device_ops cas_netdev_ops = {
#endif
};
-static int __devinit cas_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int cas_version_printed = 0;
unsigned long casreg_len;
@@ -5175,7 +5174,7 @@ err_out_disable_pdev:
return -ENODEV;
}
-static void __devexit cas_remove_one(struct pci_dev *pdev)
+static void cas_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp;
@@ -5273,7 +5272,7 @@ static struct pci_driver cas_driver = {
.name = DRV_MODULE_NAME,
.id_table = cas_pci_tbl,
.probe = cas_init_one,
- .remove = __devexit_p(cas_remove_one),
+ .remove = cas_remove_one,
#ifdef CONFIG_PM
.suspend = cas_suspend,
.resume = cas_resume
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 275b430aeb75..a0bdf0779466 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -38,7 +38,7 @@
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "Apr 22, 2010"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -7977,7 +7977,7 @@ static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
return 0;
}
-static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
+static int niu_pci_eeprom_read(struct niu *np, u32 addr)
{
u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
(addr << ESPC_PIO_STAT_ADDR_SHIFT));
@@ -8020,7 +8020,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}
-static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
+static int niu_pci_eeprom_read16(struct niu *np, u32 off)
{
int err = niu_pci_eeprom_read(np, off);
u16 val;
@@ -8036,7 +8036,7 @@ static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
return val;
}
-static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
+static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
{
int err = niu_pci_eeprom_read(np, off);
u16 val;
@@ -8054,10 +8054,8 @@ static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
return val;
}
-static int __devinit niu_pci_vpd_get_propname(struct niu *np,
- u32 off,
- char *namebuf,
- int namebuf_len)
+static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
+ int namebuf_len)
{
int i;
@@ -8075,7 +8073,7 @@ static int __devinit niu_pci_vpd_get_propname(struct niu *np,
return i + 1;
}
-static void __devinit niu_vpd_parse_version(struct niu *np)
+static void niu_vpd_parse_version(struct niu *np)
{
struct niu_vpd *vpd = &np->vpd;
int len = strlen(vpd->version) + 1;
@@ -8102,8 +8100,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static int __devinit niu_pci_vpd_scan_props(struct niu *np,
- u32 start, u32 end)
+static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
{
unsigned int found_mask = 0;
#define FOUND_MASK_MODEL 0x00000001
@@ -8189,7 +8186,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
+static void niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
@@ -8224,7 +8221,7 @@ static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static u32 __devinit niu_pci_vpd_offset(struct niu *np)
+static u32 niu_pci_vpd_offset(struct niu *np)
{
u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
int err;
@@ -8279,8 +8276,7 @@ static u32 __devinit niu_pci_vpd_offset(struct niu *np)
return 0;
}
-static int __devinit niu_phy_type_prop_decode(struct niu *np,
- const char *phy_prop)
+static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
{
if (!strcmp(phy_prop, "mif")) {
/* 1G copper, MII */
@@ -8334,7 +8330,7 @@ static int niu_pci_vpd_get_nports(struct niu *np)
return ports;
}
-static void __devinit niu_pci_vpd_validate(struct niu *np)
+static void niu_pci_vpd_validate(struct niu *np)
{
struct net_device *dev = np->dev;
struct niu_vpd *vpd = &np->vpd;
@@ -8380,7 +8376,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
}
-static int __devinit niu_pci_probe_sprom(struct niu *np)
+static int niu_pci_probe_sprom(struct niu *np)
{
struct net_device *dev = np->dev;
int len, i;
@@ -8538,7 +8534,7 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
return 0;
}
-static int __devinit niu_get_and_validate_port(struct niu *np)
+static int niu_get_and_validate_port(struct niu *np)
{
struct niu_parent *parent = np->parent;
@@ -8572,10 +8568,8 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
return 0;
}
-static int __devinit phy_record(struct niu_parent *parent,
- struct phy_probe_info *p,
- int dev_id_1, int dev_id_2, u8 phy_port,
- int type)
+static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
+ int dev_id_1, int dev_id_2, u8 phy_port, int type)
{
u32 id = (dev_id_1 << 16) | dev_id_2;
u8 idx;
@@ -8611,7 +8605,7 @@ static int __devinit phy_record(struct niu_parent *parent,
return 0;
}
-static int __devinit port_has_10g(struct phy_probe_info *p, int port)
+static int port_has_10g(struct phy_probe_info *p, int port)
{
int i;
@@ -8627,7 +8621,7 @@ static int __devinit port_has_10g(struct phy_probe_info *p, int port)
return 0;
}
-static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
+static int count_10g_ports(struct phy_probe_info *p, int *lowest)
{
int port, cnt;
@@ -8644,7 +8638,7 @@ static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
return cnt;
}
-static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
+static int count_1g_ports(struct phy_probe_info *p, int *lowest)
{
*lowest = 32;
if (p->cur[PHY_TYPE_MII])
@@ -8653,7 +8647,7 @@ static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
return p->cur[PHY_TYPE_MII];
}
-static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
+static void niu_n2_divide_channels(struct niu_parent *parent)
{
int num_ports = parent->num_ports;
int i;
@@ -8669,8 +8663,8 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
}
}
-static void __devinit niu_divide_channels(struct niu_parent *parent,
- int num_10g, int num_1g)
+static void niu_divide_channels(struct niu_parent *parent,
+ int num_10g, int num_1g)
{
int num_ports = parent->num_ports;
int rx_chans_per_10g, rx_chans_per_1g;
@@ -8731,8 +8725,8 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
}
}
-static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
- int num_10g, int num_1g)
+static void niu_divide_rdc_groups(struct niu_parent *parent,
+ int num_10g, int num_1g)
{
int i, num_ports = parent->num_ports;
int rdc_group, rdc_groups_per_port;
@@ -8776,9 +8770,8 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
}
}
-static int __devinit fill_phy_probe_info(struct niu *np,
- struct niu_parent *parent,
- struct phy_probe_info *info)
+static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
+ struct phy_probe_info *info)
{
unsigned long flags;
int port, err;
@@ -8819,7 +8812,7 @@ static int __devinit fill_phy_probe_info(struct niu *np,
return err;
}
-static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
+static int walk_phys(struct niu *np, struct niu_parent *parent)
{
struct phy_probe_info *info = &parent->phy_probe_info;
int lowest_10g, lowest_1g;
@@ -8948,7 +8941,7 @@ unknown_vg_1g_port:
return -EINVAL;
}
-static int __devinit niu_probe_ports(struct niu *np)
+static int niu_probe_ports(struct niu *np)
{
struct niu_parent *parent = np->parent;
int err, i;
@@ -8969,7 +8962,7 @@ static int __devinit niu_probe_ports(struct niu *np)
return 0;
}
-static int __devinit niu_classifier_swstate_init(struct niu *np)
+static int niu_classifier_swstate_init(struct niu *np)
{
struct niu_classifier *cp = &np->clas;
@@ -8981,7 +8974,7 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
return fflp_early_init(np);
}
-static void __devinit niu_link_config_init(struct niu *np)
+static void niu_link_config_init(struct niu *np)
{
struct niu_link_config *lp = &np->link_config;
@@ -9006,7 +8999,7 @@ static void __devinit niu_link_config_init(struct niu *np)
#endif
}
-static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
+static int niu_init_mac_ipp_pcs_base(struct niu *np)
{
switch (np->port) {
case 0:
@@ -9045,7 +9038,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
return 0;
}
-static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
+static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
struct msix_entry msi_vec[NIU_NUM_LDG];
struct niu_parent *parent = np->parent;
@@ -9084,7 +9077,7 @@ retry:
np->num_ldg = num_irqs;
}
-static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
+static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
struct platform_device *op = np->op;
@@ -9108,7 +9101,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
#endif
}
-static int __devinit niu_ldg_init(struct niu *np)
+static int niu_ldg_init(struct niu *np)
{
struct niu_parent *parent = np->parent;
u8 ldg_num_map[NIU_NUM_LDG];
@@ -9225,13 +9218,13 @@ static int __devinit niu_ldg_init(struct niu *np)
return 0;
}
-static void __devexit niu_ldg_free(struct niu *np)
+static void niu_ldg_free(struct niu *np)
{
if (np->flags & NIU_FLAGS_MSIX)
pci_disable_msix(np->pdev);
}
-static int __devinit niu_get_of_props(struct niu *np)
+static int niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
struct net_device *dev = np->dev;
@@ -9300,7 +9293,7 @@ static int __devinit niu_get_of_props(struct niu *np)
#endif
}
-static int __devinit niu_get_invariants(struct niu *np)
+static int niu_get_invariants(struct niu *np)
{
int err, have_props;
u32 offset;
@@ -9479,9 +9472,8 @@ static struct device_attribute niu_parent_attributes[] = {
{}
};
-static struct niu_parent * __devinit niu_new_parent(struct niu *np,
- union niu_parent_id *id,
- u8 ptype)
+static struct niu_parent *niu_new_parent(struct niu *np,
+ union niu_parent_id *id, u8 ptype)
{
struct platform_device *plat_dev;
struct niu_parent *p;
@@ -9544,9 +9536,8 @@ fail_unregister:
return NULL;
}
-static struct niu_parent * __devinit niu_get_parent(struct niu *np,
- union niu_parent_id *id,
- u8 ptype)
+static struct niu_parent *niu_get_parent(struct niu *np,
+ union niu_parent_id *id, u8 ptype)
{
struct niu_parent *p, *tmp;
int port = np->port;
@@ -9662,7 +9653,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_single = niu_pci_unmap_single,
};
-static void __devinit niu_driver_version(void)
+static void niu_driver_version(void)
{
static int niu_version_printed;
@@ -9670,10 +9661,10 @@ static void __devinit niu_driver_version(void)
pr_info("%s", version);
}
-static struct net_device * __devinit niu_alloc_and_init(
- struct device *gen_dev, struct pci_dev *pdev,
- struct platform_device *op, const struct niu_ops *ops,
- u8 port)
+static struct net_device *niu_alloc_and_init(struct device *gen_dev,
+ struct pci_dev *pdev,
+ struct platform_device *op,
+ const struct niu_ops *ops, u8 port)
{
struct net_device *dev;
struct niu *np;
@@ -9714,14 +9705,14 @@ static const struct net_device_ops niu_netdev_ops = {
.ndo_change_mtu = niu_change_mtu,
};
-static void __devinit niu_assign_netdev_ops(struct net_device *dev)
+static void niu_assign_netdev_ops(struct net_device *dev)
{
dev->netdev_ops = &niu_netdev_ops;
dev->ethtool_ops = &niu_ethtool_ops;
dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
-static void __devinit niu_device_announce(struct niu *np)
+static void niu_device_announce(struct niu *np)
{
struct net_device *dev = np->dev;
@@ -9750,14 +9741,14 @@ static void __devinit niu_device_announce(struct niu *np)
}
}
-static void __devinit niu_set_basic_features(struct net_device *dev)
+static void niu_set_basic_features(struct net_device *dev)
{
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}
-static int __devinit niu_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int niu_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
union niu_parent_id parent_id;
struct net_device *dev;
@@ -9895,7 +9886,7 @@ err_out_disable_pdev:
return err;
}
-static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
+static void niu_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -9980,7 +9971,7 @@ static struct pci_driver niu_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = niu_pci_tbl,
.probe = niu_pci_init_one,
- .remove = __devexit_p(niu_pci_remove_one),
+ .remove = niu_pci_remove_one,
.suspend = niu_suspend,
.resume = niu_resume,
};
@@ -10044,7 +10035,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_single = niu_phys_unmap_single,
};
-static int __devinit niu_of_probe(struct platform_device *op)
+static int niu_of_probe(struct platform_device *op)
{
union niu_parent_id parent_id;
struct net_device *dev;
@@ -10158,7 +10149,7 @@ err_out:
return err;
}
-static int __devexit niu_of_remove(struct platform_device *op)
+static int niu_of_remove(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
@@ -10211,7 +10202,7 @@ static struct platform_driver niu_of_driver = {
.of_match_table = niu_match,
},
.probe = niu_of_probe,
- .remove = __devexit_p(niu_of_remove),
+ .remove = niu_of_remove,
};
#endif /* CONFIG_SPARC64 */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c9c977bf02ac..be82f6d13c51 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1074,8 +1074,8 @@ static const struct net_device_ops bigmac_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit bigmac_ether_init(struct platform_device *op,
- struct platform_device *qec_op)
+static int bigmac_ether_init(struct platform_device *op,
+ struct platform_device *qec_op)
{
static int version_printed;
struct net_device *dev;
@@ -1233,7 +1233,7 @@ fail_and_cleanup:
/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
* the latter.
*/
-static int __devinit bigmac_sbus_probe(struct platform_device *op)
+static int bigmac_sbus_probe(struct platform_device *op)
{
struct device *parent = op->dev.parent;
struct platform_device *qec_op;
@@ -1243,7 +1243,7 @@ static int __devinit bigmac_sbus_probe(struct platform_device *op)
return bigmac_ether_init(op, qec_op);
}
-static int __devexit bigmac_sbus_remove(struct platform_device *op)
+static int bigmac_sbus_remove(struct platform_device *op)
{
struct bigmac *bp = dev_get_drvdata(&op->dev);
struct device *parent = op->dev.parent;
@@ -1286,7 +1286,7 @@ static struct platform_driver bigmac_sbus_driver = {
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
- .remove = __devexit_p(bigmac_sbus_remove),
+ .remove = bigmac_sbus_remove,
};
module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 6c8695ec7cb9..5f3f9d52757d 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -77,7 +77,7 @@
#define DRV_VERSION "1.0"
#define DRV_AUTHOR "David S. Miller <davem@redhat.com>"
-static char version[] __devinitdata =
+static char version[] =
DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
MODULE_AUTHOR(DRV_AUTHOR);
@@ -2763,7 +2763,7 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
}
#endif /* not Sparc and not PPC */
-static int __devinit gem_get_device_address(struct gem *gp)
+static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
struct net_device *dev = gp->dev;
@@ -2827,8 +2827,7 @@ static const struct net_device_ops gem_netdev_ops = {
#endif
};
-static int __devinit gem_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned long gemreg_base, gemreg_len;
struct net_device *dev;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 73f341b8befb..a1bff49a8155 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2499,7 +2499,7 @@ static int hme_version_printed;
*
* Return NULL on failure.
*/
-static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
+static struct quattro *quattro_sbus_find(struct platform_device *child)
{
struct device *parent = child->dev.parent;
struct platform_device *op;
@@ -2580,7 +2580,7 @@ static void quattro_sbus_free_irqs(void)
#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI
-static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
+static struct quattro *quattro_pci_find(struct pci_dev *pdev)
{
struct pci_dev *bdev = pdev->bus->self;
struct quattro *qp;
@@ -2623,7 +2623,7 @@ static const struct net_device_ops hme_netdev_ops = {
};
#ifdef CONFIG_SBUS
-static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
+static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
{
struct device_node *dp = op->dev.of_node, *sbus_dp;
struct quattro *qp = NULL;
@@ -2927,8 +2927,8 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
}
#endif /* !(CONFIG_SPARC) */
-static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int happy_meal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct quattro *qp = NULL;
#ifdef CONFIG_SPARC
@@ -3162,7 +3162,7 @@ err_out:
return err;
}
-static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
+static void happy_meal_pci_remove(struct pci_dev *pdev)
{
struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
struct net_device *net_dev = hp->dev;
@@ -3190,7 +3190,7 @@ static struct pci_driver hme_pci_driver = {
.name = "hme",
.id_table = happymeal_pci_ids,
.probe = happy_meal_pci_probe,
- .remove = __devexit_p(happy_meal_pci_remove),
+ .remove = happy_meal_pci_remove,
};
static int __init happy_meal_pci_init(void)
@@ -3216,7 +3216,7 @@ static void happy_meal_pci_exit(void)
#ifdef CONFIG_SBUS
static const struct of_device_id hme_sbus_match[];
-static int __devinit hme_sbus_probe(struct platform_device *op)
+static int hme_sbus_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct device_node *dp = op->dev.of_node;
@@ -3234,7 +3234,7 @@ static int __devinit hme_sbus_probe(struct platform_device *op)
return happy_meal_sbus_probe_one(op, is_qfe);
}
-static int __devexit hme_sbus_remove(struct platform_device *op)
+static int hme_sbus_remove(struct platform_device *op)
{
struct happy_meal *hp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = hp->dev;
@@ -3284,7 +3284,7 @@ static struct platform_driver hme_sbus_driver = {
.of_match_table = hme_sbus_match,
},
.probe = hme_sbus_probe,
- .remove = __devexit_p(hme_sbus_remove),
+ .remove = hme_sbus_remove,
};
static int __init happy_meal_sbus_init(void)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index aeded7ff1c8f..1dcee6915843 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -744,7 +744,7 @@ static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
qecp->gregs + GLOB_RSIZE);
}
-static u8 __devinit qec_get_burst(struct device_node *dp)
+static u8 qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
@@ -764,7 +764,7 @@ static u8 __devinit qec_get_burst(struct device_node *dp)
return bsizes;
}
-static struct sunqec * __devinit get_qec(struct platform_device *child)
+static struct sunqec *get_qec(struct platform_device *child)
{
struct platform_device *op = to_platform_device(child->dev.parent);
struct sunqec *qecp;
@@ -830,7 +830,7 @@ static const struct net_device_ops qec_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit qec_ether_init(struct platform_device *op)
+static int qec_ether_init(struct platform_device *op)
{
static unsigned version_printed;
struct net_device *dev;
@@ -929,12 +929,12 @@ fail:
return res;
}
-static int __devinit qec_sbus_probe(struct platform_device *op)
+static int qec_sbus_probe(struct platform_device *op)
{
return qec_ether_init(op);
}
-static int __devexit qec_sbus_remove(struct platform_device *op)
+static int qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = qp->dev;
@@ -971,7 +971,7 @@ static struct platform_driver qec_sbus_driver = {
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
- .remove = __devexit_p(qec_sbus_remove),
+ .remove = qec_sbus_remove,
};
static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a108db35924e..e1b895530827 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -25,7 +25,7 @@
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"
-static char version[] __devinitdata =
+static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
@@ -937,7 +937,7 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
}
}
-static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
+static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
struct vio_dring_state *dr;
unsigned long len;
@@ -1019,7 +1019,7 @@ static const struct net_device_ops vnet_ops = {
.ndo_start_xmit = vnet_start_xmit,
};
-static struct vnet * __devinit vnet_new(const u64 *local_mac)
+static struct vnet *vnet_new(const u64 *local_mac)
{
struct net_device *dev;
struct vnet *vp;
@@ -1067,7 +1067,7 @@ err_out_free_dev:
return ERR_PTR(err);
}
-static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+static struct vnet *vnet_find_or_create(const u64 *local_mac)
{
struct vnet *iter, *vp;
@@ -1088,7 +1088,7 @@ static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
static const char *local_mac_prop = "local-mac-address";
-static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
u64 port_node)
{
const u64 *local_mac = NULL;
@@ -1125,15 +1125,14 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = vnet_handshake_complete,
};
-static void __devinit print_version(void)
+static void print_version(void)
{
printk_once(KERN_INFO "%s", version);
}
const char *remote_macaddr_prop = "remote-mac-address";
-static int __devinit vnet_port_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
+static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vnet_port *port;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ce9edd95c04..1e4d743ff03e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1914,7 +1914,7 @@ static const struct net_device_ops bdx_netdev_ops = {
*/
/* TBD: netif_msg should be checked and implemented. I disable it for now */
-static int __devinit
+static int
bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *ndev;
@@ -2427,7 +2427,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit bdx_remove(struct pci_dev *pdev)
+static void bdx_remove(struct pci_dev *pdev)
{
struct pci_nic *nic = pci_get_drvdata(pdev);
struct net_device *ndev;
@@ -2458,7 +2458,7 @@ static struct pci_driver bdx_pci_driver = {
.name = BDX_DRV_NAME,
.id_table = bdx_pci_tbl,
.probe = bdx_probe,
- .remove = __devexit_p(bdx_remove),
+ .remove = bdx_remove,
};
/*
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 2c41894d5472..4426151d4ac9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -60,6 +60,15 @@ config TI_CPSW
To compile this driver as a module, choose M here: the module
will be called cpsw.
+config TI_CPTS
+ boolean "TI Common Platform Time Sync (CPTS) Support"
+ depends on TI_CPSW
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports the Common Platform Time Sync unit of
+ the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
+ and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+
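
With TI_CPTS enabled the driver registers a PTP Hardware Clock (see the new cpts.c further down), which can then be driven from userspace, for instance with the linuxptp ptp4l tool in hardware time stamping mode (ptp4l -H -i eth0, where eth0 only stands in for whatever the CPSW interface is called on the target).
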
config TLAN
tristate "TI ThunderLAN support"
depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 91bd8bba78ff..c65148e8aa1d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o
+ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 860c2526f08d..d9625f62b026 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1110,7 +1110,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
static int external_switch;
-static int __devinit cpmac_probe(struct platform_device *pdev)
+static int cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id;
char mdio_bus_id[MII_BUS_ID_SIZE];
@@ -1204,7 +1204,7 @@ fail:
return rc;
}
-static int __devexit cpmac_remove(struct platform_device *pdev)
+static int cpmac_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
@@ -1216,10 +1216,10 @@ static struct platform_driver cpmac_driver = {
.driver.name = "cpmac",
.driver.owner = THIS_MODULE,
.probe = cpmac_probe,
- .remove = __devexit_p(cpmac_remove),
+ .remove = cpmac_remove,
};
-int __devinit cpmac_init(void)
+int cpmac_init(void)
{
u32 mask;
int i, res;
@@ -1290,7 +1290,7 @@ fail_alloc:
return res;
}
-void __devexit cpmac_exit(void)
+void cpmac_exit(void)
{
platform_driver_unregister(&cpmac_driver);
mdiobus_unregister(cpmac_mii);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df55e2403746..40aff684aa23 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -24,6 +24,7 @@
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
@@ -35,6 +36,7 @@
#include <linux/platform_data/cpsw.h>
#include "cpsw_ale.h"
+#include "cpts.h"
#include "davinci_cpdma.h"
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -70,10 +72,37 @@ do { \
dev_notice(priv->dev, format, ## __VA_ARGS__); \
} while (0)
+#define ALE_ALL_PORTS 0x7
+
#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+#define CPSW_VERSION_1 0x19010a
+#define CPSW_VERSION_2 0x19010c
+
+#define HOST_PORT_NUM 0
+#define SLIVER_SIZE 0x40
+
+#define CPSW1_HOST_PORT_OFFSET 0x028
+#define CPSW1_SLAVE_OFFSET 0x050
+#define CPSW1_SLAVE_SIZE 0x040
+#define CPSW1_CPDMA_OFFSET 0x100
+#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_CPTS_OFFSET 0x500
+#define CPSW1_ALE_OFFSET 0x600
+#define CPSW1_SLIVER_OFFSET 0x700
+
+#define CPSW2_HOST_PORT_OFFSET 0x108
+#define CPSW2_SLAVE_OFFSET 0x200
+#define CPSW2_SLAVE_SIZE 0x100
+#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_STATERAM_OFFSET 0xa00
+#define CPSW2_CPTS_OFFSET 0xc00
+#define CPSW2_ALE_OFFSET 0xd00
+#define CPSW2_SLIVER_OFFSET 0xd80
+#define CPSW2_BD_OFFSET 0x2000
+
#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP 0x00
@@ -81,21 +110,6 @@ do { \
#define CPDMA_TXCP 0x40
#define CPDMA_RXCP 0x60
-#define cpsw_dma_regs(base, offset) \
- (void __iomem *)((base) + (offset))
-#define cpsw_dma_rxthresh(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
-#define cpsw_dma_rxfree(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXFREE)
-#define cpsw_dma_txhdp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_TXHDP)
-#define cpsw_dma_rxhdp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXHDP)
-#define cpsw_dma_txcp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_TXCP)
-#define cpsw_dma_rxcp(base, offset) \
- (void __iomem *)((base) + (offset) + CPDMA_RXCP)
-
#define CPSW_POLL_WEIGHT 64
#define CPSW_MIN_PACKET_SIZE 60
#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
@@ -129,7 +143,7 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
-struct cpsw_ss_regs {
+struct cpsw_wr_regs {
u32 id_ver;
u32 soft_reset;
u32 control;
@@ -140,26 +154,98 @@ struct cpsw_ss_regs {
u32 misc_en;
};
-struct cpsw_regs {
+struct cpsw_ss_regs {
u32 id_ver;
u32 control;
u32 soft_reset;
u32 stat_port_en;
u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 vlan_ltype;
+ u32 ts_ltype;
+ u32 dlr_ltype;
};
-struct cpsw_slave_regs {
- u32 max_blks;
- u32 blk_cnt;
- u32 flow_thresh;
- u32 port_vlan;
- u32 tx_pri_map;
- u32 ts_ctl;
- u32 ts_seq_ltype;
- u32 ts_vlan;
- u32 sa_lo;
- u32 sa_hi;
-};
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL 0x00 /* Control Register */
+#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
+#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
+#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
+#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
+#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+
+#define CTRL_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
+ TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
+#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN BIT(0)
+#define CPSW_V1_TS_TX_EN BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS 16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
struct cpsw_host_regs {
u32 max_blks;
@@ -185,7 +271,7 @@ struct cpsw_sliver_regs {
};
struct cpsw_slave {
- struct cpsw_slave_regs __iomem *regs;
+ void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
int slave_num;
u32 mac_control;
@@ -193,19 +279,30 @@ struct cpsw_slave {
struct phy_device *phy;
};
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+ return __raw_readl(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+ __raw_writel(val, slave->regs + offset);
+}
+
struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
struct resource *cpsw_res;
- struct resource *cpsw_ss_res;
+ struct resource *cpsw_wr_res;
struct napi_struct napi;
struct device *dev;
struct cpsw_platform_data data;
- struct cpsw_regs __iomem *regs;
- struct cpsw_ss_regs __iomem *ss_regs;
+ struct cpsw_ss_regs __iomem *regs;
+ struct cpsw_wr_regs __iomem *wr_regs;
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
+ u32 version;
struct net_device_stats stats;
int rx_packet_max;
int host_port;
@@ -218,6 +315,7 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
+ struct cpts cpts;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +326,34 @@ struct cpsw_priv {
(func)((priv)->slaves + idx, ##arg); \
} while (0)
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+ return;
+ }
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+ cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
+ ALE_ALL_PORTS << priv->host_port, 0, 0);
+ }
+ }
+}
+
static void cpsw_intr_enable(struct cpsw_priv *priv)
{
- __raw_writel(0xFF, &priv->ss_regs->tx_en);
- __raw_writel(0xFF, &priv->ss_regs->rx_en);
+ __raw_writel(0xFF, &priv->wr_regs->tx_en);
+ __raw_writel(0xFF, &priv->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(priv->dma, true);
return;
@@ -239,8 +361,8 @@ static void cpsw_intr_enable(struct cpsw_priv *priv)
static void cpsw_intr_disable(struct cpsw_priv *priv)
{
- __raw_writel(0, &priv->ss_regs->tx_en);
- __raw_writel(0, &priv->ss_regs->rx_en);
+ __raw_writel(0, &priv->wr_regs->tx_en);
+ __raw_writel(0, &priv->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(priv->dma, false);
return;
@@ -254,6 +376,7 @@ void cpsw_tx_handler(void *token, int len, int status)
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
+ cpts_tx_timestamp(&priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -274,6 +397,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
+ cpts_rx_timestamp(&priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -359,8 +483,8 @@ static inline void soft_reset(const char *module, void __iomem *reg)
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
struct cpsw_priv *priv)
{
- __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
- __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}
static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +570,15 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
/* setup priority mapping */
__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
- __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ break;
+ case CPSW_VERSION_2:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ break;
+ }
/* setup max packet size, and mac address */
__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -505,7 +637,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
pm_runtime_get_sync(&priv->pdev->dev);
- reg = __raw_readl(&priv->regs->id_ver);
+ reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -566,12 +698,12 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
@@ -592,6 +724,11 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+
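
The SKBTX_HW_TSTAMP check above only fires for sockets that actually requested hardware TX timestamps, so the common transmit path is untouched. A minimal userspace sketch of that request, assuming sock is an already-created PTP event socket and that the installed headers expose SO_TIMESTAMPING:

    #include <linux/net_tstamp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Ask the stack for hardware TX timestamps on an existing socket. */
    static int request_hw_tx_timestamps(int sock)
    {
        int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                    SOF_TIMESTAMPING_RAW_HARDWARE;

        if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                       &flags, sizeof(flags)) < 0) {
            perror("SO_TIMESTAMPING");
            return -1;
        }
        return 0;
    }

Once the flag is set, the driver marks the skb SKBTX_IN_PROGRESS here and cpts_tx_timestamp() later delivers the hardware stamp through skb_tstamp_tx().
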
ret = cpdma_chan_submit(priv->txch, skb, skb->data,
skb->len, GFP_KERNEL);
if (unlikely(ret != 0)) {
@@ -629,6 +766,129 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}
+#ifdef CONFIG_TI_CPTS
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ u32 ts_en, seq_id;
+
+ if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->cpts.tx_enable)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->cpts.rx_enable)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
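
In cpsw_hwtstamp_v1() above and cpsw_hwtstamp_v2() below, the constant 30 programmed into the sequence-ID offset field is the byte offset of the sequenceId field in the PTP common header (the value ptp_classify.h exposes as OFF_PTP_SEQUENCE_ID); it is what lets cpts_match() in cpts.c pair a hardware event with a packet by message type and sequence ID.
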
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ u32 ctrl, mtype;
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ ctrl &= ~CTRL_ALL_TS_MASK;
+
+ if (priv->cpts.tx_enable)
+ ctrl |= CTRL_TX_TS_BITS;
+
+ if (priv->cpts.rx_enable)
+ ctrl |= CTRL_RX_TS_BITS;
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+}
+
+static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpts *cpts = &priv->cpts;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ cpts->tx_enable = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ cpts->tx_enable = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ cpts->rx_enable = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cpts->rx_enable = 1;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+
+#ifdef CONFIG_TI_CPTS
+ if (cmd == SIOCSHWTSTAMP)
+ return cpsw_hwtstamp_ioctl(dev, req);
+#endif
+ return -ENOTSUPP;
+}
+
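
cpsw_hwtstamp_ioctl() follows the standard SIOCSHWTSTAMP contract: userspace passes a struct hwtstamp_config and the driver writes back what it actually enabled (here, any PTPv2 filter is granted as HWTSTAMP_FILTER_PTP_V2_EVENT). A minimal, self-contained sketch of the userspace side; the interface name eth0 is only an example:

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type   = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* example name */
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
            perror("SIOCSHWTSTAMP");
            return 1;
        }
        /* cfg.rx_filter now holds the filter the driver really granted. */
        printf("granted rx_filter: %d\n", cfg.rx_filter);
        return 0;
    }
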
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +929,12 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_get_stats = cpsw_ndo_get_stats,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
@@ -699,22 +961,56 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static int cpsw_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+#ifdef CONFIG_TI_CPTS
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = priv->cpts.phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+#else
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+#endif
+ return 0;
+}
+
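
The phc_index reported by cpsw_get_ts_info() (visible via ethtool -T or the ETHTOOL_GET_TS_INFO ioctl) names the /dev/ptpN character device behind the cpts clock. A sketch of reading that clock directly, assuming the index turned out to be 0; FD_TO_CLOCKID is the conventional dynamic posix-clock macro, not something this patch defines:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Map a /dev/ptpN file descriptor to a dynamic posix clock id. */
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

    int main(void)
    {
        struct timespec ts;
        int fd = open("/dev/ptp0", O_RDONLY);   /* index from phc_index */

        if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
            perror("ptp clock");
            return 1;
        }
        printf("cpts time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        close(fd);
        return 0;
    }
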
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
+ u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
void __iomem *regs = priv->regs;
int slave_num = slave->slave_num;
struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
slave->data = data;
- slave->regs = regs + data->slave_reg_ofs;
- slave->sliver = regs + data->sliver_reg_ofs;
+ slave->regs = regs + slave_reg_ofs;
+ slave->sliver = regs + sliver_reg_ofs;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -734,49 +1030,40 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->slaves = prop;
- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
- data->slaves, GFP_KERNEL);
- if (!data->slave_data) {
- pr_err("Could not allocate slave memory.\n");
- return -EINVAL;
- }
-
- data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
-
- if (of_property_read_u32(node, "cpdma_channels", &prop)) {
- pr_err("Missing cpdma_channels property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
+ pr_err("Missing cpts_active_slave property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->channels = prop;
+ data->cpts_active_slave = prop;
- if (of_property_read_u32(node, "host_port_no", &prop)) {
- pr_err("Missing host_port_no property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
+ pr_err("Missing cpts_clock_mult property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->host_port_num = prop;
+ data->cpts_clock_mult = prop;
- if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
- pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+ if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
+ pr_err("Missing cpts_clock_shift property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->cpdma_reg_ofs = prop;
+ data->cpts_clock_shift = prop;
- if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
- pr_err("Missing cpdma_sram_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
+ data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+ data->slaves, GFP_KERNEL);
+ if (!data->slave_data) {
+ pr_err("Could not allocate slave memory.\n");
+ return -EINVAL;
}
- data->cpdma_sram_ofs = prop;
- if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
- pr_err("Missing ale_reg_ofs property in the DT.\n");
+ if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+ pr_err("Missing cpdma_channels property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->ale_reg_ofs = prop;
+ data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
pr_err("Missing ale_entries property in the DT.\n");
@@ -785,27 +1072,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->ale_entries = prop;
- if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
- pr_err("Missing host_port_reg_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->host_port_reg_ofs = prop;
-
- if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
- pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->hw_stats_reg_ofs = prop;
-
- if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
- pr_err("Missing bd_ram_ofs property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- data->bd_ram_ofs = prop;
-
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
pr_err("Missing bd_ram_size property in the DT.\n");
ret = -EINVAL;
@@ -827,33 +1093,34 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
- for_each_child_of_node(node, slave_node) {
+ /*
+ * Populate all the child nodes here...
+ */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ /* Do not force this; some setups may not have any child nodes. */
+ if (ret)
+ pr_warn("Doesn't have any child node\n");
+
+ for_each_node_by_name(slave_node, "slave") {
struct cpsw_slave_data *slave_data = data->slave_data + i;
- const char *phy_id = NULL;
const void *mac_addr = NULL;
-
- if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+ u32 phyid;
+ int lenp;
+ const __be32 *parp;
+ struct device_node *mdio_node;
+ struct platform_device *mdio;
+
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+ if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
ret = -EINVAL;
goto error_ret;
}
- slave_data->phy_id = phy_id;
-
- if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
- pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
- ret = -EINVAL;
- goto error_ret;
- }
- slave_data->slave_reg_ofs = prop;
-
- if (of_property_read_u32(slave_node, "sliver_reg_ofs",
- &prop)) {
- pr_err("Missing slave[%d] sliver_reg_ofs property\n",
- i);
- ret = -EINVAL;
- goto error_ret;
- }
- slave_data->sliver_reg_ofs = prop;
+ mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ phyid = be32_to_cpup(parp+1);
+ mdio = of_find_device_by_node(mdio_node);
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
@@ -869,15 +1136,16 @@ error_ret:
return ret;
}
-static int __devinit cpsw_probe(struct platform_device *pdev)
+static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
struct net_device *ndev;
struct cpsw_priv *priv;
struct cpdma_params dma_params;
struct cpsw_ale_params ale_params;
- void __iomem *regs;
+ void __iomem *ss_regs, *wr_regs;
struct resource *res;
+ u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i, k = 0;
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
@@ -895,6 +1163,11 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ /*
+ * Enable runtime PM early so that child devices populated by cpsw_probe_dt() can use it.
+ */
+ pm_runtime_enable(&pdev->dev);
+
if (cpsw_probe_dt(&priv->data, pdev)) {
pr_err("cpsw: platform data missing\n");
ret = -ENODEV;
@@ -921,7 +1194,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
- pm_runtime_enable(&pdev->dev);
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -935,63 +1207,86 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
ret = -ENOENT;
goto clean_clk_ret;
}
-
if (!request_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res), ndev->name)) {
dev_err(priv->dev, "failed request i/o region\n");
ret = -ENXIO;
goto clean_clk_ret;
}
-
- regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
- if (!regs) {
+ ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
+ if (!ss_regs) {
dev_err(priv->dev, "unable to map i/o region\n");
goto clean_cpsw_iores_ret;
}
- priv->regs = regs;
- priv->host_port = data->host_port_num;
- priv->host_port_regs = regs + data->host_port_reg_ofs;
+ priv->regs = ss_regs;
+ priv->version = __raw_readl(&priv->regs->id_ver);
+ priv->host_port = HOST_PORT_NUM;
- priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!priv->cpsw_ss_res) {
+ priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!priv->cpsw_wr_res) {
dev_err(priv->dev, "error getting i/o resource\n");
ret = -ENOENT;
- goto clean_clk_ret;
+ goto clean_iomap_ret;
}
-
- if (!request_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res), ndev->name)) {
+ if (!request_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res), ndev->name)) {
dev_err(priv->dev, "failed request i/o region\n");
ret = -ENXIO;
- goto clean_clk_ret;
+ goto clean_iomap_ret;
}
-
- regs = ioremap(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
- if (!regs) {
+ wr_regs = ioremap(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
+ if (!wr_regs) {
dev_err(priv->dev, "unable to map i/o region\n");
- goto clean_cpsw_ss_iores_ret;
+ goto clean_cpsw_wr_iores_ret;
}
- priv->ss_regs = regs;
-
- for_each_slave(priv, cpsw_slave_init, priv);
+ priv->wr_regs = wr_regs;
memset(&dma_params, 0, sizeof(dma_params));
+ memset(&ale_params, 0, sizeof(ale_params));
+
+ switch (priv->version) {
+ case CPSW_VERSION_1:
+ priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+ slave_offset = CPSW1_SLAVE_OFFSET;
+ slave_size = CPSW1_SLAVE_SIZE;
+ sliver_offset = CPSW1_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = 0;
+ break;
+ case CPSW_VERSION_2:
+ priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+ slave_offset = CPSW2_SLAVE_OFFSET;
+ slave_size = CPSW2_SLAVE_SIZE;
+ sliver_offset = CPSW2_SLIVER_OFFSET;
+ dma_params.desc_mem_phys =
+ (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+ break;
+ default:
+ dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
+ ret = -ENODEV;
+ goto clean_cpsw_wr_iores_ret;
+ }
+ for (i = 0; i < priv->data.slaves; i++) {
+ struct cpsw_slave *slave = &priv->slaves[i];
+ cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
+ slave_offset += slave_size;
+ sliver_offset += SLIVER_SIZE;
+ }
+
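
With the offsets defined earlier, the slave-init loop just above lays the per-slave windows out arithmetically: on a version 2 switch, slave 0's port registers sit at ss_regs + 0x200 and its sliver at ss_regs + 0xd80, slave 1 at 0x300 and 0xdc0, stepping by 0x100 and 0x40 respectively; on version 1 the same loop walks from 0x050 and 0x700 in 0x40 steps for both.
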
dma_params.dev = &pdev->dev;
- dma_params.dmaregs = cpsw_dma_regs((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.rxthresh = cpsw_dma_rxthresh((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.rxfree = cpsw_dma_rxfree((u32)priv->regs,
- data->cpdma_reg_ofs);
- dma_params.txhdp = cpsw_dma_txhdp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.rxhdp = cpsw_dma_rxhdp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.txcp = cpsw_dma_txcp((u32)priv->regs,
- data->cpdma_sram_ofs);
- dma_params.rxcp = cpsw_dma_rxcp((u32)priv->regs,
- data->cpdma_sram_ofs);
+ dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
+ dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
+ dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
+ dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
+ dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
dma_params.num_chan = data->channels;
dma_params.has_soft_reset = true;
@@ -999,16 +1294,13 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
dma_params.desc_mem_size = data->bd_ram_size;
dma_params.desc_align = 16;
dma_params.has_ext_regs = true;
- dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
- (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
- dma_params.desc_hw_addr = data->hw_ram_addr ?
- data->hw_ram_addr : dma_params.desc_mem_phys ;
+ dma_params.desc_hw_addr = dma_params.desc_mem_phys;
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_iomap_ret;
+ goto clean_wr_iomap_ret;
}
priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -1022,10 +1314,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
goto clean_dma_ret;
}
- memset(&ale_params, 0, sizeof(ale_params));
ale_params.dev = &ndev->dev;
- ale_params.ale_regs = (void *)((u32)priv->regs) +
- ((u32)data->ale_reg_ofs);
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = data->slaves;
@@ -1072,6 +1361,10 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
+ if (cpts_register(&pdev->dev, &priv->cpts,
+ data->cpts_clock_mult, data->cpts_clock_shift))
+ dev_err(priv->dev, "error registering cpts device\n");
+
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
@@ -1085,11 +1378,13 @@ clean_dma_ret:
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
+clean_wr_iomap_ret:
+ iounmap(priv->wr_regs);
+clean_cpsw_wr_iores_ret:
+ release_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
clean_iomap_ret:
iounmap(priv->regs);
-clean_cpsw_ss_iores_ret:
- release_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
clean_cpsw_iores_ret:
release_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res));
@@ -1103,7 +1398,7 @@ clean_ndev_ret:
return ret;
}
-static int __devexit cpsw_remove(struct platform_device *pdev)
+static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1111,6 +1406,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
+ cpts_unregister(&priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
@@ -1119,8 +1415,9 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
iounmap(priv->regs);
release_mem_region(priv->cpsw_res->start,
resource_size(priv->cpsw_res));
- release_mem_region(priv->cpsw_ss_res->start,
- resource_size(priv->cpsw_ss_res));
+ iounmap(priv->wr_regs);
+ release_mem_region(priv->cpsw_wr_res->start,
+ resource_size(priv->cpsw_wr_res));
pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
@@ -1170,7 +1467,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = of_match_ptr(cpsw_of_mtable),
},
.probe = cpsw_probe,
- .remove = __devexit_p(cpsw_remove),
+ .remove = cpsw_remove,
};
static int __init cpsw_init(void)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ca0d48a7e508..0e9ccc2cf91f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include <linux/etherdevice.h>
#include "cpsw_ale.h"
@@ -211,10 +212,34 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
mask &= ~port_mask;
/* free if only remaining port is host port */
- if (mask == BIT(ale->params.ale_ports))
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
- else
+ if (mask)
cpsw_ale_set_port_mask(ale_entry, mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int ret, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ ret = cpsw_ale_get_entry_type(ale_entry);
+ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ continue;
+
+ if (cpsw_ale_get_mcast(ale_entry)) {
+ u8 addr[6];
+
+ cpsw_ale_get_addr(ale_entry, addr);
+ if (!is_broadcast_ether_addr(addr))
+ cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+ }
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+ return 0;
}
static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a95b37beb02d..2bd09cbce522 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 000000000000..463597f919f1
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,424 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include "cpts.h"
+
+#ifdef CONFIG_TI_CPTS
+
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
+#define cpts_read32(c, r) __raw_readl(&c->reg->r)
+#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
+
+static int event_expired(struct cpts_event *event)
+{
+ return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+ return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+ u32 r = cpts_read32(cpts, intstat_raw);
+
+ if (r & TS_PEND_RAW) {
+ *high = cpts_read32(cpts, event_high);
+ *low = cpts_read32(cpts, event_low);
+ cpts_write32(cpts, EVENT_POP, event_pop);
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * Returns zero if matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+ int i, type = -1;
+ u32 hi, lo;
+ struct cpts_event *event;
+
+ for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+ if (cpts_fifo_pop(cpts, &hi, &lo))
+ break;
+ if (list_empty(&cpts->pool)) {
+ pr_err("cpts: event pool is empty\n");
+ return -1;
+ }
+ event = list_first_entry(&cpts->pool, struct cpts_event, list);
+ event->tmo = jiffies + 2;
+ event->high = hi;
+ event->low = lo;
+ type = event_type(event);
+ switch (type) {
+ case CPTS_EV_PUSH:
+ case CPTS_EV_RX:
+ case CPTS_EV_TX:
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+ break;
+ case CPTS_EV_ROLL:
+ case CPTS_EV_HALF:
+ case CPTS_EV_HW:
+ break;
+ default:
+ pr_err("cpts: unkown event type\n");
+ break;
+ }
+ if (type == match)
+ break;
+ }
+ return type == match ? 0 : -1;
+}
+
+static cycle_t cpts_systim_read(const struct cyclecounter *cc)
+{
+ u64 val = 0;
+ struct cpts_event *event;
+ struct list_head *this, *next;
+ struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+ cpts_write32(cpts, TS_PUSH, ts_push);
+ if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
+ pr_err("cpts: unable to obtain a time stamp\n");
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_type(event) == CPTS_EV_PUSH) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ val = event->low;
+ break;
+ }
+ }
+
+ return val;
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, mult;
+ int neg_adj = 0;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ mult = cpts->cc_mult;
+ adj = mult;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+
+ timecounter_read(&cpts->tc);
+
+ cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
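
cpts_ptp_adjfreq() is the usual scaled-ppb correction: the amount added to or subtracted from the nominal multiplier is cc_mult * ppb / 10^9. As a worked example with an assumed nominal cc_mult of 0x80000000 (2147483648, not a value this patch fixes), a request of +100 ppb gives adj = 214748364800 and diff = adj / 10^9 = 214, so cc.mult becomes the nominal value plus 214 and the synthesized time runs roughly 100 parts per billion fast until the next adjustment.
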
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ now = timecounter_read(&cpts->tc);
+ now += delta;
+ timecounter_init(&cpts->tc, &cpts->cc, now);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ns = timecounter_read(&cpts->tc);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ timecounter_init(&cpts->tc, &cpts->cc, ns);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info cpts_info = {
+ .owner = THIS_MODULE,
+ .name = "CPTS timer",
+ .max_adj = 1000000,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfreq = cpts_ptp_adjfreq,
+ .adjtime = cpts_ptp_adjtime,
+ .gettime = cpts_ptp_gettime,
+ .settime = cpts_ptp_settime,
+ .enable = cpts_ptp_enable,
+};
+
+static void cpts_overflow_check(struct work_struct *work)
+{
+ struct timespec ts;
+ struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
+
+ cpts_write32(cpts, CPTS_EN, control);
+ cpts_write32(cpts, TS_PEND_EN, int_enable);
+ cpts_ptp_gettime(&cpts->info, &ts);
+ pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+}
+
+#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
+
+static void cpts_clk_init(struct cpts *cpts)
+{
+ cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ if (IS_ERR(cpts->refclk)) {
+ pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ cpts->refclk = NULL;
+ return;
+ }
+ clk_prepare_enable(cpts->refclk);
+}
+
+static void cpts_clk_release(struct cpts *cpts)
+{
+ clk_disable(cpts->refclk);
+ clk_put(cpts->refclk);
+}
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+ u16 ts_seqid, u8 ts_msgtype)
+{
+ u16 *seqid;
+ unsigned int offset;
+ u8 *msgtype, *data = skb->data;
+
+ switch (ptp_class) {
+ case PTP_CLASS_V1_IPV4:
+ case PTP_CLASS_V2_IPV4:
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ case PTP_CLASS_V2_IPV6:
+ offset = OFF_PTP6;
+ break;
+ case PTP_CLASS_V2_L2:
+ offset = ETH_HLEN;
+ break;
+ case PTP_CLASS_V2_VLAN:
+ offset = ETH_HLEN + VLAN_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+}
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+{
+ u64 ns = 0;
+ struct cpts_event *event;
+ struct list_head *this, *next;
+ unsigned int class = sk_run_filter(skb, ptp_filter);
+ unsigned long flags;
+ u16 seqid;
+ u8 mtype;
+
+ if (class == PTP_CLASS_NONE)
+ return 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ cpts_fifo_read(cpts, CPTS_EV_PUSH);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ continue;
+ }
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+ if (ev_type == event_type(event) &&
+ cpts_match(skb, class, seqid, mtype)) {
+ ns = timecounter_cyc2time(&cpts->tc, event->low);
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps *ssh;
+
+ if (!cpts->rx_enable)
+ return;
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+ if (!ns)
+ return;
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ u64 ns;
+ struct skb_shared_hwtstamps ssh;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
+ if (!ns)
+ return;
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+int cpts_register(struct device *dev, struct cpts *cpts,
+ u32 mult, u32 shift)
+{
+#ifdef CONFIG_TI_CPTS
+ int err, i;
+ unsigned long flags;
+
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+ pr_err("cpts: bad ptp filter\n");
+ return -EINVAL;
+ }
+ cpts->info = cpts_info;
+ cpts->clock = ptp_clock_register(&cpts->info, dev);
+ if (IS_ERR(cpts->clock)) {
+ err = PTR_ERR(cpts->clock);
+ cpts->clock = NULL;
+ return err;
+ }
+ spin_lock_init(&cpts->lock);
+
+ cpts->cc.read = cpts_systim_read;
+ cpts->cc.mask = CLOCKSOURCE_MASK(32);
+ cpts->cc_mult = mult;
+ cpts->cc.mult = mult;
+ cpts->cc.shift = shift;
+
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ for (i = 0; i < CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts_clk_init(cpts);
+ cpts_write32(cpts, CPTS_EN, control);
+ cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+ schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+
+ cpts->phc_index = ptp_clock_index(cpts->clock);
+#endif
+ return 0;
+}
+
+void cpts_unregister(struct cpts *cpts)
+{
+#ifdef CONFIG_TI_CPTS
+ if (cpts->clock) {
+ ptp_clock_unregister(cpts->clock);
+ cancel_delayed_work_sync(&cpts->overflow_work);
+ }
+ if (cpts->refclk)
+ cpts_clk_release(cpts);
+#endif
+}
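
cpts_register() takes the cyclecounter scaling straight from the new cpts_clock_mult and cpts_clock_shift DT properties, with time advancing as ns = cycles * mult >> shift. A small sketch of how such a pair can be derived for a given reference clock; the 250 MHz figure is only an assumed example, not something this patch specifies:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t freq  = 250000000ULL;   /* assumed CPTS refclk in Hz */
        uint32_t shift = 29;             /* chosen scaling shift      */
        uint32_t mult  = (uint32_t)((1000000000ULL << shift) / freq);

        /* Here mult works out to 0x80000000, i.e. 4 ns per cycle. */
        printf("cpts_clock_mult = 0x%x, cpts_clock_shift = %u\n", mult, shift);
        return 0;
    }
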
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 000000000000..fe993cdd7e23
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,145 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+
+struct cpsw_cpts {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 res1;
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val; /* Time stamp load value */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 res2[2];
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 res3;
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_low; /* 32 Bit Event Time Stamp */
+ u32 event_high; /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT (16) /* TX Identification Value */
+#define TX_IDENT_MASK (0xffff)
+#define RTL_VER_SHIFT (11) /* RTL Version Value */
+#define RTL_VER_MASK (0x1f)
+#define MAJOR_VER_SHIFT (8) /* Major Version Value */
+#define MAJOR_VER_MASK (0x7)
+#define MINOR_VER_SHIFT (0) /* Minor Version Value */
+#define MINOR_VER_MASK (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */
+#define INT_TEST (1<<1) /* Interrupt Test */
+#define CPTS_EN (1<<0) /* Time Sync Enable */
+
+/*
+ * Definitions for the single bit registers:
+ * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH (1<<0) /* Time stamp event push */
+#define TS_LOAD_EN (1<<0) /* Time Stamp Load */
+#define TS_PEND_RAW (1<<0) /* int read (before enable) */
+#define TS_PEND (1<<0) /* masked interrupt read (after enable) */
+#define TS_PEND_EN (1<<0) /* masked interrupt enable */
+#define EVENT_POP (1<<0) /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK (0x1f)
+#define EVENT_TYPE_SHIFT (20) /* Time sync event type */
+#define EVENT_TYPE_MASK (0xf)
+#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */
+#define MESSAGE_TYPE_MASK (0xf)
+#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK (0xffff)
+
+enum {
+ CPTS_EV_PUSH, /* Time Stamp Push Event */
+ CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ CPTS_EV_RX, /* Ethernet Receive Event */
+ CPTS_EV_TX, /* Ethernet Transmit Event */
+};
+
+/* This covers any input clock up to about 500 MHz. */
+#define CPTS_OVERFLOW_PERIOD (HZ * 8)
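
The 8 second period follows from the 32-bit event counter: at 500 MHz it wraps every 2^32 / 5*10^8, roughly 8.6 seconds, so sampling the time at least once per CPTS_OVERFLOW_PERIOD keeps the timecounter from missing a rollover, and slower reference clocks only widen that margin.
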
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 high;
+ u32 low;
+};
+
+struct cpts {
+ struct cpsw_cpts __iomem *reg;
+ int tx_enable;
+ int rx_enable;
+#ifdef CONFIG_TI_CPTS
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ spinlock_t lock; /* protects time registers */
+ u32 cc_mult; /* for the nominal frequency */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work overflow_work;
+ int phc_index;
+ struct clk *refclk;
+ struct list_head events;
+ struct list_head pool;
+ struct cpts_event pool_data[CPTS_MAX_EVENTS];
+#endif
+};
+
+#ifdef CONFIG_TI_CPTS
+extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+#else
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+#endif
+
+extern int cpts_register(struct device *dev, struct cpts *cpts,
+ u32 mult, u32 shift);
+extern void cpts_unregister(struct cpts *cpts);
+
+#endif
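A small standalone sketch (illustrative only; the 250 MHz reference clock and shift value of 29 are assumed examples, not values taken from this series) of the arithmetic behind the mult/shift pair passed to cpts_register() and behind the comment that an 8 second overflow period covers input clocks up to about 500 MHz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example reference clock and shift; the real values come
	 * from platform data / device tree, not from this patch.
	 */
	uint64_t freq_hz = 250000000ULL;
	unsigned int shift = 29;

	/* cyclecounter-style conversion: ns = (cycles * mult) >> shift,
	 * with mult sized so one second of cycles maps back to 1e9 ns.
	 */
	uint32_t mult = (uint32_t)((1000000000ULL << shift) / freq_hz);
	uint64_t one_sec_ns = (freq_hz * mult) >> shift;

	/* The event timestamp counter is 32 bits wide, so it wraps after
	 * 2^32 input cycles; at 500 MHz that is ~8.6 s, which is why an
	 * overflow check every HZ * 8 jiffies (8 s) never misses a wrap.
	 */
	double wrap_s = 4294967296.0 / 500000000.0;

	printf("mult=%u  1s of cycles -> %llu ns  wrap@500MHz=%.2fs\n",
	       mult, (unsigned long long)one_sec_ns, wrap_s);
	return 0;
}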
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index fce89a0ab06e..2a3e2c56bc60 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1850,7 +1850,7 @@ static struct emac_platform_data
* resource information from platform init and register a network device
* and allocate resources necessary for driver to perform
*/
-static int __devinit davinci_emac_probe(struct platform_device *pdev)
+static int davinci_emac_probe(struct platform_device *pdev)
{
int rc = 0;
struct resource *res;
@@ -2039,7 +2039,7 @@ no_ndev:
* Called when removing the device driver. We disable clock usage and release
* the resources taken up by the driver and unregister network device
*/
-static int __devexit davinci_emac_remove(struct platform_device *pdev)
+static int davinci_emac_remove(struct platform_device *pdev)
{
struct resource *res;
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -2107,7 +2107,7 @@ static struct platform_driver davinci_emac_driver = {
.of_match_table = of_match_ptr(davinci_emac_of_match),
},
.probe = davinci_emac_probe,
- .remove = __devexit_p(davinci_emac_remove),
+ .remove = davinci_emac_remove,
};
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 51a96dbee9ac..cca25509b039 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -310,7 +310,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
}
-static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
@@ -416,7 +416,7 @@ bail_out:
return ret;
}
-static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+static int davinci_mdio_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -465,7 +465,7 @@ static int davinci_mdio_resume(struct device *dev)
u32 ctrl;
spin_lock(&data->lock);
- pm_runtime_put_sync(data->dev);
+ pm_runtime_get_sync(data->dev);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
@@ -496,7 +496,7 @@ static struct platform_driver davinci_mdio_driver = {
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
- .remove = __devexit_p(davinci_mdio_remove),
+ .remove = davinci_mdio_remove,
};
static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 3e6abf0f2771..22725386c5de 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -300,7 +300,7 @@ these functions are more or less common to all linux network drivers.
**************************************************************/
-static void __devexit tlan_remove_one(struct pci_dev *pdev)
+static void tlan_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tlan_priv *priv = netdev_priv(dev);
@@ -392,7 +392,7 @@ static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
- .remove = __devexit_p(tlan_remove_one),
+ .remove = tlan_remove_one,
.suspend = tlan_suspend,
.resume = tlan_resume,
};
@@ -434,7 +434,7 @@ err_out_pci_free:
}
-static int __devinit tlan_init_one(struct pci_dev *pdev,
+static int tlan_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
return tlan_probe1(pdev, -1, -1, 0, ent);
@@ -460,9 +460,8 @@ static int __devinit tlan_init_one(struct pci_dev *pdev,
*
**************************************************************/
-static int __devinit tlan_probe1(struct pci_dev *pdev,
- long ioaddr, int irq, int rev,
- const struct pci_device_id *ent)
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
+ const struct pci_device_id *ent)
{
struct net_device *dev;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 5ee82a77723b..e321d0b6fc88 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -234,10 +234,9 @@ static void gelic_card_free_chain(struct gelic_card *card,
*
* returns 0 on success, <0 on failure
*/
-static int __devinit gelic_card_init_chain(struct gelic_card *card,
- struct gelic_descr_chain *chain,
- struct gelic_descr *start_descr,
- int no)
+static int gelic_card_init_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr, int no)
{
int i;
struct gelic_descr *descr;
@@ -428,7 +427,7 @@ rewind:
*
* returns 0 on success, < 0 on failure
*/
-static int __devinit gelic_card_alloc_rx_skbs(struct gelic_card *card)
+static int gelic_card_alloc_rx_skbs(struct gelic_card *card)
{
struct gelic_descr_chain *chain;
int ret;
@@ -1468,8 +1467,8 @@ static const struct net_device_ops gelic_netdevice_ops = {
*
* fills out function pointers in the net_device structure
*/
-static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev,
- struct napi_struct *napi)
+static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
+ struct napi_struct *napi)
{
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
@@ -1489,8 +1488,7 @@ static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev,
* gelic_ether_setup_netdev initializes the net_device structure
* and register it.
**/
-int __devinit gelic_net_setup_netdev(struct net_device *netdev,
- struct gelic_card *card)
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
{
int status;
u64 v1, v2;
@@ -1542,7 +1540,7 @@ int __devinit gelic_net_setup_netdev(struct net_device *netdev,
* the card and net_device structures are linked to each other
*/
#define GELIC_ALIGN (32)
-static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **netdev)
+static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
{
struct gelic_card *card;
struct gelic_port *port;
@@ -1593,7 +1591,7 @@ static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **ne
return card;
}
-static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
+static void gelic_card_get_vlan_info(struct gelic_card *card)
{
u64 v1, v2;
int status;
@@ -1667,7 +1665,7 @@ static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
/**
* ps3_gelic_driver_probe - add a device to the control of this driver
*/
-static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
+static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
{
struct gelic_card *card;
struct net_device *netdev;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 961c8321451f..d568af1eb4f4 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -452,7 +452,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
if (rsn)
*buf++ = WLAN_EID_RSN;
else
- *buf++ = WLAN_EID_GENERIC;
+ *buf++ = WLAN_EID_VENDOR_SPECIFIC;
 /* length field; set later */
buf++;
@@ -540,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
break;
switch (item_id) {
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if ((OUI_LEN + 1 <= item_len) &&
!memcmp(pos, wpa_oui, OUI_LEN) &&
pos[OUI_LEN] == 0x01) {
@@ -2305,7 +2305,7 @@ static const struct iw_handler_def gelic_wl_wext_handler_def = {
.get_wireless_stats = gelic_wl_get_wireless_stats,
};
-static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
+static struct net_device *gelic_wl_alloc(struct gelic_card *card)
{
struct net_device *netdev;
struct gelic_port *port;
@@ -2582,7 +2582,7 @@ static const struct ethtool_ops gelic_wl_ethtool_ops = {
.get_link = gelic_wl_get_link,
};
-static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
+static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
{
struct gelic_wl_info *wl;
wl = port_wl(netdev_priv(netdev));
@@ -2598,7 +2598,7 @@ static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
/*
* driver probe/remove
*/
-int __devinit gelic_wl_driver_probe(struct gelic_card *card)
+int gelic_wl_driver_probe(struct gelic_card *card)
{
int ret;
struct net_device *netdev;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index c1ebfe9efcb3..f1b91fd7e41c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2492,7 +2492,7 @@ out_disable_dev:
* spider_net_probe initializes pdev and registers a net_device
* structure for it. After that, the device can be ifconfig'ed up
**/
-static int __devinit
+static int
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -EIO;
@@ -2531,7 +2531,7 @@ out:
* spider_net_remove is called to remove the device and unregisters the
* net_device
**/
-static void __devexit
+static void
spider_net_remove(struct pci_dev *pdev)
{
struct net_device *netdev;
@@ -2559,7 +2559,7 @@ static struct pci_driver spider_net_driver = {
.name = spider_net_driver_name,
.id_table = spider_net_pci_tbl,
.probe = spider_net_probe,
- .remove = __devexit_p(spider_net_remove)
+ .remove = spider_net_remove
};
/**
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 651a70c55e6e..9819349eaa1e 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -60,7 +60,7 @@ enum tc35815_chiptype {
/* indexed by tc35815_chiptype, above */
static const struct {
const char *name;
-} chip_info[] __devinitdata = {
+} chip_info[] = {
{ "TOSHIBA TC35815CF 10/100BaseTX" },
{ "TOSHIBA TC35815 with Wake on LAN" },
{ "TOSHIBA TC35815/TX4939" },
@@ -719,7 +719,7 @@ err_out:
* should provide a "tc35815-mac" device with a MAC address in its
* platform_data.
*/
-static int __devinit tc35815_mac_match(struct device *dev, void *data)
+static int tc35815_mac_match(struct device *dev, void *data)
{
struct platform_device *plat_dev = to_platform_device(dev);
struct pci_dev *pci_dev = data;
@@ -727,7 +727,7 @@ static int __devinit tc35815_mac_match(struct device *dev, void *data)
return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
}
-static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
+static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
struct device *pd = bus_find_device(&platform_bus_type, NULL,
@@ -741,13 +741,13 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
return -ENODEV;
}
#else
-static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
+static int tc35815_read_plat_dev_addr(struct net_device *dev)
{
return -ENODEV;
}
#endif
-static int __devinit tc35815_init_dev_addr(struct net_device *dev)
+static int tc35815_init_dev_addr(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
@@ -785,8 +785,8 @@ static const struct net_device_ops tc35815_netdev_ops = {
#endif
};
-static int __devinit tc35815_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int tc35815_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
void __iomem *ioaddr = NULL;
struct net_device *dev;
@@ -878,7 +878,7 @@ err_out:
}
-static void __devexit tc35815_remove_one(struct pci_dev *pdev)
+static void tc35815_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tc35815_local *lp = netdev_priv(dev);
@@ -2198,7 +2198,7 @@ static struct pci_driver tc35815_pci_driver = {
.name = MODNAME,
.id_table = tc35815_pci_tbl,
.probe = tc35815_init_one,
- .remove = __devexit_p(tc35815_remove_one),
+ .remove = tc35815_remove_one,
#ifdef CONFIG_PM
.suspend = tc35815_suspend,
.resume = tc35815_resume,
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0459c096629f..78ace59efd29 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -113,7 +113,7 @@ static const int multicast_filter_limit = 32;
#include <linux/dmi.h>
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitconst =
+static const char version[] =
"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
/* This driver was written to use PCI memory space. Some early versions
@@ -657,7 +657,7 @@ static void enable_mmio(long pioaddr, u32 quirks)
* Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
* (plus 0x6C for Rhine-I/II)
*/
-static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
+static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
@@ -823,7 +823,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
return work_done;
}
-static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
+static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -856,8 +856,7 @@ static const struct net_device_ops rhine_netdev_ops = {
#endif
};
-static int __devinit rhine_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct rhine_private *rp;
@@ -1802,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2011,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");
- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
out_unlock:
mutex_unlock(&rp->task_lock);
@@ -2232,7 +2227,7 @@ static int rhine_close(struct net_device *dev)
}
-static void __devexit rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2359,7 +2354,7 @@ static struct pci_driver rhine_driver = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
.probe = rhine_init_one,
- .remove = __devexit_p(rhine_remove_one),
+ .remove = rhine_remove_one,
.shutdown = rhine_shutdown,
.driver.pm = RHINE_PM_OPS,
};
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index a46c19859683..1bc7f9fd2583 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table);
* Given a chip identifier return a suitable description. Returns
 * a pointer to a static string valid while the driver is loaded.
*/
-static const char __devinit *get_chip_name(enum chip_type chip_id)
+static const char *get_chip_name(enum chip_type chip_id)
{
int i;
for (i = 0; chip_info_table[i].name != NULL; i++)
@@ -392,7 +392,7 @@ static const char __devinit *get_chip_name(enum chip_type chip_id)
* unload for each active device that is present. Disconnects
* the device from the network layer and frees all the resources
*/
-static void __devexit velocity_remove1(struct pci_dev *pdev)
+static void velocity_remove1(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct velocity_info *vptr = netdev_priv(dev);
@@ -421,7 +421,8 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
* all the verification and checking as well as reporting so that
* we don't duplicate code for each option.
*/
-static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
+static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
+ char *name, const char *devname)
{
if (val == -1)
*opt = def;
@@ -449,7 +450,8 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
* all the verification and checking as well as reporting so that
* we don't duplicate code for each option.
*/
-static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
+static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
+ char *name, const char *devname)
{
(*opt) &= (~flag);
if (val == -1)
@@ -474,7 +476,8 @@ static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag
* Turn the module and command options into a single structure
* for the current device
*/
-static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
+static void velocity_get_options(struct velocity_opt *opts, int index,
+ const char *devname)
{
velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
@@ -2627,9 +2630,8 @@ static const struct net_device_ops velocity_netdev_ops = {
* Set up the initial velocity_info struct for the device that has been
* discovered.
*/
-static void __devinit velocity_init_info(struct pci_dev *pdev,
- struct velocity_info *vptr,
- const struct velocity_info_tbl *info)
+static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
+ const struct velocity_info_tbl *info)
{
memset(vptr, 0, sizeof(struct velocity_info));
@@ -2648,7 +2650,8 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
* Retrieve the PCI configuration space data that interests us from
* the kernel PCI layer
*/
-static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
+static int velocity_get_pci_info(struct velocity_info *vptr,
+ struct pci_dev *pdev)
{
vptr->rev_id = pdev->revision;
@@ -2685,7 +2688,7 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
* Print per driver data as the kernel driver finds Velocity
* hardware
*/
-static void __devinit velocity_print_info(struct velocity_info *vptr)
+static void velocity_print_info(struct velocity_info *vptr)
{
struct net_device *dev = vptr->dev;
@@ -2709,7 +2712,8 @@ static u32 velocity_get_link(struct net_device *dev)
* Configure a discovered adapter from scratch. Return a negative
* errno error code on failure paths.
*/
-static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int velocity_found1(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int first = 1;
struct net_device *dev;
@@ -3108,7 +3112,7 @@ static struct pci_driver velocity_driver = {
.name = VELOCITY_NAME,
.id_table = velocity_id_table,
.probe = velocity_found1,
- .remove = __devexit_p(velocity_remove1),
+ .remove = velocity_remove1,
#ifdef CONFIG_PM
.suspend = velocity_suspend,
.resume = velocity_resume,
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2c08bf6e7bf3..352383890326 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -580,8 +580,6 @@ static int w5100_open(struct net_device *ndev)
struct w5100_priv *priv = netdev_priv(ndev);
netif_info(priv, ifup, ndev, "enabling\n");
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EINVAL;
w5100_hw_start(priv);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -623,7 +621,7 @@ static const struct net_device_ops w5100_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit w5100_hw_probe(struct platform_device *pdev)
+static int w5100_hw_probe(struct platform_device *pdev)
{
struct wiznet_platform_data *data = pdev->dev.platform_data;
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -698,7 +696,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
return 0;
}
-static int __devinit w5100_probe(struct platform_device *pdev)
+static int w5100_probe(struct platform_device *pdev)
{
struct w5100_priv *priv;
struct net_device *ndev;
@@ -741,7 +739,7 @@ err_register:
return err;
}
-static int __devexit w5100_remove(struct platform_device *pdev)
+static int w5100_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5100_priv *priv = netdev_priv(ndev);
@@ -801,7 +799,7 @@ static struct platform_driver w5100_driver = {
.pm = &w5100_pm_ops,
},
.probe = w5100_probe,
- .remove = __devexit_p(w5100_remove),
+ .remove = w5100_remove,
};
module_platform_driver(w5100_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 88943d90c765..9d1d986f8d40 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -500,8 +500,6 @@ static int w5300_open(struct net_device *ndev)
struct w5300_priv *priv = netdev_priv(ndev);
netif_info(priv, ifup, ndev, "enabling\n");
- if (!is_valid_ether_addr(ndev->dev_addr))
- return -EINVAL;
w5300_hw_start(priv);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -543,7 +541,7 @@ static const struct net_device_ops w5300_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int __devinit w5300_hw_probe(struct platform_device *pdev)
+static int w5300_hw_probe(struct platform_device *pdev)
{
struct wiznet_platform_data *data = pdev->dev.platform_data;
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -610,7 +608,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
return 0;
}
-static int __devinit w5300_probe(struct platform_device *pdev)
+static int w5300_probe(struct platform_device *pdev)
{
struct w5300_priv *priv;
struct net_device *ndev;
@@ -653,7 +651,7 @@ err_register:
return err;
}
-static int __devexit w5300_remove(struct platform_device *pdev)
+static int w5300_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
@@ -713,7 +711,7 @@ static struct platform_driver w5300_driver = {
.pm = &w5300_pm_ops,
},
.probe = w5300_probe,
- .remove = __devexit_p(w5300_remove),
+ .remove = w5300_remove,
};
module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 5778a4ae1164..122d60c0481b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on (PPC32 || MICROBLAZE)
+ depends on MICROBLAZE
select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index f8e351880119..aad909d793d7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1002,7 +1002,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-static int __devinit temac_of_probe(struct platform_device *op)
+static int temac_of_probe(struct platform_device *op)
{
struct device_node *np;
struct temac_local *lp;
@@ -1144,7 +1144,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
return rc;
}
-static int __devexit temac_of_remove(struct platform_device *op)
+static int temac_of_remove(struct platform_device *op)
{
struct net_device *ndev = dev_get_drvdata(&op->dev);
struct temac_local *lp = netdev_priv(ndev);
@@ -1163,7 +1163,7 @@ static int __devexit temac_of_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id temac_of_match[] __devinitdata = {
+static struct of_device_id temac_of_match[] = {
{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
@@ -1174,7 +1174,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
static struct platform_driver temac_of_driver = {
.probe = temac_of_probe,
- .remove = __devexit_p(temac_of_remove),
+ .remove = temac_of_remove,
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_temac",
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a788501e978e..6f47100e58d7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -48,7 +48,7 @@
#define AXIENET_REGS_N 32
/* Match table for of_platform binding */
-static struct of_device_id axienet_of_match[] __devinitdata = {
+static struct of_device_id axienet_of_match[] = {
{ .compatible = "xlnx,axi-ethernet-1.00.a", },
{ .compatible = "xlnx,axi-ethernet-1.01.a", },
{ .compatible = "xlnx,axi-ethernet-2.01.a", },
@@ -1482,7 +1482,7 @@ static void axienet_dma_err_handler(unsigned long data)
* device. Parses through device tree and populates fields of
* axienet_local. It registers the Ethernet device.
*/
-static int __devinit axienet_of_probe(struct platform_device *op)
+static int axienet_of_probe(struct platform_device *op)
{
__be32 *p;
int size, ret = 0;
@@ -1590,7 +1590,7 @@ static int __devinit axienet_of_probe(struct platform_device *op)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&op->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto err_iounmap_2;
@@ -1632,7 +1632,7 @@ nodev:
return ret;
}
-static int __devexit axienet_of_remove(struct platform_device *op)
+static int axienet_of_remove(struct platform_device *op)
{
struct net_device *ndev = dev_get_drvdata(&op->dev);
struct axienet_local *lp = netdev_priv(ndev);
@@ -1656,7 +1656,7 @@ static int __devexit axienet_of_remove(struct platform_device *op)
static struct platform_driver axienet_of_driver = {
.probe = axienet_of_probe,
- .remove = __devexit_p(axienet_of_remove),
+ .remove = axienet_of_remove,
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_axienet",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 77cfe5110318..919b983114e9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1107,7 +1107,7 @@ static struct net_device_ops xemaclite_netdev_ops;
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
-static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
+static int xemaclite_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -1229,7 +1229,7 @@ error2:
*
* Return: 0, always.
*/
-static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
+static int xemaclite_of_remove(struct platform_device *of_dev)
{
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1280,7 +1280,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
};
/* Match table for OF platform binding */
-static struct of_device_id xemaclite_of_match[] __devinitdata = {
+static struct of_device_id xemaclite_of_match[] = {
{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
@@ -1298,7 +1298,7 @@ static struct platform_driver xemaclite_of_driver = {
.of_match_table = xemaclite_of_match,
},
.probe = xemaclite_of_probe,
- .remove = __devexit_p(xemaclite_of_remove),
+ .remove = xemaclite_of_remove,
};
module_platform_driver(xemaclite_of_driver);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 477d6729b17f..d3ebb73277be 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1379,7 +1379,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit eth_init_one(struct platform_device *pdev)
+static int eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
@@ -1480,7 +1480,7 @@ err_free:
return err;
}
-static int __devexit eth_remove_one(struct platform_device *pdev)
+static int eth_remove_one(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct port *port = netdev_priv(dev);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6695a1dadf4e..502c8ff1d985 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -228,7 +228,7 @@
#define DRV_VERSION "v1.10"
#define DRV_RELDATE "2006/12/14"
-static char version[] __devinitdata =
+static char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
" Lawrence V. Stefani and others\n";
@@ -515,7 +515,7 @@ static const struct net_device_ops dfx_netdev_ops = {
* initialized and the board resources are read and stored in
* the device structure.
*/
-static int __devinit dfx_register(struct device *bdev)
+static int dfx_register(struct device *bdev)
{
static int version_disp;
int dfx_bus_pci = DFX_BUS_PCI(bdev);
@@ -663,7 +663,7 @@ err_out:
* enabled yet.
*/
-static void __devinit dfx_bus_init(struct net_device *dev)
+static void dfx_bus_init(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
@@ -809,7 +809,7 @@ static void __devinit dfx_bus_init(struct net_device *dev)
* Interrupts are disabled at the adapter bus-specific logic.
*/
-static void __devexit dfx_bus_uninit(struct net_device *dev)
+static void dfx_bus_uninit(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
@@ -866,7 +866,7 @@ static void __devexit dfx_bus_uninit(struct net_device *dev)
* None
*/
-static void __devinit dfx_bus_config_check(DFX_board_t *bp)
+static void dfx_bus_config_check(DFX_board_t *bp)
{
struct device __maybe_unused *bdev = bp->bus_dev;
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
@@ -962,9 +962,8 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp)
* returning from this routine.
*/
-static int __devinit dfx_driver_init(struct net_device *dev,
- const char *print_name,
- resource_size_t bar_start)
+static int dfx_driver_init(struct net_device *dev, const char *print_name,
+ resource_size_t bar_start)
{
DFX_board_t *bp = netdev_priv(dev);
struct device *bdev = bp->bus_dev;
@@ -3579,7 +3578,7 @@ static void dfx_xmt_flush( DFX_board_t *bp )
* Device structures for FDDI adapters (fddi0, fddi1, etc) are
* freed.
*/
-static void __devexit dfx_unregister(struct device *bdev)
+static void dfx_unregister(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
DFX_board_t *bp = netdev_priv(dev);
@@ -3619,13 +3618,12 @@ static void __devexit dfx_unregister(struct device *bdev)
}
-static int __devinit __maybe_unused dfx_dev_register(struct device *);
-static int __devexit __maybe_unused dfx_dev_unregister(struct device *);
+static int __maybe_unused dfx_dev_register(struct device *);
+static int __maybe_unused dfx_dev_unregister(struct device *);
#ifdef CONFIG_PCI
-static int __devinit dfx_pci_register(struct pci_dev *,
- const struct pci_device_id *);
-static void __devexit dfx_pci_unregister(struct pci_dev *);
+static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
+static void dfx_pci_unregister(struct pci_dev *);
static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
@@ -3637,16 +3635,16 @@ static struct pci_driver dfx_pci_driver = {
.name = "defxx",
.id_table = dfx_pci_table,
.probe = dfx_pci_register,
- .remove = __devexit_p(dfx_pci_unregister),
+ .remove = dfx_pci_unregister,
};
-static __devinit int dfx_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int dfx_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
return dfx_register(&pdev->dev);
}
-static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
+static void dfx_pci_unregister(struct pci_dev *pdev)
{
dfx_unregister(&pdev->dev);
}
@@ -3668,7 +3666,7 @@ static struct eisa_driver dfx_eisa_driver = {
.name = "defxx",
.bus = &eisa_bus_type,
.probe = dfx_dev_register,
- .remove = __devexit_p(dfx_dev_unregister),
+ .remove = dfx_dev_unregister,
},
};
#endif /* CONFIG_EISA */
@@ -3689,12 +3687,12 @@ static struct tc_driver dfx_tc_driver = {
.name = "defxx",
.bus = &tc_bus_type,
.probe = dfx_dev_register,
- .remove = __devexit_p(dfx_dev_unregister),
+ .remove = dfx_dev_unregister,
},
};
#endif /* CONFIG_TC */
-static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
+static int __maybe_unused dfx_dev_register(struct device *dev)
{
int status;
@@ -3704,7 +3702,7 @@ static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
return status;
}
-static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
+static int __maybe_unused dfx_dev_unregister(struct device *dev)
{
put_device(dev);
dfx_unregister(dev);
@@ -3712,7 +3710,7 @@ static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
}
-static int __devinit dfx_init(void)
+static int dfx_init(void)
{
int status;
@@ -3724,7 +3722,7 @@ static int __devinit dfx_init(void)
return status;
}
-static void __devexit dfx_cleanup(void)
+static void dfx_cleanup(void)
{
tc_unregister_driver(&dfx_tc_driver);
eisa_driver_unregister(&dfx_eisa_driver);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 3d9a4596a423..d5bd563ac131 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -321,7 +321,7 @@ err_out1:
/*
* Called for each adapter board from pci_unregister_driver
*/
-static void __devexit skfp_remove_one(struct pci_dev *pdev)
+static void skfp_remove_one(struct pci_dev *pdev)
{
struct net_device *p = pci_get_drvdata(pdev);
struct s_smc *lp = netdev_priv(p);
@@ -2243,7 +2243,7 @@ static struct pci_driver skfddi_pci_driver = {
.name = "skfddi",
.id_table = skfddi_pci_tbl,
.probe = skfp_init_one,
- .remove = __devexit_p(skfp_remove_one),
+ .remove = skfp_remove_one,
};
static int __init skfd_init(void)
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index d4719632ffc6..e5b19b056909 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -61,7 +61,7 @@ MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");
-static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
+static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
static const struct net_device_ops rr_netdev_ops = {
@@ -88,8 +88,7 @@ static const struct net_device_ops rr_netdev_ops = {
* stack will need to know about I/O vectors or something similar.
*/
-static int __devinit rr_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
static int version_disp;
@@ -221,7 +220,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
return ret;
}
-static void __devexit rr_remove_one (struct pci_dev *pdev)
+static void rr_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rr_private *rr = netdev_priv(dev);
@@ -503,7 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv,
}
-static int __devinit rr_init(struct net_device *dev)
+static int rr_init(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
@@ -1681,7 +1680,7 @@ static struct pci_driver rr_driver = {
.name = "rrunner",
.id_table = rr_pci_tbl,
.probe = rr_init_one,
- .remove = __devexit_p(rr_remove_one),
+ .remove = rr_remove_one,
};
static int __init rr_init_module(void)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5fd6f4674326..e6fe0d80d612 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -84,7 +84,7 @@ struct hv_netvsc_packet {
};
struct netvsc_device_info {
- unsigned char mac_adr[6];
+ unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
};
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f825a629a699..8264f0ef7692 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
- char save_adr[14];
+ char save_adr[ETH_ALEN];
unsigned char save_aatype;
int err;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 928148cc3220..2b657d4d63a8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -363,11 +363,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
rndis_pkt = &msg->msg.pkt;
- /*
- * FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
- * netvsc packet (ie TotalDataBufferLength != MessageLength)
- */
-
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
@@ -610,8 +605,11 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
return -EBUSY;
} else {
set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS)
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ set_complete->status);
ret = -EINVAL;
+ }
}
cleanup:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ba753d87a32f..a4a62e170ec0 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -778,7 +778,7 @@ static int at86rf230_fill_data(struct spi_device *spi)
return 0;
}
-static int __devinit at86rf230_probe(struct spi_device *spi)
+static int at86rf230_probe(struct spi_device *spi)
{
struct ieee802154_dev *dev;
struct at86rf230_local *lp;
@@ -920,7 +920,7 @@ err_fill:
return rc;
}
-static int __devexit at86rf230_remove(struct spi_device *spi)
+static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -947,7 +947,7 @@ static struct spi_driver at86rf230_driver = {
.owner = THIS_MODULE,
},
.probe = at86rf230_probe,
- .remove = __devexit_p(at86rf230_remove),
+ .remove = at86rf230_remove,
.suspend = at86rf230_suspend,
.resume = at86rf230_resume,
};
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 7d39add7d467..1e9cb0bbf62c 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -354,7 +354,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
}
-static int __devinit ieee802154fake_probe(struct platform_device *pdev)
+static int ieee802154fake_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct fakehard_priv *priv;
@@ -412,7 +412,7 @@ out:
return err;
}
-static int __devexit ieee802154fake_remove(struct platform_device *pdev)
+static int ieee802154fake_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
@@ -423,7 +423,7 @@ static struct platform_device *ieee802154fake_dev;
static struct platform_driver ieee802154fake_driver = {
.probe = ieee802154fake_probe,
- .remove = __devexit_p(ieee802154fake_remove),
+ .remove = ieee802154fake_remove,
.driver = {
.name = "ieee802154hardmac",
.owner = THIS_MODULE,
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index e7456fcd0913..b8d22173925d 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -221,7 +221,7 @@ static void fakelb_del(struct fakelb_dev_priv *priv)
ieee802154_free_device(priv->dev);
}
-static int __devinit fakelb_probe(struct platform_device *pdev)
+static int fakelb_probe(struct platform_device *pdev)
{
struct fakelb_priv *priv;
struct fakelb_dev_priv *dp;
@@ -253,7 +253,7 @@ err_alloc:
return err;
}
-static int __devexit fakelb_remove(struct platform_device *pdev)
+static int fakelb_remove(struct platform_device *pdev)
{
struct fakelb_priv *priv = platform_get_drvdata(pdev);
struct fakelb_dev_priv *dp, *temp;
@@ -269,7 +269,7 @@ static struct platform_device *ieee802154fake_dev;
static struct platform_driver ieee802154fake_driver = {
.probe = fakelb_probe,
- .remove = __devexit_p(fakelb_remove),
+ .remove = fakelb_remove,
.driver = {
.name = "ieee802154fakelb",
.owner = THIS_MODULE,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ed7521693980..3f2c7aaf28c4 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,7 +618,7 @@ out:
enable_irq(devrec->spi->irq);
}
-static int __devinit mrf24j40_probe(struct spi_device *spi)
+static int mrf24j40_probe(struct spi_device *spi)
{
int ret = -ENOMEM;
u8 val;
@@ -711,7 +711,7 @@ err_devrec:
return ret;
}
-static int __devexit mrf24j40_remove(struct spi_device *spi)
+static int mrf24j40_remove(struct spi_device *spi)
{
struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
@@ -746,7 +746,7 @@ static struct spi_driver mrf24j40_driver = {
},
.id_table = mrf24j40_ids,
.probe = mrf24j40_probe,
- .remove = __devexit_p(mrf24j40_remove),
+ .remove = mrf24j40_remove,
};
static int __init mrf24j40_init(void)
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index e09417df8f39..b5151e4ced61 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -760,7 +760,7 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
.ndo_do_ioctl = au1k_irda_ioctl,
};
-static int __devinit au1k_irda_net_init(struct net_device *dev)
+static int au1k_irda_net_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
struct db_dest *pDB, *pDBfree;
@@ -849,7 +849,7 @@ out1:
return retval;
}
-static int __devinit au1k_irda_probe(struct platform_device *pdev)
+static int au1k_irda_probe(struct platform_device *pdev)
{
struct au1k_private *aup;
struct net_device *dev;
@@ -921,7 +921,7 @@ out:
return err;
}
-static int __devexit au1k_irda_remove(struct platform_device *pdev)
+static int au1k_irda_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1k_private *aup = netdev_priv(dev);
@@ -949,7 +949,7 @@ static struct platform_driver au1k_irda_driver = {
.owner = THIS_MODULE,
},
.probe = au1k_irda_probe,
- .remove = __devexit_p(au1k_irda_remove),
+ .remove = au1k_irda_remove,
};
static int __init au1k_irda_load(void)
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c6a0299aa9f9..fed4a05d55c7 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -31,7 +31,7 @@ static void turnaround_delay(unsigned long last_jif, int mtt)
schedule_timeout_uninterruptible(ticks);
}
-static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
+static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
int i;
struct resource *res;
@@ -688,7 +688,7 @@ static const struct net_device_ops bfin_sir_ndo = {
.ndo_get_stats = bfin_sir_stats,
};
-static int __devinit bfin_sir_probe(struct platform_device *pdev)
+static int bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct bfin_sir_self *self;
@@ -775,7 +775,7 @@ err_mem_0:
return err;
}
-static int __devexit bfin_sir_remove(struct platform_device *pdev)
+static int bfin_sir_remove(struct platform_device *pdev)
{
struct bfin_sir_port *sir_port;
struct net_device *dev = NULL;
@@ -798,7 +798,7 @@ static int __devexit bfin_sir_remove(struct platform_device *pdev)
static struct platform_driver bfin_ir_driver = {
.probe = bfin_sir_probe,
- .remove = __devexit_p(bfin_sir_remove),
+ .remove = bfin_sir_remove,
.suspend = bfin_sir_suspend,
.resume = bfin_sir_resume,
.driver = {
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
index f83c5b881d2d..5fe1f4dd3369 100644
--- a/drivers/net/irda/ep7211-sir.c
+++ b/drivers/net/irda/ep7211-sir.c
@@ -1,52 +1,18 @@
/*
- * IR port driver for the Cirrus Logic EP7211 processor.
+ * IR port driver for the Cirrus Logic CLPS711X processors
*
* Copyright 2001, Blue Mug Inc. All rights reserved.
* Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
*/
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <net/irda/irda.h>
-#include <net/irda/irda_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
-#include <asm/io.h>
#include <mach/hardware.h>
#include "sir-dev.h"
-#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
-#define MAX_DELAY 10000 /* 1 ms */
-
-static int ep7211_open(struct sir_dev *dev);
-static int ep7211_close(struct sir_dev *dev);
-static int ep7211_change_speed(struct sir_dev *dev, unsigned speed);
-static int ep7211_reset(struct sir_dev *dev);
-
-static struct dongle_driver ep7211 = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = ep7211_open,
- .close = ep7211_close,
- .reset = ep7211_reset,
- .set_speed = ep7211_change_speed,
-};
-
-static int __init ep7211_sir_init(void)
-{
- return irda_register_dongle(&ep7211);
-}
-
-static void __exit ep7211_sir_cleanup(void)
-{
- irda_unregister_dongle(&ep7211);
-}
-
-static int ep7211_open(struct sir_dev *dev)
+static int clps711x_dongle_open(struct sir_dev *dev)
{
unsigned int syscon;
@@ -58,7 +24,7 @@ static int ep7211_open(struct sir_dev *dev)
return 0;
}
-static int ep7211_close(struct sir_dev *dev)
+static int clps711x_dongle_close(struct sir_dev *dev)
{
unsigned int syscon;
@@ -70,20 +36,35 @@ static int ep7211_close(struct sir_dev *dev)
return 0;
}
-static int ep7211_change_speed(struct sir_dev *dev, unsigned speed)
+static struct dongle_driver clps711x_dongle = {
+ .owner = THIS_MODULE,
+ .driver_name = "EP7211 IR driver",
+ .type = IRDA_EP7211_DONGLE,
+ .open = clps711x_dongle_open,
+ .close = clps711x_dongle_close,
+};
+
+static int clps711x_sir_probe(struct platform_device *pdev)
{
- return 0;
+ return irda_register_dongle(&clps711x_dongle);
}
-static int ep7211_reset(struct sir_dev *dev)
+static int clps711x_sir_remove(struct platform_device *pdev)
{
- return 0;
+ return irda_unregister_dongle(&clps711x_dongle);
}
+static struct platform_driver clps711x_sir_driver = {
+ .driver = {
+ .name = "sir-clps711x",
+ .owner = THIS_MODULE,
+ },
+ .probe = clps711x_sir_probe,
+ .remove = clps711x_sir_remove,
+};
+module_platform_driver(clps711x_sir_driver);
+
MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
MODULE_DESCRIPTION("EP7211 IR dongle driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
-
-module_init(ep7211_sir_init);
-module_exit(ep7211_sir_cleanup);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4b746d9bd8e7..9448587de453 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -33,11 +33,7 @@
#define DRIVER_NAME "sh_irda"
-#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
-#define __IRDARAM_LEN 0x13FF
-#else
#define __IRDARAM_LEN 0x1039
-#endif
#define IRTMR 0x1F00 /* Transfer mode */
#define IRCFR 0x1F02 /* Configuration */
@@ -757,7 +753,7 @@ static const struct net_device_ops sh_irda_ndo = {
************************************************************************/
-static int __devinit sh_irda_probe(struct platform_device *pdev)
+static int sh_irda_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct sh_irda_self *self;
@@ -829,7 +825,7 @@ exit:
return err;
}
-static int __devexit sh_irda_remove(struct platform_device *pdev)
+static int sh_irda_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_irda_self *self = netdev_priv(ndev);
@@ -866,7 +862,7 @@ static const struct dev_pm_ops sh_irda_pm_ops = {
static struct platform_driver sh_irda_driver = {
.probe = sh_irda_probe,
- .remove = __devexit_p(sh_irda_remove),
+ .remove = sh_irda_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &sh_irda_pm_ops,
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 624ac1939e85..24aefcd84065 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -705,7 +705,7 @@ static const struct net_device_ops sh_sir_ndo = {
************************************************************************/
-static int __devinit sh_sir_probe(struct platform_device *pdev)
+static int sh_sir_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct sh_sir_self *self;
@@ -783,7 +783,7 @@ exit:
return err;
}
-static int __devexit sh_sir_remove(struct platform_device *pdev)
+static int sh_sir_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_sir_self *self = netdev_priv(ndev);
@@ -803,7 +803,7 @@ static int __devexit sh_sir_remove(struct platform_device *pdev)
static struct platform_driver sh_sir_driver = {
.probe = sh_sir_probe,
- .remove = __devexit_p(sh_sir_remove),
+ .remove = sh_sir_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index a926813ee91d..5290952b60c2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,8 +376,8 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
static int pnp_driver_registered;
#ifdef CONFIG_PNP
-static int __devinit smsc_ircc_pnp_probe(struct pnp_dev *dev,
- const struct pnp_device_id *dev_id)
+static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
{
unsigned int firbase, sirbase;
u8 dma, irq;
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
* Try to open driver instance
*
*/
-static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
{
struct smsc_ircc_cb *self;
struct net_device *dev;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 1a89fd459dd5..f9033c6a888c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -77,7 +77,7 @@ static int dongle_id = 0; /* default: probe */
module_param(dongle_id, int, 0);
/* Some prototypes */
-static int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
@@ -102,8 +102,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
-static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
-static void __devexit via_remove_one (struct pci_dev *pdev);
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
+static void via_remove_one(struct pci_dev *pdev);
/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
static void iodelay(int udelay)
@@ -132,7 +132,7 @@ static struct pci_driver via_driver = {
.name = VIA_MODULE_NAME,
.id_table = via_pci_tbl,
.probe = via_init_one,
- .remove = __devexit_p(via_remove_one),
+ .remove = via_remove_one,
};
@@ -156,7 +156,7 @@ static int __init via_ircc_init(void)
return 0;
}
-static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
int rc;
u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
@@ -286,8 +286,7 @@ static const struct net_device_ops via_ircc_fir_ops = {
* Open driver instance
*
*/
-static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
- unsigned int id)
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
struct net_device *dev;
struct via_ircc_cb *self;
@@ -424,7 +423,7 @@ static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
* Close driver instance
*
*/
-static void __devexit via_remove_one(struct pci_dev *pdev)
+static void via_remove_one(struct pci_dev *pdev)
{
struct via_ircc_cb *self = pci_get_drvdata(pdev);
int iobase;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 9021d0131727..2f99f8881dfc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1627,7 +1627,7 @@ static int vlsi_irda_init(struct net_device *ndev)
/**************************************************************/
-static int __devinit
+static int
vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct net_device *ndev;
@@ -1699,7 +1699,7 @@ out:
return -ENODEV;
}
-static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
+static void vlsi_irda_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev;
@@ -1832,7 +1832,7 @@ static struct pci_driver vlsi_irda_driver = {
.name = drivername,
.id_table = vlsi_irda_table,
.probe = vlsi_irda_probe,
- .remove = __devexit_p(vlsi_irda_remove),
+ .remove = vlsi_irda_remove,
#ifdef CONFIG_PM
.suspend = vlsi_irda_suspend,
.resume = vlsi_irda_resume,
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 81f8f9e31db5..fcbf680c3e62 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_orphan(skb);
+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 68a43fe602e7..d3fb97d97cbc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,
static size_t macvlan_get_size(const struct net_device *dev)
{
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
}
static int macvlan_fill_info(struct sk_buff *skb,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index b3321129a83c..6989ebe2bc79 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -56,6 +56,10 @@ static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
+static bool oops_only = false;
+module_param(oops_only, bool, 0600);
+MODULE_PARM_DESC(oops_only, "Only log oops messages");
+
#ifndef MODULE
static int __init option_setup(char *opt)
{
@@ -683,6 +687,8 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
struct netconsole_target *nt;
const char *tmp;
+ if (oops_only && !oops_in_progress)
+ return;
/* Avoid taking lock and disabling interrupts unnecessarily */
if (list_empty(&target_list))
return;
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 81c7bc010dd8..383e8338ad86 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,18 +150,24 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x00181b80,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 24e05c43bff8..7490b6c866e6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -48,6 +48,21 @@
#define CAL_TRIGGER 7
#define PER_TRIGGER 6
+#define MII_DP83640_MICR 0x11
+#define MII_DP83640_MISR 0x12
+
+#define MII_DP83640_MICR_OE 0x1
+#define MII_DP83640_MICR_IE 0x2
+
+#define MII_DP83640_MISR_RHF_INT_EN 0x01
+#define MII_DP83640_MISR_FHF_INT_EN 0x02
+#define MII_DP83640_MISR_ANC_INT_EN 0x04
+#define MII_DP83640_MISR_DUP_INT_EN 0x08
+#define MII_DP83640_MISR_SPD_INT_EN 0x10
+#define MII_DP83640_MISR_LINK_INT_EN 0x20
+#define MII_DP83640_MISR_ED_INT_EN 0x40
+#define MII_DP83640_MISR_LQ_INT_EN 0x80
+
/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16
@@ -1043,6 +1058,65 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_ack_interrupt(struct phy_device *phydev)
+{
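+ /* the MISR read itself acknowledges (clears) the pending interrupt */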
+ int err = phy_read(phydev, MII_DP83640_MISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83640_config_intr(struct phy_device *phydev)
+{
+ int micr;
+ int misr;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ misr = phy_read(phydev, MII_DP83640_MISR);
+ if (misr < 0)
+ return misr;
+ misr |=
+ (MII_DP83640_MISR_ANC_INT_EN |
+ MII_DP83640_MISR_DUP_INT_EN |
+ MII_DP83640_MISR_SPD_INT_EN |
+ MII_DP83640_MISR_LINK_INT_EN);
+ err = phy_write(phydev, MII_DP83640_MISR, misr);
+ if (err < 0)
+ return err;
+
+ micr = phy_read(phydev, MII_DP83640_MICR);
+ if (micr < 0)
+ return micr;
+ micr |=
+ (MII_DP83640_MICR_OE |
+ MII_DP83640_MICR_IE);
+ return phy_write(phydev, MII_DP83640_MICR, micr);
+ } else {
+ micr = phy_read(phydev, MII_DP83640_MICR);
+ if (micr < 0)
+ return micr;
+ micr &=
+ ~(MII_DP83640_MICR_OE |
+ MII_DP83640_MICR_IE);
+ err = phy_write(phydev, MII_DP83640_MICR, micr);
+ if (err < 0)
+ return err;
+
+ misr = phy_read(phydev, MII_DP83640_MISR);
+ if (misr < 0)
+ return misr;
+ misr &=
+ ~(MII_DP83640_MISR_ANC_INT_EN |
+ MII_DP83640_MISR_DUP_INT_EN |
+ MII_DP83640_MISR_SPD_INT_EN |
+ MII_DP83640_MISR_LINK_INT_EN);
+ return phy_write(phydev, MII_DP83640_MISR, misr);
+ }
+}
+
static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
{
struct dp83640_private *dp83640 = phydev->priv;
@@ -1253,11 +1327,13 @@ static struct phy_driver dp83640_driver = {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
.features = PHY_BASIC_FEATURES,
- .flags = 0,
+ .flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = dp83640_ack_interrupt,
+ .config_intr = dp83640_config_intr,
.ts_info = dp83640_ts_info,
.hwtstamp = dp83640_hwtstamp,
.rxtstamp = dp83640_rxtstamp,
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d5199cb4caec..b5ddd5077a80 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
+#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
-#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
@@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- /* Additional delay (2ns) used to adjust RX clock phase
- * at RGMII interface */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
return c;
- c |= IP1001_PHASE_SEL_MASK;
+ c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ c |= IP1001_RXPHASE_SEL;
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ c |= IP1001_TXPHASE_SEL;
+
c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
if (c < 0)
return c;
@@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
c |= IP101A_G_APS_ON;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5d2a3f215887..22dec9c7ef05 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- /* Enable Fiber/Copper auto selection */
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
-
- temp = phy_read(phydev, MII_BMCR);
- temp |= BMCR_RESET;
- phy_write(phydev, MII_BMCR, temp);
-
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2ed1140df3e9..27274986ab56 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -103,9 +103,9 @@ static struct mdiobb_ops mdio_gpio_ops = {
.get_mdio_data = mdio_get,
};
-static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
- struct mdio_gpio_platform_data *pdata,
- int bus_id)
+static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
+ struct mdio_gpio_platform_data *pdata,
+ int bus_id)
{
struct mii_bus *new_bus;
struct mdio_gpio_info *bitbang;
@@ -173,7 +173,7 @@ static void mdio_gpio_bus_deinit(struct device *dev)
kfree(bitbang);
}
-static void __devexit mdio_gpio_bus_destroy(struct device *dev)
+static void mdio_gpio_bus_destroy(struct device *dev)
{
struct mii_bus *bus = dev_get_drvdata(dev);
@@ -181,7 +181,7 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
mdio_gpio_bus_deinit(dev);
}
-static int __devinit mdio_gpio_probe(struct platform_device *pdev)
+static int mdio_gpio_probe(struct platform_device *pdev)
{
struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
@@ -213,7 +213,7 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int __devexit mdio_gpio_remove(struct platform_device *pdev)
+static int mdio_gpio_remove(struct platform_device *pdev)
{
mdio_gpio_bus_destroy(&pdev->dev);
@@ -227,7 +227,7 @@ static struct of_device_id mdio_gpio_of_match[] = {
static struct platform_driver mdio_gpio_driver = {
.probe = mdio_gpio_probe,
- .remove = __devexit_p(mdio_gpio_remove),
+ .remove = mdio_gpio_remove,
.driver = {
.name = "mdio-gpio",
.owner = THIS_MODULE,
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index eefe49e8713c..0c9accb1c14f 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -49,7 +49,7 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
return 0;
}
-static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
+static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
enum of_gpio_flags f;
struct mdio_mux_gpio_state *s;
@@ -104,7 +104,7 @@ err:
return r;
}
-static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev)
+static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
mdio_mux_uninit(s->mux_handle);
@@ -130,7 +130,7 @@ static struct platform_driver mdio_mux_gpio_driver = {
.of_match_table = mdio_mux_gpio_match,
},
.probe = mdio_mux_gpio_probe,
- .remove = __devexit_p(mdio_mux_gpio_remove),
+ .remove = mdio_mux_gpio_remove,
};
module_platform_driver(mdio_mux_gpio_driver);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 9061ba622ac4..9733bd239a86 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -67,7 +67,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
return 0;
}
-static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
+static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
{
struct device_node *np2, *np = pdev->dev.of_node;
struct mdio_mux_mmioreg_state *s;
@@ -137,7 +137,7 @@ static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
+static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
{
struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
@@ -161,7 +161,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = {
.of_match_table = mdio_mux_mmioreg_match,
},
.probe = mdio_mux_mmioreg_probe,
- .remove = __devexit_p(mdio_mux_mmioreg_remove),
+ .remove = mdio_mux_mmioreg_remove,
};
module_platform_driver(mdio_mux_mmioreg_driver);
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index d4015aa663e6..09297fe05ae5 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -96,7 +96,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
return 0;
}
-static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
+static int octeon_mdiobus_probe(struct platform_device *pdev)
{
struct octeon_mdiobus *bus;
struct resource *res_mem;
@@ -159,7 +159,7 @@ fail:
return err;
}
-static int __devexit octeon_mdiobus_remove(struct platform_device *pdev)
+static int octeon_mdiobus_remove(struct platform_device *pdev)
{
struct octeon_mdiobus *bus;
union cvmx_smix_en smi_en;
@@ -188,7 +188,7 @@ static struct platform_driver octeon_mdiobus_driver = {
.of_match_table = octeon_mdiobus_match,
},
.probe = octeon_mdiobus_probe,
- .remove = __devexit_p(octeon_mdiobus_remove),
+ .remove = octeon_mdiobus_remove,
};
void octeon_mdiobus_force_mod_depencency(void)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c1ef3000ea60..044b5326459f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -431,10 +431,24 @@ static struct dev_pm_ops mdio_bus_pm_ops = {
#endif /* CONFIG_PM */
+static ssize_t
+phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+}
+
+static struct device_attribute mdio_dev_attrs[] = {
+ __ATTR_RO(phy_id),
+ __ATTR_NULL
+};
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
.pm = MDIO_BUS_PM_OPS,
+ .dev_attrs = mdio_dev_attrs,
};
EXPORT_SYMBOL(mdio_bus_type);
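With the phy_id attribute added to the MDIO bus type, the id of each PHY should become readable from sysfs. A minimal userspace sketch (the device path is hypothetical; the actual name depends on the bus id and PHY address):

#include <stdio.h>

int main(void)
{
	char id[16];
	/* hypothetical device name: "<bus id>:<phy addr>" on the mdio_bus */
	FILE *f = fopen("/sys/bus/mdio_bus/devices/0:01/phy_id", "r");

	if (!f)
		return 1;
	if (fgets(id, sizeof(id), f))
		printf("PHY id: %s", id);
	fclose(f);
	return 0;
}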
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2165d5fdb8c0..b983596abcbb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -127,6 +127,39 @@ static int ks8051_config_init(struct phy_device *phydev)
return 0;
}
+#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
+#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
+#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
+static int ksz8873mll_read_status(struct phy_device *phydev)
+{
+ int regval;
+
+ /* dummy read */
+ regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
+
+ regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
+
+ if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX)
+ phydev->duplex = DUPLEX_HALF;
+ else
+ phydev->duplex = DUPLEX_FULL;
+
+ if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_SPEED)
+ phydev->speed = SPEED_10;
+ else
+ phydev->speed = SPEED_100;
+
+ phydev->link = 1;
+ phydev->pause = phydev->asym_pause = 0;
+
+ return 0;
+}
+
+static int ksz8873mll_config_aneg(struct phy_device *phydev)
+{
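+ /* nothing to negotiate; link parameters are read from the switch registers */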
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -204,6 +237,16 @@ static struct phy_driver ksphy_driver[] = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
+}, {
+ .phy_id = PHY_ID_KSZ8873MLL,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ8873MLL Switch",
+ .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .driver = { .owner = THIS_MODULE, },
} };
static int __init ksphy_init(void)
@@ -232,6 +275,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8041, 0x00fffff0 },
{ PHY_ID_KSZ8051, 0x00fffff0 },
+ { PHY_ID_KSZ8873MLL, 0x00fffff0 },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 88e3991464e7..11f34813e23f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,7 +43,31 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
- int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
+ if (rc < 0)
+ return rc;
+
+ /* If the SMSC PHY is in power-down mode, then set it
+ * to all-capable mode before using it.
+ */
+ if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
+ int timeout = 50000;
+
+ /* set "all capable" mode and reset the phy */
+ rc |= MII_LAN83C185_MODE_ALL;
+ phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
+ phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ /* wait end of reset (max 500 ms) */
+ do {
+ udelay(10);
+ if (timeout-- == 0)
+ return -1;
+ rc = phy_read(phydev, MII_BMCR);
+ } while (rc & BMCR_RESET);
+ }
+
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
@@ -56,35 +80,52 @@ static int smsc_phy_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt (phydev);
}
-static int lan87xx_config_init(struct phy_device *phydev)
+static int lan911x_config_init(struct phy_device *phydev)
{
- /*
- * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
- * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
- * to a bug on the chip.
- *
- * When the system is powered on with the network cable being
- * disconnected all the way until after ifconfig ethX up is
- * issued for the LAN port with this PHY, connecting the cable
- * afterwards does not cause LINK change detection, while the
- * expected behavior is the Link UP being detected.
- */
- int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- if (rc < 0)
- return rc;
-
- rc &= ~MII_LAN83C185_EDPWRDOWN;
-
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
- if (rc < 0)
- return rc;
-
return smsc_phy_ack_interrupt(phydev);
}
-static int lan911x_config_init(struct phy_device *phydev)
+/*
+ * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
+ * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
+ * does not send the pulses within this interval, the PHY will remain powered
+ * down.
+ *
+ * This workaround will manually toggle the PHY on/off upon calls to read_status
+ * in order to generate link test pulses if the link is down. If a link partner
+ * is present, it will respond to the pulses, which will cause the ENERGYON bit
+ * to be set and will cause the EDPD mode to be exited.
+ */
+static int lan87xx_read_status(struct phy_device *phydev)
{
- return smsc_phy_ack_interrupt(phydev);
+ int err = genphy_read_status(phydev);
+
+ if (!phydev->link) {
+ /* Disable EDPD to wake up PHY */
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc & ~MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
+ /* Sleep 64 ms to allow ~5 link test pulses to be sent */
+ msleep(64);
+
+ /* Re-enable EDPD */
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+ }
+
+ return err;
}
static struct phy_driver smsc_phy_driver[] = {
@@ -187,8 +228,8 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .config_init = lan87xx_config_init,
+ .read_status = lan87xx_read_status,
+ .config_init = smsc_phy_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 1c3abce78b6a..41eb8ffeb53d 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -264,7 +264,7 @@ static struct bin_attribute ks8995_registers_attr = {
/* ------------------------------------------------------------------------ */
-static int __devinit ks8995_probe(struct spi_device *spi)
+static int ks8995_probe(struct spi_device *spi)
{
struct ks8995_switch *ks;
struct ks8995_pdata *pdata;
@@ -332,7 +332,7 @@ err_drvdata:
return err;
}
-static int __devexit ks8995_remove(struct spi_device *spi)
+static int ks8995_remove(struct spi_device *spi)
{
struct ks8995_data *ks8995;
@@ -353,7 +353,7 @@ static struct spi_driver ks8995_driver = {
.owner = THIS_MODULE,
},
.probe = ks8995_probe,
- .remove = __devexit_p(ks8995_remove),
+ .remove = ks8995_remove,
};
static int __init ks8995_init(void)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb3f5cefeba3..0b2706abe3e3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1034,7 +1034,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-struct rtnl_link_stats64*
+static struct rtnl_link_stats64*
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
struct ppp *ppp = netdev_priv(dev);
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 58f13adaa549..ae7cd7f3656d 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -608,7 +608,7 @@ static int bcm5421_poll_link(struct mii_phy* phy)
if ( mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
- /* try to find out wether we have a link */
+ /* try to find out whether we have a link */
phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -634,7 +634,7 @@ static int bcm5421_read_link(struct mii_phy* phy)
phy->speed = SPEED_1000;
- /* find out wether we are running half- or full duplex */
+ /* find out whether we are running half- or full duplex */
phy_write(phy, MII_NCONFIG, 0x2000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -681,7 +681,7 @@ static int bcm5461_poll_link(struct mii_phy* phy)
if ( mode == BCM54XX_COPPER)
return genmii_poll_link(phy);
- /* find out wether we have a link */
+ /* find out whether we have a link */
phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = phy_read(phy, MII_NCONFIG);
@@ -710,7 +710,7 @@ static int bcm5461_read_link(struct mii_phy* phy)
phy->speed = SPEED_1000;
- /* find out wether we are running half- or full duplex */
+ /* find out whether we are running half- or full duplex */
phy_write(phy, MII_NCONFIG, 0x7000);
phy_reg = phy_read(phy, MII_NCONFIG);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0873cdcf39be..2917a86f4c43 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -68,7 +68,6 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
-#include <net/cls_cgroup.h>
#include <asm/uaccess.h>
@@ -110,16 +109,58 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
+ * the netdevice fit in one page. This makes sure the memory allocation
+ * succeeds. TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS 4096
+
+#define TUN_FLOW_EXPIRE (3 * HZ)
+
+/* A tun_file connects an open character device to a tuntap netdevice. It
+ * also contains all socket related structures (except sock_fprog and tap_filter)
+ * to serve as one transmit queue for the tuntap device. The sock_fprog and
+ * tap_filter are kept in tun_struct since they are used for filtering on the
+ * netdevice, not on a specific queue (at least there is no requirement for
+ * per-queue filtering so far).
+ *
+ * RCU usage:
+ * The tun_file and tun_struct are loosely coupled; the pointer from one to the
+ * other can only be read while rcu_read_lock or rtnl_lock is held.
+ */
struct tun_file {
- atomic_t count;
- struct tun_struct *tun;
+ struct sock sk;
+ struct socket socket;
+ struct socket_wq wq;
+ struct tun_struct __rcu *tun;
struct net *net;
+ struct fasync_struct *fasync;
+ /* only used for fasync */
+ unsigned int flags;
+ u16 queue_index;
+ struct list_head next;
+ struct tun_struct *detached;
};
-struct tun_sock;
+struct tun_flow_entry {
+ struct hlist_node hash_link;
+ struct rcu_head rcu;
+ struct tun_struct *tun;
+
+ u32 rxhash;
+ int queue_index;
+ unsigned long updated;
+};
+#define TUN_NUM_FLOW_ENTRIES 1024
+
+/* Since the socket was moved to tun_file, to preserve the behavior of a
+ * persistent device, the socket filter, sndbuf and vnet header size are
+ * restored when a file is attached to a persistent device.
+ */
struct tun_struct {
- struct tun_file *tfile;
+ struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
+ unsigned int numqueues;
unsigned int flags;
kuid_t owner;
kgid_t group;
@@ -128,27 +169,335 @@ struct tun_struct {
netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
- struct fasync_struct *fasync;
-
- struct tap_filter txflt;
- struct socket socket;
- struct socket_wq wq;
int vnet_hdr_sz;
-
+ int sndbuf;
+ struct tap_filter txflt;
+ struct sock_fprog fprog;
+ /* protected by rtnl lock */
+ bool filter_attached;
#ifdef TUN_DEBUG
int debug;
#endif
+ spinlock_t lock;
+ struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
+ struct timer_list flow_gc_timer;
+ unsigned long ageing_time;
+ unsigned int numdisabled;
+ struct list_head disabled;
+ void *security;
+ u32 flow_count;
};
-struct tun_sock {
- struct sock sk;
- struct tun_struct *tun;
-};
+static inline u32 tun_hashfn(u32 rxhash)
+{
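+ /* TUN_NUM_FLOW_ENTRIES is 1024, so masking with 0x3ff yields the bucket index */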
+ return rxhash & 0x3ff;
+}
+
+static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
+{
+ struct tun_flow_entry *e;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(e, n, head, hash_link) {
+ if (e->rxhash == rxhash)
+ return e;
+ }
+ return NULL;
+}
+
+static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
+ struct hlist_head *head,
+ u32 rxhash, u16 queue_index)
+{
+ struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
+
+ if (e) {
+ tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
+ rxhash, queue_index);
+ e->updated = jiffies;
+ e->rxhash = rxhash;
+ e->queue_index = queue_index;
+ e->tun = tun;
+ hlist_add_head_rcu(&e->hash_link, head);
+ ++tun->flow_count;
+ }
+ return e;
+}
+
+static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
+{
+ tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
+ e->rxhash, e->queue_index);
+ hlist_del_rcu(&e->hash_link);
+ kfree_rcu(e, rcu);
+ --tun->flow_count;
+}
+
+static void tun_flow_flush(struct tun_struct *tun)
+{
+ int i;
+
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
+
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+ tun_flow_delete(tun, e);
+ }
+ spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
+{
+ int i;
+
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
+
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ if (e->queue_index == queue_index)
+ tun_flow_delete(tun, e);
+ }
+ }
+ spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_cleanup(unsigned long data)
+{
+ struct tun_struct *tun = (struct tun_struct *)data;
+ unsigned long delay = tun->ageing_time;
+ unsigned long next_timer = jiffies + delay;
+ unsigned long count = 0;
+ int i;
+
+ tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
+
+ spin_lock_bh(&tun->lock);
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+ struct tun_flow_entry *e;
+ struct hlist_node *h, *n;
+
+ hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+ unsigned long this_timer;
+ count++;
+ this_timer = e->updated + delay;
+ if (time_before_eq(this_timer, jiffies))
+ tun_flow_delete(tun, e);
+ else if (time_before(this_timer, next_timer))
+ next_timer = this_timer;
+ }
+ }
+
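+ /* re-arm the GC timer only while live flow entries remain */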
+ if (count)
+ mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
+ spin_unlock_bh(&tun->lock);
+}
+
+static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
+ struct tun_file *tfile)
+{
+ struct hlist_head *head;
+ struct tun_flow_entry *e;
+ unsigned long delay = tun->ageing_time;
+ u16 queue_index = tfile->queue_index;
+
+ if (!rxhash)
+ return;
+ else
+ head = &tun->flows[tun_hashfn(rxhash)];
+
+ rcu_read_lock();
+
+ /* There is a very small possibility of out-of-order delivery during
+ * queue switching; not worth optimizing. */
+ if (tun->numqueues == 1 || tfile->detached)
+ goto unlock;
+
+ e = tun_flow_find(head, rxhash);
+ if (likely(e)) {
+ /* TODO: keep queueing to old queue until it's empty? */
+ e->queue_index = queue_index;
+ e->updated = jiffies;
+ } else {
+ spin_lock_bh(&tun->lock);
+ if (!tun_flow_find(head, rxhash) &&
+ tun->flow_count < MAX_TAP_FLOWS)
+ tun_flow_create(tun, head, rxhash, queue_index);
+
+ if (!timer_pending(&tun->flow_gc_timer))
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + delay));
+ spin_unlock_bh(&tun->lock);
+ }
+
+unlock:
+ rcu_read_unlock();
+}
+
+/* We try to identify a flow through its rxhash first. The reason that
+ * we do not check rxq no. is because some cards (e.g. 82599) choose
+ * the rxq based on the txq where the last packet of the flow comes. As
+ * the userspace application moves between processors, we may get a
+ * different rxq no. here. If we could not get rxhash, then we would
+ * hope the rxq no. may help here.
+ */
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_flow_entry *e;
+ u32 txq = 0;
+ u32 numqueues = 0;
+
+ rcu_read_lock();
+ numqueues = tun->numqueues;
+
+ txq = skb_get_rxhash(skb);
+ if (txq) {
+ e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+ if (e)
+ txq = e->queue_index;
+ else
+ /* use multiply and shift instead of expensive divide */
+ txq = ((u64)txq * numqueues) >> 32;
+ } else if (likely(skb_rx_queue_recorded(skb))) {
+ txq = skb_get_rx_queue(skb);
+ while (unlikely(txq >= numqueues))
+ txq -= numqueues;
+ }
+
+ rcu_read_unlock();
+ return txq;
+}
+
+static inline bool tun_not_capable(struct tun_struct *tun)
+{
+ const struct cred *cred = current_cred();
+ struct net *net = dev_net(tun->dev);
+
+ return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN);
+}
+
+static void tun_set_real_num_queues(struct tun_struct *tun)
+{
+ netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
+ netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
+}
-static inline struct tun_sock *tun_sk(struct sock *sk)
+static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
- return container_of(sk, struct tun_sock, sk);
+ tfile->detached = tun;
+ list_add_tail(&tfile->next, &tun->disabled);
+ ++tun->numdisabled;
+}
+
+static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
+{
+ struct tun_struct *tun = tfile->detached;
+
+ tfile->detached = NULL;
+ list_del_init(&tfile->next);
+ --tun->numdisabled;
+ return tun;
+}
+
+static void __tun_detach(struct tun_file *tfile, bool clean)
+{
+ struct tun_file *ntfile;
+ struct tun_struct *tun;
+ struct net_device *dev;
+
+ tun = rtnl_dereference(tfile->tun);
+
+ if (tun && !tfile->detached) {
+ u16 index = tfile->queue_index;
+ BUG_ON(index >= tun->numqueues);
+ dev = tun->dev;
+
+ rcu_assign_pointer(tun->tfiles[index],
+ tun->tfiles[tun->numqueues - 1]);
+ ntfile = rtnl_dereference(tun->tfiles[index]);
+ ntfile->queue_index = index;
+
+ --tun->numqueues;
+ if (clean) {
+ rcu_assign_pointer(tfile->tun, NULL);
+ sock_put(&tfile->sk);
+ } else
+ tun_disable_queue(tun, tfile);
+
+ synchronize_net();
+ tun_flow_delete_by_queue(tun, tun->numqueues + 1);
+ /* Drop read queue */
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_set_real_num_queues(tun);
+ } else if (tfile->detached && clean) {
+ tun = tun_enable_queue(tfile);
+ sock_put(&tfile->sk);
+ }
+
+ if (clean) {
+ if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+ netif_carrier_off(tun->dev);
+
+ if (!(tun->flags & TUN_PERSIST) &&
+ tun->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(tun->dev);
+ }
+
+ BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
+ &tfile->socket.flags));
+ sk_release_kernel(&tfile->sk);
+ }
+}
+
+static void tun_detach(struct tun_file *tfile, bool clean)
+{
+ rtnl_lock();
+ __tun_detach(tfile, clean);
+ rtnl_unlock();
+}
+
+static void tun_detach_all(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile, *tmp;
+ int i, n = tun->numqueues;
+
+ for (i = 0; i < n; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ BUG_ON(!tfile);
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ --tun->numqueues;
+ }
+ list_for_each_entry(tfile, &tun->disabled, next) {
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ }
+ BUG_ON(tun->numqueues != 0);
+
+ synchronize_net();
+ for (i = 0; i < n; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ /* Drop read queue */
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ sock_put(&tfile->sk);
+ }
+ list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_enable_queue(tfile);
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ sock_put(&tfile->sk);
+ }
+ BUG_ON(tun->numdisabled != 0);
+
+ if (tun->flags & TUN_PERSIST)
+ module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -156,60 +505,60 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
struct tun_file *tfile = file->private_data;
int err;
- ASSERT_RTNL();
-
- netif_tx_lock_bh(tun->dev);
+ err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+ if (err < 0)
+ goto out;
err = -EINVAL;
- if (tfile->tun)
+ if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;
err = -EBUSY;
- if (tun->tfile)
+ if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+ goto out;
+
+ err = -E2BIG;
+ if (!tfile->detached &&
+ tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
goto out;
err = 0;
- tfile->tun = tun;
- tun->tfile = tfile;
- tun->socket.file = file;
- netif_carrier_on(tun->dev);
- dev_hold(tun->dev);
- sock_hold(tun->socket.sk);
- atomic_inc(&tfile->count);
-out:
- netif_tx_unlock_bh(tun->dev);
- return err;
-}
+ /* Re-attach the filter to the persistent device */
+ if (tun->filter_attached == true) {
+ err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ if (!err)
+ goto out;
+ }
+ tfile->queue_index = tun->numqueues;
+ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
-static void __tun_detach(struct tun_struct *tun)
-{
- /* Detach from net device */
- netif_tx_lock_bh(tun->dev);
- netif_carrier_off(tun->dev);
- tun->tfile = NULL;
- netif_tx_unlock_bh(tun->dev);
+ if (tfile->detached)
+ tun_enable_queue(tfile);
+ else
+ sock_hold(&tfile->sk);
- /* Drop read queue */
- skb_queue_purge(&tun->socket.sk->sk_receive_queue);
+ tun_set_real_num_queues(tun);
- /* Drop the extra count on the net device */
- dev_put(tun->dev);
-}
+ /* device is allowed to go away first, so no need to hold extra
+ * refcnt.
+ */
-static void tun_detach(struct tun_struct *tun)
-{
- rtnl_lock();
- __tun_detach(tun);
- rtnl_unlock();
+out:
+ return err;
}
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
- struct tun_struct *tun = NULL;
+ struct tun_struct *tun;
- if (atomic_inc_not_zero(&tfile->count))
- tun = tfile->tun;
+ rcu_read_lock();
+ tun = rcu_dereference(tfile->tun);
+ if (tun)
+ dev_hold(tun->dev);
+ rcu_read_unlock();
return tun;
}
@@ -221,10 +570,7 @@ static struct tun_struct *tun_get(struct file *file)
static void tun_put(struct tun_struct *tun)
{
- struct tun_file *tfile = tun->tfile;
-
- if (atomic_dec_and_test(&tfile->count))
- tun_detach(tfile->tun);
+ dev_put(tun->dev);
}
/* TAP filtering */
@@ -344,38 +690,20 @@ static const struct ethtool_ops tun_ethtool_ops;
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
- struct tun_struct *tun = netdev_priv(dev);
- struct tun_file *tfile = tun->tfile;
-
- /* Inform the methods they need to stop using the dev.
- */
- if (tfile) {
- wake_up_all(&tun->wq.wait);
- if (atomic_dec_and_test(&tfile->count))
- __tun_detach(tun);
- }
-}
-
-static void tun_free_netdev(struct net_device *dev)
-{
- struct tun_struct *tun = netdev_priv(dev);
-
- BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
-
- sk_release_kernel(tun->socket.sk);
+ tun_detach_all(dev);
}
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
return 0;
}
@@ -383,38 +711,36 @@ static int tun_net_close(struct net_device *dev)
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ int txq = skb->queue_mapping;
+ struct tun_file *tfile;
- tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+ rcu_read_lock();
+ tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tun->tfile)
+ if (txq >= tun->numqueues)
goto drop;
+ tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+
+ BUG_ON(!tfile);
+
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for the TAP devices. */
if (!check_filter(&tun->txflt, skb))
goto drop;
- if (tun->socket.sk->sk_filter &&
- sk_filter(tun->socket.sk, skb))
+ if (tfile->socket.sk->sk_filter &&
+ sk_filter(tfile->socket.sk, skb))
goto drop;
- if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
- if (!(tun->flags & TUN_ONE_QUEUE)) {
- /* Normal queueing mode. */
- /* Packet scheduler handles dropping of further packets. */
- netif_stop_queue(dev);
-
- /* We won't see all dropped packets individually, so overrun
- * error is more appropriate. */
- dev->stats.tx_fifo_errors++;
- } else {
- /* Single queue mode.
- * Driver handles dropping of all packets itself. */
- goto drop;
- }
- }
+ /* Limit the number of packets queued by dividing the tx queue length by
+ * the number of queues.
+ */
+ if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
+ >= dev->tx_queue_len / tun->numqueues)
+ goto drop;
/* Orphan the skb - required as we might hang on to it
* for indefinite time. */
@@ -423,18 +749,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
/* Enqueue packet */
- skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
/* Notify and wake up reader process */
- if (tun->flags & TUN_FASYNC)
- kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
+
+ rcu_read_unlock();
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
+ skb_tx_error(skb);
kfree_skb(skb);
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -490,6 +820,7 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
.ndo_fix_features = tun_net_fix_features,
+ .ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
@@ -505,11 +836,33 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_mode = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
};
+static int tun_flow_init(struct tun_struct *tun)
+{
+ int i;
+
+ for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
+ INIT_HLIST_HEAD(&tun->flows[i]);
+
+ tun->ageing_time = TUN_FLOW_EXPIRE;
+ setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + tun->ageing_time));
+
+ return 0;
+}
+
+static void tun_flow_uninit(struct tun_struct *tun)
+{
+ del_timer_sync(&tun->flow_gc_timer);
+ tun_flow_flush(tun);
+}
+
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
@@ -535,6 +888,7 @@ static void tun_net_init(struct net_device *dev)
/* Ethernet TAP Device */
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
eth_hw_addr_random(dev);
@@ -546,7 +900,7 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
@@ -556,11 +910,11 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
if (!tun)
return POLLERR;
- sk = tun->socket.sk;
+ sk = tfile->socket.sk;
tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
- poll_wait(file, &tun->wq.wait, wait);
+ poll_wait(file, &tfile->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -579,16 +933,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
-static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
size_t prepad, size_t len,
size_t linear, int noblock)
{
- struct sock *sk = tun->socket.sk;
+ struct sock *sk = tfile->socket.sk;
struct sk_buff *skb;
int err;
- sock_update_classid(sk);
-
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
@@ -685,9 +1037,9 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
}
/* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
- const struct iovec *iv, size_t total_len,
- size_t count, int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ void *msg_control, const struct iovec *iv,
+ size_t total_len, size_t count, int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -697,6 +1049,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
int copylen;
bool zerocopy = false;
int err;
+ u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) > total_len)
@@ -757,7 +1110,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
} else
copylen = len;
- skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
+ skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
@@ -849,11 +1202,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
}
+ skb_reset_network_header(skb);
+ rxhash = skb_get_rxhash(skb);
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
+ tun_flow_update(tun, rxhash, tfile);
return total_len;
}
@@ -862,6 +1218,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
{
struct file *file = iocb->ki_filp;
struct tun_struct *tun = tun_get(file);
+ struct tun_file *tfile = file->private_data;
ssize_t result;
if (!tun)
@@ -869,8 +1226,8 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
- result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
- file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
+ count, file->f_flags & O_NONBLOCK);
tun_put(tun);
return result;
@@ -878,6 +1235,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
+ struct tun_file *tfile,
struct sk_buff *skb,
const struct iovec *iv, int len)
{
@@ -957,7 +1315,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_do_read(struct tun_struct *tun,
+static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct kiocb *iocb, const struct iovec *iv,
ssize_t len, int noblock)
{
@@ -965,15 +1323,15 @@ static ssize_t tun_do_read(struct tun_struct *tun,
struct sk_buff *skb;
ssize_t ret = 0;
- tun_debug(KERN_INFO, tun, "tun_chr_read\n");
+ tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (unlikely(!noblock))
- add_wait_queue(&tun->wq.wait, &wait);
+ add_wait_queue(&tfile->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
- if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
+ if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
if (noblock) {
ret = -EAGAIN;
break;
@@ -991,16 +1349,15 @@ static ssize_t tun_do_read(struct tun_struct *tun,
schedule();
continue;
}
- netif_wake_queue(tun->dev);
- ret = tun_put_user(tun, skb, iv, len);
+ ret = tun_put_user(tun, tfile, skb, iv, len);
kfree_skb(skb);
break;
}
current->state = TASK_RUNNING;
if (unlikely(!noblock))
- remove_wait_queue(&tun->wq.wait, &wait);
+ remove_wait_queue(&tfile->wq.wait, &wait);
return ret;
}
@@ -1021,13 +1378,24 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
- ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, iocb, iv, len,
+ file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
}
+static void tun_free_netdev(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ BUG_ON(!(list_empty(&tun->disabled)));
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+ free_netdev(dev);
+}
+
static void tun_setup(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1056,7 +1424,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
- struct tun_struct *tun;
+ struct tun_file *tfile;
wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
@@ -1070,37 +1438,46 @@ static void tun_sock_write_space(struct sock *sk)
wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
- tun = tun_sk(sk)->tun;
- kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
-}
-
-static void tun_sock_destruct(struct sock *sk)
-{
- free_netdev(tun_sk(sk)->tun->dev);
+ tfile = container_of(sk, struct tun_file, sk);
+ kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
- struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
- return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
- m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+ int ret;
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun = __tun_get(tfile);
+
+ if (!tun)
+ return -EBADFD;
+ ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
+ m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+ tun_put(tun);
+ return ret;
}
+
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
{
- struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun = __tun_get(tfile);
int ret;
+
+ if (!tun)
+ return -EBADFD;
+
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
}
+ tun_put(tun);
return ret;
}
@@ -1121,7 +1498,7 @@ static const struct proto_ops tun_socket_ops = {
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
- .obj_size = sizeof(struct tun_sock),
+ .obj_size = sizeof(struct tun_file),
};
static int tun_flags(struct tun_struct *tun)
@@ -1136,12 +1513,18 @@ static int tun_flags(struct tun_struct *tun)
if (tun->flags & TUN_NO_PI)
flags |= IFF_NO_PI;
+ /* This flag has no real effect. We track the value for backwards
+ * compatibility.
+ */
if (tun->flags & TUN_ONE_QUEUE)
flags |= IFF_ONE_QUEUE;
if (tun->flags & TUN_VNET_HDR)
flags |= IFF_VNET_HDR;
+ if (tun->flags & TUN_TAP_MQ)
+ flags |= IFF_MULTI_QUEUE;
+
return flags;
}
@@ -1178,15 +1561,16 @@ static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
- struct sock *sk;
struct tun_struct *tun;
+ struct tun_file *tfile = file->private_data;
struct net_device *dev;
int err;
+ if (tfile->detached)
+ return -EINVAL;
+
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
- const struct cred *cred = current_cred();
-
if (ifr->ifr_flags & IFF_TUN_EXCL)
return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -1196,23 +1580,27 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
return -EINVAL;
- if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
- !capable(CAP_NET_ADMIN))
+ if (tun_not_capable(tun))
return -EPERM;
- err = security_tun_dev_attach(tun->socket.sk);
+ err = security_tun_dev_open(tun->security);
if (err < 0)
return err;
err = tun_attach(tun, file);
if (err < 0)
return err;
+
+ if (tun->flags & TUN_TAP_MQ &&
+ (tun->numqueues + tun->numdisabled > 1))
+ return err;
}
else {
char *name;
unsigned long flags = 0;
+ int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+ MAX_TAP_QUEUES : 1;
- if (!capable(CAP_NET_ADMIN))
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
err = security_tun_dev_create();
if (err < 0)
@@ -1233,8 +1621,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (*ifr->ifr_name)
name = ifr->ifr_name;
- dev = alloc_netdev(sizeof(struct tun_struct), name,
- tun_setup);
+ dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
+ tun_setup, queues, queues);
+
if (!dev)
return -ENOMEM;
@@ -1246,48 +1635,43 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->flags = flags;
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
- set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
-
- err = -ENOMEM;
- sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
- if (!sk)
- goto err_free_dev;
- sk_change_net(sk, net);
- tun->socket.wq = &tun->wq;
- init_waitqueue_head(&tun->wq.wait);
- tun->socket.ops = &tun_socket_ops;
- sock_init_data(&tun->socket, sk);
- sk->sk_write_space = tun_sock_write_space;
- sk->sk_sndbuf = INT_MAX;
- sock_set_flag(sk, SOCK_ZEROCOPY);
+ tun->filter_attached = false;
+ tun->sndbuf = tfile->socket.sk->sk_sndbuf;
- tun_sk(sk)->tun = tun;
+ spin_lock_init(&tun->lock);
- security_tun_dev_post_create(sk);
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0)
+ goto err_free_dev;
tun_net_init(dev);
+ err = tun_flow_init(tun);
+ if (err < 0)
+ goto err_free_dev;
+
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES;
dev->features = dev->hw_features;
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, file);
+ if (err < 0)
+ goto err_free_dev;
+
err = register_netdevice(tun->dev);
if (err < 0)
- goto err_free_sk;
+ goto err_free_dev;
if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
-
- sk->sk_destruct = tun_sock_destruct;
-
- err = tun_attach(tun, file);
- if (err < 0)
- goto failed;
}
+ netif_carrier_on(tun->dev);
+
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
@@ -1295,6 +1679,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
tun->flags &= ~TUN_NO_PI;
+ /* This flag has no real effect. We track the value for backwards
+ * compatibility.
+ */
if (ifr->ifr_flags & IFF_ONE_QUEUE)
tun->flags |= TUN_ONE_QUEUE;
else
@@ -1305,24 +1692,26 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
tun->flags &= ~TUN_VNET_HDR;
+ if (ifr->ifr_flags & IFF_MULTI_QUEUE)
+ tun->flags |= TUN_TAP_MQ;
+ else
+ tun->flags &= ~TUN_TAP_MQ;
+
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
if (netif_running(tun->dev))
- netif_wake_queue(tun->dev);
+ netif_tx_wake_all_queues(tun->dev);
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
- err_free_sk:
- tun_free_netdev(dev);
err_free_dev:
free_netdev(dev);
- failed:
return err;
}
-static int tun_get_iff(struct net *net, struct tun_struct *tun,
+static void tun_get_iff(struct net *net, struct tun_struct *tun,
struct ifreq *ifr)
{
tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -1331,7 +1720,6 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
ifr->ifr_flags = tun_flags(tun);
- return 0;
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
@@ -1373,13 +1761,86 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
return 0;
}
+static void tun_detach_filter(struct tun_struct *tun, int n)
+{
+ int i;
+ struct tun_file *tfile;
+
+ for (i = 0; i < n; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ sk_detach_filter(tfile->socket.sk);
+ }
+
+ tun->filter_attached = false;
+}
+
+static int tun_attach_filter(struct tun_struct *tun)
+{
+ int i, ret = 0;
+ struct tun_file *tfile;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ if (ret) {
+ tun_detach_filter(tun, i);
+ return ret;
+ }
+ }
+
+ tun->filter_attached = true;
+ return ret;
+}
+
+static void tun_set_sndbuf(struct tun_struct *tun)
+{
+ struct tun_file *tfile;
+ int i;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_sndbuf = tun->sndbuf;
+ }
+}
+
+static int tun_set_queue(struct file *file, struct ifreq *ifr)
+{
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun;
+ int ret = 0;
+
+ rtnl_lock();
+
+ if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
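+ /* re-attach this file to the device it was previously detached from */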
+ tun = tfile->detached;
+ if (!tun) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ ret = security_tun_dev_attach_queue(tun->security);
+ if (ret < 0)
+ goto unlock;
+ ret = tun_attach(tun, file);
+ } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+ tun = rtnl_dereference(tfile->tun);
+ if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
+ ret = -EINVAL;
+ else
+ __tun_detach(tfile, false);
+ } else
+ ret = -EINVAL;
+
+unlock:
+ rtnl_unlock();
+ return ret;
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
- struct sock_fprog fprog;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
@@ -1387,7 +1848,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
@@ -1398,10 +1859,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF. */
return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
- IFF_VNET_HDR,
+ IFF_VNET_HDR | IFF_MULTI_QUEUE,
(unsigned int __user*)argp);
- }
+ } else if (cmd == TUNSETQUEUE)
+ return tun_set_queue(file, &ifr);
+ ret = 0;
rtnl_lock();
tun = __tun_get(tfile);
@@ -1422,14 +1885,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
+ tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
- ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
- if (ret)
- break;
+ tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
@@ -1444,11 +1905,17 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case TUNSETPERSIST:
- /* Disable/Enable persist mode */
- if (arg)
+ /* Disable/Enable persist mode. Keep an extra reference to the
+ * module to prevent the module from being unloaded.
+ */
+ if (arg && !(tun->flags & TUN_PERSIST)) {
tun->flags |= TUN_PERSIST;
- else
+ __module_get(THIS_MODULE);
+ }
+ if (!arg && (tun->flags & TUN_PERSIST)) {
tun->flags &= ~TUN_PERSIST;
+ module_put(THIS_MODULE);
+ }
tun_debug(KERN_INFO, tun, "persist %s\n",
arg ? "enabled" : "disabled");
@@ -1462,7 +1929,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->owner = owner;
- tun_debug(KERN_INFO, tun, "owner set to %d\n",
+ tun_debug(KERN_INFO, tun, "owner set to %u\n",
from_kuid(&init_user_ns, tun->owner));
break;
@@ -1474,7 +1941,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->group = group;
- tun_debug(KERN_INFO, tun, "group set to %d\n",
+ tun_debug(KERN_INFO, tun, "group set to %u\n",
from_kgid(&init_user_ns, tun->group));
break;
@@ -1526,7 +1993,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case TUNGETSNDBUF:
- sndbuf = tun->socket.sk->sk_sndbuf;
+ sndbuf = tfile->socket.sk->sk_sndbuf;
if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
ret = -EFAULT;
break;
@@ -1537,7 +2004,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
- tun->socket.sk->sk_sndbuf = sndbuf;
+ tun->sndbuf = sndbuf;
+ tun_set_sndbuf(tun);
break;
case TUNGETVNETHDRSZ:
@@ -1565,10 +2033,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
ret = -EFAULT;
- if (copy_from_user(&fprog, argp, sizeof(fprog)))
+ if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
break;
- ret = sk_attach_filter(&fprog, tun->socket.sk);
+ ret = tun_attach_filter(tun);
break;
case TUNDETACHFILTER:
@@ -1576,7 +2044,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
- ret = sk_detach_filter(tun->socket.sk);
+ ret = 0;
+ tun_detach_filter(tun, tun->numqueues);
break;
default:
@@ -1628,27 +2097,21 @@ static long tun_chr_compat_ioctl(struct file *file,
static int tun_chr_fasync(int fd, struct file *file, int on)
{
- struct tun_struct *tun = tun_get(file);
+ struct tun_file *tfile = file->private_data;
int ret;
- if (!tun)
- return -EBADFD;
-
- tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
-
- if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
+ if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
goto out;
if (on) {
ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
if (ret)
goto out;
- tun->flags |= TUN_FASYNC;
+ tfile->flags |= TUN_FASYNC;
} else
- tun->flags &= ~TUN_FASYNC;
+ tfile->flags &= ~TUN_FASYNC;
ret = 0;
out:
- tun_put(tun);
return ret;
}
@@ -1658,44 +2121,40 @@ static int tun_chr_open(struct inode *inode, struct file * file)
DBG1(KERN_INFO, "tunX: tun_chr_open\n");
- tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
+ &tun_proto);
if (!tfile)
return -ENOMEM;
- atomic_set(&tfile->count, 0);
- tfile->tun = NULL;
+ rcu_assign_pointer(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
+ tfile->flags = 0;
+
+ rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
+ init_waitqueue_head(&tfile->wq.wait);
+
+ tfile->socket.file = file;
+ tfile->socket.ops = &tun_socket_ops;
+
+ sock_init_data(&tfile->socket, &tfile->sk);
+ sk_change_net(&tfile->sk, tfile->net);
+
+ tfile->sk.sk_write_space = tun_sock_write_space;
+ tfile->sk.sk_sndbuf = INT_MAX;
+
file->private_data = tfile;
+ set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+ INIT_LIST_HEAD(&tfile->next);
+
return 0;
}
static int tun_chr_close(struct inode *inode, struct file *file)
{
struct tun_file *tfile = file->private_data;
- struct tun_struct *tun;
+ struct net *net = tfile->net;
- tun = __tun_get(tfile);
- if (tun) {
- struct net_device *dev = tun->dev;
-
- tun_debug(KERN_INFO, tun, "tun_chr_close\n");
-
- __tun_detach(tun);
-
- /* If desirable, unregister the netdevice. */
- if (!(tun->flags & TUN_PERSIST)) {
- rtnl_lock();
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
- rtnl_unlock();
- }
- }
-
- tun = tfile->tun;
- if (tun)
- sock_put(tun->socket.sk);
-
- put_net(tfile->net);
- kfree(tfile);
+ tun_detach(tfile, true);
+ put_net(net);
return 0;
}
@@ -1822,14 +2281,13 @@ static void tun_cleanup(void)
* holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
- struct tun_struct *tun;
+ struct tun_file *tfile;
if (file->f_op != &tun_fops)
return ERR_PTR(-EINVAL);
- tun = tun_get(file);
- if (!tun)
+ tfile = file->private_data;
+ if (!tfile)
return ERR_PTR(-EBADFD);
- tun_put(tun);
- return &tun->socket;
+ return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
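
tun_get_socket() now resolves the per-queue socket straight from file->private_data, with no tun device reference needed. A minimal consumer sketch (assumption: modelled on how vhost-net turns a tap fd into a socket; not part of this patch):

static struct socket *example_get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);	/* not a tun file, drop the reference */
	return sock;
}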
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c1ae76968f47..ef976215b649 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -219,6 +219,24 @@ config USB_NET_CDC_NCM
* ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
* Ericsson F5521gw Mobile Broadband Module
+config USB_NET_CDC_MBIM
+ tristate "CDC MBIM support"
+ depends on USB_USBNET
+ select USB_WDM
+ select USB_NET_CDC_NCM
+ help
+ This driver provides support for CDC MBIM (Mobile Broadband
+ Interface Model) devices. The CDC MBIM specification is
+ available from <http://www.usb.org/>.
+
+ MBIM devices require configuration using the management
+ protocol defined by the MBIM specification. This driver
+ provides unfiltered access to the MBIM control channel
+ through the associated /dev/cdc-wdmx character device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cdc_mbim.
+
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
@@ -230,6 +248,8 @@ config USB_NET_DM9601
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
+ select BITREVERSE
+ select CRC16
select CRC32
help
This option adds support for SMSC LAN75XX based USB 2.0
@@ -238,6 +258,8 @@ config USB_NET_SMSC75XX
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
+ select BITREVERSE
+ select CRC16
select CRC32
help
This option adds support for SMSC LAN95XX based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index bf063008c1af..478691326f37 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -31,4 +31,5 @@ obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
+obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 774d9ce2dafc..50d167330d38 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -25,121 +25,30 @@
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_GET_TIMEOUT);
- if (err == size)
- memcpy(data, buf, size);
- else if (err >= 0)
- err = -EINVAL;
- kfree(buf);
+ int ret;
+ ret = usbnet_read_cmd(dev, cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
-out:
- return err;
+ if (ret != size && ret >= 0)
+ return -EINVAL;
+ return ret;
}
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- goto out;
- }
-
- err = usb_control_msg(
- dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_SET_TIMEOUT);
- kfree(buf);
-
-out:
- return err;
-}
-
-static void asix_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
- status);
-
- kfree(req);
- usb_free_urb(urb);
+ return usbnet_write_cmd(dev, cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
}
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
-
- netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = cmd;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- asix_async_cmd_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
+ usbnet_write_cmd_async(dev, cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
}
int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
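
The conversion above leans on the usbnet_read_cmd()/usbnet_write_cmd() helpers, which return the number of bytes transferred or a negative errno, so short reads still have to be turned into an error by the caller. A hedged sketch of that contract (example_read_mac() is illustrative; AX_CMD_READ_NODE_ID is the existing asix MAC-read command):

static int example_read_mac(struct usbnet *dev, u8 mac[ETH_ALEN])
{
	int ret = usbnet_read_cmd(dev, AX_CMD_READ_NODE_ID,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_DEVICE,
				  0, 0, mac, ETH_ALEN);

	if (ret < 0)
		return ret;		/* USB-level failure */
	if (ret != ETH_ALEN)
		return -EINVAL;		/* short read from the device */
	return 0;
}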
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 33ab824773c5..7a6e758f48e7 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -64,6 +64,16 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
}
}
+static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
+{
+ if (is_valid_ether_addr(addr)) {
+ memcpy(dev->net->dev_addr, addr, ETH_ALEN);
+ } else {
+ netdev_info(dev->net, "invalid hw address, using random\n");
+ eth_hw_addr_random(dev->net);
+ }
+}
+
/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
@@ -225,7 +235,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
ret);
goto out;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ asix_set_netdev_dev_addr(dev, buf);
/* Initialize MII structure */
dev->mii.dev = dev->net;
@@ -423,7 +434,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ asix_set_netdev_dev_addr(dev, buf);
/* Initialize MII structure */
dev->mii.dev = dev->net;
@@ -777,7 +789,8 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ asix_set_netdev_dev_addr(dev, buf);
/* Initialize MII structure */
dev->mii.dev = dev->net;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index d0129827602b..3f3d12d766e7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -457,12 +457,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
-static int cdc_manage_power(struct usbnet *dev, int on)
-{
- dev->intf->needs_remote_wakeup = on;
- return 0;
-}
-
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -470,7 +464,7 @@ static const struct driver_info cdc_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
static const struct driver_info wwan_info = {
@@ -479,7 +473,7 @@ static const struct driver_info wwan_info = {
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
- .manage_power = cdc_manage_power,
+ .manage_power = usbnet_manage_power,
};
/*-------------------------------------------------------------------------*/
@@ -487,6 +481,7 @@ static const struct driver_info wwan_info = {
#define HUAWEI_VENDOR_ID 0x12D1
#define NOVATEL_VENDOR_ID 0x1410
#define ZTE_VENDOR_ID 0x19D2
+#define DELL_VENDOR_ID 0x413C
static const struct usb_device_id products [] = {
/*
@@ -594,27 +589,29 @@ static const struct usb_device_id products [] = {
/* Novatel USB551L and MC551 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0xB001,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0xB001, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* Novatel E362 - handled by qmi_wwan */
{
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = NOVATEL_VENDOR_ID,
- .idProduct = 0x9010,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9010, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
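
The new Dell entries reuse the blacklisting idiom of the Novatel ones: the match carries .driver_info = 0, so the generic usbnet probe declines the interface and qmi_wwan can claim it instead. A condensed sketch of that check (assumption: simplified from usbnet_probe(), not part of this patch):

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *prod)
{
	const struct driver_info *info;

	info = (const struct driver_info *)prod->driver_info;
	if (!info)	/* blacklisted entry, leave it for another driver */
		return -ENODEV;
	/* ... normal usbnet setup would continue here ... */
	return 0;
}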
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
new file mode 100644
index 000000000000..248d2dc765a5
--- /dev/null
+++ b/drivers/net/usb/cdc_mbim.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ * This driver is based on and reuse most of cdc_ncm, which is
+ * Copyright (C) ST-Ericsson 2010-2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc-wdm.h>
+#include <linux/usb/cdc_ncm.h>
+
+/* driver specific data - must match cdc_ncm usage */
+struct cdc_mbim_state {
+ struct cdc_ncm_ctx *ctx;
+ atomic_t pmcount;
+ struct usb_driver *subdriver;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int cdc_mbim_manage_power(struct usbnet *dev, int on)
+{
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ int rv = 0;
+
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ rv = usb_autopm_get_interface(dev->intf);
+ if (rv < 0)
+ goto err;
+ dev->intf->needs_remote_wakeup = on;
+ usb_autopm_put_interface(dev->intf);
+ }
+err:
+ return rv;
+}
+
+static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
+
+ return cdc_mbim_manage_power(dev, status);
+}
+
+
+static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *subdriver = ERR_PTR(-ENODEV);
+ int ret = -ENODEV;
+ u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* see if interface supports MBIM alternate setting */
+ if (intf->num_altsetting == 2) {
+ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_MBIM);
+ data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+ }
+
+ /* Probably NCM, defer for cdc_ncm_bind */
+ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ goto err;
+
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
+ if (ret)
+ goto err;
+
+ ctx = info->ctx;
+
+ /* The MBIM descriptor and the status endpoint are required */
+ if (ctx->mbim_desc && dev->status)
+ subdriver = usb_cdc_wdm_register(ctx->control,
+ &dev->status->desc,
+ le16_to_cpu(ctx->mbim_desc->wMaxControlMessage),
+ cdc_mbim_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ ret = PTR_ERR(subdriver);
+ cdc_ncm_unbind(dev, intf);
+ goto err;
+ }
+
+ /* can't let usbnet use the interrupt endpoint */
+ dev->status = NULL;
+ info->subdriver = subdriver;
+
+ /* MBIM cannot do ARP */
+ dev->net->flags |= IFF_NOARP;
+
+ /* no need to put the VLAN tci in the packet headers */
+ dev->net->features |= NETIF_F_HW_VLAN_TX;
+err:
+ return ret;
+}
+
+static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+
+ /* disconnect subdriver from control interface */
+ if (info->subdriver && info->subdriver->disconnect)
+ info->subdriver->disconnect(ctx->control);
+ info->subdriver = NULL;
+
+ /* let NCM unbind clean up both control and data interface */
+ cdc_ncm_unbind(dev, intf);
+}
+
+
+static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb_out;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+ u16 tci = 0;
+ u8 *c;
+
+ if (!ctx)
+ goto error;
+
+ if (skb) {
+ if (skb->len <= ETH_HLEN)
+ goto error;
+
+ /* mapping VLANs to MBIM sessions:
+ * no tag => IPS session <0>
+ * 1 - 255 => IPS session <vlanid>
+ * 256 - 511 => DSS session <vlanid - 256>
+ * 512 - 4095 => unsupported, drop
+ */
+ vlan_get_tag(skb, &tci);
+
+ switch (tci & 0x0f00) {
+ case 0x0000: /* VLAN ID 0 - 255 */
+ /* verify that datagram is IPv4 or IPv6 */
+ skb_reset_mac_header(skb);
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ goto error;
+ }
+ c = (u8 *)&sign;
+ c[3] = tci;
+ break;
+ case 0x0100: /* VLAN ID 256 - 511 */
+ sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
+ c = (u8 *)&sign;
+ c[3] = tci;
+ break;
+ default:
+ netif_err(dev, tx_err, dev->net,
+ "unsupported tci=0x%04x\n", tci);
+ goto error;
+ }
+ skb_pull(skb, ETH_HLEN);
+ }
+
+ spin_lock_bh(&ctx->mtx);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
+ spin_unlock_bh(&ctx->mtx);
+ return skb_out;
+
+error:
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ return NULL;
+}
+
+static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
+{
+ __be16 proto = htons(ETH_P_802_3);
+ struct sk_buff *skb = NULL;
+
+ if (tci < 256) { /* IPS session? */
+ if (len < sizeof(struct iphdr))
+ goto err;
+
+ switch (*buf & 0xf0) {
+ case 0x40:
+ proto = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ goto err;
+ }
+ }
+
+ skb = netdev_alloc_skb_ip_align(dev->net, len + ETH_HLEN);
+ if (!skb)
+ goto err;
+
+ /* add an ethernet header */
+ skb_put(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ eth_hdr(skb)->h_proto = proto;
+ memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+
+ /* add datagram */
+ memcpy(skb_put(skb, len), buf, len);
+
+ /* map MBIM session to VLAN */
+ if (tci)
+ vlan_put_tag(skb, tci);
+err:
+ return skb;
+}
+
+static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ int len;
+ int nframes;
+ int x;
+ int offset;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ int ndpoffset;
+ int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u8 *c;
+ u16 tci;
+
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ndpoffset < 0)
+ goto error;
+
+next_ndp:
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+ switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) {
+ case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
+ c = (u8 *)&ndp16->dwSignature;
+ tci = c[3];
+ break;
+ case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
+ c = (u8 *)&ndp16->dwSignature;
+ tci = c[3] + 256;
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NDP signature <0x%08x>\n",
+ le32_to_cpu(ndp16->dwSignature));
+ goto err_ndp;
+
+ }
+
+ dpe16 = ndp16->dpe16;
+ for (x = 0; x < nframes; x++, dpe16++) {
+ offset = le16_to_cpu(dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe16->wDatagramLength);
+
+ /*
+ * CDC NCM ch. 3.7
+ * All entries after first NULL entry are to be ignored
+ */
+ if ((offset == 0) || (len == 0)) {
+ if (!x)
+ goto err_ndp; /* empty NTB */
+ break;
+ }
+
+ /* sanity checking */
+ if (((offset + len) > skb_in->len) || (len > ctx->rx_max)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, len, skb_in);
+ if (!x)
+ goto err_ndp;
+ break;
+ } else {
+ skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
+ if (!skb)
+ goto error;
+ usbnet_skb_return(dev, skb);
+ }
+ }
+err_ndp:
+ /* are there more NDPs to process? */
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ndpoffset && loopcount--)
+ goto next_ndp;
+
+ return 1;
+error:
+ return 0;
+}
+
+static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ int ret = 0;
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+
+ if (ctx == NULL) {
+ ret = -1;
+ goto error;
+ }
+
+ ret = usbnet_suspend(intf, message);
+ if (ret < 0)
+ goto error;
+
+ if (intf == ctx->control && info->subdriver && info->subdriver->suspend)
+ ret = info->subdriver->suspend(intf, message);
+ if (ret < 0)
+ usbnet_resume(intf);
+
+error:
+ return ret;
+}
+
+static int cdc_mbim_resume(struct usb_interface *intf)
+{
+ int ret = 0;
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+ struct cdc_ncm_ctx *ctx = info->ctx;
+ bool callsub = (intf == ctx->control && info->subdriver && info->subdriver->resume);
+
+ if (callsub)
+ ret = info->subdriver->resume(intf);
+ if (ret < 0)
+ goto err;
+ ret = usbnet_resume(intf);
+ if (ret < 0 && callsub && info->subdriver->suspend)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
+err:
+ return ret;
+}
+
+static const struct driver_info cdc_mbim_info = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+};
+
+/* MBIM and NCM devices should not need a ZLP after NTBs with
+ * dwNtbOutMaxSize length. This driver_info is for the exceptional
+ * devices requiring it anyway, allowing them to be supported without
+ * forcing the performance penalty on all the sane devices.
+ */
+static const struct driver_info cdc_mbim_info_zlp = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+};
+
+static const struct usb_device_id mbim_devs[] = {
+ /* This duplicate NCM entry is intentional. MBIM devices can
+ * be disguised as NCM by default, and this is necessary to
+ * allow us to bind the correct driver_info to such devices.
+ *
+ * bind() will sort this out for us, selecting the correct
+ * entry and rejecting the other
+ */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
+ },
+ /* Sierra Wireless MC7710 needs ZLPs */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info,
+ },
+ {
+ },
+};
+MODULE_DEVICE_TABLE(usb, mbim_devs);
+
+static struct usb_driver cdc_mbim_driver = {
+ .name = "cdc_mbim",
+ .id_table = mbim_devs,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = cdc_mbim_suspend,
+ .resume = cdc_mbim_resume,
+ .reset_resume = cdc_mbim_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(cdc_mbim_driver);
+
+MODULE_AUTHOR("Greg Suarez <gsuarez@smithmicro.com>");
+MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
+MODULE_DESCRIPTION("USB CDC MBIM host driver");
+MODULE_LICENSE("GPL");
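
The tx and rx fixups above carry the MBIM session id in the fourth byte of the 16-bit NDP signature: untagged traffic and VLAN ids 1-255 map to IPS sessions, VLAN ids 256-511 to DSS sessions. A small sketch of that packing (illustrative only, mirroring the logic of cdc_mbim_tx_fixup()):

static __le32 example_mbim_ndp_sign(u16 tci)
{
	__le32 sign;
	u8 *c = (u8 *)&sign;

	if (tci < 256)
		sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	else
		sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
	c[3] = tci;	/* session id; u8 truncation maps 256-511 to DSS 0-255 */
	return sign;
}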
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 74fab1a40156..00d3b2d37828 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -51,90 +51,10 @@
#include <linux/atomic.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
+#include <linux/usb/cdc_ncm.h>
#define DRIVER_VERSION "14-Mar-2012"
-/* CDC NCM subclass 3.2.1 */
-#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
-
-/* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
-#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
-
-/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
-#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
-
-#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
-
-/* Default value for MaxDatagramSize */
-#define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */
-
-/*
- * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
- * the last NULL entry.
- */
-#define CDC_NCM_DPT_DATAGRAMS_MAX 40
-
-/* Restart the timer, if amount of datagrams is less than given value */
-#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
-#define CDC_NCM_TIMER_PENDING_CNT 2
-#define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC)
-
-/* The following macro defines the minimum header space */
-#define CDC_NCM_MIN_HDR_SIZE \
- (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
- (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-
-struct cdc_ncm_data {
- struct usb_cdc_ncm_nth16 nth16;
- struct usb_cdc_ncm_ndp16 ndp16;
- struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
-};
-
-struct cdc_ncm_ctx {
- struct cdc_ncm_data tx_ncm;
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
- struct hrtimer tx_timer;
- struct tasklet_struct bh;
-
- const struct usb_cdc_ncm_desc *func_desc;
- const struct usb_cdc_header_desc *header_desc;
- const struct usb_cdc_union_desc *union_desc;
- const struct usb_cdc_ether_desc *ether_desc;
-
- struct net_device *netdev;
- struct usb_device *udev;
- struct usb_host_endpoint *in_ep;
- struct usb_host_endpoint *out_ep;
- struct usb_host_endpoint *status_ep;
- struct usb_interface *intf;
- struct usb_interface *control;
- struct usb_interface *data;
-
- struct sk_buff *tx_curr_skb;
- struct sk_buff *tx_rem_skb;
-
- spinlock_t mtx;
- atomic_t stop;
-
- u32 tx_timer_pending;
- u32 tx_curr_offset;
- u32 tx_curr_last_offset;
- u32 tx_curr_frame_num;
- u32 rx_speed;
- u32 tx_speed;
- u32 rx_max;
- u32 tx_max;
- u32 max_datagram_size;
- u16 tx_max_datagrams;
- u16 tx_remainder;
- u16 tx_modulus;
- u16 tx_ndp_modulus;
- u16 tx_seq;
- u16 rx_seq;
- u16 connected;
-};
-
static void cdc_ncm_txpath_bh(unsigned long param);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -158,17 +78,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
u8 flags;
u8 iface_no;
int err;
+ int eth_hlen;
u16 ntb_fmt_supported;
+ u32 min_dgram_size;
+ u32 min_hdr_size;
+ struct usbnet *dev = netdev_priv(ctx->netdev);
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
- err = usb_control_msg(ctx->udev,
- usb_rcvctrlpipe(ctx->udev, 0),
- USB_CDC_GET_NTB_PARAMETERS,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, &ctx->ncm_parm,
- sizeof(ctx->ncm_parm), 10000);
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ |USB_RECIP_INTERFACE,
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
pr_debug("failed GET_NTB_PARAMETERS\n");
return 1;
@@ -184,10 +106,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
- if (ctx->func_desc != NULL)
+ eth_hlen = ETH_HLEN;
+ min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
+ if (ctx->mbim_desc != NULL) {
+ flags = ctx->mbim_desc->bmNetworkCapabilities;
+ eth_hlen = 0;
+ min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
+ min_hdr_size = 0;
+ } else if (ctx->func_desc != NULL) {
flags = ctx->func_desc->bmNetworkCapabilities;
- else
+ } else {
flags = 0;
+ }
pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
"wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
@@ -215,49 +146,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
- if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
- struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
-
- ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
- if (!ndp_in_sz) {
- err = -ENOMEM;
- goto size_err;
- }
-
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, ndp_in_sz, 8, 1000);
- kfree(ndp_in_sz);
- } else {
- __le32 *dwNtbInMaxSize;
- dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
- GFP_KERNEL);
- if (!dwNtbInMaxSize) {
- err = -ENOMEM;
- goto size_err;
- }
- *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
-
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, dwNtbInMaxSize, 4, 1000);
- kfree(dwNtbInMaxSize);
- }
-size_err:
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &dwNtbInMaxSize, 4);
if (err < 0)
pr_debug("Setting NTB Input Size failed\n");
}
/* verify maximum size of transmitted NTB in bytes */
if ((ctx->tx_max <
- (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+ (min_hdr_size + min_dgram_size)) ||
(ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
pr_debug("Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
@@ -299,93 +200,85 @@ size_err:
}
/* adjust TX-remainder according to NCM specification. */
- ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
- (ctx->tx_modulus - 1));
+ ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+ (ctx->tx_modulus - 1));
/* additional configuration */
/* set CRC Mode */
if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_CRC_MODE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_CRC_NOT_APPENDED,
- iface_no, NULL, 0, 1000);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0);
if (err < 0)
pr_debug("Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
- USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
- | USB_DIR_OUT | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0, 1000);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
if (err < 0)
pr_debug("Setting NTB format to 16-bit failed\n");
}
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+ ctx->max_datagram_size = min_dgram_size;
/* set Max Datagram Size (MTU) */
if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 *max_datagram_size;
- u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- max_datagram_size = kzalloc(sizeof(*max_datagram_size),
- GFP_KERNEL);
- if (!max_datagram_size) {
- err = -ENOMEM;
+ __le16 max_datagram_size;
+ u16 eth_max_sz;
+ if (ctx->ether_desc != NULL)
+ eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ else if (ctx->mbim_desc != NULL)
+ eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ else
goto max_dgram_err;
- }
- err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
- USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN
- | USB_RECIP_INTERFACE,
- 0, iface_no, max_datagram_size,
- 2, 1000);
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
- CDC_NCM_MIN_DATAGRAM_SIZE);
+ min_dgram_size);
} else {
ctx->max_datagram_size =
- le16_to_cpu(*max_datagram_size);
+ le16_to_cpu(max_datagram_size);
/* Check Eth descriptor value */
if (ctx->max_datagram_size > eth_max_sz)
ctx->max_datagram_size = eth_max_sz;
if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size =
- CDC_NCM_MAX_DATAGRAM_SIZE;
+ ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
- if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size =
- CDC_NCM_MIN_DATAGRAM_SIZE;
+ if (ctx->max_datagram_size < min_dgram_size)
+ ctx->max_datagram_size = min_dgram_size;
/* if value changed, update device */
if (ctx->max_datagram_size !=
- le16_to_cpu(*max_datagram_size)) {
- err = usb_control_msg(ctx->udev,
- usb_sndctrlpipe(ctx->udev, 0),
+ le16_to_cpu(max_datagram_size)) {
+ err = usbnet_write_cmd(dev,
USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT
| USB_RECIP_INTERFACE,
0,
- iface_no, max_datagram_size,
- 2, 1000);
+ iface_no, &max_datagram_size,
+ 2);
if (err < 0)
pr_debug("SET_MAX_DGRAM_SIZE failed\n");
}
}
- kfree(max_datagram_size);
}
max_dgram_err:
- if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
- ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+ if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
+ ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
return 0;
}
@@ -451,7 +344,7 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *driver;
@@ -525,6 +418,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
break;
+ case USB_CDC_MBIM_TYPE:
+ if (buf[0] < sizeof(*(ctx->mbim_desc)))
+ break;
+
+ ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
+ break;
+
default:
break;
}
@@ -535,9 +435,16 @@ advance:
len -= temp;
}
+ /* some buggy devices have an IAD but no CDC Union */
+ if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ ctx->control = intf;
+ ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+ dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+ }
+
/* check if we got everything */
if ((ctx->control == NULL) || (ctx->data == NULL) ||
- (ctx->ether_desc == NULL) || (ctx->control != intf))
+ ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
goto error;
/* claim data interface, if different from control */
@@ -559,7 +466,7 @@ advance:
goto error2;
/* configure data interface */
- temp = usb_set_interface(dev->udev, iface_no, 1);
+ temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp)
goto error2;
@@ -576,11 +483,13 @@ advance:
usb_set_intfdata(ctx->control, dev);
usb_set_intfdata(ctx->intf, dev);
- temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
- if (temp)
- goto error2;
+ if (ctx->ether_desc) {
+ temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+ if (temp)
+ goto error2;
+ dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+ }
- dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
dev->in = usb_rcvbulkpipe(dev->udev,
ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
@@ -589,28 +498,23 @@ advance:
dev->status = ctx->status_ep;
dev->rx_urb_size = ctx->rx_max;
- /*
- * We should get an event when network connection is "connected" or
- * "disconnected". Set network connection in "disconnected" state
- * (carrier is OFF) during attach, so the IP network stack does not
- * start IPv6 negotiation and more.
- */
- netif_carrier_off(dev->net);
ctx->tx_speed = ctx->rx_speed = 0;
return 0;
error2:
usb_set_intfdata(ctx->control, NULL);
usb_set_intfdata(ctx->data, NULL);
- usb_driver_release_interface(driver, ctx->data);
+ if (ctx->data != ctx->control)
+ usb_driver_release_interface(driver, ctx->data);
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
dev_info(&dev->udev->dev, "bind() failure\n");
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
-static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_driver *driver = driver_of(intf);
@@ -644,52 +548,121 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
usb_set_intfdata(ctx->intf, NULL);
cdc_ncm_free(ctx);
}
+EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
+
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+
+ /* The MBIM spec defines a NCM compatible default altsetting,
+ * which we may have matched:
+ *
+ * "Functions that implement both NCM 1.0 and MBIM (an
+ * “NCM/MBIM function”) according to this recommendation
+ * shall provide two alternate settings for the
+ * Communication Interface. Alternate setting 0, and the
+ * associated class and endpoint descriptors, shall be
+ * constructed according to the rules given for the
+ * Communication Interface in section 5 of [USBNCM10].
+ * Alternate setting 1, and the associated class and
+ * endpoint descriptors, shall be constructed according to
+ * the rules given in section 6 (USB Device Model) of this
+ * specification."
+ *
+ * Do not bind to such interfaces, allowing cdc_mbim to handle
+ * them
+ */
+#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
+ if ((intf->num_altsetting == 2) &&
+ !usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_MBIM) &&
+ cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ return -ENODEV;
+#endif
+
+ /* NCM data altsetting is always 1 */
+ ret = cdc_ncm_bind_common(dev, intf, 1);
+
+ /*
+ * We should get an event when network connection is "connected" or
+ * "disconnected". Set network connection in "disconnected" state
+ * (carrier is OFF) during attach, so the IP network stack does not
+ * start IPv6 negotiation and more.
+ */
+ netif_carrier_off(dev->net);
+ return ret;
+}
-static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
{
- if (first >= max)
- return;
- if (first >= end)
- return;
- if (end > max)
- end = max;
- memset(ptr + first, 0, end - first);
+ size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
+
+ if (skb->len + align > max)
+ align = max - skb->len;
+ if (align && skb_tailroom(skb) >= align)
+ memset(skb_put(skb, align), 0, align);
}
-static struct sk_buff *
-cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
+ * allocating a new one within skb
+ */
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
+ struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
+ struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
+ size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
+ if (ndp16->dwSignature == sign)
+ return ndp16;
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ }
+
+ /* align new NDP */
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+ return NULL;
+
+ /* link to it */
+ if (ndp16)
+ ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
+ else
+ nth16->wNdpIndex = cpu_to_le16(skb->len);
+
+ /* push a new empty NDP */
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+ ndp16->dwSignature = sign;
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
+ return ndp16;
+}
+
+struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
+{
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_ndp16 *ndp16;
struct sk_buff *skb_out;
- u32 rem;
- u32 offset;
- u32 last_offset;
- u16 n = 0, index;
+ u16 n = 0, index, ndplen;
u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
- if (skb != NULL)
+ if (skb != NULL) {
swap(skb, ctx->tx_rem_skb);
- else
+ swap(sign, ctx->tx_rem_sign);
+ } else {
ready2send = 1;
-
- /*
- * +----------------+
- * | skb_out |
- * +----------------+
- * ^ offset
- * ^ last_offset
- */
+ }
/* check if we are resuming an OUT skb */
- if (ctx->tx_curr_skb != NULL) {
- /* pop variables */
- skb_out = ctx->tx_curr_skb;
- offset = ctx->tx_curr_offset;
- last_offset = ctx->tx_curr_last_offset;
- n = ctx->tx_curr_frame_num;
+ skb_out = ctx->tx_curr_skb;
- } else {
- /* reset variables */
+ /* allocate a new OUT skb */
+ if (!skb_out) {
skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
@@ -698,35 +671,21 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
}
goto exit_no_skb;
}
+ /* fill out the initial 16-bit NTB header */
+ nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
+ nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
- /* make room for NTH and NDP */
- offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
- ctx->tx_ndp_modulus) +
- sizeof(struct usb_cdc_ncm_ndp16) +
- (ctx->tx_max_datagrams + 1) *
- sizeof(struct usb_cdc_ncm_dpe16);
-
- /* store last valid offset before alignment */
- last_offset = offset;
- /* align first Datagram offset correctly */
- offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
- /* zero buffer till the first IP datagram */
- cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
- n = 0;
+ /* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
}
- for (; n < ctx->tx_max_datagrams; n++) {
- /* check if end of transmit buffer is reached */
- if (offset >= ctx->tx_max) {
- ready2send = 1;
- break;
- }
- /* compute maximum buffer size */
- rem = ctx->tx_max - offset;
-
+ for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
+ /* send any remaining skb first */
if (skb == NULL) {
skb = ctx->tx_rem_skb;
+ sign = ctx->tx_rem_sign;
ctx->tx_rem_skb = NULL;
/* check for end of skb */
@@ -734,7 +693,14 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
break;
}
- if (skb->len > rem) {
+ /* get the appropriate NDP for this skb */
+ ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+
+ /* align beginning of next frame */
+ cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
+
+ /* check if we had enough room left for both NDP and frame */
+ if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -747,31 +713,30 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->netdev->stats.tx_dropped++;
}
ctx->tx_rem_skb = skb;
+ ctx->tx_rem_sign = sign;
skb = NULL;
ready2send = 1;
}
break;
}
- memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
-
- ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
- ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
-
- /* update offset */
- offset += skb->len;
-
- /* store last valid offset before alignment */
- last_offset = offset;
+ /* calculate the frame number within this NDP */
+ ndplen = le16_to_cpu(ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
- /* align offset correctly */
- offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
-
- /* zero padding */
- cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
- ctx->tx_max);
+ /* OK, add this skb */
+ ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
dev_kfree_skb_any(skb);
skb = NULL;
+
+ /* send now if this NDP is full */
+ if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
+ ready2send = 1;
+ break;
+ }
}
/* free up any dangling skb */
@@ -787,16 +752,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
- ctx->tx_curr_offset = offset;
- ctx->tx_curr_last_offset = last_offset;
goto exit_no_skb;
} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
- ctx->tx_curr_offset = offset;
- ctx->tx_curr_last_offset = last_offset;
/* set the pending count */
if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
@@ -807,75 +768,24 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* variables will be reset at next call */
}
- /* check for overflow */
- if (last_offset > ctx->tx_max)
- last_offset = ctx->tx_max;
-
- /* revert offset */
- offset = last_offset;
-
/*
* If the collected data size is less than or equal to CDC_NCM_MIN_TX_PKT
* bytes, we send the buffer as is. If we get more data, it is more
* efficient for a USB HS mobile device with a DMA engine to receive a
* full-size NTB than to cancel the DMA transfer and receive a short packet.
*/
- if (offset > CDC_NCM_MIN_TX_PKT)
- offset = ctx->tx_max;
-
- /* final zero padding */
- cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
-
- /* store last offset */
- last_offset = offset;
-
- if (((last_offset < ctx->tx_max) && ((last_offset %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
- (((last_offset == ctx->tx_max) && ((ctx->tx_max %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
- (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
- /* force short packet */
- *(((u8 *)skb_out->data) + last_offset) = 0;
- last_offset++;
- }
+ if (skb_out->len > CDC_NCM_MIN_TX_PKT)
+ /* final zero padding */
+ memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
- /* zero the rest of the DPEs plus the last NULL entry */
- for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
- ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
- ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
- }
-
- /* fill out 16-bit NTB header */
- ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- ctx->tx_ncm.nth16.wHeaderLength =
- cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
- ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
- ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
- ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
-
- memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
- ctx->tx_seq++;
-
- /* fill out 16-bit NDP table */
- ctx->tx_ncm.ndp16.dwSignature =
- cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
- rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
- sizeof(struct usb_cdc_ncm_dpe16));
- ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
- ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
+ /* do we need to prevent a ZLP? */
+ if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
+ (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
+ *skb_put(skb_out, 1) = 0; /* force short packet */
- memcpy(((u8 *)skb_out->data) + index,
- &(ctx->tx_ncm.ndp16),
- sizeof(ctx->tx_ncm.ndp16));
-
- memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
- &(ctx->tx_ncm.dpe16),
- (ctx->tx_curr_frame_num + 1) *
- sizeof(struct usb_cdc_ncm_dpe16));
-
- /* set frame length */
- skb_put(skb_out, last_offset);
+ /* set final frame length */
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth16->wBlockLength = cpu_to_le16(skb_out->len);
/* return skb */
ctx->tx_curr_skb = NULL;
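
The final padding above also replaces the old explicit short-packet forcing: when the finished NTB is an exact multiple of the bulk-out wMaxPacketSize but smaller than dwNtbOutMaxSize, one extra zero byte is appended so the transfer ends with a short packet instead of requiring a ZLP. A condensed sketch of that condition (illustrative only):

static bool example_needs_short_packet_pad(u32 ntb_len, u16 max_packet,
					   u32 ntb_out_max)
{
	return max_packet && (ntb_len % max_packet) == 0 &&
	       ntb_len < ntb_out_max;
}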
@@ -888,6 +798,7 @@ exit_no_skb:
cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
+EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
{
@@ -922,6 +833,8 @@ static void cdc_ncm_txpath_bh(unsigned long param)
netif_tx_lock_bh(ctx->netdev);
usbnet_start_xmit(NULL, ctx->netdev);
netif_tx_unlock_bh(ctx->netdev);
+ } else {
+ spin_unlock_bh(&ctx->mtx);
}
}
@@ -942,7 +855,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -953,17 +866,12 @@ error:
return NULL;
}
-static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+/* verify NTB header and return offset of first NDP, or negative error */
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
{
- struct sk_buff *skb;
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
- int nframes;
- int x;
- int offset;
struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ int len;
+ int ret = -EINVAL;
if (ctx == NULL)
goto error;
@@ -997,20 +905,23 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
}
ctx->rx_seq = le16_to_cpu(nth16->wSequence);
- len = le16_to_cpu(nth16->wNdpIndex);
- if ((len + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
- pr_debug("invalid DPT16 index <%u>\n",
- le16_to_cpu(nth16->wNdpIndex));
- goto error;
- }
+ ret = le16_to_cpu(nth16->wNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(((u8 *)skb_in->data) + len);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ int ret = -EINVAL;
- if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
- pr_debug("invalid DPT16 signature <%u>\n",
- le32_to_cpu(ndp16->dwSignature));
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
+ pr_debug("invalid NDP offset <%u>\n", ndpoffset);
goto error;
}
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
pr_debug("invalid DPT16 length <%u>\n",
@@ -1018,20 +929,52 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
goto error;
}
- nframes = ((le16_to_cpu(ndp16->wLength) -
+ ret = ((le16_to_cpu(ndp16->wLength) -
sizeof(struct usb_cdc_ncm_ndp16)) /
sizeof(struct usb_cdc_ncm_dpe16));
- nframes--; /* we process NDP entries except for the last one */
+ ret--; /* we process NDP entries except for the last one */
- len += sizeof(struct usb_cdc_ncm_ndp16);
-
- if ((len + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) >
+ if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
skb_in->len) {
- pr_debug("Invalid nframes = %d\n", nframes);
- goto error;
+ pr_debug("Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
}
- dpe16 = (struct usb_cdc_ncm_dpe16 *)(((u8 *)skb_in->data) + len);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int len;
+ int nframes;
+ int x;
+ int offset;
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ int ndpoffset;
+ int loopcount = 50; /* arbitrary max preventing infinite loop */
+
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ndpoffset < 0)
+ goto error;
+
+next_ndp:
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+
+ if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+ pr_debug("invalid DPT16 signature <%u>\n",
+ le32_to_cpu(ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe16 = ndp16->dpe16;
for (x = 0; x < nframes; x++, dpe16++) {
offset = le16_to_cpu(dpe16->wDatagramIndex);
@@ -1043,7 +986,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
*/
if ((offset == 0) || (len == 0)) {
if (!x)
- goto error; /* empty NTB */
+ goto err_ndp; /* empty NTB */
break;
}
@@ -1054,7 +997,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
"offset[%u]=%u, length=%u, skb=%p\n",
x, offset, len, skb_in);
if (!x)
- goto error;
+ goto err_ndp;
break;
} else {
@@ -1067,6 +1010,12 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
usbnet_skb_return(dev, skb);
}
}
+err_ndp:
+ /* are there more NDPs to process? */
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ndpoffset && loopcount--)
+ goto next_ndp;
+
return 1;
error:
return 0;
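
For reference, a condensed sketch of the 16-bit NTB structure the rx path above walks (illustrative, not normative):

/*
 * usb_cdc_ncm_nth16      - NTB header; wNdpIndex locates the first NDP
 * datagrams (+ padding)  - IP packets placed anywhere inside the NTB
 * usb_cdc_ncm_ndp16      - pointer table; dpe16[] holds wDatagramIndex /
 *                          wDatagramLength pairs ending in a NULL entry,
 *                          and wNextNdpIndex chains further NDPs
 *                          (0 terminates the chain, as the next_ndp loop does)
 */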
@@ -1131,7 +1080,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- ctx->connected = event->wValue;
+ ctx->connected = le16_to_cpu(event->wValue);
printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
" %sconnected\n",
@@ -1188,19 +1137,13 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
usbnet_disconnect(intf);
}
-static int cdc_ncm_manage_power(struct usbnet *dev, int status)
-{
- dev->intf->needs_remote_wakeup = status;
- return 0;
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
@@ -1214,7 +1157,21 @@ static const struct driver_info wwan_info = {
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
.check_connect = cdc_ncm_check_connect,
- .manage_power = cdc_ncm_manage_power,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+/* Same as wwan_info, but with FLAG_NOARP */
+static const struct driver_info wwan_noarp_info = {
+ .description = "Mobile Broadband Network Device (NO ARP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_WWAN | FLAG_NOARP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
@@ -1258,6 +1215,16 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
+ /* Infineon(now Intel) HSPA Modem platform */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e0433ce6ced7..d7e99445518e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -45,6 +45,12 @@
#define DM_MCAST_ADDR 0x16 /* 8 bytes */
#define DM_GPR_CTRL 0x1e
#define DM_GPR_DATA 0x1f
+#define DM_CHIP_ID 0x2c
+#define DM_MODE_CTRL 0x91 /* only on dm9620 */
+
+/* chip id values */
+#define ID_DM9601 0
+#define ID_DM9620 1
#define DM_MAX_MCAST 64
#define DM_MCAST_SIZE 8
@@ -53,30 +59,14 @@
#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
#define DM_TIMEOUT 1000
-
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
-
- buf = kmalloc(length, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- DM_READ_REGS,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
- if (err == length)
- memcpy(data, buf, length);
- else if (err >= 0)
+ int err;
+ err = usbnet_read_cmd(dev, DM_READ_REGS,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, length);
+ if(err != length && err >= 0)
err = -EINVAL;
- kfree(buf);
-
- out:
return err;
}
@@ -87,106 +77,35 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
-
- if (data) {
- buf = kmemdup(data, length, GFP_KERNEL);
- if (!buf)
- goto out;
- }
+ int err;
+ err = usbnet_write_cmd(dev, DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, length);
- err = usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- DM_WRITE_REGS,
- USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
- 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
- kfree(buf);
if (err >= 0 && err < length)
err = -EINVAL;
- out:
return err;
}
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
- reg, value);
- return usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- DM_WRITE_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
- value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
-}
-
-static void dm_write_async_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
- status);
-
- kfree(req);
- usb_free_urb(urb);
-}
-
-static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
- u16 length, void *data)
-{
- struct usb_ctrlrequest *req;
- struct urb *urb;
- int status;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = length ? DM_WRITE_REGS : DM_WRITE_REG;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(reg);
- req->wLength = cpu_to_le16(length);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, length,
- dm_write_async_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
+ return usbnet_write_cmd(dev, DM_WRITE_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
-
- dm_write_async_helper(dev, reg, 0, length, data);
+ usbnet_write_cmd_async(dev, DM_WRITE_REGS,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, length);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
- reg, value);
-
- dm_write_async_helper(dev, reg, value, 0, NULL);
+ usbnet_write_cmd_async(dev, DM_WRITE_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@ -435,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN], id;
ret = usbnet_get_endpoints(dev, intf);
if (ret)
@@ -476,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
__dm9601_set_mac_address(dev);
}
+ if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+ netdev_err(dev->net, "Error reading chip ID\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* put dm9620 devices in dm9601 mode */
+ if (id == ID_DM9620) {
+ u8 mode;
+
+ if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+ netdev_err(dev->net, "Error reading MODE_CTRL\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+ }
+
/* power up phy */
dm_write_reg(dev, DM_GPR_CTRL, 1);
dm_write_reg(dev, DM_GPR_DATA, 0);
@@ -658,6 +595,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
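The dm9601 conversion above is the pattern repeated through the rest of this series: open-coded usb_control_msg() calls, each with its own kmalloc()'d bounce buffer and timeout handling, become calls to usbnet_read_cmd()/usbnet_write_cmd(), which take the caller's buffer directly. A minimal register accessor in the same style, with a hypothetical vendor request value (not part of any in-tree driver):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define EX_READ_REGS	0x00	/* hypothetical vendor request */

/* read one 8-bit register; mirrors the dm_read()/dm_read_reg() split above */
static int ex_read_reg(struct usbnet *dev, u8 reg, u8 *value)
{
	int err = usbnet_read_cmd(dev, EX_READ_REGS,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_DEVICE,
				  0, reg, value, 1);

	if (err >= 0 && err != 1)
		err = -EINVAL;	/* short transfer */
	return err;
}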
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 605a4baa9b7b..cd8ccb240f4b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2274,6 +2274,7 @@ static void hso_serial_common_free(struct hso_serial *serial)
/* unlink and free TX URB */
usb_free_urb(serial->tx_urb);
kfree(serial->tx_data);
+ tty_port_destroy(&serial->port);
}
static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
@@ -2283,12 +2284,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
int minor;
int i;
+ tty_port_init(&serial->port);
+
minor = get_free_serial_index();
if (minor < 0)
goto exit;
- tty_port_init(&serial->port);
-
/* register our minor number */
serial->parent->dev = tty_port_register_device(&serial->port, tty_drv,
minor, &serial->parent->interface->dev);
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 8de641713d5f..ace9e74ffbdd 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -116,23 +116,8 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
return skb;
}
-static void int51x1_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
-
- kfree(req);
- usb_free_urb(urb);
-}
-
static void int51x1_set_multicast(struct net_device *netdev)
{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
struct usbnet *dev = netdev_priv(netdev);
u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
@@ -149,40 +134,9 @@ static void int51x1_set_multicast(struct net_device *netdev)
netdev_dbg(dev->net, "receive own packets only\n");
}
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_warn(dev->net, "Error allocating URB\n");
- return;
- }
-
- req = kmalloc(sizeof(*req), GFP_ATOMIC);
- if (!req) {
- netdev_warn(dev->net, "Error allocating control msg\n");
- goto out;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- req->bRequest = SET_ETHERNET_PACKET_FILTER;
- req->wValue = cpu_to_le16(filter);
- req->wIndex = 0;
- req->wLength = 0;
-
- usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
- (void *)req, NULL, 0,
- int51x1_async_cmd_callback,
- (void *)req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
- status);
- goto out1;
- }
- return;
-out1:
- kfree(req);
-out:
- usb_free_urb(urb);
+ usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ filter, 0, NULL, 0);
}
static const struct net_device_ops int51x1_netdev_ops = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index cc7e72010ac3..3f3f566afa0b 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -124,93 +124,20 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
{
- struct usb_device *xdev = dev->udev;
- int ret;
- void *buffer;
-
- buffer = kmalloc(size, GFP_NOIO);
- if (buffer == NULL)
- return -ENOMEM;
-
- ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
- MCS7830_RD_BMREQ, 0x0000, index, buffer,
- size, MCS7830_CTRL_TIMEOUT);
- memcpy(data, buffer, size);
- kfree(buffer);
-
- return ret;
+ return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
+ 0x0000, index, data, size);
}
static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
- struct usb_device *xdev = dev->udev;
- int ret;
- void *buffer;
-
- buffer = kmemdup(data, size, GFP_NOIO);
- if (buffer == NULL)
- return -ENOMEM;
-
- ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
- MCS7830_WR_BMREQ, 0x0000, index, buffer,
- size, MCS7830_CTRL_TIMEOUT);
- kfree(buffer);
- return ret;
-}
-
-static void mcs7830_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "%s() failed with %d\n",
- __func__, status);
-
- kfree(req);
- usb_free_urb(urb);
+ return usbnet_write_cmd(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+ 0x0000, index, data, size);
}
static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
{
- struct usb_ctrlrequest *req;
- int ret;
- struct urb *urb;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_dbg(&dev->udev->dev,
- "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof *req, GFP_ATOMIC);
- if (!req) {
- dev_err(&dev->udev->dev,
- "Failed to allocate memory for control request\n");
- goto out;
- }
- req->bRequestType = MCS7830_WR_BMREQ;
- req->bRequest = MCS7830_WR_BREQ;
- req->wValue = 0;
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- mcs7830_async_cmd_callback, req);
-
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(&dev->udev->dev,
- "Error submitting the control message: ret=%d\n", ret);
- goto out;
- }
- return;
-out:
- kfree(req);
- usb_free_urb(urb);
+ usbnet_write_cmd_async(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
+ 0x0000, index, data, size);
}
static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
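int51x1, dm9601 and mcs7830 also delete their private URB plus usb_ctrlrequest boilerplate (allocation, completion callback, error unwinding) and call usbnet_write_cmd_async() instead, which the conversions above use from paths that may not sleep, such as the multicast filter updates. A fire-and-forget write in the same shape, again with a made-up request value:

#include <linux/netdevice.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define EX_WRITE_REG	0x01	/* hypothetical vendor request */

static void ex_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
	int ret = usbnet_write_cmd_async(dev, EX_WRITE_REG,
					 USB_DIR_OUT | USB_TYPE_VENDOR |
					 USB_RECIP_DEVICE,
					 value, reg, NULL, 0);
	if (ret < 0)
		netdev_dbg(dev->net, "async write of reg 0x%02x failed: %d\n",
			   reg, ret);
}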
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index c062a3e8295c..93e0716a118c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -109,13 +109,11 @@ struct nc_trailer {
static int
nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
{
- int status = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- req,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, regnum,
- retval_ptr, sizeof *retval_ptr,
- USB_CTRL_GET_TIMEOUT);
+ int status = usbnet_read_cmd(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, regnum, retval_ptr,
+ sizeof *retval_ptr);
if (status > 0)
status = 0;
if (!status)
@@ -133,13 +131,9 @@ nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
static void
nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
{
- usb_control_msg(dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- req,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, regnum,
- NULL, 0, // data is in setup packet
- USB_CTRL_SET_TIMEOUT);
+ usbnet_write_cmd(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, regnum, NULL, 0);
}
static inline void
@@ -288,37 +282,34 @@ static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
static int net1080_reset(struct usbnet *dev)
{
u16 usbctl, status, ttl;
- u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
+ u16 vp;
int retval;
- if (!vp)
- return -ENOMEM;
-
// nc_dump_registers(dev);
- if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_STATUS, &vp)) < 0) {
netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
dev->udev->bus->bus_name, dev->udev->devpath, retval);
goto done;
}
- status = *vp;
+ status = vp;
nc_dump_status(dev, status);
- if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_USBCTL, &vp)) < 0) {
netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
goto done;
}
- usbctl = *vp;
+ usbctl = vp;
nc_dump_usbctl(dev, usbctl);
nc_register_write(dev, REG_USBCTL,
USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
- if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
+ if ((retval = nc_register_read(dev, REG_TTL, &vp)) < 0) {
netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
goto done;
}
- ttl = *vp;
+ ttl = vp;
// nc_dump_ttl(dev, ttl);
nc_register_write(dev, REG_TTL,
@@ -331,7 +322,6 @@ static int net1080_reset(struct usbnet *dev)
retval = 0;
done:
- kfree(vp);
return retval;
}
@@ -339,13 +329,10 @@ static int net1080_check_connect(struct usbnet *dev)
{
int retval;
u16 status;
- u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
+ u16 vp;
- if (!vp)
- return -ENOMEM;
- retval = nc_register_read(dev, REG_STATUS, vp);
- status = *vp;
- kfree(vp);
+ retval = nc_register_read(dev, REG_STATUS, &vp);
+ status = vp;
if (retval != 0) {
netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
return retval;
@@ -355,59 +342,22 @@ static int net1080_check_connect(struct usbnet *dev)
return 0;
}
-static void nc_flush_complete(struct urb *urb)
-{
- kfree(urb->context);
- usb_free_urb(urb);
-}
-
static void nc_ensure_sync(struct usbnet *dev)
{
- dev->frame_errors++;
- if (dev->frame_errors > 5) {
- struct urb *urb;
- struct usb_ctrlrequest *req;
- int status;
-
- /* Send a flush */
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return;
-
- req = kmalloc(sizeof *req, GFP_ATOMIC);
- if (!req) {
- usb_free_urb(urb);
- return;
- }
+ if (++dev->frame_errors <= 5)
+ return;
- req->bRequestType = USB_DIR_OUT
- | USB_TYPE_VENDOR
- | USB_RECIP_DEVICE;
- req->bRequest = REQUEST_REGISTER;
- req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
- | USBCTL_FLUSH_OTHER);
- req->wIndex = cpu_to_le16(REG_USBCTL);
- req->wLength = cpu_to_le16(0);
-
- /* queue an async control request, we don't need
- * to do anything when it finishes except clean up.
- */
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (unsigned char *) req,
- NULL, 0,
- nc_flush_complete, req);
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- kfree(req);
- usb_free_urb(urb);
- return;
- }
+ if (usbnet_write_cmd_async(dev, REQUEST_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ USBCTL_FLUSH_THIS |
+ USBCTL_FLUSH_OTHER,
+ REG_USBCTL, NULL, 0))
+ return;
- netif_dbg(dev, rx_err, dev->net,
- "flush net1080; too many framing errors\n");
- dev->frame_errors = 0;
- }
+ netif_dbg(dev, rx_err, dev->net,
+ "flush net1080; too many framing errors\n");
+ dev->frame_errors = 0;
}
static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 4584b9a805b3..0fcc8e65a068 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -71,13 +71,10 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- req,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- val, index,
- NULL, 0,
- USB_CTRL_GET_TIMEOUT);
+ return usbnet_read_cmd(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ val, index, NULL, 0);
}
static inline int
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1ea91f4237f0..c8e05e27f38c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@@ -383,6 +395,20 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5800 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* Dell Wireless 5800 V2 (Novatel E362) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8196,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
@@ -419,6 +445,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
+ {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
+ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
@@ -443,6 +471,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c27d27701aee..18dd4257ab17 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -311,10 +311,9 @@ static int sierra_net_send_cmd(struct usbnet *dev,
struct sierra_net_data *priv = sierra_net_get_private(dev);
int status;
- status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_CDC_SEND_ENCAPSULATED_COMMAND,
- USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
- priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
+ status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND,
+ USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
+ 0, priv->ifnum, cmd, cmdlen);
if (status != cmdlen && status != -ENODEV)
netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
@@ -340,7 +339,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
priv->tx_hdr_template[0] = 0x3F;
priv->tx_hdr_template[1] = ctx_ix;
- *((u16 *)&priv->tx_hdr_template[2]) =
+ *((__be16 *)&priv->tx_hdr_template[2]) =
cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
}
@@ -632,32 +631,22 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
{
int result = 0;
- u16 *attrdata;
-
- attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
- if (!attrdata)
- return -ENOMEM;
-
- result = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- /* _u8 vendor specific request */
- SWI_USB_REQUEST_GET_FW_ATTR,
- USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
- 0x0000, /* __u16 value not used */
- 0x0000, /* __u16 index not used */
- attrdata, /* char *data */
- sizeof(*attrdata), /* __u16 size */
- USB_CTRL_SET_TIMEOUT); /* int timeout */
-
- if (result < 0) {
- kfree(attrdata);
+ __le16 attrdata;
+
+ result = usbnet_read_cmd(dev,
+ /* _u8 vendor specific request */
+ SWI_USB_REQUEST_GET_FW_ATTR,
+ USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
+ 0x0000, /* __u16 value not used */
+ 0x0000, /* __u16 index not used */
+ &attrdata, /* char *data */
+ sizeof(attrdata) /* __u16 size */
+ );
+
+ if (result < 0)
return -EIO;
- }
-
- *datap = le16_to_cpu(*attrdata);
- kfree(attrdata);
+ *datap = le16_to_cpu(attrdata);
return result;
}
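The sierra_net and net1080 hunks make the same simplification in another form: since usbnet_read_cmd() presumably allocates its own DMA-safe bounce buffer (that is what each driver used to do by hand), the two-byte kmalloc() can become a plain stack variable, and typing it __le16/__be16 keeps the endian conversion visible to sparse. The resulting idiom, sketched with a hypothetical request:

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define EX_GET_ATTR	0x06	/* hypothetical vendor request */

static int ex_get_attr(struct usbnet *dev, u16 *attr)
{
	__le16 raw;	/* on stack; the core copies it through its own buffer */
	int ret = usbnet_read_cmd(dev, EX_GET_ATTR,
				  USB_DIR_IN | USB_TYPE_VENDOR,
				  0, 0, &raw, sizeof(raw));

	if (ret < 0)
		return ret;

	*attr = le16_to_cpu(raw);
	return 0;
}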
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index b77ae76f4aa8..251a3354a4b0 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -26,6 +26,8 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
@@ -52,16 +54,15 @@
#define USB_PRODUCT_ID_LAN7500 (0x7500)
#define USB_PRODUCT_ID_LAN7505 (0x7505)
#define RXW_PADDING 2
-#define SUPPORTED_WAKE (WAKE_MAGIC)
+#define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
+ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
-#define check_warn(ret, fmt, args...) \
- ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
-
-#define check_warn_return(ret, fmt, args...) \
- ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
-
-#define check_warn_goto_done(ret, fmt, args...) \
- ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
+#define SUSPEND_SUSPEND0 (0x01)
+#define SUSPEND_SUSPEND1 (0x02)
+#define SUSPEND_SUSPEND2 (0x04)
+#define SUSPEND_SUSPEND3 (0x08)
+#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
+ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
struct smsc75xx_priv {
struct usbnet *dev;
@@ -71,6 +72,7 @@ struct smsc75xx_priv {
struct mutex dataport_mutex;
spinlock_t rfe_ctl_lock;
struct work_struct set_multicast;
+ u8 suspend_flags;
};
struct usb_context {
@@ -82,96 +84,99 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
+static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_READ_REGISTER,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+ ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net,
- "Failed to read reg index 0x%08x: %d", index, ret);
+ netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ index, ret);
- le32_to_cpus(buf);
- *data = *buf;
- kfree(buf);
+ le32_to_cpus(&buf);
+ *data = buf;
return ret;
}
-static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
+static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- *buf = data;
- cpu_to_le32s(buf);
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
- ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_WRITE_REGISTER,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ buf = data;
+ cpu_to_le32s(&buf);
+ ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net,
- "Failed to write reg index 0x%08x: %d", index, ret);
-
- kfree(buf);
+ netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
+ index, ret);
return ret;
}
-static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
+static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index,
+ u32 *data)
{
- if (WARN_ON_ONCE(!dev))
- return -EINVAL;
-
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return __smsc75xx_read_reg(dev, index, data, 1);
}
-static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
+static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index,
+ u32 data)
{
- if (WARN_ON_ONCE(!dev))
- return -EINVAL;
+ return __smsc75xx_write_reg(dev, index, data, 1);
+}
- cpu_to_le32s(&feature);
+static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc75xx_read_reg(dev, index, data, 0);
+}
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc75xx_write_reg(dev, index, data, 0);
}
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
+static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev,
+ int in_pm)
{
unsigned long start_time = jiffies;
u32 val;
int ret;
do {
- ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
- check_warn_return(ret, "Error reading MII_ACCESS");
+ ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_ACCESS\n");
+ return ret;
+ }
if (!(val & MII_ACCESS_BUSY))
return 0;
@@ -180,7 +185,8 @@ static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
return -EIO;
}
-static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+ int in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
@@ -189,8 +195,11 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = smsc75xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read");
+ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n");
+ goto done;
+ }
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
@@ -198,14 +207,23 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
| MII_ACCESS_READ | MII_ACCESS_BUSY;
- ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
- check_warn_goto_done(ret, "Error writing MII_ACCESS");
+ ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_ACCESS\n");
+ goto done;
+ }
- ret = smsc75xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
+ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
+ goto done;
+ }
- ret = smsc75xx_read_reg(dev, MII_DATA, &val);
- check_warn_goto_done(ret, "Error reading MII_DATA");
+ ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_DATA\n");
+ goto done;
+ }
ret = (u16)(val & 0xFFFF);
@@ -214,8 +232,8 @@ done:
return ret;
}
-static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
- int regval)
+static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id,
+ int idx, int regval, int in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
@@ -224,12 +242,18 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = smsc75xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write");
+ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n");
+ goto done;
+ }
val = regval;
- ret = smsc75xx_write_reg(dev, MII_DATA, val);
- check_warn_goto_done(ret, "Error writing MII_DATA");
+ ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_DATA\n");
+ goto done;
+ }
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
@@ -237,16 +261,45 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
| MII_ACCESS_WRITE | MII_ACCESS_BUSY;
- ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
- check_warn_goto_done(ret, "Error writing MII_ACCESS");
+ ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_ACCESS\n");
+ goto done;
+ }
- ret = smsc75xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
+ goto done;
+ }
done:
mutex_unlock(&dev->phy_mutex);
}
+static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
+ int idx)
+{
+ return __smsc75xx_mdio_read(netdev, phy_id, idx, 1);
+}
+
+static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
+ int idx, int regval)
+{
+ __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1);
+}
+
+static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+ return __smsc75xx_mdio_read(netdev, phy_id, idx, 0);
+}
+
+static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+ int regval)
+{
+ __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0);
+}
+
static int smsc75xx_wait_eeprom(struct usbnet *dev)
{
unsigned long start_time = jiffies;
@@ -255,7 +308,10 @@ static int smsc75xx_wait_eeprom(struct usbnet *dev)
do {
ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
- check_warn_return(ret, "Error reading E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_CMD\n");
+ return ret;
+ }
if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
break;
@@ -263,7 +319,7 @@ static int smsc75xx_wait_eeprom(struct usbnet *dev)
} while (!time_after(jiffies, start_time + HZ));
if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
- netdev_warn(dev->net, "EEPROM read operation timeout");
+ netdev_warn(dev->net, "EEPROM read operation timeout\n");
return -EIO;
}
@@ -278,7 +334,10 @@ static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
do {
ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
- check_warn_return(ret, "Error reading E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_CMD\n");
+ return ret;
+ }
if (!(val & E2P_CMD_BUSY))
return 0;
@@ -286,7 +345,7 @@ static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
udelay(40);
} while (!time_after(jiffies, start_time + HZ));
- netdev_warn(dev->net, "EEPROM is busy");
+ netdev_warn(dev->net, "EEPROM is busy\n");
return -EIO;
}
@@ -306,14 +365,20 @@ static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
for (i = 0; i < length; i++) {
val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
ret = smsc75xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_CMD\n");
+ return ret;
+ }
ret = smsc75xx_wait_eeprom(dev);
if (ret < 0)
return ret;
ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
- check_warn_return(ret, "Error reading E2P_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_DATA\n");
+ return ret;
+ }
data[i] = val & 0xFF;
offset++;
@@ -338,7 +403,10 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Issue write/erase enable command */
val = E2P_CMD_BUSY | E2P_CMD_EWEN;
ret = smsc75xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_CMD\n");
+ return ret;
+ }
ret = smsc75xx_wait_eeprom(dev);
if (ret < 0)
@@ -349,12 +417,18 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Fill data register */
val = data[i];
ret = smsc75xx_write_reg(dev, E2P_DATA, val);
- check_warn_return(ret, "Error writing E2P_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_DATA\n");
+ return ret;
+ }
/* Send "write" command */
val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
ret = smsc75xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_CMD\n");
+ return ret;
+ }
ret = smsc75xx_wait_eeprom(dev);
if (ret < 0)
@@ -373,7 +447,10 @@ static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
for (i = 0; i < 100; i++) {
u32 dp_sel;
ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
- check_warn_return(ret, "Error reading DP_SEL");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading DP_SEL\n");
+ return ret;
+ }
if (dp_sel & DP_SEL_DPRDY)
return 0;
@@ -381,7 +458,7 @@ static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
udelay(40);
}
- netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out");
+ netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n");
return -EIO;
}
@@ -396,28 +473,49 @@ static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr,
mutex_lock(&pdata->dataport_mutex);
ret = smsc75xx_dataport_wait_not_busy(dev);
- check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry");
+ if (ret < 0) {
+ netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n");
+ goto done;
+ }
ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
- check_warn_goto_done(ret, "Error reading DP_SEL");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading DP_SEL\n");
+ goto done;
+ }
dp_sel &= ~DP_SEL_RSEL;
dp_sel |= ram_select;
ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
- check_warn_goto_done(ret, "Error writing DP_SEL");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing DP_SEL\n");
+ goto done;
+ }
for (i = 0; i < length; i++) {
ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
- check_warn_goto_done(ret, "Error writing DP_ADDR");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing DP_ADDR\n");
+ goto done;
+ }
ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
- check_warn_goto_done(ret, "Error writing DP_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing DP_DATA\n");
+ goto done;
+ }
ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
- check_warn_goto_done(ret, "Error writing DP_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing DP_CMD\n");
+ goto done;
+ }
ret = smsc75xx_dataport_wait_not_busy(dev);
- check_warn_goto_done(ret, "smsc75xx_dataport_write timeout");
+ if (ret < 0) {
+ netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n");
+ goto done;
+ }
}
done:
@@ -438,14 +536,15 @@ static void smsc75xx_deferred_multicast_write(struct work_struct *param)
struct usbnet *dev = pdata->dev;
int ret;
- netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x",
- pdata->rfe_ctl);
+ netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+ pdata->rfe_ctl);
smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- check_warn(ret, "Error writing RFE_CRL");
+ if (ret < 0)
+ netdev_warn(dev->net, "Error writing RFE_CRL\n");
}
static void smsc75xx_set_multicast(struct net_device *netdev)
@@ -465,15 +564,15 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
pdata->multicast_hash_table[i] = 0;
if (dev->net->flags & IFF_PROMISC) {
- netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
} else if (dev->net->flags & IFF_ALLMULTI) {
- netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
+ netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
} else if (!netdev_mc_empty(dev->net)) {
struct netdev_hw_addr *ha;
- netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+ netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n");
pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
@@ -483,7 +582,7 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
(1 << (bitnum % 32));
}
} else {
- netif_dbg(dev, drv, dev->net, "receive own packets only");
+ netif_dbg(dev, drv, dev->net, "receive own packets only\n");
pdata->rfe_ctl |= RFE_CTL_DPF;
}
@@ -511,18 +610,24 @@ static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
if (cap & FLOW_CTRL_RX)
flow |= FLOW_RX_FCEN;
- netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
} else {
- netif_dbg(dev, link, dev->net, "half duplex");
+ netif_dbg(dev, link, dev->net, "half duplex\n");
}
ret = smsc75xx_write_reg(dev, FLOW, flow);
- check_warn_return(ret, "Error writing FLOW");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing FLOW\n");
+ return ret;
+ }
ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
- check_warn_return(ret, "Error writing FCT_FLOW");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing FCT_FLOW\n");
+ return ret;
+ }
return 0;
}
@@ -539,16 +644,18 @@ static int smsc75xx_link_reset(struct usbnet *dev)
PHY_INT_SRC_CLEAR_ALL);
ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
- check_warn_return(ret, "Error writing INT_STS");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing INT_STS\n");
+ return ret;
+ }
mii_check_media(mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x"
- " rmtadv: %04x", ethtool_cmd_speed(&ecmd),
- ecmd.duplex, lcladv, rmtadv);
+ netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
}
@@ -558,21 +665,21 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
u32 intdata;
if (urb->actual_length != 4) {
- netdev_warn(dev->net,
- "unexpected urb length %d", urb->actual_length);
+ netdev_warn(dev->net, "unexpected urb length %d\n",
+ urb->actual_length);
return;
}
memcpy(&intdata, urb->transfer_buffer, 4);
le32_to_cpus(&intdata);
- netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata);
+ netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT)
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
else
- netdev_warn(dev->net,
- "unexpected interrupt, intdata=0x%08X", intdata);
+ netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
+ intdata);
}
static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
@@ -596,8 +703,8 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
if (ee->magic != LAN75XX_EEPROM_MAGIC) {
- netdev_warn(dev->net,
- "EEPROM: magic value mismatch: 0x%x", ee->magic);
+ netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n",
+ ee->magic);
return -EINVAL;
}
@@ -619,9 +726,15 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
{
struct usbnet *dev = netdev_priv(net);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ int ret;
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
- return 0;
+
+ ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+ if (ret < 0)
+ netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret);
+
+ return ret;
}
static const struct ethtool_ops smsc75xx_ethtool_ops = {
@@ -657,14 +770,14 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
netif_dbg(dev, ifup, dev->net,
- "MAC address read from EEPROM");
+ "MAC address read from EEPROM\n");
return;
}
}
/* no eeprom, or eeprom values are invalid. generate random MAC */
eth_hw_addr_random(dev->net);
- netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
static int smsc75xx_set_mac_address(struct usbnet *dev)
@@ -674,19 +787,29 @@ static int smsc75xx_set_mac_address(struct usbnet *dev)
u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;
int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
- check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
- check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret);
+ return ret;
+ }
addr_hi |= ADDR_FILTX_FB_VALID;
ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
- check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
- check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret);
+ if (ret < 0)
+ netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret);
- return 0;
+ return ret;
}
static int smsc75xx_phy_initialize(struct usbnet *dev)
@@ -708,12 +831,15 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
do {
msleep(10);
bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
- check_warn_return(bmcr, "Error reading MII_BMCR");
+ if (bmcr < 0) {
+ netdev_warn(dev->net, "Error reading MII_BMCR\n");
+ return bmcr;
+ }
timeout++;
} while ((bmcr & BMCR_RESET) && (timeout < 100));
if (timeout >= 100) {
- netdev_warn(dev->net, "timeout on PHY Reset");
+ netdev_warn(dev->net, "timeout on PHY Reset\n");
return -EIO;
}
@@ -725,14 +851,18 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
/* read and write to clear phy interrupt status */
ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- check_warn_return(ret, "Error reading PHY_INT_SRC");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ return ret;
+ }
+
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
PHY_INT_MASK_DEFAULT);
mii_nway_restart(&dev->mii);
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
return 0;
}
@@ -743,14 +873,20 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
bool rxenabled;
ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
- check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
+ return ret;
+ }
rxenabled = ((buf & MAC_RX_RXEN) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN;
ret = smsc75xx_write_reg(dev, MAC_RX, buf);
- check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
+ return ret;
+ }
}
/* add 4 to size for FCS */
@@ -758,12 +894,18 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);
ret = smsc75xx_write_reg(dev, MAC_RX, buf);
- check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
+ return ret;
+ }
if (rxenabled) {
buf |= MAC_RX_RXEN;
ret = smsc75xx_write_reg(dev, MAC_RX, buf);
- check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
+ return ret;
+ }
}
return 0;
@@ -774,7 +916,10 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
struct usbnet *dev = netdev_priv(netdev);
int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
- check_warn_return(ret, "Failed to set mac rx frame length");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to set mac rx frame length\n");
+ return ret;
+ }
return usbnet_change_mtu(netdev, new_mtu);
}
@@ -799,19 +944,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
/* it's racing here! */
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- check_warn_return(ret, "Error writing RFE_CTL");
+ if (ret < 0)
+ netdev_warn(dev->net, "Error writing RFE_CTL\n");
- return 0;
+ return ret;
}
-static int smsc75xx_wait_ready(struct usbnet *dev)
+static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
{
int timeout = 0;
do {
u32 buf;
- int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
- check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+ int ret;
+
+ ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm);
+
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
if (buf & PMT_CTL_DEV_RDY)
return 0;
@@ -820,7 +972,7 @@ static int smsc75xx_wait_ready(struct usbnet *dev)
timeout++;
} while (timeout < 100);
- netdev_warn(dev->net, "timeout waiting for device ready");
+ netdev_warn(dev->net, "timeout waiting for device ready\n");
return -EIO;
}
@@ -830,79 +982,112 @@ static int smsc75xx_reset(struct usbnet *dev)
u32 buf;
int ret = 0, timeout;
- netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
+ netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n");
- ret = smsc75xx_wait_ready(dev);
- check_warn_return(ret, "device not ready in smsc75xx_reset");
+ ret = smsc75xx_wait_ready(dev, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "device not ready in smsc75xx_reset\n");
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
buf |= HW_CFG_LRST;
ret = smsc75xx_write_reg(dev, HW_CFG, buf);
- check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ return ret;
+ }
timeout = 0;
do {
msleep(10);
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
timeout++;
} while ((buf & HW_CFG_LRST) && (timeout < 100));
if (timeout >= 100) {
- netdev_warn(dev->net, "timeout on completion of Lite Reset");
+ netdev_warn(dev->net, "timeout on completion of Lite Reset\n");
return -EIO;
}
- netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY");
+ netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n");
ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
- check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
buf |= PMT_CTL_PHY_RST;
ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
- check_warn_return(ret, "Failed to write PMT_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+ return ret;
+ }
timeout = 0;
do {
msleep(10);
ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
- check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
timeout++;
} while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
if (timeout >= 100) {
- netdev_warn(dev->net, "timeout waiting for PHY Reset");
+ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
return -EIO;
}
- netif_dbg(dev, ifup, dev->net, "PHY reset complete");
-
- smsc75xx_init_mac_address(dev);
+ netif_dbg(dev, ifup, dev->net, "PHY reset complete\n");
ret = smsc75xx_set_mac_address(dev);
- check_warn_return(ret, "Failed to set mac address");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to set mac address\n");
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr);
+ netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
+ dev->net->dev_addr);
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
+ buf);
buf |= HW_CFG_BIR;
ret = smsc75xx_write_reg(dev, HW_CFG, buf);
- check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after "
- "writing HW_CFG_BIR: 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n",
+ buf);
if (!turbo_mode) {
buf = 0;
@@ -915,99 +1100,157 @@ static int smsc75xx_reset(struct usbnet *dev)
dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
}
- netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld",
- (ulong)dev->rx_urb_size);
+ netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
+ (ulong)dev->rx_urb_size);
ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
- check_warn_return(ret, "Failed to write BURST_CAP: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
- check_warn_return(ret, "Failed to read BURST_CAP: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net,
- "Read Value from BURST_CAP after writing: 0x%08x", buf);
+ "Read Value from BURST_CAP after writing: 0x%08x\n", buf);
ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
- check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
- check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net,
- "Read Value from BULK_IN_DLY after writing: 0x%08x", buf);
+ "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf);
if (turbo_mode) {
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
buf |= (HW_CFG_MEF | HW_CFG_BCE);
ret = smsc75xx_write_reg(dev, HW_CFG, buf);
- check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
}
/* set FIFO sizes */
buf = (MAX_RX_FIFO_SIZE - 512) / 512;
ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
- check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf);
buf = (MAX_TX_FIFO_SIZE - 512) / 512;
ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
- check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf);
ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
- check_warn_return(ret, "Failed to write INT_STS: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, ID_REV, &buf);
- check_warn_return(ret, "Failed to read ID_REV: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf);
ret = smsc75xx_read_reg(dev, E2P_CMD, &buf);
- check_warn_return(ret, "Failed to read E2P_CMD: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret);
+ return ret;
+ }
/* only set default GPIO/LED settings if no EEPROM is detected */
if (!(buf & E2P_CMD_LOADED)) {
ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
- check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret);
+ return ret;
+ }
buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
- check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
+ return ret;
+ }
}
ret = smsc75xx_write_reg(dev, FLOW, 0);
- check_warn_return(ret, "Failed to write FLOW: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
- check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret);
+ return ret;
+ }
/* Don't need rfe_ctl_lock during initialisation */
ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
- check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret);
+ return ret;
+ }
pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- check_warn_return(ret, "Failed to write RFE_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
- check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
+ netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n",
+ pdata->rfe_ctl);
/* Enable or disable checksum offload engines */
smsc75xx_set_features(dev->net, dev->net->features);
@@ -1015,69 +1258,111 @@ static int smsc75xx_reset(struct usbnet *dev)
smsc75xx_set_multicast(dev->net);
ret = smsc75xx_phy_initialize(dev);
- check_warn_return(ret, "Failed to initialize PHY: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
- check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
+ return ret;
+ }
/* enable PHY interrupts */
buf |= INT_ENP_PHY_INT;
ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
- check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
+ return ret;
+ }
/* allow mac to detect speed and duplex from phy */
ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
- check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
+ return ret;
+ }
buf |= (MAC_CR_ADD | MAC_CR_ASD);
ret = smsc75xx_write_reg(dev, MAC_CR, buf);
- check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
- check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret);
+ return ret;
+ }
buf |= MAC_TX_TXEN;
ret = smsc75xx_write_reg(dev, MAC_TX, buf);
- check_warn_return(ret, "Failed to write MAC_TX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_TX: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf);
ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
- check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret);
+ return ret;
+ }
buf |= FCT_TX_CTL_EN;
ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
- check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
- check_warn_return(ret, "Failed to set max rx frame length");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to set max rx frame length\n");
+ return ret;
+ }
ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
- check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
+ return ret;
+ }
buf |= MAC_RX_RXEN;
ret = smsc75xx_write_reg(dev, MAC_RX, buf);
- check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf);
ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
- check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret);
+ return ret;
+ }
buf |= FCT_RX_CTL_EN;
ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
- check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf);
+ netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf);
- netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0");
+ netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n");
return 0;
}
@@ -1102,14 +1387,17 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
ret = usbnet_get_endpoints(dev, intf);
- check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
+ return ret;
+ }
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
GFP_KERNEL);
pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (!pdata) {
- netdev_warn(dev->net, "Unable to allocate smsc75xx_priv");
+ netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n");
return -ENOMEM;
}
@@ -1132,8 +1420,20 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
+ ret = smsc75xx_wait_ready(dev, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
+ return ret;
+ }
+
+ smsc75xx_init_mac_address(dev);
+
/* Init all registers */
ret = smsc75xx_reset(dev);
+ if (ret < 0) {
+ netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
+ return ret;
+ }
dev->net->netdev_ops = &smsc75xx_netdev_ops;
dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
@@ -1147,172 +1447,647 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
- netif_dbg(dev, ifdown, dev->net, "free pdata");
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
dev->data[0] = 0;
}
}
+static u16 smsc_crc(const u8 *buffer, size_t len)
+{
+ return bitrev16(crc16(0xFFFF, buffer, len));
+}
+
+static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg,
+ u32 wuf_mask1)
+{
+ int cfg_base = WUF_CFGX + filter * 4;
+ int mask_base = WUF_MASKX + filter * 16;
+ int ret;
+
+ ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_CFGX\n");
+ return ret;
+ }
+
+ ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_MASKX\n");
+ return ret;
+ }
+
+ ret = smsc75xx_write_reg(dev, mask_base + 4, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_MASKX\n");
+ return ret;
+ }
+
+ ret = smsc75xx_write_reg(dev, mask_base + 8, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_MASKX\n");
+ return ret;
+ }
+
+ ret = smsc75xx_write_reg(dev, mask_base + 12, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_MASKX\n");
+ return ret;
+ }
+
+ return 0;
+}
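
A minimal sketch of how smsc_crc() and smsc75xx_write_wuff() combine, mirroring the WAKE_MCAST and WAKE_ARP paths further down in smsc75xx_suspend(). The wrapper name example_program_wakeup_filters is illustrative only, and reading the 0x0C << 16 field as the byte offset of the EtherType within the frame is an assumption; the register values and masks themselves are taken from the patch:

/* Illustrative sketch (not part of the patch): program filter 0 to match
 * the 01:00:5e multicast prefix (mask 0x0007 selects the first three
 * destination bytes) and filter 1 to match the ARP EtherType 0x0806,
 * with 0x0C << 16 presumably placing the match window at offset 12.
 * Assumes the driver's WUF_* definitions and helpers above are in scope.
 */
static int example_program_wakeup_filters(struct usbnet *dev)
{
	const u8 mcast[] = {0x01, 0x00, 0x5E};
	const u8 arp[] = {0x08, 0x06};
	int ret;

	ret = smsc75xx_write_wuff(dev, 0,
				  WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST |
				  smsc_crc(mcast, 3), 0x0007);
	if (ret < 0)
		return ret;

	return smsc75xx_write_wuff(dev, 1,
				   WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL |
				   (0x0C << 16) | smsc_crc(arp, 2), 0x0003);
}
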
+
+static int smsc75xx_enter_suspend0(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
+
+ val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
+ val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND0;
+
+ return 0;
+}
+
+static int smsc75xx_enter_suspend1(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
+
+ val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+ val |= PMT_CTL_SUS_MODE_1;
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ /* clear wol status, enable energy detection */
+ val &= ~PMT_CTL_WUPS;
+ val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND1;
+
+ return 0;
+}
+
+static int smsc75xx_enter_suspend2(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
+
+ val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+ val |= PMT_CTL_SUS_MODE_2;
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND2;
+
+ return 0;
+}
+
+static int smsc75xx_enter_suspend3(struct usbnet *dev)
+{
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading FCT_RX_CTL\n");
+ return ret;
+ }
+
+ if (val & FCT_RX_CTL_RXUSED) {
+ netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n");
+ return -EBUSY;
+ }
+
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
+
+ val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+ val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN;
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ /* clear wol status */
+ val &= ~PMT_CTL_WUPS;
+ val |= PMT_CTL_WUPS_WOL;
+
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND3;
+
+ return 0;
+}
+
+static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+
+ netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
+
+ /* read to clear */
+ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ return ret;
+ }
+
+ /* enable interrupt source */
+ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
+ return ret;
+ }
+
+ ret |= mask;
+
+ smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+
+ return 0;
+}
+
+static int smsc75xx_link_ok_nopm(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+
+ /* first, a dummy read, needed to latch some MII phys */
+ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ return ret;
+ }
+
+ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ return ret;
+ }
+
+ return !!(ret & BMSR_LSTATUS);
+}
+
+static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up)
+{
+ int ret;
+
+ if (!netif_running(dev->net)) {
+ /* interface is ifconfig down so fully power down hw */
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
+ return smsc75xx_enter_suspend2(dev);
+ }
+
+ if (!link_up) {
+ /* link is down so enter EDPD mode */
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
+
+ /* enable PHY wakeup events in case the cable is attached */
+ ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_ANEG_COMP);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_info(dev->net, "entering SUSPEND1 mode\n");
+ return smsc75xx_enter_suspend1(dev);
+ }
+
+ /* enable PHY wakeup events so we can remote wakeup if the cable is pulled */
+ ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_LINK_DOWN);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
+ return smsc75xx_enter_suspend3(dev);
+}
+
static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u32 val, link_up;
int ret;
- u32 val;
ret = usbnet_suspend(intf, message);
- check_warn_return(ret, "usbnet_suspend error");
+ if (ret < 0) {
+ netdev_warn(dev->net, "usbnet_suspend error\n");
+ return ret;
+ }
- /* if no wol options set, enter lowest power SUSPEND2 mode */
- if (!(pdata->wolopts & SUPPORTED_WAKE)) {
- netdev_info(dev->net, "entering SUSPEND2 mode");
+ if (pdata->suspend_flags) {
+ netdev_warn(dev->net, "error during last resume\n");
+ pdata->suspend_flags = 0;
+ }
+
+ /* determine if link is up using only _nopm functions */
+ link_up = smsc75xx_link_ok_nopm(dev);
+
+ if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ ret = smsc75xx_autosuspend(dev, link_up);
+ goto done;
+ }
+
+ /* if we get this far we're not autosuspending */
+ /* if no wol options set, or if link is down and we're not waking on
+ * PHY activity, enter lowest power SUSPEND2 mode
+ */
+ if (!(pdata->wolopts & SUPPORTED_WAKE) ||
+ !(link_up || (pdata->wolopts & WAKE_PHY))) {
+ netdev_info(dev->net, "entering SUSPEND2 mode\n");
/* disable energy detect (link up) & wake up events */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
val &= ~(WUCSR_MPEN | WUCSR_WUEN);
- ret = smsc75xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ goto done;
+ }
val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ goto done;
+ }
- /* enter suspend2 mode */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ ret = smsc75xx_enter_suspend2(dev);
+ goto done;
+ }
- val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
- val |= PMT_CTL_SUS_MODE_2;
+ if (pdata->wolopts & WAKE_PHY) {
+ ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
+ (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN));
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ goto done;
+ }
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ /* if link is down then configure EDPD and enter SUSPEND1,
+ * otherwise enter SUSPEND0 below
+ */
+ if (!link_up) {
+ struct mii_if_info *mii = &dev->mii;
+ netdev_info(dev->net, "entering SUSPEND1 mode\n");
+
+ /* enable energy detect power-down mode */
+ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id,
+ PHY_MODE_CTRL_STS);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
+ goto done;
+ }
- return 0;
+ ret |= MODE_CTRL_STS_EDPWRDOWN;
+
+ smsc75xx_mdio_write_nopm(dev->net, mii->phy_id,
+ PHY_MODE_CTRL_STS, ret);
+
+ /* enter SUSPEND1 mode */
+ ret = smsc75xx_enter_suspend1(dev);
+ goto done;
+ }
}
- if (pdata->wolopts & WAKE_MAGIC) {
- /* clear any pending magic packet status */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) {
+ int i, filter = 0;
- val |= WUCSR_MPR;
+ /* disable all filters */
+ for (i = 0; i < WUF_NUM; i++) {
+ ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUF_CFGX\n");
+ goto done;
+ }
+ }
- ret = smsc75xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
- }
+ if (pdata->wolopts & WAKE_MCAST) {
+ const u8 mcast[] = {0x01, 0x00, 0x5E};
+ netdev_info(dev->net, "enabling multicast detection\n");
- /* enable/disable magic packup wake */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST
+ | smsc_crc(mcast, 3);
+ ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing wakeup filter\n");
+ goto done;
+ }
+ }
- if (pdata->wolopts & WAKE_MAGIC) {
- netdev_info(dev->net, "enabling magic packet wakeup");
- val |= WUCSR_MPEN;
+ if (pdata->wolopts & WAKE_ARP) {
+ const u8 arp[] = {0x08, 0x06};
+ netdev_info(dev->net, "enabling ARP detection\n");
+
+ val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16)
+ | smsc_crc(arp, 2);
+ ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing wakeup filter\n");
+ goto done;
+ }
+ }
+
+ /* clear any pending pattern match packet status */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
+
+ val |= WUCSR_WUFR;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
+
+ netdev_info(dev->net, "enabling packet match detection\n");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
+
+ val |= WUCSR_WUEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
} else {
- netdev_info(dev->net, "disabling magic packet wakeup");
- val &= ~WUCSR_MPEN;
+ netdev_info(dev->net, "disabling packet match detection\n");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
+
+ val &= ~WUCSR_WUEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
}
- ret = smsc75xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ /* disable magic, bcast & unicast wakeup sources */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
- /* enable wol wakeup source */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN);
- val |= PMT_CTL_WOL_EN;
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ if (pdata->wolopts & WAKE_PHY) {
+ netdev_info(dev->net, "enabling PHY wakeup\n");
- /* enable receiver */
- ret = smsc75xx_read_reg(dev, MAC_RX, &val);
- check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ goto done;
+ }
- val |= MAC_RX_RXEN;
+ /* clear wol status, enable energy detection */
+ val &= ~PMT_CTL_WUPS;
+ val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
- ret = smsc75xx_write_reg(dev, MAC_RX, val);
- check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ goto done;
+ }
+ }
- /* some wol options are enabled, so enter SUSPEND0 */
- netdev_info(dev->net, "entering SUSPEND0 mode");
+ if (pdata->wolopts & WAKE_MAGIC) {
+ netdev_info(dev->net, "enabling magic packet wakeup\n");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ /* clear any pending magic packet status */
+ val |= WUCSR_MPR | WUCSR_MPEN;
- val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
- val |= PMT_CTL_SUS_MODE_0;
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
+ }
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ if (pdata->wolopts & WAKE_BCAST) {
+ netdev_info(dev->net, "enabling broadcast detection\n");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
- /* clear wol status */
- val &= ~PMT_CTL_WUPS;
- val |= PMT_CTL_WUPS_WOL;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ val |= WUCSR_BCAST_FR | WUCSR_BCST_EN;
- /* read back PMT_CTL */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
+ }
- smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+ if (pdata->wolopts & WAKE_UCAST) {
+ netdev_info(dev->net, "enabling unicast detection\n");
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
- return 0;
+ val |= WUCSR_WUFR | WUCSR_PFDA_EN;
+
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
+ }
+
+ /* enable receiver to enable frame reception */
+ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
+ goto done;
+ }
+
+ val |= MAC_RX_RXEN;
+
+ ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
+ goto done;
+ }
+
+ /* some wol options are enabled, so enter SUSPEND0 */
+ netdev_info(dev->net, "entering SUSPEND0 mode\n");
+ ret = smsc75xx_enter_suspend0(dev);
+
+done:
+ if (ret)
+ usbnet_resume(intf);
+ return ret;
}
static int smsc75xx_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ u8 suspend_flags = pdata->suspend_flags;
int ret;
u32 val;
- if (pdata->wolopts & WAKE_MAGIC) {
- netdev_info(dev->net, "resuming from SUSPEND0");
+ netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
- smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+ /* do this first to ensure it's cleared even in error case */
+ pdata->suspend_flags = 0;
- /* Disable magic packup wake */
- ret = smsc75xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ if (suspend_flags & SUSPEND_ALLMODES) {
+ /* Disable wakeup sources */
+ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ return ret;
+ }
- val &= ~WUCSR_MPEN;
+ val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN
+ | WUCSR_BCST_EN);
- ret = smsc75xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ return ret;
+ }
/* clear wake-up status */
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
val &= ~PMT_CTL_WOL_EN;
val |= PMT_CTL_WUPS;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
- } else {
- netdev_info(dev->net, "resuming from SUSPEND2");
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
+ }
- ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
- check_warn_return(ret, "Error reading PMT_CTL");
+ if (suspend_flags & SUSPEND_SUSPEND2) {
+ netdev_info(dev->net, "resuming from SUSPEND2\n");
+
+ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PMT_CTL\n");
+ return ret;
+ }
val |= PMT_CTL_PHY_PWRUP;
- ret = smsc75xx_write_reg(dev, PMT_CTL, val);
- check_warn_return(ret, "Error writing PMT_CTL");
+ ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PMT_CTL\n");
+ return ret;
+ }
}
- ret = smsc75xx_wait_ready(dev);
- check_warn_return(ret, "device not ready in smsc75xx_resume");
+ ret = smsc75xx_wait_ready(dev, 1);
+ if (ret < 0) {
+ netdev_warn(dev->net, "device not ready in smsc75xx_resume\n");
+ return ret;
+ }
return usbnet_resume(intf);
}
@@ -1352,7 +2127,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
- "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
dev->net->stats.rx_errors++;
dev->net->stats.rx_dropped++;
@@ -1364,7 +2139,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (ETH_FRAME_LEN + 12))) {
netif_dbg(dev, rx_err, dev->net,
- "size err rx_cmd_a=0x%08x", rx_cmd_a);
+ "size err rx_cmd_a=0x%08x\n",
+ rx_cmd_a);
return 0;
}
@@ -1381,7 +2157,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!ax_skb)) {
- netdev_warn(dev->net, "Error allocating skb");
+ netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
@@ -1406,7 +2182,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (unlikely(skb->len < 0)) {
- netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+ netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
return 0;
}
@@ -1454,6 +2230,12 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
return skb;
}
+static int smsc75xx_manage_power(struct usbnet *dev, int on)
+{
+ dev->intf->needs_remote_wakeup = on;
+ return 0;
+}
+
static const struct driver_info smsc75xx_info = {
.description = "smsc75xx USB 2.0 Gigabit Ethernet",
.bind = smsc75xx_bind,
@@ -1463,6 +2245,7 @@ static const struct driver_info smsc75xx_info = {
.rx_fixup = smsc75xx_rx_fixup,
.tx_fixup = smsc75xx_tx_fixup,
.status = smsc75xx_status,
+ .manage_power = smsc75xx_manage_power,
.flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};
@@ -1490,6 +2273,7 @@ static struct usb_driver smsc75xx_driver = {
.reset_resume = smsc75xx_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
+ .supports_autosuspend = 1,
};
module_usb_driver(smsc75xx_driver);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 362cb8cfeb92..9b736701f854 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -26,6 +26,8 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
@@ -46,16 +48,12 @@
#define SMSC95XX_INTERNAL_PHY_ID (1)
#define SMSC95XX_TX_OVERHEAD (8)
#define SMSC95XX_TX_OVERHEAD_CSUM (12)
-#define SUPPORTED_WAKE (WAKE_MAGIC)
+#define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
+ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
-#define check_warn(ret, fmt, args...) \
- ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
-
-#define check_warn_return(ret, fmt, args...) \
- ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
-
-#define check_warn_goto_done(ret, fmt, args...) \
- ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
+#define FEATURE_8_WAKEUP_FILTERS (0x01)
+#define FEATURE_PHY_NLP_CROSSOVER (0x02)
+#define FEATURE_AUTOSUSPEND (0x04)
struct smsc95xx_priv {
u32 mac_cr;
@@ -63,105 +61,107 @@ struct smsc95xx_priv {
u32 hash_lo;
u32 wolopts;
spinlock_t mac_cr_lock;
-};
-
-struct usb_context {
- struct usb_ctrlrequest req;
- struct usbnet *dev;
+ u8 features;
};
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
- u32 *data)
+static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_READ_REGISTER,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+ ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
+ netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
+ index, ret);
- le32_to_cpus(buf);
- *data = *buf;
- kfree(buf);
+ le32_to_cpus(&buf);
+ *data = buf;
return ret;
}
-static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
- u32 data)
+static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data, int in_pm)
{
- u32 *buf = kmalloc(4, GFP_KERNEL);
+ u32 buf;
int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
BUG_ON(!dev);
- if (!buf)
- return -ENOMEM;
-
- *buf = data;
- cpu_to_le32s(buf);
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
- ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_VENDOR_REQUEST_WRITE_REGISTER,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+ buf = data;
+ cpu_to_le32s(&buf);
+ ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
+ | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, index, &buf, 4);
if (unlikely(ret < 0))
- netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
-
- kfree(buf);
+ netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
+ index, ret);
return ret;
}
-static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
+static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
+ u32 *data)
{
- if (WARN_ON_ONCE(!dev))
- return -EINVAL;
-
- cpu_to_le32s(&feature);
-
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ return __smsc95xx_read_reg(dev, index, data, 1);
}
-static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
+static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
+ u32 data)
{
- if (WARN_ON_ONCE(!dev))
- return -EINVAL;
+ return __smsc95xx_write_reg(dev, index, data, 1);
+}
- cpu_to_le32s(&feature);
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
+{
+ return __smsc95xx_read_reg(dev, index, data, 0);
+}
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
+{
+ return __smsc95xx_write_reg(dev, index, data, 0);
}
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
+static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
+ int in_pm)
{
unsigned long start_time = jiffies;
u32 val;
int ret;
do {
- ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
- check_warn_return(ret, "Error reading MII_ACCESS");
+ ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_ACCESS\n");
+ return ret;
+ }
+
if (!(val & MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
@@ -169,7 +169,8 @@ static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
return -EIO;
}
-static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+ int in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
@@ -178,21 +179,33 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = smsc95xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
+ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
+ goto done;
+ }
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
- ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
- check_warn_goto_done(ret, "Error writing MII_ADDR");
+ ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ goto done;
+ }
- ret = smsc95xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
+ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
+ goto done;
+ }
- ret = smsc95xx_read_reg(dev, MII_DATA, &val);
- check_warn_goto_done(ret, "Error reading MII_DATA");
+ ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_DATA\n");
+ goto done;
+ }
ret = (u16)(val & 0xFFFF);
@@ -201,8 +214,8 @@ done:
return ret;
}
-static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
- int regval)
+static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
+ int idx, int regval, int in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
@@ -211,27 +224,62 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- ret = smsc95xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
+ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
+ goto done;
+ }
val = regval;
- ret = smsc95xx_write_reg(dev, MII_DATA, val);
- check_warn_goto_done(ret, "Error writing MII_DATA");
+ ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_DATA\n");
+ goto done;
+ }
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
- ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
- check_warn_goto_done(ret, "Error writing MII_ADDR");
+ ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MII_ADDR\n");
+ goto done;
+ }
- ret = smsc95xx_phy_wait_not_busy(dev);
- check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
+ goto done;
+ }
done:
mutex_unlock(&dev->phy_mutex);
}
+static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
+ int idx)
+{
+ return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
+}
+
+static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
+ int idx, int regval)
+{
+ __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
+}
+
+static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+ return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
+}
+
+static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
+ int regval)
+{
+ __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
+}
+
static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
{
unsigned long start_time = jiffies;
@@ -240,7 +288,11 @@ static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
- check_warn_return(ret, "Error reading E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_CMD\n");
+ return ret;
+ }
+
if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
break;
udelay(40);
@@ -262,7 +314,10 @@ static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
- check_warn_return(ret, "Error reading E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_CMD\n");
+ return ret;
+ }
if (!(val & E2P_CMD_BUSY_))
return 0;
@@ -290,14 +345,20 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
for (i = 0; i < length; i++) {
val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
ret = smsc95xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_CMD\n");
+ return ret;
+ }
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
return ret;
ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
- check_warn_return(ret, "Error reading E2P_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading E2P_DATA\n");
+ return ret;
+ }
data[i] = val & 0xFF;
offset++;
@@ -322,7 +383,10 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Issue write/erase enable command */
val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
ret = smsc95xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_DATA\n");
+ return ret;
+ }
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
@@ -333,12 +397,18 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Fill data register */
val = data[i];
ret = smsc95xx_write_reg(dev, E2P_DATA, val);
- check_warn_return(ret, "Error writing E2P_DATA");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_DATA\n");
+ return ret;
+ }
/* Send "write" command */
val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
ret = smsc95xx_write_reg(dev, E2P_CMD, val);
- check_warn_return(ret, "Error writing E2P_CMD");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing E2P_CMD\n");
+ return ret;
+ }
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
@@ -350,60 +420,24 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
return 0;
}
-static void smsc95xx_async_cmd_callback(struct urb *urb)
-{
- struct usb_context *usb_context = urb->context;
- struct usbnet *dev = usb_context->dev;
- int status = urb->status;
-
- check_warn(status, "async callback failed with %d\n", status);
-
- kfree(usb_context);
- usb_free_urb(urb);
-}
-
static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
- u32 *data)
+ u32 data)
{
- struct usb_context *usb_context;
- int status;
- struct urb *urb;
const u16 size = 4;
+ u32 buf;
+ int ret;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_warn(dev->net, "Error allocating URB\n");
- return -ENOMEM;
- }
-
- usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
- if (usb_context == NULL) {
- netdev_warn(dev->net, "Error allocating control msg\n");
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- usb_context->req.bRequestType =
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- usb_context->req.bRequest = USB_VENDOR_REQUEST_WRITE_REGISTER;
- usb_context->req.wValue = 00;
- usb_context->req.wIndex = cpu_to_le16(index);
- usb_context->req.wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
- (void *)&usb_context->req, data, size,
- smsc95xx_async_cmd_callback,
- (void *)usb_context);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
- status);
- kfree(usb_context);
- usb_free_urb(urb);
- }
+ buf = data;
+ cpu_to_le32s(&buf);
- return status;
+ ret = usbnet_write_cmd_async(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, index, &buf, size);
+ if (ret < 0)
+ netdev_warn(dev->net, "Error write async cmd, sts=%d\n",
+ ret);
+ return ret;
}
/* returns hash bit number for given MAC address
@@ -460,14 +494,17 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
/* Initiate async writes, as we can't wait for completion here */
- ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
- check_warn(ret, "failed to initiate async write to HASHH");
+ ret = smsc95xx_write_reg_async(dev, HASHH, pdata->hash_hi);
+ if (ret < 0)
+ netdev_warn(dev->net, "failed to initiate async write to HASHH\n");
- ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
- check_warn(ret, "failed to initiate async write to HASHL");
+ ret = smsc95xx_write_reg_async(dev, HASHL, pdata->hash_lo);
+ if (ret < 0)
+ netdev_warn(dev->net, "failed to initiate async write to HASHL\n");
- ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
- check_warn(ret, "failed to initiate async write to MAC_CR");
+ ret = smsc95xx_write_reg_async(dev, MAC_CR, pdata->mac_cr);
+ if (ret < 0)
+ netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n");
}
static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
@@ -476,7 +513,10 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
u32 flow, afc_cfg = 0;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
- check_warn_return(ret, "Error reading AFC_CFG");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading AFC_CFG\n");
+ return ret;
+ }
if (duplex == DUPLEX_FULL) {
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -501,12 +541,16 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
ret = smsc95xx_write_reg(dev, FLOW, flow);
- check_warn_return(ret, "Error writing FLOW");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing FLOW\n");
+ return ret;
+ }
ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
- check_warn_return(ret, "Error writing AFC_CFG");
+ if (ret < 0)
+ netdev_warn(dev->net, "Error writing AFC_CFG\n");
- return 0;
+ return ret;
}
static int smsc95xx_link_reset(struct usbnet *dev)
@@ -520,10 +564,16 @@ static int smsc95xx_link_reset(struct usbnet *dev)
/* clear interrupt status */
ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- check_warn_return(ret, "Error reading PHY_INT_SRC");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ return ret;
+ }
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- check_warn_return(ret, "Error writing INT_STS");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing INT_STS\n");
+ return ret;
+ }
mii_check_media(mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -545,12 +595,16 @@ static int smsc95xx_link_reset(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- check_warn_return(ret, "Error writing MAC_CR");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing MAC_CR\n");
+ return ret;
+ }
ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
- check_warn_return(ret, "Error updating PHY flow control");
+ if (ret < 0)
+ netdev_warn(dev->net, "Error updating PHY flow control\n");
- return 0;
+ return ret;
}
static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
@@ -584,7 +638,10 @@ static int smsc95xx_set_features(struct net_device *netdev,
int ret;
ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
- check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
+ return ret;
+ }
if (features & NETIF_F_HW_CSUM)
read_buf |= Tx_COE_EN_;
@@ -597,7 +654,10 @@ static int smsc95xx_set_features(struct net_device *netdev,
read_buf &= ~Rx_COE_EN_;
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
- check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
@@ -635,7 +695,7 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
{
/* all smsc95xx registers */
- return COE_CR - ID_REV + 1;
+ return COE_CR - ID_REV + sizeof(u32);
}
static void
@@ -677,9 +737,15 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
{
struct usbnet *dev = netdev_priv(net);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int ret;
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
- return 0;
+
+ ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
+ if (ret < 0)
+ netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret);
+
+ return ret;
}
static const struct ethtool_ops smsc95xx_ethtool_ops = {
@@ -734,12 +800,16 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
int ret;
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
- check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
- check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
+ if (ret < 0)
+ netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
- return 0;
+ return ret;
}
/* starts the TX path */
@@ -755,17 +825,21 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
+ return ret;
+ }
/* Enable Tx at SCSRs */
ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
- check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
+ if (ret < 0)
+ netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret);
- return 0;
+ return ret;
}
/* Starts the Receive path */
-static int smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
@@ -775,10 +849,11 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
+ ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
+ if (ret < 0)
+ netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
- return 0;
+ return ret;
}
static int smsc95xx_phy_initialize(struct usbnet *dev)
@@ -813,7 +888,10 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
/* read to clear */
ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n");
+ return ret;
+ }
smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
PHY_INT_MASK_DEFAULT_);
@@ -832,13 +910,19 @@ static int smsc95xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
- check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
+ return ret;
+ }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
timeout++;
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
@@ -848,13 +932,19 @@ static int smsc95xx_reset(struct usbnet *dev)
}
ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
- check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
+ return ret;
+ }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
- check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
+ return ret;
+ }
timeout++;
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
@@ -867,22 +957,32 @@ static int smsc95xx_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- netif_dbg(dev, ifup, dev->net,
- "MAC Address: %pM\n", dev->net->dev_addr);
+ netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
+ dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net,
- "Read Value from HW_CFG : 0x%08x\n", read_buf);
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
+ read_buf);
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
+
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
read_buf);
@@ -898,34 +998,49 @@ static int smsc95xx_reset(struct usbnet *dev)
dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
}
- netif_dbg(dev, ifup, dev->net,
- "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
+ netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
+ (ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
- check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
- check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net,
"Read Value from BURST_CAP after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
- check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
- check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net,
"Read Value from BULK_IN_DLY after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
- netif_dbg(dev, ifup, dev->net,
- "Read Value from HW_CFG: 0x%08x\n", read_buf);
+ netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n",
+ read_buf);
if (turbo_mode)
read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -936,66 +1051,111 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= NET_IP_ALIGN << 9;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
- check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
- check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
+ return ret;
+ }
/* Init Tx */
ret = smsc95xx_write_reg(dev, FLOW, 0);
- check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
- check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
+ return ret;
+ }
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
- check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
+ return ret;
+ }
/* Init Rx */
/* Set Vlan */
ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
- check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret);
+ return ret;
+ }
/* Enable or disable checksum offload engines */
ret = smsc95xx_set_features(dev->net, dev->net->features);
- check_warn_return(ret, "Failed to set checksum offload features");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to set checksum offload features\n");
+ return ret;
+ }
smsc95xx_set_multicast(dev->net);
ret = smsc95xx_phy_initialize(dev);
- check_warn_return(ret, "Failed to init PHY");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to init PHY\n");
+ return ret;
+ }
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
- check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
+ return ret;
+ }
/* enable PHY interrupts */
read_buf |= INT_EP_CTL_PHY_INT_;
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
- check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
+ return ret;
+ }
ret = smsc95xx_start_tx_path(dev);
- check_warn_return(ret, "Failed to start TX path");
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to start TX path\n");
+ return ret;
+ }
- ret = smsc95xx_start_rx_path(dev);
- check_warn_return(ret, "Failed to start RX path");
+ ret = smsc95xx_start_rx_path(dev, 0);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to start RX path\n");
+ return ret;
+ }
netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
return 0;
@@ -1017,12 +1177,16 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = NULL;
+ u32 val;
int ret;
printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
ret = usbnet_get_endpoints(dev, intf);
- check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
+ if (ret < 0) {
+ netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
+ return ret;
+ }
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
GFP_KERNEL);
@@ -1047,6 +1211,22 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* Init all registers */
ret = smsc95xx_reset(dev);
+ /* detect device revision as different features may be available */
+ ret = smsc95xx_read_reg(dev, ID_REV, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ return ret;
+ }
+ val >>= 16;
+
+ if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
+ (val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
+ pdata->features = (FEATURE_8_WAKEUP_FILTERS |
+ FEATURE_PHY_NLP_CROSSOVER |
+ FEATURE_AUTOSUSPEND);
+ else if (val == ID_REV_CHIP_ID_9512_)
+ pdata->features = FEATURE_8_WAKEUP_FILTERS;
+
dev->net->netdev_ops = &smsc95xx_netdev_ops;
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
@@ -1066,113 +1246,448 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
+static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
+{
+ u32 crc = bitrev16(crc16(0xFFFF, buffer, len));
+ return crc << ((filter % 2) * 16);
+}
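
Unlike the smsc75xx helper, this variant returns a u32 and shifts the 16-bit CRC by (filter % 2) * 16, presumably because two consecutive filters share one 32-bit CRC register on this part. A minimal sketch of that packing, assuming the even-numbered filter of a pair lands in the low half-word and the odd-numbered one in the high half-word; the helper name example_pack_filter_crcs is illustrative, not from the driver:

/* Illustrative sketch (not part of the patch): OR the CRCs of an
 * even/odd filter pair into the single 32-bit value written for that
 * pair (even filter -> bits 15:0, odd filter -> bits 31:16).
 */
static u32 example_pack_filter_crcs(const u8 *pat_even, size_t len_even,
				    const u8 *pat_odd, size_t len_odd,
				    int pair)
{
	return smsc_crc(pat_even, len_even, 2 * pair) |
	       smsc_crc(pat_odd, len_odd, 2 * pair + 1);
}
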
+
+static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+
+ netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
+
+ /* read to clear */
+ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ return ret;
+ }
+
+ /* enable interrupt source */
+ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
+ return ret;
+ }
+
+ ret |= mask;
+
+ smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+
+ return 0;
+}
+
+static int smsc95xx_link_ok_nopm(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret;
+
+ /* first, a dummy read, needed to latch some MII phys */
+ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ return ret;
+ }
+
+ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ return ret;
+ }
+
+ return !!(ret & BMSR_LSTATUS);
+}
+
+static int smsc95xx_enter_suspend0(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ return ret;
+ }
+
+ val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
+ val |= PM_CTL_SUS_MODE_0;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ return ret;
+ }
+
+ /* clear wol status */
+ val &= ~PM_CTL_WUPS_;
+ val |= PM_CTL_WUPS_WOL_;
+
+ /* enable energy detection */
+ if (pdata->wolopts & WAKE_PHY)
+ val |= PM_CTL_WUPS_ED_;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ return ret;
+ }
+
+ /* read back PM_CTRL */
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0)
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+
+ return ret;
+}
+
+static int smsc95xx_enter_suspend1(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct mii_if_info *mii = &dev->mii;
+ u32 val;
+ int ret;
+
+ /* reconfigure link pulse detection timing for
+ * compatibility with non-standard link partners
+ */
+ if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
+ smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
+
+ /* enable energy detect power-down mode */
+ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
+ return ret;
+ }
+
+ ret |= MODE_CTRL_STS_EDPWRDOWN_;
+
+ smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
+
+ /* enter SUSPEND1 mode */
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ return ret;
+ }
+
+ val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+ val |= PM_CTL_SUS_MODE_1;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ return ret;
+ }
+
+ /* clear wol status, enable energy detection */
+ val &= ~PM_CTL_WUPS_;
+ val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ return ret;
+}
+
+static int smsc95xx_enter_suspend2(struct usbnet *dev)
+{
+ u32 val;
+ int ret;
+
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ return ret;
+ }
+
+ val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+ val |= PM_CTL_SUS_MODE_2;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ return ret;
+}
+
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u32 val, link_up;
int ret;
- u32 val;
ret = usbnet_suspend(intf, message);
- check_warn_return(ret, "usbnet_suspend error");
+ if (ret < 0) {
+ netdev_warn(dev->net, "usbnet_suspend error\n");
+ return ret;
+ }
- /* if no wol options set, enter lowest power SUSPEND2 mode */
- if (!(pdata->wolopts & SUPPORTED_WAKE)) {
- netdev_info(dev->net, "entering SUSPEND2 mode");
+ /* determine if link is up using only _nopm functions */
+ link_up = smsc95xx_link_ok_nopm(dev);
+
+ /* if no wol options set, or if link is down and we're not waking on
+ * PHY activity, enter lowest power SUSPEND2 mode
+ */
+ if (!(pdata->wolopts & SUPPORTED_WAKE) ||
+ !(link_up || (pdata->wolopts & WAKE_PHY))) {
+ netdev_info(dev->net, "entering SUSPEND2 mode\n");
/* disable energy detect (link up) & wake up events */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
- ret = smsc95xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ goto done;
+ }
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ goto done;
+ }
- /* enter suspend2 mode */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
+ ret = smsc95xx_enter_suspend2(dev);
+ goto done;
+ }
- val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
- val |= PM_CTL_SUS_MODE_2;
+ if (pdata->wolopts & WAKE_PHY) {
+ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+ (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_));
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ goto done;
+ }
+
+ /* if link is down then configure EDPD and enter SUSPEND1,
+ * otherwise enter SUSPEND0 below
+ */
+ if (!link_up) {
+ netdev_info(dev->net, "entering SUSPEND1 mode\n");
+ ret = smsc95xx_enter_suspend1(dev);
+ goto done;
+ }
+ }
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
+ if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+ u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL);
+ u32 command[2];
+ u32 offset[2];
+ u32 crc[4];
+ int wuff_filter_count =
+ (pdata->features & FEATURE_8_WAKEUP_FILTERS) ?
+ LAN9500A_WUFF_NUM : LAN9500_WUFF_NUM;
+ int i, filter = 0;
+
+ if (!filter_mask) {
+ netdev_warn(dev->net, "Unable to allocate filter_mask\n");
+ ret = -ENOMEM;
+ goto done;
+ }
- return 0;
+ memset(command, 0, sizeof(command));
+ memset(offset, 0, sizeof(offset));
+ memset(crc, 0, sizeof(crc));
+
+ if (pdata->wolopts & WAKE_BCAST) {
+ const u8 bcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ netdev_info(dev->net, "enabling broadcast detection\n");
+ filter_mask[filter * 4] = 0x003F;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(bcast, 6, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_MCAST) {
+ const u8 mcast[] = {0x01, 0x00, 0x5E};
+ netdev_info(dev->net, "enabling multicast detection\n");
+ filter_mask[filter * 4] = 0x0007;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x09UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(mcast, 3, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_ARP) {
+ const u8 arp[] = {0x08, 0x06};
+ netdev_info(dev->net, "enabling ARP detection\n");
+ filter_mask[filter * 4] = 0x0003;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x05UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x0C << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(arp, 2, filter);
+ filter++;
+ }
+
+ if (pdata->wolopts & WAKE_UCAST) {
+ netdev_info(dev->net, "enabling unicast detection\n");
+ filter_mask[filter * 4] = 0x003F;
+ filter_mask[filter * 4 + 1] = 0x00;
+ filter_mask[filter * 4 + 2] = 0x00;
+ filter_mask[filter * 4 + 3] = 0x00;
+ command[filter/4] |= 0x01UL << ((filter % 4) * 8);
+ offset[filter/4] |= 0x00 << ((filter % 4) * 8);
+ crc[filter/2] |= smsc_crc(dev->net->dev_addr, ETH_ALEN, filter);
+ filter++;
+ }
+
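+ /* load the filter table through successive WUFF writes:
+ * wuff_filter_count * 4 byte-mask words, then the command, offset
+ * and CRC-16 values packed 4, 4 and 2 filters per 32-bit word
+ * respectively
+ */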
+ for (i = 0; i < (wuff_filter_count * 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUFF\n");
+ kfree(filter_mask);
+ goto done;
+ }
+ }
+ kfree(filter_mask);
+
+ for (i = 0; i < (wuff_filter_count / 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUFF\n");
+ goto done;
+ }
+ }
+
+ for (i = 0; i < (wuff_filter_count / 4); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUFF\n");
+ goto done;
+ }
+ }
+
+ for (i = 0; i < (wuff_filter_count / 2); i++) {
+ ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUFF\n");
+ goto done;
+ }
+ }
+
+ /* clear any pending pattern match packet status */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
+
+ val |= WUCSR_WUFR_;
+
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
+ }
val |= WUCSR_MPR_;
- ret = smsc95xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
+ }
+
+ /* enable/disable wakeup sources */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ goto done;
}
- /* enable/disable magic packup wake */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
+ netdev_info(dev->net, "enabling pattern match wakeup\n");
+ val |= WUCSR_WAKE_EN_;
+ } else {
+ netdev_info(dev->net, "disabling pattern match wakeup\n");
+ val &= ~WUCSR_WAKE_EN_;
+ }
if (pdata->wolopts & WAKE_MAGIC) {
- netdev_info(dev->net, "enabling magic packet wakeup");
+ netdev_info(dev->net, "enabling magic packet wakeup\n");
val |= WUCSR_MPEN_;
} else {
- netdev_info(dev->net, "disabling magic packet wakeup");
+ netdev_info(dev->net, "disabling magic packet wakeup\n");
val &= ~WUCSR_MPEN_;
}
- ret = smsc95xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ goto done;
+ }
/* enable wol wakeup source */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ goto done;
+ }
val |= PM_CTL_WOL_EN_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
-
- /* enable receiver */
- smsc95xx_start_rx_path(dev);
-
- /* some wol options are enabled, so enter SUSPEND0 */
- netdev_info(dev->net, "entering SUSPEND0 mode");
-
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
-
- val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
- val |= PM_CTL_SUS_MODE_0;
-
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
+ /* phy energy detect wakeup source */
+ if (pdata->wolopts & WAKE_PHY)
+ val |= PM_CTL_ED_EN_;
- /* clear wol status */
- val &= ~PM_CTL_WUPS_;
- val |= PM_CTL_WUPS_WOL_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ goto done;
+ }
- /* read back PM_CTRL */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
+ /* enable receiver to enable frame reception */
+ smsc95xx_start_rx_path(dev, 1);
- smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+ /* some wol options are enabled, so enter SUSPEND0 */
+ netdev_info(dev->net, "entering SUSPEND0 mode\n");
+ ret = smsc95xx_enter_suspend0(dev);
- return 0;
+done:
+ if (ret)
+ usbnet_resume(intf);
+ return ret;
}
static int smsc95xx_resume(struct usb_interface *intf)
@@ -1184,33 +1699,44 @@ static int smsc95xx_resume(struct usb_interface *intf)
BUG_ON(!dev);
- if (pdata->wolopts & WAKE_MAGIC) {
- smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
-
- /* Disable magic packup wake */
- ret = smsc95xx_read_reg(dev, WUCSR, &val);
- check_warn_return(ret, "Error reading WUCSR");
+ if (pdata->wolopts) {
+ /* clear wake-up sources */
+ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading WUCSR\n");
+ return ret;
+ }
- val &= ~WUCSR_MPEN_;
+ val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
- ret = smsc95xx_write_reg(dev, WUCSR, val);
- check_warn_return(ret, "Error writing WUCSR");
+ ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing WUCSR\n");
+ return ret;
+ }
/* clear wake-up status */
- ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
- check_warn_return(ret, "Error reading PM_CTRL");
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ return ret;
+ }
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, val);
- check_warn_return(ret, "Error writing PM_CTRL");
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ return ret;
+ }
}
- return usbnet_resume(intf);
- check_warn_return(ret, "usbnet_resume error");
+ ret = usbnet_resume(intf);
+ if (ret < 0)
+ netdev_warn(dev->net, "usbnet_resume error\n");
- return 0;
+ return ret;
}
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 2ff9815aa27c..f360ee372554 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -53,6 +53,11 @@
#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_ID_9500_ (0x9500)
+#define ID_REV_CHIP_ID_9500A_ (0x9E00)
+#define ID_REV_CHIP_ID_9512_ (0xEC00)
+#define ID_REV_CHIP_ID_9530_ (0x9530)
+#define ID_REV_CHIP_ID_89530_ (0x9E08)
+#define ID_REV_CHIP_ID_9730_ (0x9730)
#define INT_STS (0x08)
#define INT_STS_TX_STOP_ (0x00020000)
@@ -203,8 +208,11 @@
#define VLAN2 (0x124)
#define WUFF (0x128)
+#define LAN9500_WUFF_NUM (4)
+#define LAN9500A_WUFF_NUM (8)
#define WUCSR (0x12C)
+#define WUCSR_WFF_PTR_RST_ (0x80000000)
#define WUCSR_GUE_ (0x00000200)
#define WUCSR_WUFR_ (0x00000040)
#define WUCSR_MPR_ (0x00000020)
@@ -218,6 +226,23 @@
/* Vendor-specific PHY Definitions */
+/* EDPD NLP / crossover time configuration (LAN9500A only) */
+#define PHY_EDPD_CONFIG (16)
+#define PHY_EDPD_CONFIG_TX_NLP_EN_ ((u16)0x8000)
+#define PHY_EDPD_CONFIG_TX_NLP_1000_ ((u16)0x0000)
+#define PHY_EDPD_CONFIG_TX_NLP_768_ ((u16)0x2000)
+#define PHY_EDPD_CONFIG_TX_NLP_512_ ((u16)0x4000)
+#define PHY_EDPD_CONFIG_TX_NLP_256_ ((u16)0x6000)
+#define PHY_EDPD_CONFIG_RX_1_NLP_ ((u16)0x1000)
+#define PHY_EDPD_CONFIG_RX_NLP_64_ ((u16)0x0000)
+#define PHY_EDPD_CONFIG_RX_NLP_256_ ((u16)0x0400)
+#define PHY_EDPD_CONFIG_RX_NLP_512_ ((u16)0x0800)
+#define PHY_EDPD_CONFIG_RX_NLP_1000_ ((u16)0x0C00)
+#define PHY_EDPD_CONFIG_EXT_CROSSOVER_ ((u16)0x0001)
+#define PHY_EDPD_CONFIG_DEFAULT (PHY_EDPD_CONFIG_TX_NLP_EN_ | \
+ PHY_EDPD_CONFIG_TX_NLP_768_ | \
+ PHY_EDPD_CONFIG_RX_1_NLP_)
+
/* Mode Control/Status Register */
#define PHY_MODE_CTRL_STS (17)
#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index edb81ed06950..5e33606c1366 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
+ /* prevent rx skb allocation when error ratio is high */
+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+ usb_free_urb(urb);
+ return -ENOLINK;
+ }
+
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
break;
}
+ /* stop rx if packet error rate is high */
+ if (++dev->pkt_cnt > 30) {
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ } else {
+ if (state == rx_cleanup)
+ dev->pkt_err++;
+ if (dev->pkt_err > 20)
+ set_bit(EVENT_RX_KILL, &dev->flags);
+ }
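+ /* i.e. RX submission stops once more than 20 errors accumulate
+ * within a window of roughly 30 completions; usbnet_bh clears
+ * EVENT_RX_KILL again on its next run
+ */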
+
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
@@ -719,7 +736,8 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
- if (info->manage_power)
+ if (info->manage_power &&
+ !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
@@ -790,18 +808,23 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
+ /* reset rx error state */
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
- if (retval < 0)
- goto done_manage_power_error;
- usb_autopm_put_interface(dev->intf);
+ if (retval < 0) {
+ retval = 0;
+ set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+ } else {
+ usb_autopm_put_interface(dev->intf);
+ }
}
return retval;
-
-done_manage_power_error:
- clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -1102,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err(dev)) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
- } else {
- /* cdc_ncm collected packet; waits for more */
+ /* packet collected; minidriver waiting for more */
+ if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
- }
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
}
}
length = skb->len;
@@ -1253,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -1447,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
strcpy(net->name, "wwan%d");
+ /* devices that cannot do ARP */
+ if ((dev->driver_info->flags & FLAG_NOARP) != 0)
+ net->flags |= IFF_NOARP;
+
/* maybe the remote can't receive an Ethernet MTU */
if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
@@ -1615,6 +1643,212 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
}
EXPORT_SYMBOL(usbnet_device_suggests_idle);
+/*
+ * For devices that can do without special commands
+ */
+int usbnet_manage_power(struct usbnet *dev, int on)
+{
+ dev->intf->needs_remote_wakeup = on;
+ return 0;
+}
+EXPORT_SYMBOL(usbnet_manage_power);
+
+/*-------------------------------------------------------------------------*/
+static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ USB_CTRL_GET_TIMEOUT);
+ if (err > 0 && err <= size)
+ memcpy(data, buf, err);
+ kfree(buf);
+out:
+ return err;
+}
+
+static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data,
+ u16 size)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ USB_CTRL_SET_TIMEOUT);
+ kfree(buf);
+
+out:
+ return err;
+}
+
+/*
+ * This function must not be called from a suspend/resume callback,
+ * otherwise a deadlock will occur.
+ */
+int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+ ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd);
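+
+/* A minimal usage sketch for a minidriver register read; the request code
+ * USBNET_VENDOR_READ_REG and reg_index are illustrative values only, not
+ * part of this API:
+ *
+ *	u32 val;
+ *	int ret = usbnet_read_cmd(dev, USBNET_VENDOR_READ_REG,
+ *				  USB_DIR_IN | USB_TYPE_VENDOR |
+ *				  USB_RECIP_DEVICE,
+ *				  0, reg_index, &val, sizeof(val));
+ */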
+
+/*
+ * This function must not be called from a suspend/resume callback,
+ * otherwise a deadlock will occur.
+ */
+int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+ ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+ usb_autopm_put_interface(dev->intf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd);
+
+/*
+ * This function can safely be called from a suspend/resume callback,
+ * and generally should only be called from one.
+ */
+int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size)
+{
+ return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
+
+/*
+ * This function can safely be called from a suspend/resume callback,
+ * and generally should only be called from one.
+ */
+int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data,
+ u16 size)
+{
+ return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
+ data, size);
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
+
+static void usbnet_async_cmd_cb(struct urb *urb)
+{
+ struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+ int status = urb->status;
+
+ if (status < 0)
+ dev_dbg(&urb->dev->dev, "%s failed with %d",
+ __func__, status);
+
+ kfree(req);
+ usb_free_urb(urb);
+}
+
+/*
+ * The caller must make sure that the device can't be put into suspend
+ * state until the control URB completes.
+ */
+int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size)
+{
+ struct usb_ctrlrequest *req = NULL;
+ struct urb *urb;
+ int err = -ENOMEM;
+ void *buf = NULL;
+
+ netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
+ " value=0x%04x index=0x%04x size=%d\n",
+ cmd, reqtype, value, index, size);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(dev->net, "Error allocating URB in"
+ " %s!\n", __func__);
+ goto fail;
+ }
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_ATOMIC);
+ if (!buf) {
+ netdev_err(dev->net, "Error allocating buffer"
+ " in %s!\n", __func__);
+ goto fail_free;
+ }
+ }
+
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!req) {
+ netdev_err(dev->net, "Failed to allocate memory for %s\n",
+ __func__);
+ goto fail_free_buf;
+ }
+
+ req->bRequestType = reqtype;
+ req->bRequest = cmd;
+ req->wValue = cpu_to_le16(value);
+ req->wIndex = cpu_to_le16(index);
+ req->wLength = cpu_to_le16(size);
+
+ usb_fill_control_urb(urb, dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ (void *)req, buf, size,
+ usbnet_async_cmd_cb, req);
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ netdev_err(dev->net, "Error submitting the control"
+ " message: status=%d\n", err);
+ goto fail_free;
+ }
+ return 0;
+
+fail_free_buf:
+ kfree(buf);
+fail_free:
+ kfree(req);
+ usb_free_urb(urb);
+fail:
+ return err;
+
+}
+EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
/*-------------------------------------------------------------------------*/
static int __init usbnet_init(void)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e522ff70444c..95814d9747ef 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -264,6 +264,7 @@ static void veth_setup(struct net_device *dev)
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
@@ -339,7 +340,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (IS_ERR(net))
return PTR_ERR(net);
- peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
+ peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp);
if (IS_ERR(peer)) {
put_net(net);
return PTR_ERR(peer);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbf8b0625352..35c00c5ea02a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -51,15 +52,51 @@ struct virtnet_stats {
u64 rx_packets;
};
+/* Internal representation of a send virtqueue */
+struct send_queue {
+ /* Virtqueue associated with this send queue */
+ struct virtqueue *vq;
+
+ /* TX: fragments + linear part + virtio header */
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+ /* Name of the send queue: output.$index */
+ char name[40];
+};
+
+/* Internal representation of a receive virtqueue */
+struct receive_queue {
+ /* Virtqueue associated with this receive_queue */
+ struct virtqueue *vq;
+
+ struct napi_struct napi;
+
+ /* Number of input buffers, and max we've ever had. */
+ unsigned int num, max;
+
+ /* Chain pages by the private ptr. */
+ struct page *pages;
+
+ /* RX: fragments + linear part + virtio header */
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+ /* Name of this receive queue: input.$index */
+ char name[40];
+};
+
struct virtnet_info {
struct virtio_device *vdev;
- struct virtqueue *rvq, *svq, *cvq;
+ struct virtqueue *cvq;
struct net_device *dev;
- struct napi_struct napi;
+ struct send_queue *sq;
+ struct receive_queue *rq;
unsigned int status;
- /* Number of input buffers, and max we've ever had. */
- unsigned int num, max;
+ /* Max # of queue pairs supported by the device */
+ u16 max_queue_pairs;
+
+ /* # of queue pairs currently used by the driver */
+ u16 curr_queue_pairs;
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -67,6 +104,9 @@ struct virtnet_info {
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
+ /* Has control virtqueue */
+ bool has_cvq;
+
/* enable config space updates */
bool config_enable;
@@ -82,12 +122,14 @@ struct virtnet_info {
/* Lock for config space updates */
struct mutex config_lock;
- /* Chain pages by the private ptr. */
- struct page *pages;
+ /* Is the affinity hint set for the virtqueues? */
+ bool affinity_hint_set;
+
+ /* Per-cpu variable to show the mapping from CPU to virtqueue */
+ int __percpu *vq_index;
- /* fragments + linear part + virtio header */
- struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
- struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
struct skb_vnet_hdr {
@@ -95,7 +137,6 @@ struct skb_vnet_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mhdr;
};
- unsigned int num_sg;
};
struct padded_vnet_hdr {
@@ -108,6 +149,29 @@ struct padded_vnet_hdr {
char padding[6];
};
+/* Converting between virtqueue no. and kernel tx/rx queue no.
+ * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
+ */
+static int vq2txq(struct virtqueue *vq)
+{
+ return (virtqueue_get_queue_index(vq) - 1) / 2;
+}
+
+static int txq2vq(int txq)
+{
+ return txq * 2 + 1;
+}
+
+static int vq2rxq(struct virtqueue *vq)
+{
+ return virtqueue_get_queue_index(vq) / 2;
+}
+
+static int rxq2vq(int rxq)
+{
+ return rxq * 2;
+}
+
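+/* e.g. with two queue pairs the layout is vq0=rx0, vq1=tx0, vq2=rx1,
+ * vq3=tx1, vq4=cvq; rxq2vq(1) == 2 and a vq with index 3 maps to txq 1
+ */
+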
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct skb_vnet_hdr *)skb->cb;
@@ -117,22 +181,22 @@ static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
* private is used to chain pages for big packets, put the whole
* most recent used list in the beginning for reuse
*/
-static void give_pages(struct virtnet_info *vi, struct page *page)
+static void give_pages(struct receive_queue *rq, struct page *page)
{
struct page *end;
- /* Find end of list, sew whole thing into vi->pages. */
+ /* Find end of list, sew whole thing into vi->rq.pages. */
for (end = page; end->private; end = (struct page *)end->private);
- end->private = (unsigned long)vi->pages;
- vi->pages = page;
+ end->private = (unsigned long)rq->pages;
+ rq->pages = page;
}
-static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
- struct page *p = vi->pages;
+ struct page *p = rq->pages;
if (p) {
- vi->pages = (struct page *)p->private;
+ rq->pages = (struct page *)p->private;
/* clear private here, it is used to chain pages */
p->private = 0;
} else
@@ -140,15 +204,15 @@ static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
return p;
}
-static void skb_xmit_done(struct virtqueue *svq)
+static void skb_xmit_done(struct virtqueue *vq)
{
- struct virtnet_info *vi = svq->vdev->priv;
+ struct virtnet_info *vi = vq->vdev->priv;
/* Suppress further interrupts. */
- virtqueue_disable_cb(svq);
+ virtqueue_disable_cb(vq);
/* We were probably waiting for more output buffers. */
- netif_wake_queue(vi->dev);
+ netif_wake_subqueue(vi->dev, vq2txq(vq));
}
static void set_skb_frag(struct sk_buff *skb, struct page *page,
@@ -167,9 +231,10 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
}
/* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+static struct sk_buff *page_to_skb(struct receive_queue *rq,
struct page *page, unsigned int len)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
unsigned int copy, hdr_len, offset;
@@ -212,8 +277,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* the case of a broken device.
*/
if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
- if (net_ratelimit())
- pr_debug("%s: too much data\n", skb->dev->name);
+ net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
dev_kfree_skb(skb);
return NULL;
}
@@ -225,12 +289,12 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
}
if (page)
- give_pages(vi, page);
+ give_pages(rq, page);
return skb;
}
-static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
struct page *page;
@@ -244,7 +308,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
- page = virtqueue_get_buf(vi->rvq, &len);
+ page = virtqueue_get_buf(rq->vq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers);
@@ -257,14 +321,15 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
set_skb_frag(skb, page, 0, &len);
- --vi->num;
+ --rq->num;
}
return 0;
}
-static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
- struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct net_device *dev = vi->dev;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
struct page *page;
@@ -274,7 +339,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
if (vi->mergeable_rx_bufs || vi->big_packets)
- give_pages(vi, buf);
+ give_pages(rq, buf);
else
dev_kfree_skb(buf);
return;
@@ -286,14 +351,14 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
skb_trim(skb, len);
} else {
page = buf;
- skb = page_to_skb(vi, page, len);
+ skb = page_to_skb(rq, page, len);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- give_pages(vi, page);
+ give_pages(rq, page);
return;
}
if (vi->mergeable_rx_bufs)
- if (receive_mergeable(vi, skb)) {
+ if (receive_mergeable(rq, skb)) {
dev_kfree_skb(skb);
return;
}
@@ -333,9 +398,8 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
- if (net_ratelimit())
- printk(KERN_WARNING "%s: bad gso type %u.\n",
- dev->name, hdr->hdr.gso_type);
+ net_warn_ratelimited("%s: bad gso type %u.\n",
+ dev->name, hdr->hdr.gso_type);
goto frame_err;
}
@@ -344,9 +408,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
- if (net_ratelimit())
- printk(KERN_WARNING "%s: zero gso size.\n",
- dev->name);
+ net_warn_ratelimited("%s: zero gso size.\n", dev->name);
goto frame_err;
}
@@ -363,8 +425,9 @@ frame_err:
dev_kfree_skb(skb);
}
-static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
int err;
@@ -376,77 +439,77 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
+ skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
- err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+ err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
return err;
}
-static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
struct page *first, *list = NULL;
char *p;
int i, err, offset;
- /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
+ /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
- first = get_a_page(vi, gfp);
+ first = get_a_page(rq, gfp);
if (!first) {
if (list)
- give_pages(vi, list);
+ give_pages(rq, list);
return -ENOMEM;
}
- sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
+ sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
/* chain new page in list head to match sg */
first->private = (unsigned long)list;
list = first;
}
- first = get_a_page(vi, gfp);
+ first = get_a_page(rq, gfp);
if (!first) {
- give_pages(vi, list);
+ give_pages(rq, list);
return -ENOMEM;
}
p = page_address(first);
- /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
- /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
- sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
+ /* rq->sg[0], rq->sg[1] share the same page */
+ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
+ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
- /* vi->rx_sg[1] for data packet, from offset */
+ /* rq->sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
- sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
+ sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
first, gfp);
if (err < 0)
- give_pages(vi, first);
+ give_pages(rq, first);
return err;
}
-static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
struct page *page;
int err;
- page = get_a_page(vi, gfp);
+ page = get_a_page(rq, gfp);
if (!page)
return -ENOMEM;
- sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
+ sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
- err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+ err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
if (err < 0)
- give_pages(vi, page);
+ give_pages(rq, page);
return err;
}
@@ -458,97 +521,108 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
* before we're receiving packets, or from refill_work which is
* careful to disable receiving (using napi_disable).
*/
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
int err;
bool oom;
do {
if (vi->mergeable_rx_bufs)
- err = add_recvbuf_mergeable(vi, gfp);
+ err = add_recvbuf_mergeable(rq, gfp);
else if (vi->big_packets)
- err = add_recvbuf_big(vi, gfp);
+ err = add_recvbuf_big(rq, gfp);
else
- err = add_recvbuf_small(vi, gfp);
+ err = add_recvbuf_small(rq, gfp);
oom = err == -ENOMEM;
- if (err < 0)
+ if (err)
break;
- ++vi->num;
- } while (err > 0);
- if (unlikely(vi->num > vi->max))
- vi->max = vi->num;
- virtqueue_kick(vi->rvq);
+ ++rq->num;
+ } while (rq->vq->num_free);
+ if (unlikely(rq->num > rq->max))
+ rq->max = rq->num;
+ virtqueue_kick(rq->vq);
return !oom;
}
static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
+ struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
+
/* Schedule NAPI, Suppress further interrupts if successful. */
- if (napi_schedule_prep(&vi->napi)) {
+ if (napi_schedule_prep(&rq->napi)) {
virtqueue_disable_cb(rvq);
- __napi_schedule(&vi->napi);
+ __napi_schedule(&rq->napi);
}
}
-static void virtnet_napi_enable(struct virtnet_info *vi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
- napi_enable(&vi->napi);
+ napi_enable(&rq->napi);
/* If all buffers were filled by other side before we napi_enabled, we
* won't get another interrupt, so process any outstanding packets
* now. virtnet_poll wants to re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- virtqueue_disable_cb(vi->rvq);
+ if (napi_schedule_prep(&rq->napi)) {
+ virtqueue_disable_cb(rq->vq);
local_bh_disable();
- __napi_schedule(&vi->napi);
+ __napi_schedule(&rq->napi);
local_bh_enable();
}
}
static void refill_work(struct work_struct *work)
{
- struct virtnet_info *vi;
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, refill.work);
bool still_empty;
+ int i;
- vi = container_of(work, struct virtnet_info, refill.work);
- napi_disable(&vi->napi);
- still_empty = !try_fill_recv(vi, GFP_KERNEL);
- virtnet_napi_enable(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct receive_queue *rq = &vi->rq[i];
- /* In theory, this can happen: if we don't get any buffers in
- * we will *never* try to fill again. */
- if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ napi_disable(&rq->napi);
+ still_empty = !try_fill_recv(rq, GFP_KERNEL);
+ virtnet_napi_enable(rq);
+
+ /* In theory, this can happen: if we don't get any buffers in
+ * we will *never* try to fill again.
+ */
+ if (still_empty)
+ schedule_delayed_work(&vi->refill, HZ/2);
+ }
}
static int virtnet_poll(struct napi_struct *napi, int budget)
{
- struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+ struct receive_queue *rq =
+ container_of(napi, struct receive_queue, napi);
+ struct virtnet_info *vi = rq->vq->vdev->priv;
void *buf;
unsigned int len, received = 0;
again:
while (received < budget &&
- (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
- receive_buf(vi->dev, buf, len);
- --vi->num;
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ receive_buf(rq, buf, len);
+ --rq->num;
received++;
}
- if (vi->num < vi->max / 2) {
- if (!try_fill_recv(vi, GFP_ATOMIC))
+ if (rq->num < rq->max / 2) {
+ if (!try_fill_recv(rq, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
- if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
+ if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
napi_schedule_prep(napi)) {
- virtqueue_disable_cb(vi->rvq);
+ virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
goto again;
}
@@ -557,13 +631,29 @@ again:
return received;
}
-static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
+static int virtnet_open(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ /* Make sure we have some buffers: if oom use wq. */
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+ virtnet_napi_enable(&vi->rq[i]);
+ }
+
+ return 0;
+}
+
+static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
- unsigned int len, tot_sgs = 0;
+ unsigned int len;
+ struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
+ while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
u64_stats_update_begin(&stats->tx_syncp);
@@ -571,16 +661,16 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
stats->tx_packets++;
u64_stats_update_end(&stats->tx_syncp);
- tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
- return tot_sgs;
}
-static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
+static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ unsigned num_sg;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
@@ -615,44 +705,39 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
+ sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
- return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
0, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int capacity;
+ int qnum = skb_get_queue_mapping(skb);
+ struct send_queue *sq = &vi->sq[qnum];
+ int err;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(vi);
+ free_old_xmit_skbs(sq);
/* Try to transmit */
- capacity = xmit_skb(vi, skb);
-
- /* This can happen with OOM and indirect buffers. */
- if (unlikely(capacity < 0)) {
- if (likely(capacity == -ENOMEM)) {
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "TX queue failure: out of memory\n");
- } else {
- dev->stats.tx_fifo_errors++;
- if (net_ratelimit())
- dev_warn(&dev->dev,
- "Unexpected TX queue failure: %d\n",
- capacity);
- }
+ err = xmit_skb(sq, skb);
+
+ /* This should not happen! */
+ if (unlikely(err)) {
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
- virtqueue_kick(vi->svq);
+ virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -660,14 +745,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Apparently nice girls don't return TX_BUSY; stop the queue
* before it gets out of hand. Naturally, this wastes entries. */
- if (capacity < 2+MAX_SKB_FRAGS) {
- netif_stop_queue(dev);
- if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
+ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ netif_stop_subqueue(dev, qnum);
+ if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- capacity += free_old_xmit_skbs(vi);
- if (capacity >= 2+MAX_SKB_FRAGS) {
- netif_start_queue(dev);
- virtqueue_disable_cb(vi->svq);
+ free_old_xmit_skbs(sq);
+ if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ netif_start_subqueue(dev, qnum);
+ virtqueue_disable_cb(sq->vq);
}
}
}
@@ -734,23 +819,13 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
static void virtnet_netpoll(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ int i;
- napi_schedule(&vi->napi);
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ napi_schedule(&vi->rq[i].napi);
}
#endif
-static int virtnet_open(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
-
- /* Make sure we have some buffers: if oom use wq. */
- if (!try_fill_recv(vi, GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
- virtnet_napi_enable(vi);
- return 0;
-}
-
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -806,13 +881,39 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
rtnl_unlock();
}
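+/* Tell the device, over the control virtqueue, how many of the supported
+ * queue pairs to actually use; curr_queue_pairs tracks the active count.
+ * Invoked from the ethtool .set_channels handler below.
+ */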
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+ struct scatterlist sg;
+ struct virtio_net_ctrl_mq s;
+ struct net_device *dev = vi->dev;
+
+ if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
+ return 0;
+
+ s.virtqueue_pairs = queue_pairs;
+ sg_init_one(&sg, &s, sizeof(s));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+ VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)) {
+ dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
+ queue_pairs);
+ return -EINVAL;
+ } else
+ vi->curr_queue_pairs = queue_pairs;
+
+ return 0;
+}
+
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ int i;
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- napi_disable(&vi->napi);
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ napi_disable(&vi->rq[i].napi);
return 0;
}
@@ -919,16 +1020,86 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
return 0;
}
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
+{
+ int i;
+ int cpu;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ vi->affinity_hint_set = false;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu) {
+ *per_cpu_ptr(vi->vq_index, cpu) = -1;
+ } else {
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+ }
+ }
+}
+
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+ int i;
+ int cpu;
+
+ /* In multiqueue mode, when the number of cpus is equal to the number of
+ * queue pairs, we let each queue pair be private to one cpu by
+ * setting the affinity hint to eliminate the contention.
+ */
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ virtnet_clean_affinity(vi, -1);
+ return;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vi->rq[i].vq, cpu);
+ virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
+ }
+
+ vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
static void virtnet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
ring->rx_pending = ring->rx_max_pending;
ring->tx_pending = ring->tx_max_pending;
-
}
@@ -944,10 +1115,55 @@ static void virtnet_get_drvinfo(struct net_device *dev,
}
+/* TODO: Eliminate OOO packets during switching */
+static int virtnet_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u16 queue_pairs = channels->combined_count;
+ int err;
+
+ /* We don't support separate rx/tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count)
+ return -EINVAL;
+
+ if (queue_pairs > vi->max_queue_pairs)
+ return -EINVAL;
+
+ get_online_cpus();
+ err = virtnet_set_queues(vi, queue_pairs);
+ if (!err) {
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+
+ virtnet_set_affinity(vi);
+ }
+ put_online_cpus();
+
+ return err;
+}
+
+static void virtnet_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ channels->combined_count = vi->curr_queue_pairs;
+ channels->max_combined = vi->max_queue_pairs;
+ channels->max_other = 0;
+ channels->rx_count = 0;
+ channels->tx_count = 0;
+ channels->other_count = 0;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_channels = virtnet_set_channels,
+ .get_channels = virtnet_get_channels,
};
#define MIN_MTU 68
@@ -961,6 +1177,28 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+/* To avoid contending a lock held by a vcpu that may exit to host, select the
+ * txq based on the processor id.
+ */
+static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
+
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
+
+ return txq;
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -972,6 +1210,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
+ .ndo_select_queue = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
@@ -1007,10 +1246,10 @@ static void virtnet_config_changed_work(struct work_struct *work)
if (vi->status & VIRTIO_NET_S_LINK_UP) {
netif_carrier_on(vi->dev);
- netif_wake_queue(vi->dev);
+ netif_tx_wake_all_queues(vi->dev);
} else {
netif_carrier_off(vi->dev);
- netif_stop_queue(vi->dev);
+ netif_tx_stop_all_queues(vi->dev);
}
done:
mutex_unlock(&vi->config_lock);
@@ -1023,41 +1262,206 @@ static void virtnet_config_changed(struct virtio_device *vdev)
schedule_work(&vi->config_work);
}
-static int init_vqs(struct virtnet_info *vi)
+static void virtnet_free_queues(struct virtnet_info *vi)
{
- struct virtqueue *vqs[3];
- vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
- const char *names[] = { "input", "output", "control" };
- int nvqs, err;
+ kfree(vi->rq);
+ kfree(vi->sq);
+}
- /* We expect two virtqueues, receive then send,
- * and optionally control. */
- nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+static void free_receive_bufs(struct virtnet_info *vi)
+{
+ int i;
- err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
- if (err)
- return err;
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ while (vi->rq[i].pages)
+ __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
+ }
+}
- vi->rvq = vqs[0];
- vi->svq = vqs[1];
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+ void *buf;
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->sq[i].vq;
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ dev_kfree_skb(buf);
+ }
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ struct virtqueue *vq = vi->rq[i].vq;
+
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ dev_kfree_skb(buf);
+ --vi->rq[i].num;
+ }
+ BUG_ON(vi->rq[i].num != 0);
+ }
+}
+
+static void virtnet_del_vqs(struct virtnet_info *vi)
+{
+ struct virtio_device *vdev = vi->vdev;
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- vi->cvq = vqs[2];
+ virtnet_clean_affinity(vi, -1);
+ vdev->config->del_vqs(vdev);
+
+ virtnet_free_queues(vi);
+}
+
+static int virtnet_find_vqs(struct virtnet_info *vi)
+{
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+ int ret = -ENOMEM;
+ int i, total_vqs;
+ const char **names;
+
+ /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
+ * up to N-1 further RX/TX queue pairs used in multiqueue mode, followed by
+ * an optional control vq.
+ */
+ total_vqs = vi->max_queue_pairs * 2 +
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
+
+ /* Allocate space for find_vqs parameters */
+ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_vq;
+ callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
+ if (!callbacks)
+ goto err_callback;
+ names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
+ if (!names)
+ goto err_names;
+
+ /* Parameters for control virtqueue, if any */
+ if (vi->has_cvq) {
+ callbacks[total_vqs - 1] = NULL;
+ names[total_vqs - 1] = "control";
+ }
+
+ /* Allocate/initialize parameters for send/receive virtqueues */
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ callbacks[rxq2vq(i)] = skb_recv_done;
+ callbacks[txq2vq(i)] = skb_xmit_done;
+ sprintf(vi->rq[i].name, "input.%d", i);
+ sprintf(vi->sq[i].name, "output.%d", i);
+ names[rxq2vq(i)] = vi->rq[i].name;
+ names[txq2vq(i)] = vi->sq[i].name;
+ }
+
+ ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
+ names);
+ if (ret)
+ goto err_find;
+
+ if (vi->has_cvq) {
+ vi->cvq = vqs[total_vqs - 1];
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
}
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->rq[i].vq = vqs[rxq2vq(i)];
+ vi->sq[i].vq = vqs[txq2vq(i)];
+ }
+
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+
+ return 0;
+
+err_find:
+ kfree(names);
+err_names:
+ kfree(callbacks);
+err_callback:
+ kfree(vqs);
+err_vq:
+ return ret;
+}
+
+static int virtnet_alloc_queues(struct virtnet_info *vi)
+{
+ int i;
+
+ vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
+ if (!vi->sq)
+ goto err_sq;
+ vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
+ if (!vi->rq)
+ goto err_rq;
+
+ INIT_DELAYED_WORK(&vi->refill, refill_work);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ vi->rq[i].pages = NULL;
+ netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ napi_weight);
+
+ sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
+ sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
+ }
+
return 0;
+
+err_rq:
+ kfree(vi->sq);
+err_sq:
+ return -ENOMEM;
+}
+
+static int init_vqs(struct virtnet_info *vi)
+{
+ int ret;
+
+ /* Allocate send & receive queues */
+ ret = virtnet_alloc_queues(vi);
+ if (ret)
+ goto err;
+
+ ret = virtnet_find_vqs(vi);
+ if (ret)
+ goto err_free;
+
+ get_online_cpus();
+ virtnet_set_affinity(vi);
+ put_online_cpus();
+
+ return 0;
+
+err_free:
+ virtnet_free_queues(vi);
+err:
+ return ret;
}
static int virtnet_probe(struct virtio_device *vdev)
{
- int err;
+ int i, err;
struct net_device *dev;
struct virtnet_info *vi;
+ u16 max_queue_pairs;
+
+ /* Find if host supports multiqueue virtio_net device */
+ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
+ offsetof(struct virtio_net_config,
+ max_virtqueue_pairs), &max_queue_pairs);
+
+ /* We need at least 2 queues */
+ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+ max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
+ !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+ max_queue_pairs = 1;
/* Allocate ourselves a network device with room for our info */
- dev = alloc_etherdev(sizeof(struct virtnet_info));
+ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
if (!dev)
return -ENOMEM;
@@ -1103,22 +1507,21 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Set up our device-specific information */
vi = netdev_priv(dev);
- netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
- vi->pages = NULL;
vi->stats = alloc_percpu(struct virtnet_stats);
err = -ENOMEM;
if (vi->stats == NULL)
goto free;
- INIT_DELAYED_WORK(&vi->refill, refill_work);
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
- sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
- sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1129,9 +1532,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+ vi->has_cvq = true;
+
+ /* Use single tx/rx queue pair as default */
+ vi->curr_queue_pairs = 1;
+ vi->max_queue_pairs = max_queue_pairs;
+
+ /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
+
+ netif_set_real_num_tx_queues(dev, 1);
+ netif_set_real_num_rx_queues(dev, 1);
err = register_netdev(dev);
if (err) {
@@ -1140,12 +1554,22 @@ static int virtnet_probe(struct virtio_device *vdev)
}
/* Last of all, set up some receive buffers. */
- try_fill_recv(vi, GFP_KERNEL);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ try_fill_recv(&vi->rq[i], GFP_KERNEL);
+
+ /* If we didn't even get one input buffer, we're useless. */
+ if (vi->rq[i].num == 0) {
+ free_unused_bufs(vi);
+ err = -ENOMEM;
+ goto free_recv_bufs;
+ }
+ }
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->num == 0) {
- err = -ENOMEM;
- goto unregister;
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
}
/* Assume link up if device can't report link status,
@@ -1158,13 +1582,19 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_carrier_on(dev);
}
- pr_debug("virtnet: registered device %s\n", dev->name);
+ pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
+ dev->name, max_queue_pairs);
+
return 0;
-unregister:
+free_recv_bufs:
+ free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
- vdev->config->del_vqs(vdev);
+ cancel_delayed_work_sync(&vi->refill);
+ virtnet_del_vqs(vi);
+free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
@@ -1172,28 +1602,6 @@ free:
return err;
}
-static void free_unused_bufs(struct virtnet_info *vi)
-{
- void *buf;
- while (1) {
- buf = virtqueue_detach_unused_buf(vi->svq);
- if (!buf)
- break;
- dev_kfree_skb(buf);
- }
- while (1) {
- buf = virtqueue_detach_unused_buf(vi->rvq);
- if (!buf)
- break;
- if (vi->mergeable_rx_bufs || vi->big_packets)
- give_pages(vi, buf);
- else
- dev_kfree_skb(buf);
- --vi->num;
- }
- BUG_ON(vi->num != 0);
-}
-
static void remove_vq_common(struct virtnet_info *vi)
{
vi->vdev->config->reset(vi->vdev);
@@ -1201,16 +1609,17 @@ static void remove_vq_common(struct virtnet_info *vi)
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
- vi->vdev->config->del_vqs(vi->vdev);
+ free_receive_bufs(vi);
- while (vi->pages)
- __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+ virtnet_del_vqs(vi);
}
-static void __devexit virtnet_remove(struct virtio_device *vdev)
+static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1222,6 +1631,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
@@ -1230,6 +1640,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
static int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ int i;
/* Prevent config work handler from accessing the device */
mutex_lock(&vi->config_lock);
@@ -1240,7 +1651,10 @@ static int virtnet_freeze(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev))
- napi_disable(&vi->napi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ napi_disable(&vi->rq[i].napi);
+ netif_napi_del(&vi->rq[i].napi);
+ }
remove_vq_common(vi);
@@ -1252,24 +1666,28 @@ static int virtnet_freeze(struct virtio_device *vdev)
static int virtnet_restore(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err;
+ int err, i;
err = init_vqs(vi);
if (err)
return err;
if (netif_running(vi->dev))
- virtnet_napi_enable(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_enable(&vi->rq[i]);
netif_device_attach(vi->dev);
- if (!try_fill_recv(vi, GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
mutex_lock(&vi->config_lock);
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
+
return 0;
}
#endif
@@ -1287,7 +1705,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
- VIRTIO_NET_F_GUEST_ANNOUNCE,
+ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
};
static struct virtio_driver virtio_net_driver = {
@@ -1297,7 +1715,7 @@ static struct virtio_driver virtio_net_driver = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtnet_probe,
- .remove = __devexit_p(virtnet_remove),
+ .remove = virtnet_remove,
.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
.freeze = virtnet_freeze,
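
A note on the virtqueue layout used by virtnet_find_vqs() above: the index
arithmetic depends on rxq2vq()/txq2vq() helpers defined earlier in the full
patch but not visible in this excerpt. The sketch below assumes the
conventional mapping (receive queue i at index 2*i, send queue i at 2*i+1,
control vq last); the helper names and values are illustrative, not driver
code.

#include <stdio.h>

/* Assumed multiqueue virtqueue index layout; illustration only. */
static int rxq2vq(int rxq) { return rxq * 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }

int main(void)
{
	int max_queue_pairs = 4, has_cvq = 1;
	int total_vqs = max_queue_pairs * 2 + has_cvq;
	int i;

	for (i = 0; i < max_queue_pairs; i++)
		printf("pair %d: rx vq %d, tx vq %d\n", i, rxq2vq(i), txq2vq(i));
	if (has_cvq)
		printf("control vq at index %d\n", total_vqs - 1);
	return 0;
}
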
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0ae1bcc6da73..12c6440d1649 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
adapter->netdev->name, adapter->link_speed);
- if (!netif_carrier_ok(adapter->netdev))
- netif_carrier_on(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
- if (netif_carrier_ok(adapter->netdev))
- netif_carrier_off(adapter->netdev);
+ netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1094,10 +1092,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
- return vmxnet3_tq_xmit(skb,
- &adapter->tx_queue[skb->queue_mapping],
- adapter, netdev);
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
}
@@ -1243,8 +1241,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
- rbi->skb->data, rbi->len,
- PCI_DMA_FROMDEVICE);
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
@@ -1331,14 +1329,14 @@ rcd_done:
/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
- rxprod_reg[ring_idx] + rq->qid * 8,
- ring->next2fill);
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
vmxnet3_getRxComp(rcd,
- &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+ &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
return num_rxd;
@@ -1922,7 +1920,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
free_irq(adapter->pdev->irq, adapter->netdev);
break;
default:
- BUG_ON(true);
+ BUG();
}
}
@@ -2885,7 +2883,7 @@ vmxnet3_reset_work(struct work_struct *data)
}
-static int __devinit
+static int
vmxnet3_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -2949,11 +2947,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_DriverShared),
- &adapter->shared_pa);
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa);
if (!adapter->shared) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ pci_name(pdev));
err = -ENOMEM;
goto err_alloc_shared;
}
@@ -2964,16 +2962,16 @@ vmxnet3_probe_device(struct pci_dev *pdev,
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
- &adapter->queue_desc_pa);
+ &adapter->queue_desc_pa);
if (!adapter->tqd_start) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ pci_name(pdev));
err = -ENOMEM;
goto err_alloc_queue_desc;
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
- adapter->num_tx_queues);
+ adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
@@ -3019,7 +3017,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->dev_number = atomic_read(&devices_found);
- adapter->share_intr = irq_share_mode;
+ adapter->share_intr = irq_share_mode;
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
adapter->num_tx_queues != adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
@@ -3061,11 +3059,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+ netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
printk(KERN_ERR "Failed to register adapter %s\n",
- pci_name(pdev));
+ pci_name(pdev));
goto err_register;
}
@@ -3096,7 +3095,7 @@ err_alloc_shared:
}
-static void __devexit
+static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3302,7 +3301,7 @@ static struct pci_driver vmxnet3_driver = {
.name = vmxnet3_driver_name,
.id_table = vmxnet3_pciid_table,
.probe = vmxnet3_probe_device,
- .remove = __devexit_p(vmxnet3_remove_device),
+ .remove = vmxnet3_remove_device,
#ifdef CONFIG_PM
.driver.pm = &vmxnet3_pm_ops,
#endif
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8b5c61917076..656230e0d18c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -29,6 +29,8 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
@@ -110,18 +112,23 @@ struct vxlan_dev {
__u16 port_max;
__u8 tos; /* TOS override */
__u8 ttl;
- bool learn;
+ u32 flags; /* VXLAN_F_* below */
unsigned long age_interval;
struct timer_list age_timer;
spinlock_t hash_lock;
unsigned int addrcnt;
unsigned int addrmax;
- unsigned int addrexceeded;
struct hlist_head fdb_head[FDB_HASH_SIZE];
};
+#define VXLAN_F_LEARN 0x01
+#define VXLAN_F_PROXY 0x02
+#define VXLAN_F_RSC 0x04
+#define VXLAN_F_L2MISS 0x08
+#define VXLAN_F_L3MISS 0x10
+
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -155,6 +162,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
+ bool send_ip, send_eth;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
@@ -162,16 +170,24 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm = nlmsg_data(nlh);
memset(ndm, 0, sizeof(*ndm));
- ndm->ndm_family = AF_BRIDGE;
+
+ send_eth = send_ip = true;
+
+ if (type == RTM_GETNEIGH) {
+ ndm->ndm_family = AF_INET;
+ send_ip = fdb->remote_ip != 0;
+ send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ } else
+ ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = NDA_DST;
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+ if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
- if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+ if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -223,6 +239,29 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
+static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb f;
+
+ memset(&f, 0, sizeof f);
+ f.state = NUD_STALE;
+ f.remote_ip = ipa; /* goes to NDA_DST */
+
+ vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+}
+
+static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
+{
+ struct vxlan_fdb f;
+
+ memset(&f, 0, sizeof f);
+ f.state = NUD_STALE;
+ memcpy(f.eth_addr, eth_addr, ETH_ALEN);
+
+ vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+}
+
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
@@ -466,7 +505,8 @@ static int vxlan_join_group(struct net_device *dev)
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
int err;
@@ -493,7 +533,8 @@ static int vxlan_leave_group(struct net_device *dev)
int err = 0;
struct sock *sk = vn->sock->sk;
struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ .imr_ifindex = vxlan->link,
};
/* Only leave group when last vxlan is done. */
@@ -552,6 +593,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
+ skb_reset_mac_header(skb);
+
/* Re-examine inner Ethernet packet */
oip = ip_hdr(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -561,12 +604,22 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vxlan->dev->dev_addr) == 0)
goto drop;
- if (vxlan->learn)
+ if (vxlan->flags & VXLAN_F_LEARN)
vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
- skb->ip_summed = CHECKSUM_NONE;
+
+ /* If the NIC driver gave us an encapsulated packet with
+ * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
+ * leave the CHECKSUM_UNNECESSARY, the device checksummed it
+ * for us. Otherwise force the upper layers to verify it.
+ */
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
+ !(vxlan->dev->features & NETIF_F_RXCSUM))
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->encapsulation = 0;
err = IP_ECN_decapsulate(oip, skb);
if (unlikely(err)) {
@@ -600,6 +653,117 @@ drop:
return 0;
}
+static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct arphdr *parp;
+ u8 *arpptr, *sha;
+ __be32 sip, tip;
+ struct neighbour *n;
+
+ if (dev->flags & IFF_NOARP)
+ goto out;
+
+ if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
+ dev->stats.tx_dropped++;
+ goto out;
+ }
+ parp = arp_hdr(skb);
+
+ if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
+ parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
+ parp->ar_pro != htons(ETH_P_IP) ||
+ parp->ar_op != htons(ARPOP_REQUEST) ||
+ parp->ar_hln != dev->addr_len ||
+ parp->ar_pln != 4)
+ goto out;
+ arpptr = (u8 *)parp + sizeof(struct arphdr);
+ sha = arpptr;
+ arpptr += dev->addr_len; /* sha */
+ memcpy(&sip, arpptr, sizeof(sip));
+ arpptr += sizeof(sip);
+ arpptr += dev->addr_len; /* tha */
+ memcpy(&tip, arpptr, sizeof(tip));
+
+ if (ipv4_is_loopback(tip) ||
+ ipv4_is_multicast(tip))
+ goto out;
+
+ n = neigh_lookup(&arp_tbl, &tip, dev);
+
+ if (n) {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ struct sk_buff *reply;
+
+ if (!(n->nud_state & NUD_CONNECTED)) {
+ neigh_release(n);
+ goto out;
+ }
+
+ f = vxlan_find_mac(vxlan, n->ha);
+ if (f && f->remote_ip == 0) {
+ /* bridge-local neighbor */
+ neigh_release(n);
+ goto out;
+ }
+
+ reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ n->ha, sha);
+
+ neigh_release(n);
+
+ skb_reset_mac_header(reply);
+ __skb_pull(reply, skb_network_offset(reply));
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+ reply->pkt_type = PACKET_HOST;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+ } else if (vxlan->flags & VXLAN_F_L3MISS)
+ vxlan_ip_miss(dev, tip);
+out:
+ consume_skb(skb);
+ return NETDEV_TX_OK;
+}
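
The arp_reduce() function above steps through the ARP payload by hand
(sender hardware address, sender IP, target hardware address, target IP)
rather than through a struct. A minimal, self-contained sketch of the
offsets for an Ethernet/IPv4 ARP request (ar_hln = 6, ar_pln = 4); the
numbers are plain arithmetic, no kernel headers assumed.

#include <stdio.h>
#include <stddef.h>

/* Offsets walked by the hand-rolled ARP parse above, for Ethernet/IPv4. */
int main(void)
{
	size_t arphdr_len = 8;           /* fixed part of struct arphdr */
	size_t hln = 6, pln = 4;         /* hardware / protocol address sizes */
	size_t sha = arphdr_len;         /* sender MAC  */
	size_t sip = sha + hln;          /* sender IPv4 */
	size_t tha = sip + pln;          /* target MAC  */
	size_t tip = tha + hln;          /* target IPv4 */

	printf("sha@%zu sip@%zu tha@%zu tip@%zu, total %zu bytes\n",
	       sha, sip, tha, tip, tip + pln);   /* 8 14 18 24, 28 total */
	return 0;
}
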
+
+static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct neighbour *n;
+ struct iphdr *pip;
+
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return false;
+
+ n = NULL;
+ switch (ntohs(eth_hdr(skb)->h_proto)) {
+ case ETH_P_IP:
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return false;
+ pip = ip_hdr(skb);
+ n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
+ break;
+ default:
+ return false;
+ }
+
+ if (n) {
+ bool diff;
+
+ diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
+ if (diff) {
+ memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ dev->addr_len);
+ memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
+ }
+ neigh_release(n);
+ return diff;
+ } else if (vxlan->flags & VXLAN_F_L3MISS)
+ vxlan_ip_miss(dev, pip->daddr);
+ return false;
+}
+
/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
@@ -622,22 +786,6 @@ static inline u8 vxlan_ecn_encap(u8 tos,
return INET_ECN_encapsulate(tos, inner);
}
-static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
-{
- const struct ethhdr *eth = (struct ethhdr *) skb->data;
- const struct vxlan_fdb *f;
-
- if (is_multicast_ether_addr(eth->h_dest))
- return vxlan->gaddr;
-
- f = vxlan_find_mac(vxlan, eth->h_dest);
- if (f)
- return f->remote_ip;
- else
- return vxlan->gaddr;
-
-}
-
static void vxlan_sock_free(struct sk_buff *skb)
{
sock_put(skb->sk);
@@ -684,6 +832,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct rtable *rt;
const struct iphdr *old_iph;
+ struct ethhdr *eth;
struct iphdr *iph;
struct vxlanhdr *vxh;
struct udphdr *uh;
@@ -694,10 +843,55 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
__be16 df = 0;
__u8 tos, ttl;
int err;
+ bool did_rsc = false;
+ const struct vxlan_fdb *f;
+
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
- dst = vxlan_find_dst(vxlan, skb);
- if (!dst)
+ if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb);
+ else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
+ did_rsc = route_shortcircuit(dev, skb);
+
+ f = vxlan_find_mac(vxlan, eth->h_dest);
+ if (f == NULL) {
+ did_rsc = false;
+ dst = vxlan->gaddr;
+ if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
+ !is_multicast_ether_addr(eth->h_dest))
+ vxlan_fdb_miss(vxlan, eth->h_dest);
+ } else
+ dst = f->remote_ip;
+
+ if (!dst) {
+ if (did_rsc) {
+ __skb_pull(skb, skb_network_offset(skb));
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->pkt_type = PACKET_HOST;
+
+ /* short-circuited back to local bridge */
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ struct vxlan_stats *stats =
+ this_cpu_ptr(vxlan->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+ return NETDEV_TX_OK;
+ }
goto drop;
+ }
+
+ if (!skb->encapsulation) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
/* Need space for new headers (invalidates iph ptr) */
if (skb_cow_head(skb, VXLAN_HEADROOM))
@@ -769,8 +963,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_set_owner(dev, skb);
- /* See __IPTUNNEL_XMIT */
- skb->ip_summed = CHECKSUM_NONE;
+ /* See iptunnel_xmit() */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(iph, &rt->dst, NULL);
err = ip_local_out(skb);
@@ -991,7 +1186,12 @@ static void vxlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_RXCSUM;
+
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
spin_lock_init(&vxlan->hash_lock);
@@ -1020,6 +1220,10 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
+ [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
+ [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
+ [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1111,14 +1315,29 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+ if (data[IFLA_VXLAN_TTL])
+ vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
- vxlan->learn = true;
+ vxlan->flags |= VXLAN_F_LEARN;
if (data[IFLA_VXLAN_AGEING])
vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
else
vxlan->age_interval = FDB_AGE_DEFAULT;
+ if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
+ vxlan->flags |= VXLAN_F_PROXY;
+
+ if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
+ vxlan->flags |= VXLAN_F_RSC;
+
+ if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
+ vxlan->flags |= VXLAN_F_L2MISS;
+
+ if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
+ vxlan->flags |= VXLAN_F_L3MISS;
+
if (data[IFLA_VXLAN_LIMIT])
vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
@@ -1155,6 +1374,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -1183,7 +1406,15 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
- nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
+ nla_put_u8(skb, IFLA_VXLAN_LEARNING,
+ !!(vxlan->flags & VXLAN_F_LEARN)) ||
+ nla_put_u8(skb, IFLA_VXLAN_PROXY,
+ !!(vxlan->flags & VXLAN_F_PROXY)) ||
+ nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
+ nla_put_u8(skb, IFLA_VXLAN_L2MISS,
+ !!(vxlan->flags & VXLAN_F_L2MISS)) ||
+ nla_put_u8(skb, IFLA_VXLAN_L3MISS,
+ !!(vxlan->flags & VXLAN_F_L3MISS)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
goto nla_put_failure;
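
The vxlan changes above replace the single 'learn' flag with a flags word
and add proxy, route-short-circuit and miss-notification bits. A small
stand-alone sketch of the bit-flag pattern follows; the constants mirror the
VXLAN_F_* values defined in the patch, everything else is illustrative.

#include <stdio.h>

#define VXLAN_F_LEARN  0x01
#define VXLAN_F_PROXY  0x02
#define VXLAN_F_RSC    0x04
#define VXLAN_F_L2MISS 0x08
#define VXLAN_F_L3MISS 0x10

int main(void)
{
	unsigned int flags = 0;

	/* vxlan_newlink() sets bits from the IFLA_VXLAN_* u8 attributes */
	flags |= VXLAN_F_LEARN;
	flags |= VXLAN_F_PROXY;
	flags |= VXLAN_F_L3MISS;

	/* the transmit path branches on individual bits */
	printf("proxy ARP: %s\n", (flags & VXLAN_F_PROXY) ? "yes" : "no");
	printf("route short-circuit: %s\n", (flags & VXLAN_F_RSC) ? "yes" : "no");
	/* vxlan_fill_info() reports each bit back as !!(flags & bit) */
	printf("IFLA_VXLAN_L3MISS would be %d\n", !!(flags & VXLAN_F_L3MISS));
	return 0;
}
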
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index eac709bed7ae..df70248e2fda 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -52,9 +52,9 @@ endif
quiet_cmd_build_wanxlfw = BLD FW $@
cmd_build_wanxlfw = \
- $(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
+ $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
$(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
- hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
+ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
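
For context on the Makefile change above: the rule assembles wanxlfw.S,
converts the binary with hexdump and sed, and emits a C array that the
driver includes. The generated wanxlfw.inc has roughly the shape below; the
byte values are placeholders, only the form of the output (now a const
array) is the point.

/* wanxlfw.inc - shape of the file generated by the rule above.
 * u8 is the kernel's unsigned 8-bit type; byte values are placeholders. */
static const u8 firmware[]={
0x60,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x4E,0x71,0x4E,0x75
};
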
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index ef36cafd44b7..851dc7b7e8b0 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -707,8 +707,7 @@ static void dscc4_free1(struct pci_dev *pdev)
kfree(ppriv);
}
-static int __devinit dscc4_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct dscc4_pci_priv *priv;
struct dscc4_dev_priv *dpriv;
@@ -1968,7 +1967,7 @@ err_out:
return -ENOMEM;
}
-static void __devexit dscc4_remove_one(struct pci_dev *pdev)
+static void dscc4_remove_one(struct pci_dev *pdev)
{
struct dscc4_pci_priv *ppriv;
struct dscc4_dev_priv *root;
@@ -2053,7 +2052,7 @@ static struct pci_driver dscc4_driver = {
.name = DRV_NAME,
.id_table = dscc4_pci_tbl,
.probe = dscc4_init_one,
- .remove = __devexit_p(dscc4_remove_one),
+ .remove = dscc4_remove_one,
};
module_pci_driver(dscc4_driver);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index b6271325f803..56941d6547eb 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2361,7 +2361,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
* via a printk and leave the corresponding interface and all that follow
* disabled.
*/
-static char *type_strings[] __devinitdata = {
+static char *type_strings[] = {
"no hardware", /* Should never be seen */
"FarSync T2P",
"FarSync T4P",
@@ -2371,7 +2371,7 @@ static char *type_strings[] __devinitdata = {
"FarSync TE1"
};
-static void __devinit
+static void
fst_init_card(struct fst_card_info *card)
{
int i;
@@ -2415,7 +2415,7 @@ static const struct net_device_ops fst_ops = {
* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
*/
-static int __devinit
+static int
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int no_of_cards_added = 0;
@@ -2615,7 +2615,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* Cleanup and close down a card
*/
-static void __devexit
+static void
fst_remove_one(struct pci_dev *pdev)
{
struct fst_card_info *card;
@@ -2652,7 +2652,7 @@ static struct pci_driver fst_driver = {
.name = FST_NAME,
.id_table = fst_pci_dev_id,
.probe = fst_add_one,
- .remove = __devexit_p(fst_remove_one),
+ .remove = fst_remove_one,
.suspend = NULL,
.resume = NULL,
};
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index cf4903355a34..62f01b74cbd6 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -676,8 +676,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef NEED_DETECT_RAM
-static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
- u32 ramsize)
+static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
u32 i = ramsize &= ~3;
@@ -705,7 +704,7 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
#endif /* NEED_DETECT_RAM */
-static void __devinit sca_init(card_t *card, int wait_states)
+static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
sca_out(wait_states, WCRM, card);
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index e2779faa6c4f..6269a09c7369 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -605,8 +605,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
}
-static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
- u32 ramsize)
+static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
u32 i = ramsize &= ~3;
@@ -625,7 +624,7 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
}
-static void __devinit sca_init(card_t *card, int wait_states)
+static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
sca_out(wait_states, WCRM, card);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 760776b3d66c..fc9d11d74d60 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1326,7 +1326,7 @@ static const struct net_device_ops hss_hdlc_ops = {
.ndo_do_ioctl = hss_hdlc_ioctl,
};
-static int __devinit hss_init_one(struct platform_device *pdev)
+static int hss_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
@@ -1377,7 +1377,7 @@ err_free:
return err;
}
-static int __devexit hss_remove_one(struct platform_device *pdev)
+static int hss_remove_one(struct platform_device *pdev)
{
struct port *port = platform_get_drvdata(pdev);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index f5d533a706ea..7ef435bab425 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -816,8 +816,7 @@ static const struct net_device_ops lmc_ops = {
.ndo_get_stats = lmc_get_stats,
};
-static int __devinit lmc_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
lmc_softc_t *sc;
struct net_device *dev;
@@ -986,7 +985,7 @@ err_req_io:
/*
* Called from pci when removing module.
*/
-static void __devexit lmc_remove_one(struct pci_dev *pdev)
+static void lmc_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -1733,7 +1732,7 @@ static struct pci_driver lmc_driver = {
.name = "lmc",
.id_table = lmc_pci_tbl,
.probe = lmc_init_one,
- .remove = __devexit_p(lmc_remove_one),
+ .remove = lmc_remove_one,
};
module_pci_driver(lmc_driver);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 5fe246e060d7..53efc57fcace 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -297,8 +297,8 @@ static const struct net_device_ops pc300_ops = {
.ndo_do_ioctl = pc300_ioctl,
};
-static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pc300_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
card_t *card;
u32 __iomem *p;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 9659fcaa34ed..ddbce54040e2 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -276,8 +276,8 @@ static const struct net_device_ops pci200_ops = {
.ndo_do_ioctl = pci200_ioctl,
};
-static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pci200_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
card_t *card;
u32 __iomem *p;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index feb7541b33fb..6a24a5a70cc7 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -557,8 +557,8 @@ static const struct net_device_ops wanxl_ops = {
.ndo_get_stats = wanxl_get_stats,
};
-static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int wanxl_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
card_t *card;
u32 ramsize, stat;
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
index 73aae2bf2f1c..21565d59ec7b 100644
--- a/drivers/net/wan/wanxlfw.S
+++ b/drivers/net/wan/wanxlfw.S
@@ -35,6 +35,7 @@
*/
#include <linux/hdlc.h>
+#include <linux/hdlc/ioctl.h>
#include "wanxl.h"
/* memory addresses and offsets */
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 4b66ab1d0e5c..6702da838b0e 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -209,6 +209,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
result = i2400m_reset(i2400m, rt);
if (result >= 0)
result = 0;
+ break;
default:
result = -EINVAL;
}
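
The one-line i2400m debugfs change above adds a missing break before the
default label; without it the success path fell through and the result was
overwritten with -EINVAL. A minimal reproduction of that fallthrough,
independent of the driver (values are illustrative):

#include <stdio.h>

static int reset_result(int supported, int ret)
{
	int result;

	switch (supported) {
	case 1:
		result = (ret >= 0) ? 0 : ret;
		break;          /* without this break, result is clobbered below */
	default:
		result = -22;   /* -EINVAL */
	}
	return result;
}

int main(void)
{
	printf("supported reset  -> %d\n", reset_result(1, 5));   /* 0 */
	printf("unsupported type -> %d\n", reset_result(0, 5));   /* -22 */
	return 0;
}
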
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6650fde99e1d..9f1e947f3557 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,9 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
USB_DEVICE_ID_I6050_2 = 0x0188,
+ USB_DEVICE_ID_I6150 = 0x07d6,
+ USB_DEVICE_ID_I6150_2 = 0x07d7,
+ USB_DEVICE_ID_I6150_3 = 0x07d9,
USB_DEVICE_ID_I6250 = 0x0187,
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 713d033891e6..080f36303a4f 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
switch (id->idProduct) {
case USB_DEVICE_ID_I6050:
case USB_DEVICE_ID_I6050_2:
+ case USB_DEVICE_ID_I6150:
+ case USB_DEVICE_ID_I6150_2:
+ case USB_DEVICE_ID_I6150_3:
case USB_DEVICE_ID_I6250:
i2400mu->i6050 = 1;
break;
@@ -759,6 +762,9 @@ static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 6deaae18db57..28aa05f60c26 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -156,11 +156,7 @@ config PRISM54
---help---
This enables support for FullMAC PCI/Cardbus prism54 devices. This
driver is now deprecated in favor for the SoftMAC driver, p54pci.
- p54pci supports FullMAC PCI/Cardbus devices as well. For details on
- the scheduled removal of this driver on the kernel see the feature
- removal schedule:
-
- Documentation/feature-removal-schedule.txt
+ p54pci supports FullMAC PCI/Cardbus devices as well.
For more information refer to the p54 wiki:
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 062dfdff6364..67156efe14c4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
-obj-$(CONFIG_ATH_COMMON) += ath/
+obj-$(CONFIG_ATH_CARDS) += ath/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 154a4965be4f..3d339e04efb7 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1761,7 +1761,7 @@ static const struct ieee80211_ops adm8211_ops = {
.get_tsf = adm8211_get_tsft
};
-static int __devinit adm8211_probe(struct pci_dev *pdev,
+static int adm8211_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ieee80211_hw *dev;
@@ -1935,7 +1935,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
}
-static void __devexit adm8211_remove(struct pci_dev *pdev)
+static void adm8211_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct adm8211_priv *priv;
@@ -1985,7 +1985,7 @@ static struct pci_driver adm8211_driver = {
.name = "adm8211",
.id_table = adm8211_pci_id_table,
.probe = adm8211_probe,
- .remove = __devexit_p(adm8211_remove),
+ .remove = adm8211_remove,
#ifdef CONFIG_PM
.suspend = adm8211_suspend,
.resume = adm8211_resume,
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3cd05a7173f6..53295418f576 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -78,7 +78,7 @@ static struct pci_driver airo_driver = {
.name = DRV_NAME,
.id_table = card_ids,
.probe = airo_pci_probe,
- .remove = __devexit_p(airo_pci_remove),
+ .remove = airo_pci_remove,
.suspend = airo_pci_suspend,
.resume = airo_pci_resume,
};
@@ -5584,7 +5584,7 @@ static void timer_func( struct net_device *dev ) {
}
#ifdef CONFIG_PCI
-static int __devinit airo_pci_probe(struct pci_dev *pdev,
+static int airo_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
@@ -5606,7 +5606,7 @@ static int __devinit airo_pci_probe(struct pci_dev *pdev,
return 0;
}
-static void __devexit airo_pci_remove(struct pci_dev *pdev)
+static void airo_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -7433,7 +7433,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
num_null_ies++;
break;
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if (ie[1] >= 4 &&
ie[2] == 0x00 &&
ie[3] == 0x50 &&
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b9ddf21273..77fa4286e5e9 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -379,7 +379,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
manifest_sync_timeout);
if (!size) {
- dev_printk(KERN_ERR, &udev->dev, "FW buffer length invalid!\n");
+ dev_err(&udev->dev, "FW buffer length invalid!\n");
return -EINVAL;
}
@@ -391,8 +391,8 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
if (need_dfu_state) {
ret = at76_dfu_get_state(udev, &dfu_state);
if (ret < 0) {
- dev_printk(KERN_ERR, &udev->dev,
- "cannot get DFU state: %d\n", ret);
+ dev_err(&udev->dev,
+ "cannot get DFU state: %d\n", ret);
goto exit;
}
need_dfu_state = 0;
@@ -407,9 +407,9 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
dfu_timeout = at76_get_timeout(&dfu_stat_buf);
need_dfu_state = 0;
} else
- dev_printk(KERN_ERR, &udev->dev,
- "at76_dfu_get_status returned %d\n",
- ret);
+ dev_err(&udev->dev,
+ "at76_dfu_get_status returned %d\n",
+ ret);
break;
case STATE_DFU_DOWNLOAD_BUSY:
@@ -438,9 +438,9 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
blockno++;
if (ret != bsize)
- dev_printk(KERN_ERR, &udev->dev,
- "at76_load_int_fw_block "
- "returned %d\n", ret);
+ dev_err(&udev->dev,
+ "at76_load_int_fw_block returned %d\n",
+ ret);
need_dfu_state = 1;
break;
@@ -1255,8 +1255,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
at76_dbg(DBG_DEVSTART, "opmode %d", op_mode);
if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
- dev_printk(KERN_ERR, &udev->dev, "unexpected opmode %d\n",
- op_mode);
+ dev_err(&udev->dev, "unexpected opmode %d\n", op_mode);
return -EINVAL;
}
@@ -1275,9 +1274,9 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
size, bsize, blockno);
ret = at76_load_ext_fw_block(udev, blockno, block, bsize);
if (ret != bsize) {
- dev_printk(KERN_ERR, &udev->dev,
- "loading %dth firmware block failed: %d\n",
- blockno, ret);
+ dev_err(&udev->dev,
+ "loading %dth firmware block failed: %d\n",
+ blockno, ret);
goto exit;
}
buf += bsize;
@@ -1293,8 +1292,8 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
exit:
kfree(block);
if (ret < 0)
- dev_printk(KERN_ERR, &udev->dev,
- "downloading external firmware failed: %d\n", ret);
+ dev_err(&udev->dev,
+ "downloading external firmware failed: %d\n", ret);
return ret;
}
@@ -1308,8 +1307,8 @@ static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe)
need_remap ? 0 : 2 * HZ);
if (ret < 0) {
- dev_printk(KERN_ERR, &udev->dev,
- "downloading internal fw failed with %d\n", ret);
+ dev_err(&udev->dev,
+ "downloading internal fw failed with %d\n", ret);
goto exit;
}
@@ -1319,8 +1318,8 @@ static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe)
if (need_remap) {
ret = at76_remap(udev);
if (ret < 0) {
- dev_printk(KERN_ERR, &udev->dev,
- "sending REMAP failed with %d\n", ret);
+ dev_err(&udev->dev,
+ "sending REMAP failed with %d\n", ret);
goto exit;
}
}
@@ -1555,11 +1554,10 @@ static struct fwentry *at76_load_firmware(struct usb_device *udev,
at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
if (ret < 0) {
- dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
- fwe->fwname);
- dev_printk(KERN_ERR, &udev->dev,
- "you may need to download the firmware from "
- "http://developer.berlios.de/projects/at76c503a/\n");
+ dev_err(&udev->dev, "firmware %s not found!\n",
+ fwe->fwname);
+ dev_err(&udev->dev,
+ "you may need to download the firmware from http://developer.berlios.de/projects/at76c503a/\n");
goto exit;
}
@@ -1567,17 +1565,17 @@ static struct fwentry *at76_load_firmware(struct usb_device *udev,
fwh = (struct at76_fw_header *)(fwe->fw->data);
if (fwe->fw->size <= sizeof(*fwh)) {
- dev_printk(KERN_ERR, &udev->dev,
- "firmware is too short (0x%zx)\n", fwe->fw->size);
+ dev_err(&udev->dev,
+ "firmware is too short (0x%zx)\n", fwe->fw->size);
goto exit;
}
/* CRC currently not checked */
fwe->board_type = le32_to_cpu(fwh->board_type);
if (fwe->board_type != board_type) {
- dev_printk(KERN_ERR, &udev->dev,
- "board type mismatch, requested %u, got %u\n",
- board_type, fwe->board_type);
+ dev_err(&udev->dev,
+ "board type mismatch, requested %u, got %u\n",
+ board_type, fwe->board_type);
goto exit;
}
@@ -2150,8 +2148,7 @@ static int at76_alloc_urbs(struct at76_priv *priv,
}
if (!ep_in || !ep_out) {
- dev_printk(KERN_ERR, &interface->dev,
- "bulk endpoints missing\n");
+ dev_err(&interface->dev, "bulk endpoints missing\n");
return -ENXIO;
}
@@ -2161,15 +2158,14 @@ static int at76_alloc_urbs(struct at76_priv *priv,
priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->rx_urb || !priv->tx_urb) {
- dev_printk(KERN_ERR, &interface->dev, "cannot allocate URB\n");
+ dev_err(&interface->dev, "cannot allocate URB\n");
return -ENOMEM;
}
buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!priv->bulk_out_buffer) {
- dev_printk(KERN_ERR, &interface->dev,
- "cannot allocate output buffer\n");
+ dev_err(&interface->dev, "cannot allocate output buffer\n");
return -ENOMEM;
}
@@ -2230,8 +2226,7 @@ static int at76_init_new_device(struct at76_priv *priv,
/* MAC address */
ret = at76_get_hw_config(priv);
if (ret < 0) {
- dev_printk(KERN_ERR, &interface->dev,
- "cannot get MAC address\n");
+ dev_err(&interface->dev, "cannot get MAC address\n");
goto exit;
}
@@ -2358,8 +2353,8 @@ static int at76_probe(struct usb_interface *interface,
we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
if (op_mode == OPMODE_HW_CONFIG_MODE) {
- dev_printk(KERN_ERR, &interface->dev,
- "cannot handle a device in HW_CONFIG_MODE\n");
+ dev_err(&interface->dev,
+ "cannot handle a device in HW_CONFIG_MODE\n");
ret = -EBUSY;
goto error;
}
@@ -2371,9 +2366,9 @@ static int at76_probe(struct usb_interface *interface,
"downloading internal firmware\n");
ret = at76_load_internal_fw(udev, fwe);
if (ret < 0) {
- dev_printk(KERN_ERR, &interface->dev,
- "error %d downloading internal firmware\n",
- ret);
+ dev_err(&interface->dev,
+ "error %d downloading internal firmware\n",
+ ret);
goto error;
}
usb_put_dev(udev);
@@ -2408,8 +2403,8 @@ static int at76_probe(struct usb_interface *interface,
/* Re-check firmware version */
ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
if (ret < 0) {
- dev_printk(KERN_ERR, &interface->dev,
- "error %d getting firmware version\n", ret);
+ dev_err(&interface->dev,
+ "error %d getting firmware version\n", ret);
goto error;
}
}
@@ -2449,7 +2444,7 @@ static void at76_disconnect(struct usb_interface *interface)
wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
- dev_printk(KERN_INFO, &interface->dev, "disconnected\n");
+ dev_info(&interface->dev, "disconnected\n");
}
/* Structure for registering this driver with the USB subsystem */
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 09602241901b..2c02b4e84094 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,4 +1,7 @@
-menuconfig ATH_COMMON
+config ATH_COMMON
+ tristate
+
+menuconfig ATH_CARDS
tristate "Atheros Wireless Cards"
depends on CFG80211 && (!UML || BROKEN)
---help---
@@ -14,7 +17,7 @@ menuconfig ATH_COMMON
http://wireless.kernel.org/en/users/Drivers/Atheros
-if ATH_COMMON
+if ATH_CARDS
config ATH_DEBUG
bool "Atheros wireless debugging"
@@ -26,5 +29,7 @@ source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
+source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d716b748e574..97b964ded2be 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
+obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 000000000000..0d320cc7769b
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,8 @@
+config AR5523
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select ATH_COMMON
+ select FW_LOADER
+ ---help---
+ This module adds support for AR5523 based USB dongles such as D-Link
+ DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 000000000000..ebf7f3bf0a33
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 000000000000..7157f7d311c5
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1798 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This driver is based on the uath driver written by Damien Bergamini for
+ * OpenBSD, who did black-box analysis of the Windows binary driver to find
+ * out how the hardware works. It contains a lot of magic numbers because of
+ * that and only has minimal functionality.
+ */
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "ar5523.h"
+#include "ar5523_hw.h"
+
+/*
+ * Various supported device vendors/products.
+ * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
+ */
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar);
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
+
+static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
+ struct ar5523_tx_cmd *cmd)
+{
+ int dlen, olen;
+ __be32 *rp;
+
+ dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);
+
+ if (dlen < 0) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
+ dlen);
+
+ rp = (__be32 *)(hdr + 1);
+ if (dlen >= sizeof(u32)) {
+ olen = be32_to_cpu(rp[0]);
+ dlen -= sizeof(u32);
+ if (olen == 0) {
+ /* convention is 0 =>'s one word */
+ olen = sizeof(u32);
+ }
+ } else
+ olen = 0;
+
+ if (cmd->odata) {
+ if (cmd->olen < olen) {
+ ar5523_err(ar, "olen to small %d < %d\n",
+ cmd->olen, olen);
+ cmd->olen = 0;
+ cmd->res = -EOVERFLOW;
+ } else {
+ cmd->olen = olen;
+ memcpy(cmd->odata, &rp[1], olen);
+ cmd->res = 0;
+ }
+ }
+
+out:
+ complete(&cmd->done);
+}
+
+static void ar5523_cmd_rx_cb(struct urb *urb)
+{
+ struct ar5523 *ar = urb->context;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
+ int dlen;
+ u32 code, hdrlen;
+
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "RX USB error %d.\n", urb->status);
+ goto skip;
+ }
+
+ if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
+ ar5523_err(ar, "RX USB to short.\n");
+ goto skip;
+ }
+
+ ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
+ be32_to_cpu(hdr->code) & 0xff, hdr->priv);
+
+ code = be32_to_cpu(hdr->code);
+ hdrlen = be32_to_cpu(hdr->len);
+
+ switch (code & 0xff) {
+ default:
+ /* reply to a read command */
+ if (hdr->priv != AR5523_CMD_ID) {
+ ar5523_err(ar, "Unexpected command id: %02x\n",
+ code & 0xff);
+ goto skip;
+ }
+ ar5523_read_reply(ar, hdr, cmd);
+ break;
+
+ case WDCMSG_DEVICE_AVAIL:
+ ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
+ cmd->res = 0;
+ cmd->olen = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_SEND_COMPLETE:
+ ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
+ atomic_read(&ar->tx_nr_pending));
+ if (!test_bit(AR5523_HW_UP, &ar->flags))
+ ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
+ else {
+ mod_timer(&ar->tx_wd_timer,
+ jiffies + AR5523_TX_WD_TIMEOUT);
+ ar5523_data_tx_pkt_put(ar);
+
+ }
+ break;
+
+ case WDCMSG_TARGET_START:
+ /* This command returns a bogus id so it needs special
+ handling */
+ dlen = hdrlen - sizeof(*hdr);
+ if (dlen != (int)sizeof(u32)) {
+ ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
+ return;
+ }
+ memcpy(cmd->odata, hdr + 1, sizeof(u32));
+ cmd->olen = sizeof(u32);
+ cmd->res = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_STATS_UPDATE:
+ ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
+ break;
+ }
+
+skip:
+ ar5523_submit_rx_cmd(ar);
+}
+
+static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
+{
+ ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ar->rx_cmd_urb)
+ return -ENOMEM;
+
+ ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ GFP_KERNEL,
+ &ar->rx_cmd_urb->transfer_dma);
+ if (!ar->rx_cmd_buf) {
+ usb_free_urb(ar->rx_cmd_urb);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->rx_cmd_urb);
+}
+
+static void ar5523_free_rx_cmd(struct ar5523 *ar)
+{
+ usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
+ usb_free_urb(ar->rx_cmd_urb);
+}
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar)
+{
+ int error;
+
+ usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
+ ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
+ AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
+ ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
+ if (error) {
+ if (error != -ENODEV)
+ ar5523_err(ar, "error %d when submitting rx urb\n",
+ error);
+ return error;
+ }
+ return 0;
+}
+
+/*
+ * Command submitted cb
+ */
+static void ar5523_cmd_tx_cb(struct urb *urb)
+{
+ struct ar5523_tx_cmd *cmd = urb->context;
+ struct ar5523 *ar = cmd->ar;
+
+ if (urb->status) {
+ ar5523_err(ar, "Failed to TX command. Status = %d\n",
+ urb->status);
+ cmd->res = urb->status;
+ complete(&cmd->done);
+ return;
+ }
+
+ if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
+ cmd->res = 0;
+ complete(&cmd->done);
+ }
+}
+
+static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ struct ar5523_cmd_hdr *hdr;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ int xferlen, error;
+
+ /* always bulk-out a multiple of 4 bytes */
+ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
+
+ hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
+ memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
+ hdr->len = cpu_to_be32(xferlen);
+ hdr->code = cpu_to_be32(code);
+ hdr->priv = AR5523_CMD_ID;
+
+ if (flags & AR5523_CMD_FLAG_MAGIC)
+ hdr->magic = cpu_to_be32(1 << 24);
+ memcpy(hdr + 1, idata, ilen);
+
+ cmd->odata = odata;
+ cmd->olen = olen;
+ cmd->flags = flags;
+
+ ar5523_dbg(ar, "do cmd %02x\n", code);
+
+ usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
+ cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
+ cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "could not send command 0x%x, error=%d\n",
+ code, error);
+ return error;
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+ cmd->res = -ETIMEDOUT;
+ }
+ return cmd->res;
+}
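
The xferlen computation in ar5523_cmd() above rounds the header plus payload
up to a multiple of four before the bulk-out transfer. A worked example of
the (n + 3) & ~3 idiom, detached from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int lens[] = { 0, 1, 4, 5, 7, 8, 13 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("%2u -> %2u\n", lens[i], (lens[i] + 3) & ~3u);
	/* 0->0, 1->4, 4->4, 5->8, 7->8, 8->8, 13->16 */
	return 0;
}
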
+
+static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
+ int len, int flags)
+{
+ flags &= ~AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
+}
+
+static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ flags |= AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
+}
+
+static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(0); /* 0 = single write */
+ *(__be32 *)write.data = cpu_to_be32(val);
+
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ 3 * sizeof(u32), 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write register 0x%02x\n", reg);
+ return error;
+}
+
+static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
+ int len)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(len);
+ memcpy(write.data, data, len);
+
+ /* properly handle the case where len is zero (reset) */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
+ len, reg);
+ return error;
+}
+
+static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
+ int olen)
+{
+ int error;
+ __be32 which_be;
+
+ which_be = cpu_to_be32(which);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
+ &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
+ if (error != 0)
+ ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which);
+ return error;
+}
+
+static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
+{
+ int error;
+ __be32 cap_be, val_be;
+
+ cap_be = cpu_to_be32(cap);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be,
+ sizeof(cap_be), &val_be, sizeof(__be32),
+ AR5523_CMD_FLAG_MAGIC);
+ if (error != 0) {
+ ar5523_err(ar, "could not read capability %u\n", cap);
+ return error;
+ }
+ *val = be32_to_cpu(val_be);
+ return error;
+}
+
+static int ar5523_get_devcap(struct ar5523 *ar)
+{
+#define GETCAP(x) do { \
+ error = ar5523_get_capability(ar, x, &cap); \
+ if (error != 0) \
+ return error; \
+ ar5523_info(ar, "Cap: " \
+ "%s=0x%08x\n", #x, cap); \
+} while (0)
+ int error;
+ u32 cap;
+
+ /* collect device capabilities */
+ GETCAP(CAP_TARGET_VERSION);
+ GETCAP(CAP_TARGET_REVISION);
+ GETCAP(CAP_MAC_VERSION);
+ GETCAP(CAP_MAC_REVISION);
+ GETCAP(CAP_PHY_REVISION);
+ GETCAP(CAP_ANALOG_5GHz_REVISION);
+ GETCAP(CAP_ANALOG_2GHz_REVISION);
+
+ GETCAP(CAP_REG_DOMAIN);
+ GETCAP(CAP_REG_CAP_BITS);
+ GETCAP(CAP_WIRELESS_MODES);
+ GETCAP(CAP_CHAN_SPREAD_SUPPORT);
+ GETCAP(CAP_COMPRESS_SUPPORT);
+ GETCAP(CAP_BURST_SUPPORT);
+ GETCAP(CAP_FAST_FRAMES_SUPPORT);
+ GETCAP(CAP_CHAP_TUNING_SUPPORT);
+ GETCAP(CAP_TURBOG_SUPPORT);
+ GETCAP(CAP_TURBO_PRIME_SUPPORT);
+ GETCAP(CAP_DEVICE_TYPE);
+ GETCAP(CAP_WME_SUPPORT);
+ GETCAP(CAP_TOTAL_QUEUES);
+ GETCAP(CAP_CONNECTION_ID_MAX);
+
+ GETCAP(CAP_LOW_5GHZ_CHAN);
+ GETCAP(CAP_HIGH_5GHZ_CHAN);
+ GETCAP(CAP_LOW_2GHZ_CHAN);
+ GETCAP(CAP_HIGH_2GHZ_CHAN);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
+
+ GETCAP(CAP_CIPHER_AES_CCM);
+ GETCAP(CAP_CIPHER_TKIP);
+ GETCAP(CAP_MIC_TKIP);
+ return 0;
+}
+
+static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
+{
+ struct ar5523_cmd_ledsteady led;
+
+ led.lednum = cpu_to_be32(lednum);
+ led.ledmode = cpu_to_be32(ledmode);
+
+ ar5523_dbg(ar, "set %s led %s (steady)\n",
+ (lednum == UATH_LED_LINK) ? "link" : "activity",
+ ledmode ? "on" : "off");
+ return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
+ 0);
+}
+
+static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
+{
+ struct ar5523_cmd_rx_filter rxfilter;
+
+ rxfilter.bits = cpu_to_be32(bits);
+ rxfilter.op = cpu_to_be32(op);
+
+ ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
+ return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
+ sizeof(rxfilter), 0);
+}
+
+static int ar5523_reset_tx_queues(struct ar5523 *ar)
+{
+ __be32 qid = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "resetting Tx queue\n");
+ return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
+ &qid, sizeof(qid), 0);
+}
+
+static int ar5523_set_chan(struct ar5523 *ar)
+{
+ struct ieee80211_conf *conf = &ar->hw->conf;
+
+ struct ar5523_cmd_reset reset;
+
+ memset(&reset, 0, sizeof(reset));
+ reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
+ reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
+ reset.freq = cpu_to_be32(conf->channel->center_freq);
+ reset.maxrdpower = cpu_to_be32(50); /* XXX */
+ reset.channelchange = cpu_to_be32(1);
+ reset.keeprccontent = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
+ be32_to_cpu(reset.flags),
+ conf->channel->center_freq);
+ return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
+}
+
+static int ar5523_queue_init(struct ar5523 *ar)
+{
+ struct ar5523_cmd_txq_setup qinfo;
+
+ ar5523_dbg(ar, "setting up Tx queue\n");
+ qinfo.qid = cpu_to_be32(0);
+ qinfo.len = cpu_to_be32(sizeof(qinfo.attr));
+ qinfo.attr.priority = cpu_to_be32(0); /* XXX */
+ qinfo.attr.aifs = cpu_to_be32(3);
+ qinfo.attr.logcwmin = cpu_to_be32(4);
+ qinfo.attr.logcwmax = cpu_to_be32(10);
+ qinfo.attr.bursttime = cpu_to_be32(0);
+ qinfo.attr.mode = cpu_to_be32(0);
+ qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */
+ return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
+ sizeof(qinfo), 0);
+}
+
+static int ar5523_switch_chan(struct ar5523 *ar)
+{
+ int error;
+
+ error = ar5523_set_chan(ar);
+ if (error) {
+ ar5523_err(ar, "could not set chan, error %d\n", error);
+ goto out_err;
+ }
+
+ /* reset Tx rings */
+ error = ar5523_reset_tx_queues(ar);
+ if (error) {
+ ar5523_err(ar, "could not reset Tx queues, error %d\n",
+ error);
+ goto out_err;
+ }
+ /* set Tx rings WME properties */
+ error = ar5523_queue_init(ar);
+ if (error)
+ ar5523_err(ar, "could not init wme, error %d\n", error);
+
+out_err:
+ return error;
+}
+
+static void ar5523_rx_data_put(struct ar5523 *ar,
+ struct ar5523_rx_data *data)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_free);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+}
+
+static void ar5523_data_rx_cb(struct urb *urb)
+{
+ struct ar5523_rx_data *data = urb->context;
+ struct ar5523 *ar = data->ar;
+ struct ar5523_rx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_rx_status *rx_status;
+ u32 rxlen;
+ int usblen = urb->actual_length;
+ int hdrlen, pad;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "%s: USB err: %d\n", __func__,
+ urb->status);
+ goto skip;
+ }
+
+ if (usblen < AR5523_MIN_RXBUFSZ) {
+ ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
+ goto skip;
+ }
+
+ chunk = (struct ar5523_chunk *) data->skb->data;
+
+ if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
+ chunk->seqnum != 0) {
+ ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
+ chunk->seqnum, chunk->flags,
+ be16_to_cpu(chunk->length));
+ goto skip;
+ }
+
+ /* Rx descriptor is located at the end, 32-bit aligned */
+ desc = (struct ar5523_rx_desc *)
+ (data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
+
+ rxlen = be32_to_cpu(desc->len);
+ if (rxlen > ar->rxbufsz) {
+ ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
+ be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ if (!rxlen) {
+ ar5523_dbg(ar, "RX: rxlen is 0\n");
+ goto skip;
+ }
+
+ if (be32_to_cpu(desc->status) != 0) {
+ ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
+ be32_to_cpu(desc->status), be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ skb_reserve(data->skb, sizeof(*chunk));
+ skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
+ if (!IS_ALIGNED(hdrlen, 4)) {
+ ar5523_dbg(ar, "eek, alignment workaround activated\n");
+ pad = ALIGN(hdrlen, 4) - hdrlen;
+ memmove(data->skb->data + pad, data->skb->data, hdrlen);
+ skb_pull(data->skb, pad);
+ skb_put(data->skb, pad);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(data->skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = be32_to_cpu(desc->channel);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->signal = -95 + be32_to_cpu(desc->rssi);
+
+ ieee80211_rx_irqsafe(hw, data->skb);
+ data->skb = NULL;
+
+skip:
+ if (data->skb) {
+ dev_kfree_skb_irq(data->skb);
+ data->skb = NULL;
+ }
+
+ ar5523_rx_data_put(ar, data);
+ if (atomic_inc_return(&ar->rx_data_free_cnt) >=
+ AR5523_RX_DATA_REFILL_COUNT &&
+ test_bit(AR5523_HW_UP, &ar->flags))
+ queue_work(ar->wq, &ar->rx_refill_work);
+}
+
+static void ar5523_rx_refill_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+
+ if (!list_empty(&ar->rx_data_free))
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ goto done;
+
+ data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
+ if (!data->skb) {
+ ar5523_err(ar, "could not allocate rx skbuff\n");
+ return;
+ }
+
+ usb_fill_bulk_urb(data->urb, ar->dev,
+ ar5523_data_rx_pipe(ar->dev), data->skb->data,
+ ar->rxbufsz, ar5523_data_rx_cb, data);
+
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_used);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+ atomic_dec(&ar->rx_data_free_cnt);
+
+ error = usb_submit_urb(data->urb, GFP_KERNEL);
+ if (error) {
+ kfree_skb(data->skb);
+ if (error != -ENODEV)
+ ar5523_err(ar, "Err sending rx data urb %d\n",
+ error);
+ ar5523_rx_data_put(ar, data);
+ atomic_inc(&ar->rx_data_free_cnt);
+ return;
+ }
+
+ } while (true);
+done:
+ return;
+}
+
+static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ if (!list_empty(&ar->rx_data_used))
+ data = (struct ar5523_rx_data *) ar->rx_data_used.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ usb_kill_urb(data->urb);
+ list_move(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ } while (data);
+}
+
+static void ar5523_free_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+
+ ar5523_cancel_rx_bufs(ar);
+ while (!list_empty(&ar->rx_data_free)) {
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ list_del(&data->list);
+ usb_free_urb(data->urb);
+ }
+}
+
+static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
+{
+ int i;
+
+ for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
+ struct ar5523_rx_data *data = &ar->rx_data[i];
+
+ data->ar = ar;
+ data->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!data->urb) {
+ ar5523_err(ar, "could not allocate rx data urb\n");
+ goto err;
+ }
+ list_add_tail(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ }
+ return 0;
+
+err:
+ ar5523_free_rx_bufs(ar);
+ return -ENOMEM;
+}
+
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
+{
+ atomic_dec(&ar->tx_nr_total);
+ if (!atomic_dec_return(&ar->tx_nr_pending)) {
+ del_timer(&ar->tx_wd_timer);
+ wake_up(&ar->tx_flush_waitq);
+ }
+
+ if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
+ ar5523_dbg(ar, "restart tx queue\n");
+ ieee80211_wake_queues(ar->hw);
+ }
+}
+
+static void ar5523_data_tx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = data->ar;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (urb->status) {
+ ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
+ ar5523_data_tx_pkt_put(ar);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+ }
+ usb_free_urb(urb);
+}
+
+static void ar5523_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = hw->priv;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "tx called\n");
+ if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
+ ar5523_dbg(ar, "tx queue full\n");
+ ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+ ieee80211_stop_queues(hw);
+ }
+
+ data->skb = skb;
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_pending);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ ieee80211_queue_work(ar->hw, &ar->tx_work);
+}
+
+static void ar5523_tx_work_locked(struct ar5523 *ar)
+{
+ struct ar5523_tx_data *data;
+ struct ar5523_tx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_tx_info *txi;
+ struct urb *urb;
+ struct sk_buff *skb;
+ int error = 0, paylen;
+ u32 txqid;
+ unsigned long flags;
+
+ BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ if (!list_empty(&ar->tx_queue_pending)) {
+ data = (struct ar5523_tx_data *)
+ ar->tx_queue_pending.next;
+ list_del(&data->list);
+ } else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ skb = data->skb;
+ txqid = 0;
+ txi = IEEE80211_SKB_CB(skb);
+ paylen = skb->len;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ar5523_err(ar, "Failed to allocate TX urb\n");
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ data->ar = ar;
+ data->urb = urb;
+
+ desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
+ chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
+
+ chunk->seqnum = 0;
+ chunk->flags = UATH_CFLAGS_FINAL;
+ chunk->length = cpu_to_be16(skb->len);
+
+ desc->msglen = cpu_to_be32(skb->len);
+ desc->msgid = AR5523_DATA_ID;
+ desc->buflen = cpu_to_be32(paylen);
+ desc->type = cpu_to_be32(WDCMSG_SEND);
+ desc->flags = cpu_to_be32(UATH_TX_NOTIFY);
+
+ if (test_bit(AR5523_CONNECTED, &ar->flags))
+ desc->connid = cpu_to_be32(AR5523_ID_BSS);
+ else
+ desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
+
+ if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ txqid |= UATH_TXQID_MINRATE;
+
+ desc->txqid = cpu_to_be32(txqid);
+
+ urb->transfer_flags = URB_ZERO_PACKET;
+ usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
+ skb->data, skb->len, ar5523_data_tx_cb, skb);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_submitted);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
+ atomic_inc(&ar->tx_nr_pending);
+
+ ar5523_dbg(ar, "TX Frame (%d pending)\n",
+ atomic_read(&ar->tx_nr_pending));
+ error = usb_submit_urb(urb, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "error %d when submitting tx urb\n",
+ error);
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ atomic_dec(&ar->tx_nr_pending);
+ ar5523_data_tx_pkt_put(ar);
+ usb_free_urb(urb);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } while (true);
+}
+
+static void ar5523_tx_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+ ar5523_tx_work_locked(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_tx_wd_timer(unsigned long arg)
+{
+ struct ar5523 *ar = (struct ar5523 *) arg;
+
+ ar5523_dbg(ar, "TX watchdog timer triggered\n");
+ ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
+}
+
+static void ar5523_tx_wd_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
+
+ /* Occasionally the TX queues stop responding. The only way to
+ * recover seems to be to reset the dongle.
+ */
+
+ mutex_lock(&ar->mutex);
+ ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+
+ ar5523_err(ar, "Will restart dongle.\n");
+ ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_flush_tx(struct ar5523 *ar)
+{
+ ar5523_tx_work_locked(ar);
+
+ /* Don't waste time trying to flush if USB is disconnected */
+ if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
+ return;
+ if (!wait_event_timeout(ar->tx_flush_waitq,
+ !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
+ ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+}
+
+static void ar5523_free_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ usb_free_coherent(ar->dev, AR5523_MAX_TXCMDSZ, cmd->buf_tx,
+ cmd->urb_tx->transfer_dma);
+ usb_free_urb(cmd->urb_tx);
+}
+
+static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ cmd->ar = ar;
+ init_completion(&cmd->done);
+
+ cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+ if (!cmd->urb_tx) {
+ ar5523_err(ar, "could not allocate urb\n");
+ return -ENOMEM;
+ }
+ cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
+ GFP_KERNEL,
+ &cmd->urb_tx->transfer_dma);
+ if (!cmd->buf_tx) {
+ usb_free_urb(cmd->urb_tx);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * This function is called periodically (every second) while associated, to
+ * query device statistics.
+ */
+static void ar5523_stat_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+
+ /*
+ * Send request for statistics asynchronously once a second. This
+ * seems to be important. Throughput is a lot better if this is done.
+ */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
+ if (error)
+ ar5523_err(ar, "could not query stats, error %d\n", error);
+ mutex_unlock(&ar->mutex);
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
+}
+
+/*
+ * Interface routines to the mac80211 stack.
+ */
+static int ar5523_start(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+ __be32 val;
+
+ ar5523_dbg(ar, "start called\n");
+
+ mutex_lock(&ar->mutex);
+ val = cpu_to_be32(0);
+ ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);
+
+ /* set MAC address */
+ ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
+ ETH_ALEN);
+
+ /* XXX honor net80211 state */
+ ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
+ ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
+ ar5523_config(ar, CFG_ABOLT, 0x0000003f);
+ ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);
+
+ ar5523_config(ar, CFG_SERVICE_TYPE, 1);
+ ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
+ ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
+ ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
+ ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
+ ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
+ ar5523_config(ar, CFG_MODE_CTS, 0x00000002);
+
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
+ &val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
+ if (error) {
+ ar5523_dbg(ar, "could not start target, error %d\n", error);
+ goto err;
+ }
+ ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
+ be32_to_cpu(val));
+
+ ar5523_switch_chan(ar);
+
+ val = cpu_to_be32(TARGET_DEVICE_AWAKE);
+ ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
+ /* XXX? check */
+ ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
+
+ set_bit(AR5523_HW_UP, &ar->flags);
+ queue_work(ar->wq, &ar->rx_refill_work);
+
+ /* enable Rx */
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar,
+ UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
+ UATH_FILTER_OP_SET);
+
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
+ ar5523_dbg(ar, "start OK\n");
+
+err:
+ mutex_unlock(&ar->mutex);
+ return error;
+}
+
+static void ar5523_stop(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "stop called\n");
+
+ cancel_delayed_work_sync(&ar->stat_work);
+ mutex_lock(&ar->mutex);
+ clear_bit(AR5523_HW_UP, &ar->flags);
+
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);
+
+ ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
+
+ del_timer_sync(&ar->tx_wd_timer);
+ cancel_work_sync(&ar->tx_wd_work);
+ cancel_work_sync(&ar->rx_refill_work);
+ ar5523_cancel_rx_bufs(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ar5523 *ar = hw->priv;
+ int ret;
+
+ ar5523_dbg(ar, "set_rts_threshold called\n");
+ mutex_lock(&ar->mutex);
+
+ ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static void ar5523_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "flush called\n");
+ ar5523_flush_tx(ar);
+}
+
+static int ar5523_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "add interface called\n");
+
+ if (ar->vif) {
+ ar5523_dbg(ar, "invalid add_interface\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ar->vif = vif;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void ar5523_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "remove interface called\n");
+ ar->vif = NULL;
+}
+
+static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "config called\n");
+ mutex_lock(&ar->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ar5523_dbg(ar, "Do channel switch\n");
+ ar5523_flush_tx(ar);
+ ar5523_switch_chan(ar);
+ }
+ mutex_unlock(&ar->mutex);
+ return 0;
+}
+
+static int ar5523_get_wlan_mode(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_supported_band *band;
+ int bit;
+ struct ieee80211_sta *sta;
+ u32 sta_rate_set;
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ if (!sta) {
+ ar5523_info(ar, "STA not found!\n");
+ return WLAN_MODE_11b;
+ }
+ sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (sta_rate_set & 1) {
+ int rate = band->bitrates[bit].bitrate;
+ switch (rate) {
+ case 60:
+ case 90:
+ case 120:
+ case 180:
+ case 240:
+ case 360:
+ case 480:
+ case 540:
+ return WLAN_MODE_11g;
+ }
+ }
+ sta_rate_set >>= 1;
+ }
+ return WLAN_MODE_11b;
+}
+
+static void ar5523_create_rateset(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ar5523_cmd_rateset *rs,
+ bool basic)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta *sta;
+ int bit, i = 0;
+ u32 sta_rate_set, basic_rate_set;
+
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ basic_rate_set = bss_conf->basic_rates;
+ if (!sta) {
+ ar5523_info(ar, "STA not found. Cannot set rates\n");
+ sta_rate_set = bss_conf->basic_rates;
+ } else
+ sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
+
+ ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ BUG_ON(i >= AR5523_MAX_NRATES);
+ ar5523_dbg(ar, "Considering rate %d : %d\n",
+ band->bitrates[bit].hw_value, sta_rate_set & 1);
+ if (sta_rate_set & 1) {
+ rs->set[i] = band->bitrates[bit].hw_value;
+ if (basic_rate_set & 1 && basic)
+ rs->set[i] |= 0x80;
+ i++;
+ }
+ sta_rate_set >>= 1;
+ basic_rate_set >>= 1;
+ }
+
+ rs->length = i;
+}
+
+static int ar5523_set_basic_rates(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_rates rates;
+
+ memset(&rates, 0, sizeof(rates));
+ rates.connid = cpu_to_be32(2); /* XXX */
+ rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+ ar5523_create_rateset(ar, bss, &rates.rateset, true);
+
+ return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
+ sizeof(rates), 0);
+}
+
+static int ar5523_create_connection(struct ar5523 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_create_connection create;
+ int wlan_mode;
+
+ memset(&create, 0, sizeof(create));
+ create.connid = cpu_to_be32(2);
+ create.bssid = cpu_to_be32(0);
+ /* XXX packed or not? */
+ create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+
+ ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
+
+ wlan_mode = ar5523_get_wlan_mode(ar, bss);
+ create.connattr.wlanmode = cpu_to_be32(wlan_mode);
+
+ return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
+ sizeof(create), 0);
+}
+
+static int ar5523_write_associd(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_set_associd associd;
+
+ memset(&associd, 0, sizeof(associd));
+ associd.defaultrateix = cpu_to_be32(0); /* XXX */
+ associd.associd = cpu_to_be32(bss->aid);
+ associd.timoffset = cpu_to_be32(0x3b); /* XXX */
+ memcpy(associd.bssid, bss->bssid, ETH_ALEN);
+ return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
+ sizeof(associd), 0);
+}
+
+static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss,
+ u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+
+ ar5523_dbg(ar, "bss_info_changed called\n");
+ mutex_lock(&ar->mutex);
+
+ if (!(changed & BSS_CHANGED_ASSOC))
+ goto out_unlock;
+
+ if (bss->assoc) {
+ error = ar5523_create_connection(ar, vif, bss);
+ if (error) {
+ ar5523_err(ar, "could not create connection\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_set_basic_rates(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set negotiated rate set\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_write_associd(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set association\n");
+ goto out_unlock;
+ }
+
+ /* turn link LED on */
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
+ set_bit(AR5523_CONNECTED, &ar->flags);
+ ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
+
+ } else {
+ cancel_delayed_work(&ar->stat_work);
+ clear_bit(AR5523_CONNECTED, &ar->flags);
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ }
+
+out_unlock:
+ mutex_unlock(&ar->mutex);
+
+}
+
+#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_OTHER_BSS)
+
+static void ar5523_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ar5523 *ar = hw->priv;
+ u32 filter = 0;
+
+ ar5523_dbg(ar, "configure_filter called\n");
+ mutex_lock(&ar->mutex);
+ ar5523_flush_tx(ar);
+
+ *total_flags &= AR5523_SUPPORTED_FILTERS;
+
+ /* The filters seem strange. UATH_FILTER_RX_BCAST and
+ * UATH_FILTER_RX_MCAST do not result in those frames being RXed.
+ * The only way I have found to get [mb]cast frames seems to be
+ * to set UATH_FILTER_RX_PROM. */
+ filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
+ UATH_FILTER_RX_PROM;
+
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);
+
+ mutex_unlock(&ar->mutex);
+}
+
+static const struct ieee80211_ops ar5523_ops = {
+ .start = ar5523_start,
+ .stop = ar5523_stop,
+ .tx = ar5523_tx,
+ .set_rts_threshold = ar5523_set_rts_threshold,
+ .add_interface = ar5523_add_interface,
+ .remove_interface = ar5523_remove_interface,
+ .config = ar5523_hwconfig,
+ .bss_info_changed = ar5523_bss_info_changed,
+ .configure_filter = ar5523_configure_filter,
+ .flush = ar5523_flush,
+};
+
+static int ar5523_host_available(struct ar5523 *ar)
+{
+ struct ar5523_cmd_host_available setup;
+
+ /* inform target the host is available */
+ setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
+ setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
+ setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
+ setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
+ return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
+ &setup, sizeof(setup), NULL, 0, 0);
+}
+
+static int ar5523_get_devstatus(struct ar5523 *ar)
+{
+ u8 macaddr[ETH_ALEN];
+ int error;
+
+ /* retrieve MAC address */
+ error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
+ if (error) {
+ ar5523_err(ar, "could not read MAC address\n");
+ return error;
+ }
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
+
+ error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
+ &ar->serial[0], sizeof(ar->serial));
+ if (error) {
+ ar5523_err(ar, "could not read device serial number\n");
+ return error;
+ }
+ return 0;
+}
+
+#define AR5523_SANE_RXBUFSZ 2000
+
+static int ar5523_get_max_rxsz(struct ar5523 *ar)
+{
+ int error;
+ __be32 rxsize;
+
+ /* Get max rx size */
+ error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
+ sizeof(rxsize));
+ if (error != 0) {
+ ar5523_err(ar, "could not read max RX size\n");
+ return error;
+ }
+
+ ar->rxbufsz = be32_to_cpu(rxsize);
+
+ if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
+ ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
+ AR5523_SANE_RXBUFSZ);
+ ar->rxbufsz = AR5523_SANE_RXBUFSZ;
+ }
+
+ ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
+ return 0;
+}
+
+/*
+ * This is copied from rtl818x, but we should probably move this
+ * to common code as in OpenBSD.
+ */
+static const struct ieee80211_rate ar5523_rates[] = {
+ { .bitrate = 10, .hw_value = 2, },
+ { .bitrate = 20, .hw_value = 4 },
+ { .bitrate = 55, .hw_value = 11, },
+ { .bitrate = 110, .hw_value = 22, },
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+};
+
+static const struct ieee80211_channel ar5523_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+
+static int ar5523_init_modes(struct ar5523 *ar)
+{
+ BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
+ BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
+
+ memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
+ memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
+
+ ar->band.band = IEEE80211_BAND_2GHZ;
+ ar->band.channels = ar->channels;
+ ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
+ ar->band.bitrates = ar->rates;
+ ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+ return 0;
+}
+
+/*
+ * Load the MIPS R4000 microcode into the device. Once the image is loaded,
+ * the device will detach itself from the bus and reattach later with a new
+ * product Id (a la ezusb).
+ */
+static int ar5523_load_firmware(struct usb_device *dev)
+{
+ struct ar5523_fwblock *txblock, *rxblock;
+ const struct firmware *fw;
+ void *fwbuf;
+ int len, offset;
+ int foolen; /* XXX(hch): handle short transfers */
+ int error = -ENXIO;
+
+ if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
+ dev_err(&dev->dev, "no firmware found: %s\n",
+ AR5523_FIRMWARE_FILE);
+ return -ENOENT;
+ }
+
+ txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
+ if (!txblock)
+ goto out;
+
+ rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
+ if (!rxblock)
+ goto out_free_txblock;
+
+ fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
+ if (!fwbuf)
+ goto out_free_rxblock;
+
+ memset(txblock, 0, sizeof(struct ar5523_fwblock));
+ txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
+ txblock->total = cpu_to_be32(fw->size);
+
+ offset = 0;
+ len = fw->size;
+ while (len > 0) {
+ int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);
+
+ txblock->remain = cpu_to_be32(len - mlen);
+ txblock->len = cpu_to_be32(mlen);
+
+ /* send firmware block meta-data */
+ error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
+ txblock, sizeof(*txblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block info\n");
+ goto out_free_fwbuf;
+ }
+
+ /* send firmware block data */
+ memcpy(fwbuf, fw->data + offset, mlen);
+ error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
+ fwbuf, mlen, &foolen,
+ AR5523_DATA_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block data\n");
+ goto out_free_fwbuf;
+ }
+
+ /* wait for ack from firmware */
+ error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
+ rxblock, sizeof(*rxblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not read firmware answer\n");
+ goto out_free_fwbuf;
+ }
+
+ len -= mlen;
+ offset += mlen;
+ }
+
+ /*
+ * Set the error to -ENXIO to make sure we continue probing for
+ * a driver.
+ */
+ error = -ENXIO;
+
+ out_free_fwbuf:
+ kfree(fwbuf);
+ out_free_rxblock:
+ kfree(rxblock);
+ out_free_txblock:
+ kfree(txblock);
+ out:
+ release_firmware(fw);
+ return error;
+}
+
+static int ar5523_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ieee80211_hw *hw;
+ struct ar5523 *ar;
+ int error = -ENOMEM;
+
+ /*
+ * Load firmware if the device requires it. This will return
+ * -ENXIO on success and we'll get called back after the USB
+ * ID changes to indicate that the firmware is present.
+ */
+ if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
+ return ar5523_load_firmware(dev);
+
+
+ hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
+ if (!hw)
+ goto out;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->dev = dev;
+ mutex_init(&ar->mutex);
+
+ INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
+ init_timer(&ar->tx_wd_timer);
+ setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
+ INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
+ INIT_WORK(&ar->tx_work, ar5523_tx_work);
+ INIT_LIST_HEAD(&ar->tx_queue_pending);
+ INIT_LIST_HEAD(&ar->tx_queue_submitted);
+ spin_lock_init(&ar->tx_data_list_lock);
+ atomic_set(&ar->tx_nr_total, 0);
+ atomic_set(&ar->tx_nr_pending, 0);
+ init_waitqueue_head(&ar->tx_flush_waitq);
+
+ atomic_set(&ar->rx_data_free_cnt, 0);
+ INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
+ INIT_LIST_HEAD(&ar->rx_data_free);
+ INIT_LIST_HEAD(&ar->rx_data_used);
+ spin_lock_init(&ar->rx_data_list_lock);
+
+ ar->wq = create_singlethread_workqueue("ar5523");
+ if (!ar->wq) {
+ ar5523_err(ar, "Could not create wq\n");
+ goto out_free_ar;
+ }
+
+ error = ar5523_alloc_rx_bufs(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx buffers\n");
+ goto out_free_wq;
+ }
+
+ error = ar5523_alloc_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx command buffers\n");
+ goto out_free_rx_bufs;
+ }
+
+ error = ar5523_alloc_tx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate tx command buffers\n");
+ goto out_free_rx_cmd;
+ }
+
+ error = ar5523_submit_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Failed to submit rx cmd\n");
+ goto out_free_tx_cmd;
+ }
+
+ /*
+ * We're now ready to send/receive firmware commands.
+ */
+ error = ar5523_host_available(ar);
+ if (error) {
+ ar5523_err(ar, "could not initialize adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_max_rxsz(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devcap(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devstatus(ar);
+ if (error != 0) {
+ ar5523_err(ar, "could not get device status\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
+ (id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
+
+ ar->vif = NULL;
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL;
+ hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
+ sizeof(struct ar5523_chunk);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->queues = 1;
+
+ error = ar5523_init_modes(ar);
+ if (error)
+ goto out_cancel_rx_cmd;
+
+ usb_set_intfdata(intf, hw);
+
+ error = ieee80211_register_hw(hw);
+ if (error) {
+ ar5523_err(ar, "could not register device\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "Found and initialized AR5523 device\n");
+ return 0;
+
+out_cancel_rx_cmd:
+ ar5523_cancel_rx_cmd(ar);
+out_free_tx_cmd:
+ ar5523_free_tx_cmd(ar);
+out_free_rx_cmd:
+ ar5523_free_rx_cmd(ar);
+out_free_rx_bufs:
+ ar5523_free_rx_bufs(ar);
+out_free_wq:
+ destroy_workqueue(ar->wq);
+out_free_ar:
+ ieee80211_free_hw(hw);
+out:
+ return error;
+}
+
+static void ar5523_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "detaching\n");
+ set_bit(AR5523_USB_DISCONNECTED, &ar->flags);
+
+ ieee80211_unregister_hw(hw);
+
+ ar5523_cancel_rx_cmd(ar);
+ ar5523_free_tx_cmd(ar);
+ ar5523_free_rx_cmd(ar);
+ ar5523_free_rx_bufs(ar);
+
+ destroy_workqueue(ar->wq);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+}
+
+#define AR5523_DEVICE_UG(vendor, device) \
+ { USB_DEVICE((vendor), (device)) }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_PRE_FIRMWARE }
+#define AR5523_DEVICE_UX(vendor, device) \
+ { USB_DEVICE((vendor), (device)), \
+ .driver_info = AR5523_FLAG_ABG }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }
+
+static struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */
+ AR5523_DEVICE_UX(0x0cf3, 0x0005), /* Atheros2 / AR5523_3 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7801), /* Conceptronic / AR5523_1 */
+ AR5523_DEVICE_UX(0x0d8e, 0x7811), /* Conceptronic / AR5523_2 */
+ AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
+ AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
+ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
+ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
+ AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ (CyberTAN Technology) */
+ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
+ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7802), /* Globalsun / AR5523_3 */
+ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
+ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
+ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
+ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
+ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
+ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
+ AR5523_DEVICE_UG(0x1385, 0x4250), /* Netgear3 / WG111T (2) */
+ AR5523_DEVICE_UG(0x1385, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x1385, 0x5f02), /* Netgear / WPN111 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ar5523_id_table);
+
+static struct usb_driver ar5523_driver = {
+ .name = "ar5523",
+ .id_table = ar5523_id_table,
+ .probe = ar5523_probe,
+ .disconnect = ar5523_disconnect,
+};
+
+module_usb_driver(ar5523_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 000000000000..00c6fd346d48
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define AR5523_FLAG_PRE_FIRMWARE (1 << 0)
+#define AR5523_FLAG_ABG (1 << 1)
+
+#define AR5523_FIRMWARE_FILE "ar5523.bin"
+
+#define AR5523_CMD_TX_PIPE 0x01
+#define AR5523_DATA_TX_PIPE 0x02
+#define AR5523_CMD_RX_PIPE 0x81
+#define AR5523_DATA_RX_PIPE 0x82
+
+#define ar5523_cmd_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
+#define ar5523_data_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
+#define ar5523_cmd_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
+#define ar5523_data_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
+
+#define AR5523_DATA_TIMEOUT 10000
+#define AR5523_CMD_TIMEOUT 1000
+
+#define AR5523_TX_DATA_COUNT 8
+#define AR5523_TX_DATA_RESTART_COUNT 2
+#define AR5523_RX_DATA_COUNT 16
+#define AR5523_RX_DATA_REFILL_COUNT 8
+
+#define AR5523_CMD_ID 1
+#define AR5523_DATA_ID 2
+
+#define AR5523_TX_WD_TIMEOUT (HZ * 2)
+#define AR5523_FLUSH_TIMEOUT (HZ * 3)
+
+enum AR5523_flags {
+ AR5523_HW_UP,
+ AR5523_USB_DISCONNECTED,
+ AR5523_CONNECTED
+};
+
+struct ar5523_tx_cmd {
+ struct ar5523 *ar;
+ struct urb *urb_tx;
+ void *buf_tx;
+ void *odata;
+ int olen;
+ int flags;
+ int res;
+ struct completion done;
+};
+
+/* This struct is placed in tx_info->driver_data. It must not be larger
+ * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */
+struct ar5523_tx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct sk_buff *skb;
+ struct urb *urb;
+};
+
+struct ar5523_rx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+ struct sk_buff *skb;
+};
+
+struct ar5523 {
+ struct usb_device *dev;
+ struct ieee80211_hw *hw;
+
+ unsigned long flags;
+ struct mutex mutex;
+ struct workqueue_struct *wq;
+
+ struct ar5523_tx_cmd tx_cmd;
+
+ struct delayed_work stat_work;
+
+ struct timer_list tx_wd_timer;
+ struct work_struct tx_wd_work;
+ struct work_struct tx_work;
+ struct list_head tx_queue_pending;
+ struct list_head tx_queue_submitted;
+ spinlock_t tx_data_list_lock;
+ wait_queue_head_t tx_flush_waitq;
+
+ /* Queued + Submitted TX frames */
+ atomic_t tx_nr_total;
+
+ /* Submitted TX frames */
+ atomic_t tx_nr_pending;
+
+ void *rx_cmd_buf;
+ struct urb *rx_cmd_urb;
+
+ struct ar5523_rx_data rx_data[AR5523_RX_DATA_COUNT];
+ spinlock_t rx_data_list_lock;
+ struct list_head rx_data_free;
+ struct list_head rx_data_used;
+ atomic_t rx_data_free_cnt;
+
+ struct work_struct rx_refill_work;
+
+ unsigned int rxbufsz;
+ u8 serial[16];
+
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct ieee80211_vif *vif;
+};
+
+/* flags for sending firmware commands */
+#define AR5523_CMD_FLAG_READ (1 << 1)
+#define AR5523_CMD_FLAG_MAGIC (1 << 2)
+
+#define ar5523_dbg(ar, format, arg...) \
+ dev_dbg(&(ar)->dev->dev, format, ## arg)
+
+/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
+ * fail. Instead of dealing with them in every possible place just suppress
+ * any messages on USB disconnect.
+ */
+#define ar5523_err(ar, format, arg...) \
+do { \
+ if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
+ dev_err(&(ar)->dev->dev, format, ## arg); \
+ } \
+} while (0)
+#define ar5523_info(ar, format, arg...) \
+ dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 000000000000..0fe2c803f48f
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* all fields are big endian */
+struct ar5523_fwblock {
+ __be32 flags;
+#define AR5523_WRITE_BLOCK (1 << 4)
+
+ __be32 len;
+#define AR5523_MAX_FWBLOCK_SIZE 2048
+
+ __be32 total;
+ __be32 remain;
+ __be32 rxtotal;
+ __be32 pad[123];
+} __packed;
+
+#define AR5523_MAX_RXCMDSZ 1024
+#define AR5523_MAX_TXCMDSZ 1024
+
+struct ar5523_cmd_hdr {
+ __be32 len;
+ __be32 code;
+/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
+/* messages from Host -> Target */
+#define WDCMSG_HOST_AVAILABLE 0x01
+#define WDCMSG_BIND 0x02
+#define WDCMSG_TARGET_RESET 0x03
+#define WDCMSG_TARGET_GET_CAPABILITY 0x04
+#define WDCMSG_TARGET_SET_CONFIG 0x05
+#define WDCMSG_TARGET_GET_STATUS 0x06
+#define WDCMSG_TARGET_GET_STATS 0x07
+#define WDCMSG_TARGET_START 0x08
+#define WDCMSG_TARGET_STOP 0x09
+#define WDCMSG_TARGET_ENABLE 0x0a
+#define WDCMSG_TARGET_DISABLE 0x0b
+#define WDCMSG_CREATE_CONNECTION 0x0c
+#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
+#define WDCMSG_DELETE_CONNECT 0x0e
+#define WDCMSG_SEND 0x0f
+#define WDCMSG_FLUSH 0x10
+/* messages from Target -> Host */
+#define WDCMSG_STATS_UPDATE 0x11
+#define WDCMSG_BMISS 0x12
+#define WDCMSG_DEVICE_AVAIL 0x13
+#define WDCMSG_SEND_COMPLETE 0x14
+#define WDCMSG_DATA_AVAIL 0x15
+#define WDCMSG_SET_PWR_MODE 0x16
+#define WDCMSG_BMISS_ACK 0x17
+#define WDCMSG_SET_LED_STEADY 0x18
+#define WDCMSG_SET_LED_BLINK 0x19
+/* more messages */
+#define WDCMSG_SETUP_BEACON_DESC 0x1a
+#define WDCMSG_BEACON_INIT 0x1b
+#define WDCMSG_RESET_KEY_CACHE 0x1c
+#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
+#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
+#define WDCMSG_SET_DECOMP_MASK 0x1f
+#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
+#define WDCMSG_SET_LED_STATE 0x21
+#define WDCMSG_WRITE_ASSOCID 0x22
+#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
+#define WDCMSG_GET_TSF 0x24
+#define WDCMSG_RESET_TSF 0x25
+#define WDCMSG_SET_ADHOC_MODE 0x26
+#define WDCMSG_SET_BASIC_RATE 0x27
+#define WDCMSG_MIB_CONTROL 0x28
+#define WDCMSG_GET_CHANNEL_DATA 0x29
+#define WDCMSG_GET_CUR_RSSI 0x2a
+#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
+#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
+#define WDCMSG_SET_POWER_MODE 0x30
+#define WDCMSG_SETUP_PSPOLL_DESC 0x31
+#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
+#define WDCMSG_RX_FILTER 0x33
+#define WDCMSG_PER_CALIBRATION 0x34
+#define WDCMSG_RESET 0x35
+#define WDCMSG_DISABLE 0x36
+#define WDCMSG_PHY_DISABLE 0x37
+#define WDCMSG_SET_TX_POWER_LIMIT 0x38
+#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
+#define WDCMSG_SETUP_TX_QUEUE 0x3a
+#define WDCMSG_RELEASE_TX_QUEUE 0x3b
+#define WDCMSG_SET_DEFAULT_KEY 0x43
+
+ __u32 priv; /* driver private data,
+ don't care about endianness */
+ __be32 magic;
+ __be32 reserved2[4];
+};
+
+struct ar5523_cmd_host_available {
+ __be32 sw_ver_major;
+ __be32 sw_ver_minor;
+ __be32 sw_ver_patch;
+ __be32 sw_ver_build;
+} __packed;
+
+#define ATH_SW_VER_MAJOR 1
+#define ATH_SW_VER_MINOR 5
+#define ATH_SW_VER_PATCH 0
+#define ATH_SW_VER_BUILD 9999
+
+struct ar5523_chunk {
+ u8 seqnum; /* sequence number for ordering */
+ u8 flags;
+#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
+#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
+#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
+ __be16 length; /* chunk size in bytes */
+ /* chunk data follows */
+} __packed;
+
+/*
+ * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
+ */
+struct ar5523_rx_desc {
+ __be32 len; /* msg length including header */
+ __be32 code; /* WDCMSG_DATA_AVAIL */
+ __be32 gennum; /* generation number */
+ __be32 status; /* start of RECEIVE_INFO */
+#define UATH_STATUS_OK 0
+#define UATH_STATUS_STOP_IN_PROGRESS 1
+#define UATH_STATUS_CRC_ERR 2
+#define UATH_STATUS_PHY_ERR 3
+#define UATH_STATUS_DECRYPT_CRC_ERR 4
+#define UATH_STATUS_DECRYPT_MIC_ERR 5
+#define UATH_STATUS_DECOMP_ERR 6
+#define UATH_STATUS_KEY_ERR 7
+#define UATH_STATUS_ERR 8
+ __be32 tstamp_low; /* low-order 32-bits of rx timestamp */
+ __be32 tstamp_high; /* high-order 32-bits of rx timestamp */
+ __be32 framelen; /* frame length */
+ __be32 rate; /* rx rate code */
+ __be32 antenna;
+ __be32 rssi;
+ __be32 channel;
+ __be32 phyerror;
+ __be32 connix; /* key table ix for bss traffic */
+ __be32 decrypterror;
+ __be32 keycachemiss;
+ __be32 pad; /* XXX? */
+} __packed;
+
+struct ar5523_tx_desc {
+ __be32 msglen;
+ u32 msgid; /* msg id (supplied by host) */
+ __be32 type; /* opcode: WDCMSG_SEND or WDCMSG_FLUSH */
+ __be32 txqid; /* tx queue id and flags */
+#define UATH_TXQID_MASK 0x0f
+#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
+#define UATH_TXQID_FF 0x20 /* content is fast frame */
+ __be32 connid; /* tx connection id */
+#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
+ __be32 flags; /* non-zero if response desired */
+#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
+ __be32 buflen; /* payload length */
+} __packed;
+
+
+#define AR5523_ID_BSS 2
+#define AR5523_ID_BROADCAST 0xffffffff
+
+/* structure for command UATH_CMD_WRITE_MAC */
+struct ar5523_write_mac {
+ __be32 reg;
+ __be32 len;
+ u8 data[32];
+} __packed;
+
+struct ar5523_cmd_rateset {
+ __u8 length;
+#define AR5523_MAX_NRATES 32
+ __u8 set[AR5523_MAX_NRATES];
+};
+
+struct ar5523_cmd_set_associd { /* AR5523_WRITE_ASSOCID */
+ __be32 defaultrateix;
+ __be32 associd;
+ __be32 timoffset;
+ __be32 turboprime;
+ __u8 bssid[6];
+} __packed;
+
+/* structure for command WDCMSG_RESET */
+struct ar5523_cmd_reset {
+ __be32 flags; /* channel flags */
+#define UATH_CHAN_TURBO 0x0100
+#define UATH_CHAN_CCK 0x0200
+#define UATH_CHAN_OFDM 0x0400
+#define UATH_CHAN_2GHZ 0x1000
+#define UATH_CHAN_5GHZ 0x2000
+ __be32 freq; /* channel frequency */
+ __be32 maxrdpower;
+ __be32 cfgctl;
+ __be32 twiceantennareduction;
+ __be32 channelchange;
+ __be32 keeprccontent;
+} __packed;
+
+/* structure for command WDCMSG_SET_BASIC_RATE */
+struct ar5523_cmd_rates {
+ __be32 connid;
+ __be32 keeprccontent;
+ __be32 size;
+ struct ar5523_cmd_rateset rateset;
+} __packed;
+
+enum {
+ WLAN_MODE_NONE = 0,
+ WLAN_MODE_11b,
+ WLAN_MODE_11a,
+ WLAN_MODE_11g,
+ WLAN_MODE_11a_TURBO,
+ WLAN_MODE_11g_TURBO,
+ WLAN_MODE_11a_TURBO_PRIME,
+ WLAN_MODE_11g_TURBO_PRIME,
+ WLAN_MODE_11a_XR,
+ WLAN_MODE_11g_XR,
+};
+
+struct ar5523_cmd_connection_attr {
+ __be32 longpreambleonly;
+ struct ar5523_cmd_rateset rateset;
+ __be32 wlanmode;
+} __packed;
+
+/* structure for command AR5523_CREATE_CONNECTION */
+struct ar5523_cmd_create_connection {
+ __be32 connid;
+ __be32 bssid;
+ __be32 size;
+ struct ar5523_cmd_connection_attr connattr;
+} __packed;
+
+struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
+ __be32 lednum;
+#define UATH_LED_LINK 0
+#define UATH_LED_ACTIVITY 1
+ __be32 ledmode;
+#define UATH_LED_OFF 0
+#define UATH_LED_ON 1
+} __packed;
+
+struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
+ __be32 lednum;
+ __be32 ledmode;
+ __be32 blinkrate;
+ __be32 slowmode;
+} __packed;
+
+struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
+ __be32 connected;
+} __packed;
+
+struct ar5523_cmd_txq_attr {
+ __be32 priority;
+ __be32 aifs;
+ __be32 logcwmin;
+ __be32 logcwmax;
+ __be32 bursttime;
+ __be32 mode;
+ __be32 qflags;
+} __packed;
+
+struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
+ __be32 qid;
+ __be32 len;
+ struct ar5523_cmd_txq_attr attr;
+} __packed;
+
+struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
+ __be32 bits;
+#define UATH_FILTER_RX_UCAST 0x00000001
+#define UATH_FILTER_RX_MCAST 0x00000002
+#define UATH_FILTER_RX_BCAST 0x00000004
+#define UATH_FILTER_RX_CONTROL 0x00000008
+#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
+#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
+#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
+#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
+#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
+#define UATH_FILTER_RX_PROBE_REQ 0x00000800
+ __be32 op;
+#define UATH_FILTER_OP_INIT 0x0
+#define UATH_FILTER_OP_SET 0x1
+#define UATH_FILTER_OP_CLEAR 0x2
+#define UATH_FILTER_OP_TEMP 0x3
+#define UATH_FILTER_OP_RESTORE 0x4
+} __packed;
+
+enum {
+ CFG_NONE, /* Sentinel to indicate "no config" */
+ CFG_REG_DOMAIN, /* Regulatory Domain */
+ CFG_RATE_CONTROL_ENABLE,
+ CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
+ CFG_HW_TX_RETRIES,
+ CFG_SW_TX_RETRIES,
+ CFG_SLOW_CLOCK_ENABLE,
+ CFG_COMP_PROC,
+ CFG_USER_RTS_THRESHOLD,
+ CFG_XR2NORM_RATE_THRESHOLD,
+ CFG_XRMODE_SWITCH_COUNT,
+ CFG_PROTECTION_TYPE,
+ CFG_BURST_SEQ_THRESHOLD,
+ CFG_ABOLT,
+ CFG_IQ_LOG_COUNT_MAX,
+ CFG_MODE_CTS,
+ CFG_WME_ENABLED,
+ CFG_GPRS_CBR_PERIOD,
+ CFG_SERVICE_TYPE,
+ /* MAC Address to use. Overrides EEPROM */
+ CFG_MAC_ADDR,
+ CFG_DEBUG_EAR,
+ CFG_INIT_REGS,
+ /* An ID for use in error & debug messages */
+ CFG_DEBUG_ID,
+ CFG_COMP_WIN_SZ,
+ CFG_DIVERSITY_CTL,
+ CFG_TP_SCALE,
+ CFG_TPC_HALF_DBM5,
+ CFG_TPC_HALF_DBM2,
+ CFG_OVERRD_TX_POWER,
+ CFG_USE_32KHZ_CLOCK,
+ CFG_GMODE_PROTECTION,
+ CFG_GMODE_PROTECT_RATE_INDEX,
+ CFG_GMODE_NON_ERP_PREAMBLE,
+ CFG_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ /* Sentinel to indicate "no capability" */
+ CAP_NONE,
+ CAP_ALL, /* ALL capabilities */
+ CAP_TARGET_VERSION,
+ CAP_TARGET_REVISION,
+ CAP_MAC_VERSION,
+ CAP_MAC_REVISION,
+ CAP_PHY_REVISION,
+ CAP_ANALOG_5GHz_REVISION,
+ CAP_ANALOG_2GHz_REVISION,
+ /* Target supports WDC message debug features */
+ CAP_DEBUG_WDCMSG_SUPPORT,
+
+ CAP_REG_DOMAIN,
+ CAP_COUNTRY_CODE,
+ CAP_REG_CAP_BITS,
+
+ CAP_WIRELESS_MODES,
+ CAP_CHAN_SPREAD_SUPPORT,
+ CAP_SLEEP_AFTER_BEACON_BROKEN,
+ CAP_COMPRESS_SUPPORT,
+ CAP_BURST_SUPPORT,
+ CAP_FAST_FRAMES_SUPPORT,
+ CAP_CHAP_TUNING_SUPPORT,
+ CAP_TURBOG_SUPPORT,
+ CAP_TURBO_PRIME_SUPPORT,
+ CAP_DEVICE_TYPE,
+ CAP_XR_SUPPORT,
+ CAP_WME_SUPPORT,
+ CAP_TOTAL_QUEUES,
+ CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
+
+ CAP_LOW_5GHZ_CHAN,
+ CAP_HIGH_5GHZ_CHAN,
+ CAP_LOW_2GHZ_CHAN,
+ CAP_HIGH_2GHZ_CHAN,
+
+ CAP_MIC_AES_CCM,
+ CAP_MIC_CKIP,
+ CAP_MIC_TKIP,
+ CAP_MIC_TKIP_WME,
+ CAP_CIPHER_AES_CCM,
+ CAP_CIPHER_CKIP,
+ CAP_CIPHER_TKIP,
+
+ CAP_TWICE_ANTENNAGAIN_5G,
+ CAP_TWICE_ANTENNAGAIN_2G,
+};
+
+enum {
+ ST_NONE, /* Sentinel to indicate "no status" */
+ ST_ALL,
+ ST_SERVICE_TYPE,
+ ST_WLAN_MODE,
+ ST_FREQ,
+ ST_BAND,
+ ST_LAST_RSSI,
+ ST_PS_FRAMES_DROPPED,
+ ST_CACHED_DEF_ANT,
+ ST_COUNT_OTHER_RX_ANT,
+ ST_USE_FAST_DIVERSITY,
+ ST_MAC_ADDR,
+ ST_RX_GENERATION_NUM,
+ ST_TX_QUEUE_DEPTH,
+ ST_SERIAL_NUMBER,
+ ST_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ TARGET_DEVICE_AWAKE,
+ TARGET_DEVICE_SLEEP,
+ TARGET_DEVICE_PWRDN,
+ TARGET_DEVICE_PWRSAVE,
+ TARGET_DEVICE_SUSPEND,
+ TARGET_DEVICE_RESUME,
+};
+
+/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
+#define IEEE80211_2ADDR_LEN 16
+
+#define AR5523_MIN_RXBUFSZ \
+ (((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
+ sizeof(struct ar5523_rx_desc)) + 3) & ~3)
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 338c5c42357d..c9f81a388f15 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,7 @@
config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on (PCI || ATHEROS_AR231X) && MAC80211
+ select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index aec33cc207fd..8e8bcc7a4805 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -236,17 +236,4 @@ static struct platform_driver ath_ahb_driver = {
},
};
-static int __init
-ath5k_ahb_init(void)
-{
- return platform_driver_register(&ath_ahb_driver);
-}
-
-static void __exit
-ath5k_ahb_exit(void)
-{
- platform_driver_unregister(&ath_ahb_driver);
-}
-
-module_init(ath5k_ahb_init);
-module_exit(ath5k_ahb_exit);
+module_platform_driver(ath_ahb_driver);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f31cfa56cc0..30ca0a60a64c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -511,8 +511,9 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
- ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
- &iter_data);
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
ah->opmode = iter_data.opmode;
@@ -848,7 +849,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
return;
dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
DMA_TO_DEVICE);
- dev_kfree_skb_any(bf->skb);
+ ieee80211_free_txskb(ah->hw, bf->skb);
bf->skb = NULL;
bf->skbaddr = 0;
bf->desc->ds_data = 0;
@@ -1335,20 +1336,9 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
* 15bit only. that means TSF extension has to be done within
* 32768usec (about 32ms). it might be necessary to move this to
* the interrupt handler, like it is done in madwifi.
- *
- * Unfortunately we don't know when the hardware takes the rx
- * timestamp (beginning of phy frame, data frame, end of rx?).
- * The only thing we know is that it is hardware specific...
- * On AR5213 it seems the rx timestamp is at the end of the
- * frame, but I'm not sure.
- *
- * NOTE: mac80211 defines mactime at the beginning of the first
- * data symbol. Since we don't have any time references it's
- * impossible to comply to that. This affects IBSS merge only
- * right now, so it's not too bad...
*/
rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
- rxs->flag |= RX_FLAG_MACTIME_MPDU;
+ rxs->flag |= RX_FLAG_MACTIME_END;
rxs->freq = ah->curchan->center_freq;
rxs->band = ah->curchan->band;
@@ -1575,7 +1565,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
drop_packet:
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
static void
@@ -2434,7 +2424,7 @@ static const struct ieee80211_iface_combination if_comb = {
.num_different_channels = 1,
};
-int __devinit
+int
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = ah->hw;
@@ -2860,7 +2850,7 @@ static void ath5k_reset_work(struct work_struct *work)
mutex_unlock(&ah->lock);
}
-static int __devinit
+static int
ath5k_init(struct ieee80211_hw *hw)
{
@@ -3045,8 +3035,9 @@ ath5k_any_vif_assoc(struct ath5k_hw *ah)
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
- ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
- &iter_data);
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
return iter_data.any_assoc;
}
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b9f708a45f4e..f77ef36acf87 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -158,7 +158,7 @@ void ath5k_unregister_leds(struct ath5k_hw *ah)
ath5k_unregister_led(&ah->tx_led);
}
-int __devinit ath5k_init_leds(struct ath5k_hw *ah)
+int ath5k_init_leds(struct ath5k_hw *ah)
{
int ret = 0;
struct ieee80211_hw *hw = ah->hw;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 7a28538e6e05..4264341533ea 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -62,7 +62,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
u16 qnum = skb_get_queue_mapping(skb);
if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -452,8 +452,9 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
- ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
- &iter_data);
+ ieee80211_iterate_active_interfaces_atomic(
+ ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath5k_vif_iter, &iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dff48fbc63bf..859db7c34f87 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -155,7 +155,7 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
* PCI Initialization *
\********************/
-static int __devinit
+static int
ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -285,7 +285,7 @@ err:
return ret;
}
-static void __devexit
+static void
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
@@ -336,7 +336,7 @@ static struct pci_driver ath5k_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
- .remove = __devexit_p(ath5k_pci_remove),
+ .remove = ath5k_pci_remove,
.driver.pm = ATH5K_PM_OPS,
};
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 0c2dd4771c36..4084b1076286 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -789,9 +789,9 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* (I don't think it supports 44MHz) */
/* On 2425 initvals TURBO_SHORT is not present */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
- turbo = AR5K_PHY_TURBO_MODE |
- (ah->ah_radio == AR5K_RF2425) ? 0 :
- AR5K_PHY_TURBO_SHORT;
+ turbo = AR5K_PHY_TURBO_MODE;
+ if (ah->ah_radio != AR5K_RF2425)
+ turbo |= AR5K_PHY_TURBO_SHORT;
} else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
if (ah->ah_radio == AR5K_RF5413) {
mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index d755a5e7ed20..26c4b7220859 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,3 +30,12 @@ config ATH6KL_DEBUG
depends on ATH6KL
---help---
Enables debug support
+
+config ATH6KL_REGDOMAIN
+ bool "Atheros ath6kl regdomain support"
+ depends on ATH6KL
+ depends on CFG80211_CERTIFICATION_ONUS
+ ---help---
+ Enabling this makes it possible to change the regdomain in
+ the firmware. This can only be enabled if regulatory requirements
+ are taken into account.
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8cae8886f17d..cab0ec0d5380 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -34,6 +34,7 @@ ath6kl_core-y += main.o
ath6kl_core-y += txrx.o
ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
+ath6kl_core-y += recovery.o
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7089f8160ad5..5516a8ccc3c6 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -147,15 +147,15 @@ static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
{
struct ath6kl *ar = vif->ar;
- if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+ if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags))
return false;
del_timer_sync(&vif->sched_scan_timer);
- ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
- ATH6KL_HOST_MODE_AWAKE);
+ if (ar->state == ATH6KL_STATE_RECOVERY)
+ return true;
- ar->state = ATH6KL_STATE_ON;
+ ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false);
return true;
}
@@ -301,7 +301,7 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
static bool ath6kl_is_wpa_ie(const u8 *pos)
{
- return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 &&
pos[4] == 0xf2 && pos[5] == 0x01;
}
@@ -369,17 +369,13 @@ static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
{
switch (type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
*nw_type = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
*nw_type = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- *nw_type = AP_NETWORK;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- *nw_type = INFRA_NETWORK;
- break;
case NL80211_IFTYPE_P2P_GO:
*nw_type = AP_NETWORK;
break;
@@ -1031,30 +1027,15 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
vif->scan_req = request;
- if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
- ar->fw_capabilities)) {
- /*
- * If capable of doing P2P mgmt operations using
- * station interface, send additional information like
- * supported rates to advertise and xmit rates for
- * probe requests
- */
- ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
- WMI_LONG_SCAN, force_fg_scan,
- false, 0,
- ATH6KL_FG_SCAN_INTERVAL,
- n_channels, channels,
- request->no_cck,
- request->rates);
- } else {
- ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
- WMI_LONG_SCAN, force_fg_scan,
- false, 0,
- ATH6KL_FG_SCAN_INTERVAL,
- n_channels, channels);
- }
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0,
+ ATH6KL_FG_SCAN_INTERVAL,
+ n_channels, channels,
+ request->no_cck,
+ request->rates);
if (ret) {
- ath6kl_err("wmi_startscan_cmd failed\n");
+ ath6kl_err("failed to start scan: %d\n", ret);
vif->scan_req = NULL;
}
@@ -1093,15 +1074,18 @@ out:
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
enum wmi_phy_mode mode)
{
- enum nl80211_channel_type type;
+ struct cfg80211_chan_def chandef;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"channel switch notify nw_type %d freq %d mode %d\n",
vif->nw_type, freq, mode);
- type = (mode == WMI_11G_HT20) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT;
+ cfg80211_chandef_create(&chandef,
+ ieee80211_get_channel(vif->ar->wiphy, freq),
+ (mode == WMI_11G_HT20) ?
+ NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
- cfg80211_ch_switch_notify(vif->ndev, freq, type);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef);
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -1384,11 +1368,8 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return 0;
}
-/*
- * The type nl80211_tx_power_setting replaces the following
- * data type from 2.6.36 onwards
-*/
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_tx_power_setting type,
int mbm)
{
@@ -1423,7 +1404,9 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return 0;
}
-static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
struct ath6kl_vif *vif;
@@ -1614,8 +1597,8 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
vif->ssid_len = ibss_param->ssid_len;
memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
- if (ibss_param->channel)
- vif->ch_hint = ibss_param->channel->center_freq;
+ if (ibss_param->chandef.chan)
+ vif->ch_hint = ibss_param->chandef.chan->center_freq;
if (ibss_param->channel_fixed) {
/*
@@ -1889,7 +1872,7 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
struct cfg80211_wowlan *wow, u32 *filter)
{
int ret, pos;
- u8 mask[WOW_MASK_SIZE];
+ u8 mask[WOW_PATTERN_SIZE];
u16 i;
/* Configure the patterns that we received from the user. */
@@ -2107,33 +2090,16 @@ static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
return ret;
}
-static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
+ struct cfg80211_wowlan *wow, u32 *filter)
{
+ struct ath6kl *ar = vif->ar;
struct in_device *in_dev;
struct in_ifaddr *ifa;
- struct ath6kl_vif *vif;
int ret;
- u32 filter = 0;
u16 i, bmiss_time;
- u8 index = 0;
__be32 ips[MAX_IP_ADDRS];
-
- /* The FW currently can't support multi-vif WoW properly. */
- if (ar->num_vif > 1)
- return -EIO;
-
- vif = ath6kl_vif_first(ar);
- if (!vif)
- return -EIO;
-
- if (!ath6kl_cfg80211_ready(vif))
- return -EIO;
-
- if (!test_bit(CONNECTED, &vif->flags))
- return -ENOTCONN;
-
- if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
- return -EINVAL;
+ u8 index = 0;
if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
@@ -2155,7 +2121,7 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
* the user.
*/
if (wow)
- ret = ath6kl_wow_usr(ar, vif, wow, &filter);
+ ret = ath6kl_wow_usr(ar, vif, wow, filter);
else if (vif->nw_type == AP_NETWORK)
ret = ath6kl_wow_ap(ar, vif);
else
@@ -2190,12 +2156,10 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
return ret;
}
- ar->state = ATH6KL_STATE_SUSPENDING;
-
/* Setup own IP addr for ARP agent. */
in_dev = __in_dev_get_rtnl(vif->ndev);
if (!in_dev)
- goto skip_arp;
+ return 0;
ifa = in_dev->ifa_list;
memset(&ips, 0, sizeof(ips));
@@ -2218,41 +2182,61 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
return ret;
}
-skip_arp:
- ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ return ret;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *first_vif, *vif;
+ int ret = 0;
+ u32 filter = 0;
+ bool connected = false;
+
+ /* enter / leave wow suspend on first vif always */
+ first_vif = ath6kl_vif_first(ar);
+ if (WARN_ON(unlikely(!first_vif)) ||
+ !ath6kl_cfg80211_ready(first_vif))
+ return -EIO;
+
+ if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
+ return -EINVAL;
+
+ /* install filters for each connected vif */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (!test_bit(CONNECTED, &vif->flags) ||
+ !ath6kl_cfg80211_ready(vif))
+ continue;
+ connected = true;
+
+ ret = ath6kl_wow_suspend_vif(vif, wow, &filter);
+ if (ret)
+ break;
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ if (!connected)
+ return -ENOTCONN;
+ else if (ret)
+ return ret;
+
+ ar->state = ATH6KL_STATE_SUSPENDING;
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx,
ATH6KL_WOW_MODE_ENABLE,
filter,
WOW_HOST_REQ_DELAY);
if (ret)
return ret;
- ret = ath6kl_cfg80211_host_sleep(ar, vif);
- if (ret)
- return ret;
-
- return 0;
+ return ath6kl_cfg80211_host_sleep(ar, first_vif);
}
-static int ath6kl_wow_resume(struct ath6kl *ar)
+static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif)
{
- struct ath6kl_vif *vif;
+ struct ath6kl *ar = vif->ar;
int ret;
- vif = ath6kl_vif_first(ar);
- if (!vif)
- return -EIO;
-
- ar->state = ATH6KL_STATE_RESUMING;
-
- ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
- ATH6KL_HOST_MODE_AWAKE);
- if (ret) {
- ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
- ret);
- ar->state = ATH6KL_STATE_WOW;
- return ret;
- }
-
if (vif->nw_type != AP_NETWORK) {
ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
@@ -2270,13 +2254,11 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
return ret;
}
- ar->state = ATH6KL_STATE_ON;
-
if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
ar->fw_capabilities)) {
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
- vif->fw_vif_idx, true);
+ vif->fw_vif_idx, true);
if (ret)
return ret;
}
@@ -2286,6 +2268,48 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
return 0;
}
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (WARN_ON(unlikely(!vif)) ||
+ !ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ ar->state = ATH6KL_STATE_RESUMING;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ if (ret) {
+ ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (!test_bit(CONNECTED, &vif->flags) ||
+ !ath6kl_cfg80211_ready(vif))
+ continue;
+ ret = ath6kl_wow_resume_vif(vif);
+ if (ret)
+ break;
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ if (ret)
+ goto cleanup;
+
+ ar->state = ATH6KL_STATE_ON;
+ return 0;
+
+cleanup:
+ ar->state = ATH6KL_STATE_WOW;
+ return ret;
+}
+
static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
{
struct ath6kl_vif *vif;
@@ -2422,13 +2446,6 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
break;
- case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
- /*
- * Nothing needed for schedule scan, firmware is already in
- * wow mode and sleeping most of the time.
- */
- break;
-
default:
break;
}
@@ -2476,9 +2493,6 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
}
break;
- case ATH6KL_STATE_SCHED_SCAN:
- break;
-
default:
break;
}
@@ -2495,14 +2509,23 @@ static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
{
struct ath6kl *ar = wiphy_priv(wiphy);
+ ath6kl_recovery_suspend(ar);
+
return ath6kl_hif_suspend(ar, wow);
}
static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
{
struct ath6kl *ar = wiphy_priv(wiphy);
+ int err;
- return ath6kl_hif_resume(ar);
+ err = ath6kl_hif_resume(ar);
+ if (err)
+ return err;
+
+ ath6kl_recovery_resume(ar);
+
+ return 0;
}
/*
@@ -2739,6 +2762,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
int res;
int i, ret;
u16 rsn_capab = 0;
+ int inactivity_timeout = 0;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
@@ -2857,7 +2881,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
p.ssid_len = vif->ssid_len;
memcpy(p.ssid, vif->ssid, vif->ssid_len);
p.dot11_auth_mode = vif->dot11_auth_mode;
- p.ch = cpu_to_le16(info->channel->center_freq);
+ p.ch = cpu_to_le16(info->chandef.chan->center_freq);
/* Enable uAPSD support by default */
res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2875,14 +2899,22 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (info->inactivity_timeout) {
+
+ inactivity_timeout = info->inactivity_timeout;
+
+ if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
+ inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
+ 60);
+
res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
- info->inactivity_timeout);
+ inactivity_timeout);
if (res < 0)
return res;
}
- if (ath6kl_set_htcap(vif, info->channel->band,
- info->channel_type != NL80211_CHAN_NO_HT))
+ if (ath6kl_set_htcap(vif, info->chandef.chan->band,
+ cfg80211_get_chandef_type(&info->chandef)
+ != NL80211_CHAN_NO_HT))
return -EIO;
/*
@@ -2898,6 +2930,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
WLAN_EID_RSN, WMI_RSN_IE_CAPB,
(const u8 *) &rsn_capab,
sizeof(rsn_capab));
+ vif->rsn_capab = rsn_capab;
if (res < 0)
return res;
}
@@ -2977,7 +3010,6 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie)
{
@@ -3136,10 +3168,8 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, bool no_cck,
- bool dont_wait_for_ack, u64 *cookie)
+ unsigned int wait, const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack, u64 *cookie)
{
struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
struct ath6kl *ar = ath6kl_priv(vif->ndev);
@@ -3211,7 +3241,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
- int ret;
+ int ret, rssi_thold;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3219,10 +3249,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (vif->sme_state != SME_DISCONNECTED)
return -EBUSY;
- /* The FW currently can't support multi-vif WoW properly. */
- if (ar->num_vif > 1)
- return -EIO;
-
ath6kl_cfg80211_scan_complete_event(vif, true);
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
@@ -3244,6 +3270,23 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
return ret;
}
+ if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+ ar->fw_capabilities)) {
+ if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ rssi_thold = 0;
+ else if (request->rssi_thold < -127)
+ rssi_thold = -127;
+ else
+ rssi_thold = request->rssi_thold;
+
+ ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
+ rssi_thold);
+ if (ret) {
+ ath6kl_err("failed to set RSSI threshold for scan\n");
+ return ret;
+ }
+ }
+
/* fw uses seconds, also make sure that it's >0 */
interval = max_t(u16, 1, request->interval / 1000);
@@ -3251,15 +3294,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
interval, interval,
vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
- ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
- ATH6KL_WOW_MODE_ENABLE,
- WOW_FILTER_SSID,
- WOW_HOST_REQ_DELAY);
- if (ret) {
- ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
- return ret;
- }
-
/* this also clears IE in fw if it's not set */
ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_PROBE_REQ,
@@ -3270,17 +3304,13 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
return ret;
}
- ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
- ATH6KL_HOST_MODE_ASLEEP);
- if (ret) {
- ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
- ret);
+ ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true);
+ if (ret)
return ret;
- }
- ar->state = ATH6KL_STATE_SCHED_SCAN;
+ set_bit(SCHED_SCANNING, &vif->flags);
- return ret;
+ return 0;
}
static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
@@ -3309,6 +3339,27 @@ static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
mask);
}
+static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ if (vif->nw_type != INFRA_NETWORK ||
+ !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ if (vif->sme_state != SME_CONNECTED)
+ return -ENOTCONN;
+
+ /* save this since the firmware won't report the interval */
+ vif->txe_intvl = intvl;
+
+ return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx,
+ rate, pkts, intvl);
+}
+
static const struct ieee80211_txrx_stypes
ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
@@ -3375,6 +3426,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.sched_scan_start = ath6kl_cfg80211_sscan_start,
.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
.set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
+ .set_cqm_txe_config = ath6kl_cfg80211_set_txe_config,
};
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3395,16 +3447,22 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
break;
}
- if (test_bit(CONNECTED, &vif->flags) ||
- test_bit(CONNECT_PEND, &vif->flags))
+ if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+ (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)))
ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
vif->sme_state = SME_DISCONNECTED;
clear_bit(CONNECTED, &vif->flags);
clear_bit(CONNECT_PEND, &vif->flags);
+ /* Stop netdev queues, needed during recovery */
+ netif_stop_queue(vif->ndev);
+ netif_carrier_off(vif->ndev);
+
/* disable scanning */
- if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+ if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
+ ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
ath6kl_warn("failed to disable scan during stop\n");
@@ -3416,7 +3474,7 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
struct ath6kl_vif *vif;
vif = ath6kl_vif_first(ar);
- if (!vif) {
+ if (!vif && ar->state != ATH6KL_STATE_RECOVERY) {
/* save the current power mode before enabling power save */
ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
@@ -3434,6 +3492,56 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ath6kl_cfg80211_stop(vif);
}
+static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ u32 rates[IEEE80211_NUM_BANDS];
+ int ret, i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
+ request->alpha2[0], request->alpha2[1],
+ request->intersect ? " intersect" : "",
+ request->processed ? " processed" : "",
+ request->initiator, request->user_reg_hint_type);
+
+ /*
+ * As firmware is not able to intersect regdoms, we can only listen to
+ * cellular hints.
+ */
+ if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
+ return -EOPNOTSUPP;
+
+ ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
+ if (ret) {
+ ath6kl_err("failed to set regdomain: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Firmware will apply the regdomain change only after a scan is
+ * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been
+ * changed.
+ */
+
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ if (wiphy->bands[i])
+ rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false,
+ false, 0, ATH6KL_FG_SCAN_INTERVAL,
+ 0, NULL, false, rates);
+ if (ret) {
+ ath6kl_err("failed to start scan for a regdomain change: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
{
vif->aggr_cntxt = aggr_init(vif);
@@ -3506,9 +3614,13 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
- if (fw_vif_idx != 0)
+ if (fw_vif_idx != 0) {
ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
0x2;
+ if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+ ar->fw_capabilities))
+ ndev->dev_addr[4] ^= 0x80;
+ }
init_netdev(ndev);
@@ -3562,6 +3674,12 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT);
}
+ if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+ test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
+ wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
+ ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
+ }
+
/* max num of ssids that can be probed during scanning */
wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
@@ -3607,7 +3725,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ath6kl_band_5ghz.ht_cap.ht_supported = false;
}
- if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
+ if (ar->hw.flags & ATH6KL_HW_64BIT_RATES) {
ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
@@ -3646,12 +3764,12 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
- if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
ar->fw_capabilities))
- ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
+ ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 780f77775a91..e5e70f3a8ca8 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -22,7 +22,6 @@ enum ath6kl_cfg_suspend_mode {
ATH6KL_CFG_SUSPEND_DEEPSLEEP,
ATH6KL_CFG_SUSPEND_CUTPOWER,
ATH6KL_CFG_SUSPEND_WOW,
- ATH6KL_CFG_SUSPEND_SCHED_SCAN,
};
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 82c4dd2a960e..4b46adbe8c92 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -33,6 +33,8 @@ static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
+static unsigned int recovery_enable;
+static unsigned int heart_beat_poll;
module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
@@ -40,6 +42,12 @@ module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
+module_param(recovery_enable, uint, 0644);
+module_param(heart_beat_poll, uint, 0644);
+MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
+MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
+ " polling. This also specifies the polling interval in" \
+ " msecs. Set recovery_enable for this to be effective");
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
@@ -202,6 +210,17 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
__func__, wdev->netdev->name, wdev->netdev, ar);
+ ar->fw_recovery.enable = !!recovery_enable;
+ if (!ar->fw_recovery.enable)
+ return ret;
+
+ if (heart_beat_poll &&
+ test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+ ar->fw_capabilities))
+ ar->fw_recovery.hb_poll = heart_beat_poll;
+
+ ath6kl_recovery_init(ar);
+
return ret;
err_rxbuf_cleanup:
@@ -291,6 +310,8 @@ void ath6kl_core_cleanup(struct ath6kl *ar)
{
ath6kl_hif_power_off(ar);
+ ath6kl_recovery_cleanup(ar);
+
destroy_workqueue(ar->ath6kl_wq);
if (ar->htc_target)
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index cec49a31029a..189d8faf8c87 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -115,6 +115,27 @@ enum ath6kl_fw_capability {
*/
ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+ /* Firmware supports filtering BSS results by RSSI */
+ ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
+
+ /* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */
+ ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
+
+ /* Firmware supports TX error rate notification */
+ ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+
+ /* supports WMI_SET_REGDOMAIN_CMDID command */
+ ATH6KL_FW_CAPABILITY_REGDOMAIN,
+
+ /* Firmware supports sched scan decoupled from host sleep */
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2,
+
+ /*
+ * Firmware capability for hang detection through heart beat
+ * challenge messages.
+ */
+ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
+
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
};
@@ -128,11 +149,15 @@ struct ath6kl_fw_ie {
};
enum ath6kl_hw_flags {
- ATH6KL_HW_FLAG_64BIT_RATES = BIT(0),
+ ATH6KL_HW_64BIT_RATES = BIT(0),
+ ATH6KL_HW_AP_INACTIVITY_MINS = BIT(1),
+ ATH6KL_HW_MAP_LP_ENDPOINT = BIT(2),
+ ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3),
};
#define ATH6KL_FW_API2_FILE "fw-2.bin"
#define ATH6KL_FW_API3_FILE "fw-3.bin"
+#define ATH6KL_FW_API4_FILE "fw-4.bin"
/* AR6003 1.0 definitions */
#define AR6003_HW_1_0_VERSION 0x300002ba
@@ -186,6 +211,13 @@ enum ath6kl_hw_flags {
#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
AR6004_HW_1_2_FW_DIR "/bdata.bin"
+/* AR6004 1.3 definitions */
+#define AR6004_HW_1_3_VERSION 0x31c8088a
+#define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3"
+#define AR6004_HW_1_3_FIRMWARE_FILE "fw.ram.bin"
+#define AR6004_HW_1_3_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin"
+#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin"
+
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
#define STA_PS_SLEEP BIT(1)
@@ -536,6 +568,7 @@ enum ath6kl_vif_state {
HOST_SLEEP_MODE_CMD_PROCESSED,
NETDEV_MCAST_ALL_ON,
NETDEV_MCAST_ALL_OFF,
+ SCHED_SCANNING,
};
struct ath6kl_vif {
@@ -580,11 +613,13 @@ struct ath6kl_vif {
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
+ u32 txe_intvl;
u16 bg_scan_period;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
struct wmi_connect_cmd profile;
+ u16 rsn_capab;
struct list_head mc_filter;
};
@@ -609,6 +644,7 @@ enum ath6kl_dev_state {
SKIP_SCAN,
ROAM_TBL_PEND,
FIRST_BOOT,
+ RECOVERY_CLEANUP,
};
enum ath6kl_state {
@@ -619,7 +655,16 @@ enum ath6kl_state {
ATH6KL_STATE_DEEPSLEEP,
ATH6KL_STATE_CUTPOWER,
ATH6KL_STATE_WOW,
- ATH6KL_STATE_SCHED_SCAN,
+ ATH6KL_STATE_RECOVERY,
+};
+
+/* Fw error recovery */
+#define ATH6KL_HB_RESP_MISS_THRES 5
+
+enum ath6kl_fw_err {
+ ATH6KL_FW_ASSERT,
+ ATH6KL_FW_HB_RESP_FAILURE,
+ ATH6KL_FW_EP_FULL,
};
struct ath6kl {
@@ -679,6 +724,7 @@ struct ath6kl {
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
u32 want_ch_switch;
+ u16 last_ch;
/*
* FIXME: protects access to mcastpsq but is actually useless as
@@ -764,6 +810,17 @@ struct ath6kl {
bool wiphy_registered;
+ struct ath6kl_fw_recovery {
+ struct work_struct recovery_work;
+ unsigned long err_reason;
+ unsigned long hb_poll;
+ struct timer_list hb_timer;
+ u32 seq_num;
+ bool hb_pending;
+ u8 hb_misscnt;
+ bool enable;
+ } fw_recovery;
+
#ifdef CONFIG_ATH6KL_DEBUG
struct {
struct sk_buff_head fwlog_queue;
@@ -899,4 +956,12 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
void ath6kl_core_cleanup(struct ath6kl *ar);
void ath6kl_core_destroy(struct ath6kl *ar);
+/* Fw error recovery */
+void ath6kl_init_hw_restart(struct ath6kl *ar);
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
+void ath6kl_recovery_init(struct ath6kl *ar);
+void ath6kl_recovery_cleanup(struct ath6kl *ar);
+void ath6kl_recovery_suspend(struct ath6kl *ar);
+void ath6kl_recovery_resume(struct ath6kl *ar);
#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 49639d8266c2..f97cd4ead543 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -44,6 +44,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_USB = BIT(21),
ATH6KL_DBG_USB_BULK = BIT(22),
+ ATH6KL_DBG_RECOVERY = BIT(23),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 68ed6c2665b7..a6b614421fa4 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -136,6 +136,7 @@ static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
ath6kl_hif_dump_fw_crash(dev->ar);
ath6kl_read_fwlogs(dev->ar);
+ ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
return ret;
}
@@ -338,8 +339,7 @@ static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
- if (status)
- WARN_ON(1);
+ WARN_ON(status);
return status;
}
@@ -383,8 +383,7 @@ static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
- if (status)
- WARN_ON(1);
+ WARN_ON(status);
return status;
}
@@ -695,11 +694,6 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
- /* usb doesn't support enabling interrupts */
- /* FIXME: remove check once USB support is implemented */
- if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB)
- return 0;
-
status = ath6kl_hif_disable_intrs(dev);
fail_setup:
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index cd0e1ba410d6..fbb78dfe078f 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -2492,7 +2492,8 @@ static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
}
- if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
+ if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
+ assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
status = -ENOMEM;
goto fail_tx;
}
@@ -2655,12 +2656,6 @@ static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
struct htc_service_connect_resp resp;
int status;
- /* FIXME: remove once USB support is implemented */
- if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
- ath6kl_err("HTC doesn't support USB yet. Patience!\n");
- return -EOPNOTSUPP;
- }
-
/* we should be getting 1 control message that the target is ready */
packet = htc_wait_for_ctrl_msg(target);
@@ -2890,9 +2885,7 @@ static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
{
struct htc_packet *packet, *tmp_packet;
- /* FIXME: remove check once USB support is implemented */
- if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
- ath6kl_hif_cleanup_scatter(target->dev->ar);
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
list_for_each_entry_safe(packet, tmp_packet,
&target->free_ctrl_txbuf, list) {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index f9626c723693..ba6bd497b787 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -374,9 +374,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
packet = list_first_entry(txq,
struct htc_packet,
list);
- list_del(&packet->list);
- /* insert into local queue */
- list_add_tail(&packet->list, &send_queue);
+ /* move to local queue */
+ list_move_tail(&packet->list, &send_queue);
}
/*
@@ -399,11 +398,10 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* for cleanup */
} else {
/* callback wants to keep this packet,
- * remove from caller's queue */
- list_del(&packet->list);
- /* put it in the send queue */
- list_add_tail(&packet->list,
- &send_queue);
+ * move from caller's queue to the send
+ * queue */
+ list_move_tail(&packet->list,
+ &send_queue);
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f90b5db741cf..f21fa322e5ca 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,7 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 6912,
.refclk_hz = 26000000,
.uarttx_pin = 8,
- .flags = 0,
+ .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
/* hw2.0 needs override address hardcoded */
.app_start_override_addr = 0x944C00,
@@ -68,7 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
.refclk_hz = 26000000,
.uarttx_pin = 8,
.testscript_addr = 0x57ef74,
- .flags = 0,
+ .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
.fw = {
.dir = AR6003_HW_2_1_1_FW_DIR,
@@ -93,7 +93,8 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x433900,
.refclk_hz = 26000000,
.uarttx_pin = 11,
- .flags = ATH6KL_HW_FLAG_64BIT_RATES,
+ .flags = ATH6KL_HW_64BIT_RATES |
+ ATH6KL_HW_AP_INACTIVITY_MINS,
.fw = {
.dir = AR6004_HW_1_0_FW_DIR,
@@ -113,8 +114,8 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x43d400,
.refclk_hz = 40000000,
.uarttx_pin = 11,
- .flags = ATH6KL_HW_FLAG_64BIT_RATES,
-
+ .flags = ATH6KL_HW_64BIT_RATES |
+ ATH6KL_HW_AP_INACTIVITY_MINS,
.fw = {
.dir = AR6004_HW_1_1_FW_DIR,
.fw = AR6004_HW_1_1_FIRMWARE_FILE,
@@ -133,7 +134,8 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x435c00,
.refclk_hz = 40000000,
.uarttx_pin = 11,
- .flags = ATH6KL_HW_FLAG_64BIT_RATES,
+ .flags = ATH6KL_HW_64BIT_RATES |
+ ATH6KL_HW_AP_INACTIVITY_MINS,
.fw = {
.dir = AR6004_HW_1_2_FW_DIR,
@@ -142,6 +144,28 @@ static const struct ath6kl_hw hw_list[] = {
.fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
.fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
},
+ {
+ .id = AR6004_HW_1_3_VERSION,
+ .name = "ar6004 hw 1.3",
+ .dataset_patch_addr = 0x437860,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 7168,
+ .board_addr = 0x436400,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+ .flags = ATH6KL_HW_64BIT_RATES |
+ ATH6KL_HW_AP_INACTIVITY_MINS |
+ ATH6KL_HW_MAP_LP_ENDPOINT,
+
+ .fw = {
+ .dir = AR6004_HW_1_3_FW_DIR,
+ .fw = AR6004_HW_1_3_FIRMWARE_FILE,
+ },
+
+ .fw_board = AR6004_HW_1_3_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
+ },
};
/*
@@ -337,7 +361,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
return -EIO;
- /* connect to Video service, map this to to HI PRI */
+ /* connect to Video service, map this to HI PRI */
connect.svc_id = WMI_DATA_VI_SVC;
if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
return -EIO;
@@ -1088,6 +1112,12 @@ int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
if (ret)
return ret;
+ ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
+ if (ret == 0) {
+ ar->fw_api = 4;
+ goto out;
+ }
+
ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
if (ret == 0) {
ar->fw_api = 3;
@@ -1401,8 +1431,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
- if (ar->version.target_ver == AR6003_HW_2_0_VERSION ||
- ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
+ if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x28;
@@ -1520,7 +1549,7 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
return NULL;
}
-int ath6kl_init_hw_start(struct ath6kl *ar)
+static int __ath6kl_init_hw_start(struct ath6kl *ar)
{
long timeleft;
int ret, i;
@@ -1616,8 +1645,6 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
goto err_htc_stop;
}
- ar->state = ATH6KL_STATE_ON;
-
return 0;
err_htc_stop:
@@ -1630,7 +1657,18 @@ err_power_off:
return ret;
}
-int ath6kl_init_hw_stop(struct ath6kl *ar)
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ int err;
+
+ err = __ath6kl_init_hw_start(ar);
+ if (err)
+ return err;
+ ar->state = ATH6KL_STATE_ON;
+ return 0;
+}
+
+static int __ath6kl_init_hw_stop(struct ath6kl *ar)
{
int ret;
@@ -1646,11 +1684,37 @@ int ath6kl_init_hw_stop(struct ath6kl *ar)
if (ret)
ath6kl_warn("failed to power off hif: %d\n", ret);
- ar->state = ATH6KL_STATE_OFF;
+ return 0;
+}
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int err;
+
+ err = __ath6kl_init_hw_stop(ar);
+ if (err)
+ return err;
+ ar->state = ATH6KL_STATE_OFF;
return 0;
}
+void ath6kl_init_hw_restart(struct ath6kl *ar)
+{
+ clear_bit(WMI_READY, &ar->flag);
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (__ath6kl_init_hw_stop(ar)) {
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n");
+ return;
+ }
+
+ if (__ath6kl_init_hw_start(ar)) {
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n");
+ return;
+ }
+}
+
/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
{
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index c189e28e86a9..bd50b6b7b492 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -293,13 +293,17 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
}
address = TARG_VTOP(ar->target_type, debug_hdr_addr);
- ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+ ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
+ if (ret)
+ goto out;
address = TARG_VTOP(ar->target_type,
le32_to_cpu(debug_hdr.dbuf_addr));
firstbuf = address;
dropped = le32_to_cpu(debug_hdr.dropped);
- ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+ ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+ if (ret)
+ goto out;
loop = 100;
@@ -322,7 +326,8 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
address = TARG_VTOP(ar->target_type,
le32_to_cpu(debug_buf.next));
- ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
+ ret = ath6kl_diag_read(ar, address, &debug_buf,
+ sizeof(debug_buf));
if (ret)
goto out;
@@ -436,12 +441,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
break;
}
- if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) {
- ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+ if (ar->last_ch != channel)
/* we actually don't know the phymode, default to HT20 */
- ath6kl_cfg80211_ch_switch_notify(vif, channel,
- WMI_11G_HT20);
- }
+ ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20);
ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
set_bit(CONNECTED, &vif->flags);
@@ -606,6 +608,18 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
switch (vif->nw_type) {
case AP_NETWORK:
+ /*
+ * reconfigure any saved RSN IE capabilities in the beacon /
+ * probe response to stay in sync with the supplicant.
+ */
+ if (vif->rsn_capab &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities))
+ ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
+ WLAN_EID_RSN, WMI_RSN_IE_CAPB,
+ (const u8 *) &vif->rsn_capab,
+ sizeof(vif->rsn_capab));
+
return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
&vif->profile);
default:
@@ -628,6 +642,9 @@ static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
res = ath6kl_commit_ch_switch(vif, channel);
+ /* if channel switch failed, oh well we tried */
+ ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+
if (res)
ath6kl_err("channel switch failed nw_type %d res %d\n",
vif->nw_type, res);
@@ -981,8 +998,25 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
if (vif->nw_type == AP_NETWORK) {
/* disconnect due to other STA vif switching channels */
if (reason == BSS_DISCONNECTED &&
- prot_reason_status == WMI_AP_REASON_STA_ROAM)
+ prot_reason_status == WMI_AP_REASON_STA_ROAM) {
ar->want_ch_switch |= 1 << vif->fw_vif_idx;
+ /* bail back to this channel if STA vif fails connect */
+ ar->last_ch = le16_to_cpu(vif->profile.ch);
+ }
+
+ if (prot_reason_status == WMI_AP_REASON_MAX_STA) {
+ /* send max client reached notification to user space */
+ cfg80211_conn_failed(vif->ndev, bssid,
+ NL80211_CONN_FAIL_MAX_CLIENTS,
+ GFP_KERNEL);
+ }
+
+ if (prot_reason_status == WMI_AP_REASON_ACL) {
+ /* send blocked client notification to user space */
+ cfg80211_conn_failed(vif->ndev, bssid,
+ NL80211_CONN_FAIL_BLOCKED_CLIENT,
+ GFP_KERNEL);
+ }
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1041,6 +1075,9 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
}
}
+ /* restart disconnected concurrent vifs waiting for new channel */
+ ath6kl_check_ch_switch(ar, ar->last_ch);
+
/* update connect & link status atomically */
spin_lock_bh(&vif->if_lock);
clear_bit(CONNECTED, &vif->flags);
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
new file mode 100644
index 000000000000..3a8d5e97dc8e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+static void ath6kl_recovery_work(struct work_struct *work)
+{
+ struct ath6kl *ar = container_of(work, struct ath6kl,
+ fw_recovery.recovery_work);
+
+ ar->state = ATH6KL_STATE_RECOVERY;
+
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+
+ ath6kl_init_hw_restart(ar);
+
+ ar->state = ATH6KL_STATE_ON;
+ clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+ ar->fw_recovery.err_reason = 0;
+
+ if (ar->fw_recovery.hb_poll)
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Fw error detected, reason:%d\n",
+ reason);
+
+ set_bit(reason, &ar->fw_recovery.err_reason);
+
+ if (!test_bit(RECOVERY_CLEANUP, &ar->flag) &&
+ ar->state != ATH6KL_STATE_RECOVERY)
+ queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
+{
+ if (cookie == ar->fw_recovery.seq_num)
+ ar->fw_recovery.hb_pending = false;
+}
+
+static void ath6kl_recovery_hb_timer(unsigned long data)
+{
+ struct ath6kl *ar = (struct ath6kl *) data;
+ int err;
+
+ if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
+ (ar->state == ATH6KL_STATE_RECOVERY))
+ return;
+
+ if (ar->fw_recovery.hb_pending)
+ ar->fw_recovery.hb_misscnt++;
+ else
+ ar->fw_recovery.hb_misscnt = 0;
+
+ if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) {
+ ar->fw_recovery.hb_misscnt = 0;
+ ar->fw_recovery.seq_num = 0;
+ ar->fw_recovery.hb_pending = false;
+ ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE);
+ return;
+ }
+
+ ar->fw_recovery.seq_num++;
+ ar->fw_recovery.hb_pending = true;
+
+ err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi,
+ ar->fw_recovery.seq_num, 0);
+ if (err)
+ ath6kl_warn("Failed to send hb challenge request, err:%d\n",
+ err);
+
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_init(struct ath6kl *ar)
+{
+ struct ath6kl_fw_recovery *recovery = &ar->fw_recovery;
+
+ clear_bit(RECOVERY_CLEANUP, &ar->flag);
+ INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work);
+ recovery->seq_num = 0;
+ recovery->hb_misscnt = 0;
+ ar->fw_recovery.hb_pending = false;
+ ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer;
+ ar->fw_recovery.hb_timer.data = (unsigned long) ar;
+ init_timer_deferrable(&ar->fw_recovery.hb_timer);
+
+ if (ar->fw_recovery.hb_poll)
+ mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+ msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
+
+void ath6kl_recovery_cleanup(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ set_bit(RECOVERY_CLEANUP, &ar->flag);
+
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+ cancel_work_sync(&ar->fw_recovery.recovery_work);
+}
+
+void ath6kl_recovery_suspend(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ ath6kl_recovery_cleanup(ar);
+
+ if (!ar->fw_recovery.err_reason)
+ return;
+
+ /* Process pending fw error detection */
+ ar->fw_recovery.err_reason = 0;
+ WARN_ON(ar->state != ATH6KL_STATE_ON);
+ ar->state = ATH6KL_STATE_RECOVERY;
+ ath6kl_init_hw_restart(ar);
+ ar->state = ATH6KL_STATE_ON;
+}
+
+void ath6kl_recovery_resume(struct ath6kl *ar)
+{
+ if (!ar->fw_recovery.enable)
+ return;
+
+ clear_bit(RECOVERY_CLEANUP, &ar->flag);
+
+ if (!ar->fw_recovery.hb_poll)
+ return;
+
+ ar->fw_recovery.hb_pending = false;
+ ar->fw_recovery.seq_num = 0;
+ ar->fw_recovery.hb_misscnt = 0;
+ mod_timer(&ar->fw_recovery.hb_timer,
+ jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll));
+}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 05b95405f7b5..d111980d44c0 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -709,7 +709,7 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct htc_target *target = ar->htc_target;
- int ret;
+ int ret = 0;
bool virt_scat = false;
if (ar_sdio->scatter_enabled)
@@ -844,22 +844,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
bool try_deepsleep = false;
int ret;
- if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
- ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
-
- ret = ath6kl_set_sdio_pm_caps(ar);
- if (ret)
- goto cut_pwr;
-
- ret = ath6kl_cfg80211_suspend(ar,
- ATH6KL_CFG_SUSPEND_SCHED_SCAN,
- NULL);
- if (ret)
- goto cut_pwr;
-
- return 0;
- }
-
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
@@ -942,14 +926,14 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
case ATH6KL_STATE_WOW:
break;
- case ATH6KL_STATE_SCHED_SCAN:
- break;
-
case ATH6KL_STATE_SUSPENDING:
break;
case ATH6KL_STATE_RESUMING:
break;
+
+ case ATH6KL_STATE_RECOVERY:
+ break;
}
ath6kl_cfg80211_resume(ar);
@@ -1462,3 +1446,6 @@ MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 7dfa0fd86d7b..78b369286579 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -288,8 +288,16 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
int status = 0;
struct ath6kl_cookie *cookie = NULL;
- if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW))
+ if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
+ dev_kfree_skb(skb);
return -EACCES;
+ }
+
+ if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
+ eid >= ENDPOINT_MAX)) {
+ status = -EINVAL;
+ goto fail_ctrl_tx;
+ }
spin_lock_bh(&ar->lock);
@@ -591,6 +599,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
*/
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
ath6kl_err("wmi ctrl ep is full\n");
+ ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
return action;
}
@@ -695,22 +704,31 @@ void ath6kl_tx_complete(struct htc_target *target,
list);
list_del(&packet->list);
+ if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
+ packet->endpoint >= ENDPOINT_MAX))
+ continue;
+
ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
- if (!ath6kl_cookie)
- goto fatal;
+ if (WARN_ON_ONCE(!ath6kl_cookie))
+ continue;
status = packet->status;
skb = ath6kl_cookie->skb;
eid = packet->endpoint;
map_no = ath6kl_cookie->map_no;
- if (!skb || !skb->data)
- goto fatal;
+ if (WARN_ON_ONCE(!skb || !skb->data)) {
+ dev_kfree_skb(skb);
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
__skb_queue_tail(&skb_queue, skb);
- if (!status && (packet->act_len != skb->len))
- goto fatal;
+ if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
ar->tx_pending[eid]--;
@@ -792,11 +810,6 @@ void ath6kl_tx_complete(struct htc_target *target,
wake_up(&ar->event_wq);
return;
-
-fatal:
- WARN_ON(1);
- spin_unlock_bh(&ar->lock);
- return;
}
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
@@ -885,8 +898,11 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
break;
packet = (struct htc_packet *) skb->head;
- if (!IS_ALIGNED((unsigned long) skb->data, 4))
+ if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+ size_t len = skb_headlen(skb);
skb->data = PTR_ALIGN(skb->data - 4, 4);
+ skb_set_tail_pointer(skb, len);
+ }
set_htc_rxpkt_info(packet, skb, skb->data,
ATH6KL_BUFFER_SIZE, endpoint);
packet->skb = skb;
@@ -908,8 +924,11 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
return;
packet = (struct htc_packet *) skb->head;
- if (!IS_ALIGNED((unsigned long) skb->data, 4))
+ if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
+ size_t len = skb_headlen(skb);
skb->data = PTR_ALIGN(skb->data - 4, 4);
+ skb_set_tail_pointer(skb, len);
+ }
set_htc_rxpkt_info(packet, skb, skb->data,
ATH6KL_AMSDU_BUFFER_SIZE, 0);
packet->skb = skb;
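
The rx-refill hunks above move skb->data back onto a 4-byte boundary and then reset the tail pointer so the valid region keeps its length relative to the new start. A minimal standalone sketch of that idea follows; it is plain C with a local align_down() helper, not the kernel's PTR_ALIGN/skb helpers.

/* Self-contained sketch (not kernel code): move the payload pointer down
 * to the previous 4-byte boundary, then re-derive the end pointer from the
 * saved length so the valid region stays consistent.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t *align_down(uint8_t *p, uintptr_t a)
{
	return (uint8_t *)((uintptr_t)p & ~(a - 1));
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *data = buf + 3;          /* deliberately misaligned start */
	uint8_t *tail = data + 16;        /* end of the valid region */
	size_t len = (size_t)(tail - data);

	if ((uintptr_t)data & 3) {
		data = align_down(data, 4);   /* like PTR_ALIGN(data - 4, 4) */
		tail = data + len;            /* like skb_set_tail_pointer(skb, len) */
	}

	printf("aligned: %d, len kept: %zu\n", (int)(((uintptr_t)data & 3) == 0), len);
	return 0;
}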
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 3740c3d6ab88..62bcc0d5bc23 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -185,9 +185,10 @@ static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
for (i = 0; i < urb_cnt; i++) {
urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
GFP_KERNEL);
- if (urb_context == NULL)
- /* FIXME: set status to -ENOMEM */
- break;
+ if (urb_context == NULL) {
+ status = -ENOMEM;
+ goto fail_alloc_pipe_resources;
+ }
urb_context->pipe = pipe;
@@ -204,6 +205,7 @@ static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
pipe->logical_pipe_num, pipe->usb_pipe_handle,
pipe->urb_alloc);
+fail_alloc_pipe_resources:
return status;
}
@@ -803,7 +805,11 @@ static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
break;
case WMI_DATA_VI_SVC:
- *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+
+ if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+ else
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
/*
* Disable rxdata2 directly, it will be enabled
* if FW enable rxdata2
@@ -811,7 +817,11 @@ static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
break;
case WMI_DATA_VO_SVC:
- *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
+
+ if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+ else
+ *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
/*
* Disable rxdata2 directly, it will be enabled
* if FW enable rxdata2
@@ -1196,7 +1206,14 @@ static struct usb_driver ath6kl_usb_driver = {
static int ath6kl_usb_init(void)
{
- usb_register(&ath6kl_usb_driver);
+ int ret;
+
+ ret = usb_register(&ath6kl_usb_driver);
+ if (ret) {
+ ath6kl_err("usb registration failed: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -1220,3 +1237,6 @@ MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index c30ab4b11d61..998f8b0f62fd 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -474,7 +474,7 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
}
id = vif->last_roc_id;
- cfg80211_ready_on_channel(&vif->wdev, id, chan, NL80211_CHAN_NO_HT,
+ cfg80211_ready_on_channel(&vif->wdev, id, chan,
dur, GFP_ATOMIC);
return 0;
@@ -513,8 +513,7 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
else
id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
vif->last_cancel_roc_id = 0;
- cfg80211_remain_on_channel_expired(&vif->wdev, id, chan,
- NL80211_CHAN_NO_HT, GFP_ATOMIC);
+ cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC);
return 0;
}
@@ -936,8 +935,12 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
- ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
- regpair->regDmnEnum);
+ if (regpair)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
+ regpair->regDmnEnum);
+ else
+ ath6kl_warn("Regpair not found reg_code 0x%0x\n",
+ reg_code);
}
if (country && wmi->parent_dev->wiphy_registered) {
@@ -1116,7 +1119,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
* the timer would not ever fire if the scan interval is short
* enough.
*/
- if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+ if (test_bit(SCHED_SCANNING, &vif->flags) &&
!timer_pending(&vif->sched_scan_timer)) {
mod_timer(&vif->sched_scan_timer, jiffies +
msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
@@ -1170,6 +1173,9 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
rate = RATE_AUTO;
} else {
index = reply->rate_index & 0x7f;
+ if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
+ return -EINVAL;
+
sgi = (reply->rate_index & 0x80) ? 1 : 0;
rate = wmi_rate_tbl[index][sgi];
}
@@ -1531,6 +1537,68 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
return 0;
}
+static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
+{
+ struct wmi_txe_notify_event *ev;
+ u32 rate, pkts;
+
+ if (len < sizeof(*ev))
+ return -EINVAL;
+
+ if (vif->sme_state != SME_CONNECTED)
+ return -ENOTCONN;
+
+ ev = (struct wmi_txe_notify_event *) datap;
+ rate = le32_to_cpu(ev->rate);
+ pkts = le32_to_cpu(ev->pkts);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n",
+ vif->bssid, rate, pkts, vif->txe_intvl);
+
+ cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
+ rate, vif->txe_intvl, GFP_KERNEL);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct sk_buff *skb;
+ struct wmi_txe_notify_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_txe_notify_cmd *) skb->data;
+ cmd->rate = cpu_to_le32(rate);
+ cmd->pkts = cpu_to_le32(pkts);
+ cmd->intvl = cpu_to_le32(intvl);
+
+ return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi)
+{
+ struct sk_buff *skb;
+ struct wmi_set_rssi_filter_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
+ cmd->rssi = rssi;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
struct wmi_snr_threshold_params_cmd *snr_cmd)
{
@@ -1677,8 +1745,11 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
int ret;
u16 info1;
- if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
+ if (WARN_ON(skb == NULL ||
+ (if_idx > (wmi->parent_dev->vif_max - 1)))) {
+ dev_kfree_skb(skb);
return -EINVAL;
+ }
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
cmd_id, skb->len, sync_flag);
@@ -1833,6 +1904,59 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
return ret;
}
+/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
+ * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
+ * mgmt operations using station interface.
+ */
+static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time,
+ u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list)
+{
+ struct sk_buff *skb;
+ struct wmi_start_scan_cmd *sc;
+ s8 size;
+ int i, ret;
+
+ size = sizeof(struct wmi_start_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_start_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->num_ch = num_chan;
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+/*
+ * beginscan supports (compared to old startscan) P2P mgmt operations using
+ * station interface, send additional information like supported rates to
+ * advertise and xmit rates for probe requests
+ */
int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
@@ -1848,6 +1972,15 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
int num_rates;
u32 ratemask;
+ if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ return ath6kl_wmi_startscan_cmd(wmi, if_idx,
+ scan_type, force_fgscan,
+ is_legacy, home_dwell_time,
+ force_scan_interval,
+ num_chan, ch_list);
+ }
+
size = sizeof(struct wmi_begin_scan_cmd);
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
@@ -1900,50 +2033,24 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
-/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
- * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
- * mgmt operations using station interface.
- */
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
- enum wmi_scan_type scan_type,
- u32 force_fgscan, u32 is_legacy,
- u32 home_dwell_time, u32 force_scan_interval,
- s8 num_chan, u16 *ch_list)
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable)
{
struct sk_buff *skb;
- struct wmi_start_scan_cmd *sc;
- s8 size;
- int i, ret;
-
- size = sizeof(struct wmi_start_scan_cmd);
-
- if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
- return -EINVAL;
-
- if (num_chan > WMI_MAX_CHANNELS)
- return -EINVAL;
-
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
+ struct wmi_enable_sched_scan_cmd *sc;
+ int ret;
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
if (!skb)
return -ENOMEM;
- sc = (struct wmi_start_scan_cmd *) skb->data;
- sc->scan_type = scan_type;
- sc->force_fg_scan = cpu_to_le32(force_fgscan);
- sc->is_legacy = cpu_to_le32(is_legacy);
- sc->home_dwell_time = cpu_to_le32(home_dwell_time);
- sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
- sc->num_ch = num_chan;
-
- for (i = 0; i < num_chan; i++)
- sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s scheduled scan on vif %d\n",
+ enable ? "enabling" : "disabling", if_idx);
+ sc = (struct wmi_enable_sched_scan_cmd *) skb->data;
+ sc->enable = enable ? 1 : 0;
- ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_ENABLE_SCHED_SCAN_CMDID,
NO_SYNC_WMIFLAG);
-
return ret;
}
@@ -2275,8 +2382,10 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
struct wmi_data_hdr *data_hdr;
int ret;
- if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
+ if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) {
+ dev_kfree_skb(skb);
return -EINVAL;
+ }
skb_push(skb, sizeof(struct wmi_data_hdr));
@@ -2313,10 +2422,8 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
spin_unlock_bh(&wmi->lock);
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
- if (!skb) {
- ret = -ENOMEM;
- goto free_skb;
- }
+ if (!skb)
+ return -ENOMEM;
cmd = (struct wmi_sync_cmd *) skb->data;
@@ -2339,7 +2446,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
* then do not send the Synchronize cmd on the control ep
*/
if (ret)
- goto free_skb;
+ goto free_cmd_skb;
/*
* Send sync cmd followed by sync data messages on all
@@ -2349,15 +2456,12 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
NO_SYNC_WMIFLAG);
if (ret)
- goto free_skb;
-
- /* cmd buffer sent, we no longer own it */
- skb = NULL;
+ goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
if (WARN_ON(!data_sync_bufs[index].skb))
- break;
+ goto free_data_skb;
ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
data_sync_bufs[index].
@@ -2366,17 +2470,20 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
ep_id, if_idx);
- if (ret)
- break;
-
data_sync_bufs[index].skb = NULL;
+
+ if (ret)
+ goto free_data_skb;
}
-free_skb:
+ return 0;
+
+free_cmd_skb:
/* free up any resources left over (possibly due to an error) */
if (skb)
dev_kfree_skb(skb);
+free_data_skb:
for (index = 0; index < num_pri_streams; index++) {
if (data_sync_bufs[index].skb != NULL) {
dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
@@ -2618,11 +2725,13 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
int ret, mode, band;
- u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ u64 mcsrate, ratemask[ATH6KL_NUM_BANDS];
struct wmi_set_tx_select_rates64_cmd *cmd;
memset(&ratemask, 0, sizeof(ratemask));
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ /* only check 2.4 and 5 GHz bands, skip the rest */
+ for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
if (band == IEEE80211_BAND_5GHZ)
@@ -2668,11 +2777,13 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
int ret, mode, band;
- u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ u32 mcsrate, ratemask[ATH6KL_NUM_BANDS];
struct wmi_set_tx_select_rates32_cmd *cmd;
memset(&ratemask, 0, sizeof(ratemask));
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ /* only check 2.4 and 5 GHz bands, skip the rest */
+ for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
if (band == IEEE80211_BAND_5GHZ)
@@ -2716,7 +2827,7 @@ int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
{
struct ath6kl *ar = wmi->parent_dev;
- if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
+ if (ar->hw.flags & ATH6KL_HW_64BIT_RATES)
return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
else
return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
@@ -3139,12 +3250,40 @@ int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
return ret;
}
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2)
+{
+ struct sk_buff *skb;
+ struct wmi_set_regdomain_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_regdomain_cmd *) skb->data;
+ memcpy(cmd->iso_name, alpha2, 2);
+
+ return ath6kl_wmi_cmd_send(wmi, 0, skb,
+ WMI_SET_REGDOMAIN_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
s32 ath6kl_wmi_get_rate(s8 rate_index)
{
+ u8 sgi = 0;
+
if (rate_index == RATE_AUTO)
return 0;
- return wmi_rate_tbl[(u32) rate_index][0];
+ /* SGI is stored as the MSB of the rate_index */
+ if (rate_index & RATE_INDEX_MSB) {
+ rate_index &= RATE_INDEX_WITHOUT_SGI_MASK;
+ sgi = 1;
+ }
+
+ if (WARN_ON(rate_index > RATE_MCS_7_40))
+ rate_index = RATE_MCS_7_40;
+
+ return wmi_rate_tbl[(u32) rate_index][sgi];
}
static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
@@ -3634,6 +3773,19 @@ int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
NO_SYNC_WMIFLAG);
}
+static void ath6kl_wmi_hb_challenge_resp_event(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmix_hb_challenge_resp_cmd *cmd;
+
+ if (len < sizeof(struct wmix_hb_challenge_resp_cmd))
+ return;
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) datap;
+ ath6kl_recovery_hb_event(wmi->parent_dev,
+ le32_to_cpu(cmd->cookie));
+}
+
static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
{
struct wmix_cmd_hdr *cmd;
@@ -3658,6 +3810,7 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
switch (id) {
case WMIX_HB_CHALLENGE_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n");
+ ath6kl_wmi_hb_challenge_resp_event(wmi, datap, len);
break;
case WMIX_DBGLOG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len);
@@ -3750,6 +3903,9 @@ static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id,
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
+ case WMI_TXE_NOTIFY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TXE_NOTIFY_EVENTID\n");
+ return ath6kl_wmi_txe_notify_event_rx(wmi, datap, len, vif);
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
return -EINVAL;
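
In the ath6kl_wmi_get_rate() hunk above, bit 7 of the rate index carries the short-guard-interval flag and the low 7 bits select the table row. The sketch below shows that decoding in isolation; the table contents and the demo_* names are invented for illustration and are not the driver's wmi_rate_tbl.

/* Standalone sketch of the SGI/index split handled above (not ath6kl code). */
#include <stdio.h>
#include <stdint.h>

#define RATE_IDX_SGI_FLAG  0x80	/* MSB: short guard interval */
#define RATE_IDX_MASK      0x7f	/* low 7 bits: table index */

static const int32_t demo_rate_tbl[][2] = {
	/* {long GI, short GI}, illustrative values only */
	{  65,  72 },
	{ 130, 144 },
	{ 195, 217 },
};

static int32_t demo_get_rate(uint8_t rate_index)
{
	unsigned int sgi = !!(rate_index & RATE_IDX_SGI_FLAG);
	unsigned int idx = rate_index & RATE_IDX_MASK;
	unsigned int max = sizeof(demo_rate_tbl) / sizeof(demo_rate_tbl[0]) - 1;

	if (idx > max)		/* clamp, mirroring the WARN_ON bound check */
		idx = max;

	return demo_rate_tbl[idx][sgi];
}

int main(void)
{
	printf("idx 1, long GI:  %d\n", demo_get_rate(0x01));
	printf("idx 1, short GI: %d\n", demo_get_rate(0x81));
	return 0;
}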
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 43339aca585d..98b1755e67f4 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -48,7 +48,7 @@
#define A_BAND_24GHZ 0
#define A_BAND_5GHZ 1
-#define A_NUM_BANDS 2
+#define ATH6KL_NUM_BANDS 2
/* in ms */
#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
@@ -628,6 +628,20 @@ enum wmi_cmd_id {
WMI_SET_MCASTRATE,
WMI_STA_BMISS_ENHANCE_CMDID,
+
+ WMI_SET_REGDOMAIN_CMDID,
+
+ WMI_SET_RSSI_FILTER_CMDID,
+
+ WMI_SET_KEEP_ALIVE_EXT,
+
+ WMI_VOICE_DETECTION_ENABLE_CMDID,
+
+ WMI_SET_TXE_NOTIFY_CMDID,
+
+ WMI_SET_RECOVERY_TEST_PARAMETER_CMDID, /*0xf094*/
+
+ WMI_ENABLE_SCHED_SCAN_CMDID,
};
enum wmi_mgmt_frame_type {
@@ -843,7 +857,7 @@ struct wmi_begin_scan_cmd {
u8 scan_type;
/* Supported rates to advertise in the probe request frames */
- struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+ struct wmi_supp_rates supp_rates[ATH6KL_NUM_BANDS];
/* how many channels follow */
u8 num_ch;
@@ -941,6 +955,11 @@ struct wmi_scan_params_cmd {
__le32 max_dfsch_act_time;
} __packed;
+/* WMI_ENABLE_SCHED_SCAN_CMDID */
+struct wmi_enable_sched_scan_cmd {
+ u8 enable;
+} __packed;
+
/* WMI_SET_BSS_FILTER_CMDID */
enum wmi_bss_filter {
/* no beacons forwarded */
@@ -1032,6 +1051,11 @@ struct wmi_sta_bmiss_enhance_cmd {
u8 enable;
} __packed;
+struct wmi_set_regdomain_cmd {
+ u8 length;
+ u8 iso_name[2];
+} __packed;
+
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@@ -1276,6 +1300,11 @@ struct wmi_snr_threshold_params_cmd {
u8 reserved[3];
} __packed;
+/* Don't report BSSs with signal (RSSI) below this threshold */
+struct wmi_set_rssi_filter_cmd {
+ s8 rssi;
+} __packed;
+
enum wmi_preamble_policy {
WMI_IGNORE_BARKER_IN_ERP = 0,
WMI_FOLLOW_BARKER_IN_ERP,
@@ -1455,6 +1484,20 @@ enum wmi_event_id {
WMI_P2P_CAPABILITIES_EVENTID,
WMI_RX_ACTION_EVENTID,
WMI_P2P_INFO_EVENTID,
+
+ /* WPS Events */
+ WMI_WPS_GET_STATUS_EVENTID,
+ WMI_WPS_PROFILE_EVENTID,
+
+ /* more P2P events */
+ WMI_NOA_INFO_EVENTID,
+ WMI_OPPPS_INFO_EVENTID,
+ WMI_PORT_STATUS_EVENTID,
+
+ /* 802.11w */
+ WMI_GET_RSN_CAP_EVENTID,
+
+ WMI_TXE_NOTIFY_EVENTID,
};
struct wmi_ready_event_2 {
@@ -1749,6 +1792,9 @@ struct rx_stats {
a_sle32 ucast_rate;
} __packed;
+#define RATE_INDEX_WITHOUT_SGI_MASK 0x7f
+#define RATE_INDEX_MSB 0x80
+
struct tkip_ccmp_stats {
__le32 tkip_local_mic_fail;
__le32 tkip_cnter_measures_invoked;
@@ -2019,7 +2065,6 @@ struct wmi_set_ie_cmd {
#define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64
-#define WOW_MASK_SIZE 64
#define MAC_MAX_FILTERS_PER_LIST 4
@@ -2028,7 +2073,7 @@ struct wow_filter {
u8 wow_filter_id;
u8 wow_filter_size;
u8 wow_filter_offset;
- u8 wow_filter_mask[WOW_MASK_SIZE];
+ u8 wow_filter_mask[WOW_PATTERN_SIZE];
u8 wow_filter_pattern[WOW_PATTERN_SIZE];
} __packed;
@@ -2087,6 +2132,19 @@ struct wmi_del_wow_pattern_cmd {
__le16 filter_id;
} __packed;
+/* WMI_SET_TXE_NOTIFY_CMDID */
+struct wmi_txe_notify_cmd {
+ __le32 rate;
+ __le32 pkts;
+ __le32 intvl;
+} __packed;
+
+/* WMI_TXE_NOTIFY_EVENTID */
+struct wmi_txe_notify_event {
+ __le32 rate;
+ __le32 pkts;
+} __packed;
+
/* WMI_SET_AKMP_PARAMS_CMD */
struct wmi_pmkid {
@@ -2505,11 +2563,6 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
u16 channel);
int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
- enum wmi_scan_type scan_type,
- u32 force_fgscan, u32 is_legacy,
- u32 home_dwell_time, u32 force_scan_interval,
- s8 num_chan, u16 *ch_list);
int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
enum wmi_scan_type scan_type,
@@ -2517,6 +2570,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list, u32 no_cck,
u32 *rates);
+int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable);
int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
@@ -2592,6 +2646,7 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
const u8 *mask);
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u16 list_id, u16 filter_id);
+int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
@@ -2600,6 +2655,9 @@ int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
u8 *filter, bool add_filter);
int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
+ u32 rate, u32 pkts, u32 intvl);
+int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2);
/* AP mode uAPSD */
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
@@ -2658,6 +2716,8 @@ int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
void ath6kl_wmi_sscan_timer(unsigned long ptr);
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+
struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
void *ath6kl_wmi_init(struct ath6kl *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index c7aa6646123e..7647ed6b73d7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,7 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+ select ATH_COMMON
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -55,7 +56,8 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K && DEBUG_FS
+ depends on ATH9K
+ select MAC80211_DEBUGFS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 6f7cf49eff4d..262e1e036fd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -534,98 +534,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
- {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
- {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
- {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
- {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
- {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
- {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
- {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
- {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
- {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
- {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
- {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
- {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
- {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
- {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 84b558d126ca..56317b0fb6b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -276,6 +276,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
offset_array[i],
REG_READ(ah, offset_array[i]));
+ if (AR_SREV_9565(ah) &&
+ (iCoff == 63 || qCoff == 63 ||
+ iCoff == -63 || qCoff == -63))
+ return;
+
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
iCoff);
@@ -886,6 +891,74 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
}
+static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
+{
+ int offset[8], total = 0, test;
+ int agc_out, i;
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON, 0x0);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+
+ for (i = 6; i > 0; i--) {
+ offset[i] = BIT(i - 1);
+ test = total + offset[i];
+
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+ test);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+ test);
+ udelay(100);
+ agc_out = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_OUT);
+ offset[i] = (agc_out) ? 0 : 1;
+ total += (offset[i] << (i - 1));
+ }
+
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, total);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+ AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
+ AR_PHY_65NM_RXTX2_RXON_OVR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
+}
+
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -903,6 +976,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
AR_PHY_CL_TAB_1,
AR_PHY_CL_TAB_2 };
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
if (rtt) {
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
@@ -984,6 +1059,14 @@ skip_tx_iqcal:
status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i,
+ IS_CHAN_2GHZ(chan));
+ }
+ }
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
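
The new ar9003_hw_manual_peak_cal() above walks a CALDAC code from the most significant bit down, keeping each trial bit only while the AGC comparator output stays low — a successive-approximation search. Below is a standalone sketch of the same loop shape; read_comparator() and the target value are invented stand-ins for the register accesses.

/* Standalone sketch (not ath9k code) of the successive-approximation loop. */
#include <stdio.h>

static int dac_target = 41;	/* pretend trip point, illustrative only */

static int read_comparator(int dac_code)
{
	/* plays the role of agc_out: goes high once the code reaches the target */
	return dac_code >= dac_target;
}

int main(void)
{
	int total = 0;

	for (int bit = 5; bit >= 0; bit--) {
		int trial = total + (1 << bit);

		/* keep the bit only if the comparator has not tripped yet */
		if (!read_comparator(trial))
			total = trial;
	}

	printf("converged code: %d (largest value below target %d)\n",
	       total, dac_target);
	return 0;
}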
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bbe5057ba18..562186ca9b52 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -18,6 +18,7 @@
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
+#include "ar9003_mci.h"
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
@@ -41,7 +42,6 @@
static int ar9003_hw_power_interpolate(int32_t x,
int32_t *px, int32_t *py, u_int16_t np);
-
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
@@ -2987,10 +2987,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_RX_MASK:
return pBase->txrxMask & 0xf;
case EEP_PAPRD:
- if (AR_SREV_9462(ah))
- return false;
- if (!ah->config.enable_paprd);
- return false;
return !!(pBase->featureEnable & BIT(5));
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
@@ -3005,24 +3001,24 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
}
}
-static bool ar9300_eeprom_read_byte(struct ath_common *common, int address,
+static bool ar9300_eeprom_read_byte(struct ath_hw *ah, int address,
u8 *buffer)
{
u16 val;
- if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
return false;
*buffer = (val >> (8 * (address % 2))) & 0xff;
return true;
}
-static bool ar9300_eeprom_read_word(struct ath_common *common, int address,
+static bool ar9300_eeprom_read_word(struct ath_hw *ah, int address,
u8 *buffer)
{
u16 val;
- if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
return false;
buffer[0] = val >> 8;
@@ -3048,14 +3044,14 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
* the 16-bit word at that address
*/
if (address % 2 == 0) {
- if (!ar9300_eeprom_read_byte(common, address--, buffer++))
+ if (!ar9300_eeprom_read_byte(ah, address--, buffer++))
goto error;
count--;
}
for (i = 0; i < count / 2; i++) {
- if (!ar9300_eeprom_read_word(common, address, buffer))
+ if (!ar9300_eeprom_read_word(ah, address, buffer))
goto error;
address -= 2;
@@ -3063,7 +3059,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
}
if (count % 2)
- if (!ar9300_eeprom_read_byte(common, address, buffer))
+ if (!ar9300_eeprom_read_byte(ah, address, buffer))
goto error;
return true;
@@ -3240,12 +3236,11 @@ static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
int mdata_size)
{
- struct ath_common *common = ath9k_hw_common(ah);
u16 *data = (u16 *) mptr;
int i;
for (i = 0; i < mdata_size / 2; i++, data++)
- ath9k_hw_nvram_read(common, i, data);
+ ath9k_hw_nvram_read(ah, i, data);
return 0;
}
@@ -3601,7 +3596,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
* 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
* SWITCH_TABLE_COM_SPDT_WLAN_IDLE
*/
- if (AR_SREV_9462_20_OR_LATER(ah)) {
+ if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
value = ar9003_switch_com_spdt_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -5037,16 +5032,28 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
case CTL_5GHT20:
case CTL_2GHT20:
for (i = ALL_TARGET_HT20_0_8_16;
- i <= ALL_TARGET_HT20_23; i++)
+ i <= ALL_TARGET_HT20_23; i++) {
pPwrArray[i] = (u8)min((u16)pPwrArray[i],
minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
break;
case CTL_5GHT40:
case CTL_2GHT40:
for (i = ALL_TARGET_HT40_0_8_16;
- i <= ALL_TARGET_HT40_23; i++)
+ i <= ALL_TARGET_HT40_23; i++) {
pPwrArray[i] = (u8)min((u16)pPwrArray[i],
minCtlPower);
+ if (ath9k_hw_mci_is_enabled(ah))
+ pPwrArray[i] =
+ (u8)min((u16)pPwrArray[i],
+ ar9003_mci_get_max_txpower(ah,
+ pCtlMode[ctlMode]));
+ }
break;
default:
break;
@@ -5064,6 +5071,33 @@ static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
}
+static void ar9003_paprd_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 *targetPowerValT2)
+{
+ int i;
+
+ if (!ar9003_is_paprd_enabled(ah))
+ return;
+
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_7;
+ else
+ i = ALL_TARGET_HT20_7;
+
+ if (IS_CHAN_2GHZ(chan)) {
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) &&
+ !AR_SREV_9462(ah) && !AR_SREV_9565(ah)) {
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_0_8_16;
+ else
+ i = ALL_TARGET_HT20_0_8_16;
+ }
+ }
+
+ ah->paprd_target_power = targetPowerValT2[i];
+}
+
static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
@@ -5085,7 +5119,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
*/
ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
+ if (ar9003_is_paprd_enabled(ah)) {
if (IS_CHAN_2GHZ(chan))
modal_hdr = &eep->modalHeader2G;
else
@@ -5126,7 +5160,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
twiceAntennaReduction,
powerLimit);
- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
+ if (ar9003_is_paprd_enabled(ah)) {
for (i = 0; i < ar9300RateSize; i++) {
if ((ah->paprd_ratemask & (1 << i)) &&
(abs(targetPowerValT2[i] -
@@ -5158,19 +5192,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
/* Write target power array to registers */
ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
ar9003_hw_calibration_apply(ah, chan->channel);
-
- if (IS_CHAN_2GHZ(chan)) {
- if (IS_CHAN_HT40(chan))
- i = ALL_TARGET_HT40_0_8_16;
- else
- i = ALL_TARGET_HT20_0_8_16;
- } else {
- if (IS_CHAN_HT40(chan))
- i = ALL_TARGET_HT40_7;
- else
- i = ALL_TARGET_HT20_7;
- }
- ah->paprd_target_power = targetPowerValT2[i];
+ ar9003_paprd_set_txpower(ah, chan, targetPowerValT2);
}
static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 41b1a75e6bec..54ba42f4108a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -68,13 +68,13 @@
#define AR9300_BASE_ADDR 0x3ff
#define AR9300_BASE_ADDR_512 0x1ff
-#define AR9300_OTP_BASE 0x14000
-#define AR9300_OTP_STATUS 0x15f18
+#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
-#define AR9300_OTP_READ_DATA 0x15f1c
+#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1a36fa262639..59bf5f31e212 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -35,12 +35,6 @@
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
-#define AR9462_BB_CTX_COEFJ(x) \
- ar9462_##x##_baseband_core_txfir_coeff_japan_2484
-
-#define AR9462_BBC_TXIFR_COEFFJ \
- ar9462_2p0_baseband_core_txfir_coeff_japan_2484
-
if (AR_SREV_9330_11(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -70,6 +64,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9331_modes_lowest_ob_db_tx_gain_1p1);
+ /* Japan 2484 Mhz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9331_1p1_baseband_core_txfir_coeff_japan_2484);
+
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
@@ -106,6 +104,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9331_modes_lowest_ob_db_tx_gain_1p2);
+ /* Japan 2484 Mhz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9331_1p2_baseband_core_txfir_coeff_japan_2484);
+
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
@@ -180,6 +182,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485_modes_lowest_ob_db_tx_gain_1_1);
+ /* Japan 2484 Mhz CCK */
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
+
/* Load PCIE SERDES settings from INI */
/* Awake Setting */
@@ -219,19 +225,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+ ar9462_pciephy_clkreq_disable_L1_2p0);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
+ ar9462_pciephy_clkreq_disable_L1_2p0);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9462_modes_fast_clock_2p0);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- AR9462_BB_CTX_COEFJ(2p0));
-
- INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ);
+ ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
} else if (AR_SREV_9550(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -328,9 +332,9 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+ ar9565_1p0_pciephy_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+ ar9565_1p0_pciephy_clkreq_disable_L1);
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
@@ -540,7 +544,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9340Common_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1);
+ ar9485_common_rx_gain_1_1);
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_rx_gain_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 44c202ce6c66..8dd069259e7b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,7 +714,6 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
return true;
}
-EXPORT_SYMBOL(ar9003_mci_start_reset);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata)
@@ -750,6 +749,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
mci_hw->bt_state = MCI_BT_AWAKE;
+ REG_CLR_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
@@ -759,6 +761,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
+ REG_SET_BIT(ah, AR_PHY_TIMING4,
+ 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
+
exit:
ar9003_mci_enable_interrupt(ah);
return 0;
@@ -799,6 +804,9 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+ if (AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);
+
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
@@ -818,7 +826,7 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 regval;
+ u32 regval, i;
ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
is_full_sleep, is_2g);
@@ -847,11 +855,18 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
- SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
- SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ if (AR_SREV_9565(ah)) {
+ regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
+ } else {
+ regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
+ }
REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
@@ -865,9 +880,24 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+ /* Set the time out to 3.125ms (5 BT slots) */
+ REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);
+
+ /* concurrent tx priority */
+ if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
+ AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
+ }
+
regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
@@ -910,6 +940,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
mci->ready = true;
ar9003_mci_prep_interface(ah);
+ if (AR_SREV_9565(ah))
+ REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
+ AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
if (en_int)
ar9003_mci_enable_interrupt(ah);
@@ -1028,7 +1061,9 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
ar9003_mci_osla_setup(ah, true);
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
+
+ if (AR_SREV_9462(ah))
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
} else {
ar9003_mci_send_lna_take(ah, true);
udelay(5);
@@ -1170,7 +1205,7 @@ EXPORT_SYMBOL(ar9003_mci_cleanup);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 value = 0;
+ u32 value = 0, tsf;
u8 query_type;
switch (state_type) {
@@ -1228,6 +1263,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
case MCI_STATE_RECOVER_RX:
+ tsf = ath9k_hw_gettsf32(ah);
+ if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "(MCI) ignore Rx recovery\n");
+ break;
+ }
+ ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");
+ mci->last_recovery = tsf;
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
@@ -1426,3 +1469,17 @@ void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
ar9003_mci_send_coex_wlan_channels(ah, true);
}
EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
+
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ if (!ah->btcoex_hw.mci.concur_tx)
+ goto out;
+
+ if (ctlmode == CTL_2GHT20)
+ return ATH_BTCOEX_HT20_MAX_TXPOWER;
+ else if (ctlmode == CTL_2GHT40)
+ return ATH_BTCOEX_HT40_MAX_TXPOWER;
+
+out:
+ return -1;
+}
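
The MCI_STATE_RECOVER_RX change above rate-limits Rx recovery by comparing the current TSF against the last recovery time and skipping requests inside a 100 ms window. A self-contained sketch of that debounce is below; the timestamp is passed in directly instead of reading the hardware TSF, and the names are illustrative.

/* Standalone sketch (not ath9k code) of the recovery debounce added above. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define RECOVERY_WINDOW_US (100u * 1000u)	/* 100 ms, as in MCI_RECOVERY_DUR_TSF */

struct recovery_state {
	uint32_t last_tsf;
};

static bool should_recover(struct recovery_state *st, uint32_t now)
{
	/* unsigned subtraction handles 32-bit TSF wrap-around */
	if ((uint32_t)(now - st->last_tsf) <= RECOVERY_WINDOW_US)
		return false;	/* too soon after the previous recovery */

	st->last_tsf = now;
	return true;
}

int main(void)
{
	struct recovery_state st = { .last_tsf = 0 };

	printf("%d\n", should_recover(&st, 200000));	/* 1: allowed */
	printf("%d\n", should_recover(&st, 250000));	/* 0: inside the window */
	printf("%d\n", should_recover(&st, 400000));	/* 1: window elapsed */
	return 0;
}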
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 2a2d01889613..66d7ab9f920d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -18,6 +18,7 @@
#define AR9003_MCI_H
#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+#define MCI_RECOVERY_DUR_TSF (100 * 1000) /* 100 ms */
/* Default remote BT device MCI COEX version */
#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
@@ -125,6 +126,7 @@ enum ath_mci_gpm_coex_profile_type {
MCI_GPM_COEX_PROFILE_HID,
MCI_GPM_COEX_PROFILE_BNEP,
MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_A2DPVO,
MCI_GPM_COEX_PROFILE_MAX
};
@@ -196,7 +198,6 @@ enum mci_state_type {
MCI_STATE_SEND_WLAN_COEX_VERSION,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
- MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
MCI_STATE_DEBUG,
@@ -278,6 +279,7 @@ void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
void ar9003_mci_set_power_awake(struct ath_hw *ah);
void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
+u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode);
#else
@@ -324,6 +326,10 @@ static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
{
}
+static inline u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
+{
+ return -1;
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 0ed3846f9cbb..09c1f9da67a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -74,15 +74,23 @@ static int ar9003_get_training_power_2g(struct ath_hw *ah)
unsigned int power, scale, delta;
scale = ar9003_get_paprd_scale_factor(ah, chan);
- power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
- AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
- delta = abs((int) ah->paprd_target_power - (int) power);
- if (delta > scale)
- return -1;
-
- if (delta < 4)
- power -= 4 - delta;
+ if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ power = ah->paprd_target_power + 2;
+ } else if (AR_SREV_9485(ah)) {
+ power = 25;
+ } else {
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+ delta = abs((int) ah->paprd_target_power - (int) power);
+ if (delta > scale)
+ return -1;
+
+ if (delta < 4)
+ power -= 4 - delta;
+ }
return power;
}
@@ -169,6 +177,9 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
ah->paprd_ratemask_ht40);
+ ath_dbg(common, CALIBRATE, "PAPRD HT20 mask: 0x%x, HT40 mask: 0x%x\n",
+ ah->paprd_ratemask, ah->paprd_ratemask_ht40);
+
for (i = 0; i < ah->caps.max_txchains; i++) {
REG_RMW_FIELD(ah, ctrl0[i],
AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
@@ -204,7 +215,20 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
- val = AR_SREV_9462(ah) ? 0x91 : 147;
+
+ if (AR_SREV_9485(ah)) {
+ val = 148;
+ } else {
+ if (IS_CHAN_2GHZ(ah->curchan)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ val = 145;
+ else
+ val = 147;
+ } else {
+ val = 137;
+ }
+ }
+
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
@@ -215,15 +239,24 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
- if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah))
+
+ if (AR_SREV_9485(ah) ||
+ AR_SREV_9462(ah) ||
+ AR_SREV_9565(ah) ||
+ AR_SREV_9550(ah) ||
+ AR_SREV_9330(ah) ||
+ AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
- AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
- -3);
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3);
else
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
- AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
- -6);
- val = AR_SREV_9462(ah) ? -10 : -15;
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+
+ val = -10;
+
+ if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ val = -15;
+
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
val);
@@ -262,9 +295,6 @@ static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
u32 reg = AR_PHY_TXGAIN_TABLE;
int i;
- memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
- memset(index, 0, sizeof(ah->paprd_gain_table_index));
-
for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
entry[i] = REG_READ(ah, reg);
index[i] = (entry[i] >> 24) & 0xff;
@@ -763,7 +793,7 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
}
EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
-int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
+void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
{
unsigned int i, desired_gain, gain_index;
unsigned int train_power = ah->paprd_training_power;
@@ -781,8 +811,6 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
-
- return 0;
}
EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
@@ -894,7 +922,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
- buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
+ buf = kmalloc(2 * 48 * sizeof(u32), GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -945,9 +973,13 @@ EXPORT_SYMBOL(ar9003_paprd_init_table);
bool ar9003_paprd_is_done(struct ath_hw *ah)
{
int paprd_done, agc2_pwr;
+
paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+ if (AR_SREV_9485(ah))
+ goto exit;
+
if (paprd_done == 0x1) {
agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
@@ -963,7 +995,16 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
if (agc2_pwr <= PAPRD_IDEAL_AGC2_PWR_RANGE)
paprd_done = 0;
}
-
+exit:
return !!paprd_done;
}
EXPORT_SYMBOL(ar9003_paprd_is_done);
+
+bool ar9003_is_paprd_enabled(struct ath_hw *ah)
+{
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->config.enable_paprd)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ar9003_is_paprd_enabled);
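
ar9003_is_paprd_enabled() folds the hardware capability bit and the enable_paprd config flag into a single predicate. A hypothetical call site might look like the sketch below (names are illustrative, not taken from the patch):

static void do_paprd_step(struct ath_hw *ah)
{
	/* Skip PAPRD entirely when the chip lacks it or it is disabled. */
	if (!ar9003_is_paprd_enabled(ah))
		return;

	/* ... run PAPRD training and apply the pre-distortion tables ... */
}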
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 759f5f5a7154..3afc24bde6d6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -586,32 +586,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
ath9k_hw_synth_delay(ah, chan, synthDelay);
}
-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
- switch (rx) {
- case 0x5:
+ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
- break;
- default:
- break;
- }
+
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
- else
- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ tx = 3;
- if (tx == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
}
/*
@@ -784,7 +771,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
if (chan->channel == 2484)
- ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
+ ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 9a48e3d2f231..107956298488 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -32,6 +32,7 @@
#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+#define AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT 16
#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
@@ -697,13 +698,6 @@
#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
-#define AR_PHY_65NM_CH0_RXTX1 0x16100
-#define AR_PHY_65NM_CH0_RXTX2 0x16104
-#define AR_PHY_65NM_CH1_RXTX1 0x16500
-#define AR_PHY_65NM_CH1_RXTX2 0x16504
-#define AR_PHY_65NM_CH2_RXTX1 0x16900
-#define AR_PHY_65NM_CH2_RXTX2 0x16904
-
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL 0xf000
@@ -1151,9 +1145,8 @@
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
-#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
- 0x580 : 0x490))
+#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490))
+
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
@@ -1169,15 +1162,13 @@
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
-#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
- 0x584 : 0x494))
+#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494))
+
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
-#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
- 0x588 : 0x498))
+#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498))
+
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
@@ -1193,9 +1184,8 @@
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
-#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
- 0x58c : 0x49c))
+#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c))
+
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
@@ -1214,7 +1204,8 @@
#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
-#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0)
+#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0))
+
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
@@ -1228,7 +1219,8 @@
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
-#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4)
+#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4))
+
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
@@ -1236,7 +1228,8 @@
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
-#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8)
+#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x598 : 0x4a8))
+
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
@@ -1285,4 +1278,43 @@
#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
+/* Manual Peak detector calibration */
+#define AR_PHY_65NM_BASE 0x16000
+#define AR_PHY_65NM_RXRF_GAINSTAGES(i) (AR_PHY_65NM_BASE + \
+ (i * 0x400) + 0x8)
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE 0x80000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE_S 31
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC 0x00000002
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC_S 1
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR 0x70000000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_S 28
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR 0x03800000
+#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_S 23
+
+#define AR_PHY_65NM_RXTX2(i) (AR_PHY_65NM_BASE + \
+ (i * 0x400) + 0x104)
+#define AR_PHY_65NM_RXTX2_RXON_OVR 0x00001000
+#define AR_PHY_65NM_RXTX2_RXON_OVR_S 12
+#define AR_PHY_65NM_RXTX2_RXON 0x00000800
+#define AR_PHY_65NM_RXTX2_RXON_S 11
+
+#define AR_PHY_65NM_RXRF_AGC(i) (AR_PHY_65NM_BASE + \
+ (i * 0x400) + 0xc)
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE 0x80000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE_S 31
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR 0x40000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR_S 30
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR 0x20000000
+#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR_S 29
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR 0x1E000000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR_S 25
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR 0x00078000
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR_S 15
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR 0x01F80000
+#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR_S 19
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR 0x00007e00
+#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR_S 9
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT 0x00000004
+#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S 2
+
#endif /* AR9003_PHY_H */
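
The new AR_PHY_65NM_* macros address one 65 nm analog block per chain at 0x16000 + chain * 0x400 plus a fixed offset, so chain 1's AGC register, for example, sits at 0x1640c. A sketch of the assumed usage with the existing REG_RMW_FIELD() helper (illustrative only; the actual manual peak-detector calibration sequence is not part of this hunk):

static void force_agc_override(struct ath_hw *ah, int chain, bool on)
{
	/* Take (or release) manual control of the AGC on one chain. */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, on ? 1 : 0);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, on ? 1 : 0);
}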
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 1d8235e19f0f..f69d292bdc02 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -211,6 +211,8 @@ static const u32 ar9340_1p0_radio_core_40M[][2] = {
{0x0001609c, 0x02566f3a},
{0x000160ac, 0xa4647c00},
{0x000160b0, 0x01885f5a},
+ {0x00008244, 0x0010f400},
+ {0x0000824c, 0x0001e800},
};
#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
@@ -1273,9 +1275,9 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000081f8, 0x00000000},
{0x000081fc, 0x00000000},
{0x00008240, 0x00100000},
- {0x00008244, 0x0010f424},
+ {0x00008244, 0x0010f3d7},
{0x00008248, 0x00000800},
- {0x0000824c, 0x0001e848},
+ {0x0000824c, 0x0001e7ae},
{0x00008250, 0x00000000},
{0x00008254, 0x00000000},
{0x00008258, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 58f30f65c6b6..ccc42a71b436 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index fb4497fc7a3d..a3710f3bb90c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -18,7 +18,7 @@
#ifndef INITVALS_9485_H
#define INITVALS_9485_H
-/* AR9485 1.0 */
+/* AR9485 1.1 */
#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
@@ -31,6 +31,11 @@ static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
/* Addr allmodes */
+ {0x00009e00, 0x037216a0},
+ {0x00009e04, 0x00182020},
+ {0x00009e18, 0x00000000},
+ {0x00009e2c, 0x00004121},
+ {0x00009e44, 0x02282324},
{0x0000a000, 0x00060005},
{0x0000a004, 0x00810080},
{0x0000a008, 0x00830082},
@@ -164,6 +169,11 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -198,6 +208,22 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
{0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
{0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -234,9 +260,193 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
-#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+ {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+ {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+ {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+ {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
@@ -245,19 +455,19 @@ static const u32 ar9485_1_1[][2] = {
{0x0000a580, 0x00000000},
{0x0000a584, 0x00000000},
{0x0000a588, 0x00000000},
- {0x0000a58c, 0x00000000},
- {0x0000a590, 0x00000000},
- {0x0000a594, 0x00000000},
- {0x0000a598, 0x00000000},
- {0x0000a59c, 0x00000000},
- {0x0000a5a0, 0x00000000},
- {0x0000a5a4, 0x00000000},
- {0x0000a5a8, 0x00000000},
- {0x0000a5ac, 0x00000000},
- {0x0000a5b0, 0x00000000},
- {0x0000a5b4, 0x00000000},
- {0x0000a5b8, 0x00000000},
- {0x0000a5bc, 0x00000000},
+ {0x0000a58c, 0x01804000},
+ {0x0000a590, 0x02808a02},
+ {0x0000a594, 0x0340ca02},
+ {0x0000a598, 0x0340cd03},
+ {0x0000a59c, 0x0340cd03},
+ {0x0000a5a0, 0x06415304},
+ {0x0000a5a4, 0x04c11905},
+ {0x0000a5a8, 0x06415905},
+ {0x0000a5ac, 0x06415905},
+ {0x0000a5b0, 0x06415905},
+ {0x0000a5b4, 0x06415905},
+ {0x0000a5b8, 0x06415905},
+ {0x0000a5bc, 0x06415905},
};
static const u32 ar9485_1_1_radio_core[][2] = {
@@ -340,7 +550,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@@ -362,7 +572,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
{0x00009d18, 0x00000000},
{0x00009d1c, 0x00000000},
{0x00009e08, 0x0038233c},
- {0x00009e24, 0x9927b515},
+ {0x00009e24, 0x992bb515},
{0x00009e28, 0x12ef0200},
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
@@ -427,7 +637,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
{0x0000a408, 0x0e79e5c6},
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739cf},
- {0x0000a418, 0x2d0019ce},
+ {0x0000a418, 0x2d0021ce},
{0x0000a41c, 0x1ce739ce},
{0x0000a420, 0x000001ce},
{0x0000a424, 0x1ce739ce},
@@ -443,8 +653,8 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a5c4, 0xbfad9d74},
- {0x0000a5c8, 0x0048060a},
- {0x0000a5cc, 0x00000637},
+ {0x0000a5c8, 0x00480605},
+ {0x0000a5cc, 0x00002e37},
{0x0000a760, 0x03020100},
{0x0000a764, 0x09080504},
{0x0000a768, 0x0d0c0b0a},
@@ -464,17 +674,22 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
static const u32 ar9485_common_rx_gain_1_1[][2] = {
/* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x01800082},
- {0x0000a014, 0x01820181},
- {0x0000a018, 0x01840183},
- {0x0000a01c, 0x01880185},
- {0x0000a020, 0x018a0189},
- {0x0000a024, 0x02850284},
- {0x0000a028, 0x02890288},
+ {0x00009e00, 0x03721b20},
+ {0x00009e04, 0x00082020},
+ {0x00009e18, 0x0300501e},
+ {0x00009e2c, 0x00002e21},
+ {0x00009e44, 0x02182324},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
{0x0000a02c, 0x03850384},
{0x0000a030, 0x03890388},
{0x0000a034, 0x038b038a},
@@ -496,15 +711,15 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x21212128},
- {0x0000a098, 0x171c1c1c},
- {0x0000a09c, 0x02020212},
- {0x0000a0a0, 0x00000202},
+ {0x0000a080, 0x18181818},
+ {0x0000a084, 0x18181818},
+ {0x0000a088, 0x18181818},
+ {0x0000a08c, 0x18181818},
+ {0x0000a090, 0x18181818},
+ {0x0000a094, 0x18181818},
+ {0x0000a098, 0x17181818},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
@@ -512,22 +727,22 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x111f1100},
- {0x0000a0c8, 0x111d111e},
- {0x0000a0cc, 0x111b111c},
- {0x0000a0d0, 0x22032204},
- {0x0000a0d4, 0x22012202},
- {0x0000a0d8, 0x221f2200},
- {0x0000a0dc, 0x221d221e},
- {0x0000a0e0, 0x33013302},
- {0x0000a0e4, 0x331f3300},
- {0x0000a0e8, 0x4402331e},
- {0x0000a0ec, 0x44004401},
- {0x0000a0f0, 0x441e441f},
- {0x0000a0f4, 0x55015502},
- {0x0000a0f8, 0x551f5500},
- {0x0000a0fc, 0x6602551e},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
{0x0000a100, 0x66006601},
{0x0000a104, 0x661e661f},
{0x0000a108, 0x7703661d},
@@ -636,17 +851,12 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -850,4 +1060,6 @@ static const u32 ar9485_1_1_mac_core[][2] = {
{0x000083d0, 0x000301ff},
};
+#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 843e79f67ff2..0c2ac0c6dc89 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -768,9 +768,9 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
{0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
/* Addr allmodes */
- {0x00018c00, 0x18212ede},
+ {0x00018c00, 0x18213ede},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0003780c},
};
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index dfe6a4707fd2..42794c546a40 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -129,10 +129,10 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXMAXTRY 13
#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
+ ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
#define ATH_AGGR_DELIM_SZ 4
#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
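
For reference, the remapped macro resolves as follows; mac80211 defines IEEE80211_AC_VO=0, VI=1, BE=2 and BK=3, so the numeric results are unchanged from the old WME_AC_* values:

/* Worked examples of TID_TO_WME_AC():
 *   TID 0 or 3              -> IEEE80211_AC_BE (2)
 *   TID 1 or 2              -> IEEE80211_AC_BK (3)
 *   TID 4 or 5              -> IEEE80211_AC_VI (1)
 *   TID 6, 7 (or any other) -> IEEE80211_AC_VO (0)
 */
int ac = TID_TO_WME_AC(5);	/* evaluates to IEEE80211_AC_VI */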
@@ -259,19 +259,21 @@ struct ath_atx_tid {
};
struct ath_node {
-#ifdef CONFIG_ATH9K_DEBUGFS
- struct list_head list; /* for sc->nodes */
-#endif
+ struct ath_softc *sc;
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
- struct ath_atx_tid tid[WME_NUM_TID];
- struct ath_atx_ac ac[WME_NUM_AC];
+ struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
+ struct ath_atx_ac ac[IEEE80211_NUM_ACS];
int ps_key;
u16 maxampdu;
u8 mpdudensity;
bool sleeping;
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+ struct dentry *node_stat;
+#endif
};
#define AGGR_CLEANUP BIT(1)
@@ -299,9 +301,9 @@ struct ath_tx {
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
- struct ath_txq *txq_map[WME_NUM_AC];
- u32 txq_max_pending[WME_NUM_AC];
- u16 max_aggr_framelen[WME_NUM_AC][4][32];
+ struct ath_txq *txq_map[IEEE80211_NUM_ACS];
+ u32 txq_max_pending[IEEE80211_NUM_ACS];
+ u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
};
struct ath_rx_edma {
@@ -315,7 +317,6 @@ struct ath_rx {
u32 *rxlink;
u32 num_pkts;
unsigned int rxfilter;
- spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
struct ath_buf *rx_bufptr;
@@ -326,7 +327,6 @@ struct ath_rx {
int ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
-void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
@@ -437,6 +437,7 @@ void ath9k_set_beacon(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+#define ATH_ANI_MAX_SKIP_COUNT 10
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
#define ATH_PLL_WORK_INTERVAL 100
@@ -460,6 +461,12 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
/* BTCOEX */
/**********/
+#define ATH_DUMP_BTCOEX(_s, _val) \
+ do { \
+ len += snprintf(buf + len, size - len, \
+ "%20s : %10d\n", _s, (_val)); \
+ } while (0)
+
enum bt_op_flags {
BT_OP_PRIORITY_DETECTED,
BT_OP_SCAN,
@@ -478,8 +485,10 @@ struct ath_btcoex {
u32 btscan_no_stomp; /* in usec */
u32 duty_cycle;
u32 bt_wait_time;
+ int rssi_count;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
+ u8 stomp_audio;
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -492,6 +501,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size);
#else
static inline int ath9k_init_btcoex(struct ath_softc *sc)
{
@@ -518,6 +528,10 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
}
+static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ return 0;
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
struct ath9k_wow_pattern {
@@ -630,7 +644,6 @@ void ath_ant_comb_update(struct ath_softc *sc);
enum sc_op_flags {
SC_OP_INVALID,
SC_OP_BEACONS,
- SC_OP_RXFLUSH,
SC_OP_ANI_RUN,
SC_OP_PRIM_STA_VIF,
SC_OP_HW_RESET,
@@ -642,6 +655,7 @@ enum sc_op_flags {
#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
+#define PS_WAIT_FOR_ANI BIT(5)
struct ath_rate_table;
@@ -708,9 +722,6 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
- spinlock_t nodes_lock;
- struct list_head nodes; /* basically, stations */
- unsigned int tx_complete_poll_work_seen;
#endif
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
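
ATH_DUMP_BTCOEX() deliberately relies on buf, len and size being in scope at the call site. A rough sketch of how ath9k_dump_btcoex() might be built on top of it (illustrative; the real implementation and the exact fields it prints are not shown in this patch):

int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	u32 len = 0;

	ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
	ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
	ATH_DUMP_BTCOEX("RSSI Count", btcoex->rssi_count);

	return len;
}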
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1b48414dca95..2ca355e94da6 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -46,7 +46,7 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
- txq = sc->tx.txq_map[WME_AC_BE];
+ txq = sc->tx.txq_map[IEEE80211_AC_BE];
ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
if (ah->slottime == ATH9K_SLOT_TIME_20)
@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
}
skb = ieee80211_beacon_get(hw, vif);
@@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
bf = ath9k_beacon_generate(sc->hw, vif);
- WARN_ON(!bf);
if (sc->beacon.bmisscnt != 0) {
ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 419e9a3f2fed..9963b0bf9f72 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -49,6 +49,7 @@ static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
{ 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+ { 0xffffff01, 0xffffffff, 0xffffff01, 0xffffffff }, /* STOMP_AUDIO */
};
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
@@ -195,7 +196,7 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
ah->btcoex_hw.mci.need_flush_btinfo = false;
ah->btcoex_hw.mci.wlan_cal_seq = 0;
ah->btcoex_hw.mci.wlan_cal_done = 0;
- ah->btcoex_hw.mci.config = 0x2201;
+ ah->btcoex_hw.mci.config = (AR_SREV_9462(ah)) ? 0x2201 : 0xa4c1;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
@@ -218,27 +219,45 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
+ u8 txprio_shift[] = { 24, 16, 16, 0 }; /* tx priority weight */
+ bool concur_tx = (mci_hw->concur_tx && btcoex_hw->tx_prio[stomp_type]);
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
+ int i;
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- const u32 *weight = ar9003_wlan_weights[stomp_type];
- int i;
-
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
- btcoex_hw->mci.stomp_ftp)
- stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
- weight = mci_wlan_weights[stomp_type];
- }
-
- for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
- btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
- btcoex_hw->wlan_weight[i] = weight[i];
- }
- } else {
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->bt_coex_weights =
SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+ return;
+ }
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ enum ath_stomp_type stype =
+ ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex_hw->mci.stomp_ftp) ?
+ ATH_BTCOEX_STOMP_LOW_FTP : stomp_type;
+ weight = mci_wlan_weights[stype];
}
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex_hw->wlan_weight[i] = weight[i];
+ if (concur_tx && i) {
+ btcoex_hw->wlan_weight[i] &=
+ ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i] |=
+ (btcoex_hw->tx_prio[stomp_type] <<
+ txprio_shift[i-1]);
+ }
+ }
+ /* Last WLAN weight has to be adjusted wrt tx priority */
+ if (concur_tx) {
+ btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
+ btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
+ << txprio_shift[i-1]);
+ }
+
}
EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
@@ -385,3 +404,13 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
}
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
+
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ btcoex->tx_prio[i] = stomp_txprio[i];
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_set_concur_txprio);
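
To make the tx_prio insertion above concrete: with the STOMP_LOW MCI weights {0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b} and an example priority of 0x3f, the loop rewrites one byte of each word (skipping word 0), and the final fix-up then patches the low byte of the last word:

/* i = 1, shift 24:  0x3b3b3b01 -> 0x3f3b3b01
 * i = 2, shift 16:  0x3b3b3b01 -> 0x3b3f3b01
 * i = 3, shift 16:  0x3b3b3b3b -> 0x3b3f3b3b
 * fix-up, shift 0:  0x3b3f3b3b -> 0x3b3f3b3f
 */
u32 w = 0x3b3b3b01, prio = 0x3f, shift = 24;
w = (w & ~(0xffu << shift)) | (prio << shift);	/* w == 0x3f3b3b01 */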
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 385197ad79b0..6de26ea5d5fa 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -39,6 +39,9 @@
#define ATH_BTCOEX_RX_WAIT_TIME 100
#define ATH_BTCOEX_STOMP_FTP_THRESH 5
+#define ATH_BTCOEX_HT20_MAX_TXPOWER 0x14
+#define ATH_BTCOEX_HT40_MAX_TXPOWER 0x10
+
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
@@ -47,6 +50,7 @@ enum ath_stomp_type {
ATH_BTCOEX_STOMP_LOW,
ATH_BTCOEX_STOMP_NONE,
ATH_BTCOEX_STOMP_LOW_FTP,
+ ATH_BTCOEX_STOMP_AUDIO,
ATH_BTCOEX_STOMP_MAX
};
@@ -84,6 +88,8 @@ struct ath9k_hw_mci {
u8 bt_ver_minor;
u8 bt_state;
u8 stomp_ftp;
+ bool concur_tx;
+ u32 last_recovery;
};
struct ath_btcoex_hw {
@@ -98,6 +104,7 @@ struct ath_btcoex_hw {
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
+ u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
};
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
@@ -112,5 +119,6 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
void ath9k_hw_btcoex_disable(struct ath_hw *ah);
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type);
+void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e5cceb077574..1e8508530e98 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
if (chan && chan->noisefloor) {
s8 delta = chan->noisefloor -
+ ATH9K_NF_CAL_NOISE_THRESH -
ath9k_hw_get_default_nf(ah, chan);
if (delta > 0)
noise += delta;
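
A quick numeric illustration of the new threshold (the nominal per-band NF is assumed to be -110 dBm here, purely for the example): with chan->noisefloor = -95 dBm, delta = -95 - 6 - (-110) = 9, so the reported channel noise is raised by 9 dB. A measured noise floor within ATH9K_NF_CAL_NOISE_THRESH (6 dB) of the nominal value no longer inflates the reported noise at all.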
@@ -410,6 +411,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
+ ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 1060c19a5012..60dcb6c22db9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -21,6 +21,9 @@
#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+/* Internal noise floor can vary by about 6 dB depending on the frequency */
+#define ATH9K_NF_CAL_NOISE_THRESH 6
+
#define NUM_NF_READINGS 6
#define ATH9K_NF_CAL_HIST_MAX 5
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ad14fecc76c6..5f845beeb18b 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,18 +23,10 @@
/* Common header for Atheros 802.11n base driver cores */
-#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
-/* These must match mac80211 skb queue mapping numbers */
-#define WME_AC_VO 0
-#define WME_AC_VI 1
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_NUM_AC 4
-
#define ATH_RSSI_DUMMY_MARKER 0x127
#define ATH_RSSI_LPF_LEN 10
#define RSSI_LPF_THRESHOLD -20
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6727b566d294..e585fc827c50 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -512,62 +512,19 @@ static const struct file_operations fops_interrupt = {
.llseek = default_llseek,
};
-#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
-#define PR(str, elem) \
- do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
- sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
- if (len >= size) \
- goto done; \
-} while(0)
-
-#define PRX(str, elem) \
-do { \
- len += snprintf(buf + len, size - len, \
- "%s%13u%11u%10u%10u\n", str, \
- (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
- (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
- (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
- (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
- if (len >= size) \
- goto done; \
-} while(0)
-
-#define PRQLE(str, elem) \
-do { \
- len += snprintf(buf + len, size - len, \
- "%s%13i%11i%10i%10i\n", str, \
- list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
- list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
- list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
- list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
- if (len >= size) \
- goto done; \
-} while (0)
-
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
char *buf;
- unsigned int len = 0, size = 8000;
- int i;
+ unsigned int len = 0, size = 2048;
ssize_t retval = 0;
- char tmp[32];
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
- " poll-work-seen: %u\n"
- "%30s %10s%10s%10s\n\n",
- ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
- sc->tx_complete_poll_work_seen,
+ len += sprintf(buf, "%30s %10s%10s%10s\n\n",
"BE", "BK", "VI", "VO");
PR("MPDUs Queued: ", queued);
@@ -587,62 +544,11 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("DELIM Underrun: ", delim_underrun);
PR("TX-Pkts-All: ", tx_pkts_all);
PR("TX-Bytes-All: ", tx_bytes_all);
- PR("hw-put-tx-buf: ", puttxbuf);
- PR("hw-tx-start: ", txstart);
- PR("hw-tx-proc-desc: ", txprocdesc);
+ PR("HW-put-tx-buf: ", puttxbuf);
+ PR("HW-tx-start: ", txstart);
+ PR("HW-tx-proc-desc: ", txprocdesc);
PR("TX-Failed: ", txfailed);
- len += snprintf(buf + len, size - len,
- "%s%11p%11p%10p%10p\n", "txq-memory-address:",
- sc->tx.txq_map[WME_AC_BE],
- sc->tx.txq_map[WME_AC_BK],
- sc->tx.txq_map[WME_AC_VI],
- sc->tx.txq_map[WME_AC_VO]);
- if (len >= size)
- goto done;
-
- PRX("axq-qnum: ", axq_qnum);
- PRX("axq-depth: ", axq_depth);
- PRX("axq-ampdu_depth: ", axq_ampdu_depth);
- PRX("axq-stopped ", stopped);
- PRX("tx-in-progress ", axq_tx_inprogress);
- PRX("pending-frames ", pending_frames);
- PRX("txq_headidx: ", txq_headidx);
- PRX("txq_tailidx: ", txq_headidx);
-
- PRQLE("axq_q empty: ", axq_q);
- PRQLE("axq_acq empty: ", axq_acq);
- for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
- snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
- PRQLE(tmp, txq_fifo[i]);
- }
-
- /* Print out more detailed queue-info */
- for (i = 0; i <= WME_AC_BK; i++) {
- struct ath_txq *txq = &(sc->tx.txq[i]);
- struct ath_atx_ac *ac;
- struct ath_atx_tid *tid;
- if (len >= size)
- goto done;
- spin_lock_bh(&txq->axq_lock);
- if (!list_empty(&txq->axq_acq)) {
- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
- list);
- len += snprintf(buf + len, size - len,
- "txq[%i] first-ac: %p sched: %i\n",
- i, ac, ac->sched);
- if (list_empty(&ac->tid_q) || (len >= size))
- goto done_for;
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
- list);
- len += snprintf(buf + len, size - len,
- " first-tid: %p sched: %i paused: %i\n",
- tid, tid->sched, tid->paused);
- }
- done_for:
- spin_unlock_bh(&txq->axq_lock);
- }
-done:
if (len > size)
len = size;
@@ -652,62 +558,41 @@ done:
return retval;
}
-static ssize_t read_file_stations(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t read_file_queues(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
+ struct ath_txq *txq;
char *buf;
- unsigned int len = 0, size = 64000;
- struct ath_node *an = NULL;
+ unsigned int len = 0, size = 1024;
ssize_t retval = 0;
- int q;
+ int i;
+ char *qname[4] = {"VO", "VI", "BE", "BK"};
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- len += snprintf(buf + len, size - len,
- "Stations:\n"
- " tid: addr sched paused buf_q-empty an ac baw\n"
- " ac: addr sched tid_q-empty txq\n");
-
- spin_lock(&sc->nodes_lock);
- list_for_each_entry(an, &sc->nodes, list) {
- unsigned short ma = an->maxampdu;
- if (ma == 0)
- ma = 65535; /* see ath_lookup_rate */
- len += snprintf(buf + len, size - len,
- "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
- an->vif->addr, an->sta->addr, ma,
- (unsigned int)(an->mpdudensity));
- if (len >= size)
- goto done;
-
- for (q = 0; q < WME_NUM_TID; q++) {
- struct ath_atx_tid *tid = &(an->tid[q]);
- len += snprintf(buf + len, size - len,
- " tid: %p %s %s %i %p %p %hu\n",
- tid, tid->sched ? "sched" : "idle",
- tid->paused ? "paused" : "running",
- skb_queue_empty(&tid->buf_q),
- tid->an, tid->ac, tid->baw_size);
- if (len >= size)
- goto done;
- }
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+ len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
- for (q = 0; q < WME_NUM_AC; q++) {
- struct ath_atx_ac *ac = &(an->ac[q]);
- len += snprintf(buf + len, size - len,
- " ac: %p %s %i %p\n",
- ac, ac->sched ? "sched" : "idle",
- list_empty(&ac->tid_q), ac->txq);
- if (len >= size)
- goto done;
- }
+ ath_txq_lock(sc, txq);
+
+ len += snprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += snprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += snprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
+
+ ath_txq_unlock(sc, txq);
}
-done:
- spin_unlock(&sc->nodes_lock);
if (len > size)
len = size;
@@ -837,6 +722,9 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
len += snprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "PLL RX Hang",
sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MCI Reset",
+ sc->debug.stats.reset[RESET_TYPE_MCI]);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -919,8 +807,8 @@ static const struct file_operations fops_xmit = {
.llseek = default_llseek,
};
-static const struct file_operations fops_stations = {
- .read = read_file_stations,
+static const struct file_operations fops_queues = {
+ .read = read_file_queues,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -973,7 +861,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-LENGTH-ERR", rx_len_err);
RXS_ERR("RX-OOM-ERR", rx_oom_err);
RXS_ERR("RX-RATE-ERR", rx_rate_err);
- RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
@@ -1586,6 +1473,250 @@ static const struct file_operations fops_samps = {
#endif
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ u32 len = 0, size = 1500;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!sc->sc_ah->common.btcoex_enabled) {
+ len = snprintf(buf, size, "%s\n",
+ "BTCOEX is disabled");
+ goto exit;
+ }
+
+ len = ath9k_dump_btcoex(sc, buf, size);
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = read_file_btcoex,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+#endif
+
+static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_softc *sc = an->sc;
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ u32 len = 0, size = 4096;
+ char *buf;
+ size_t retval;
+ int tidno, acno;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!an->sta->ht_cap.ht_supported) {
+ len = snprintf(buf, size, "%s\n",
+ "HT not supported");
+ goto exit;
+ }
+
+ len = snprintf(buf, size, "Max-AMPDU: %d\n",
+ an->maxampdu);
+ len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+ an->mpdudensity);
+
+ len += snprintf(buf + len, size - len,
+ "%2s%7s\n", "AC", "SCHED");
+
+ for (acno = 0, ac = &an->ac[acno];
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
+ txq = ac->txq;
+ ath_txq_lock(sc, txq);
+ len += snprintf(buf + len, size - len,
+ "%2d%7d\n",
+ acno, ac->sched);
+ ath_txq_unlock(sc, txq);
+ }
+
+ len += snprintf(buf + len, size - len,
+ "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+ "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+ "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
+ txq = tid->ac->txq;
+ ath_txq_lock(sc, txq);
+ len += snprintf(buf + len, size - len,
+ "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ tid->tidno, tid->seq_start, tid->seq_next,
+ tid->baw_size, tid->baw_head, tid->baw_tail,
+ tid->bar_index, tid->sched, tid->paused);
+ ath_txq_unlock(sc, txq);
+ }
+exit:
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_node_stat = {
+ .read = read_file_node_stat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
+ dir, an, &fops_node_stat);
+}
+
+void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ debugfs_remove(an->node_stat);
+}
+
+/* Ethtool support for get-stats */
+
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ AMKSTR(d_tx_pkts),
+ AMKSTR(d_tx_bytes),
+ AMKSTR(d_tx_mpdus_queued),
+ AMKSTR(d_tx_mpdus_completed),
+ AMKSTR(d_tx_mpdu_xretries),
+ AMKSTR(d_tx_aggregates),
+ AMKSTR(d_tx_ampdus_queued_hw),
+ AMKSTR(d_tx_ampdus_queued_sw),
+ AMKSTR(d_tx_ampdus_completed),
+ AMKSTR(d_tx_ampdu_retries),
+ AMKSTR(d_tx_ampdu_xretries),
+ AMKSTR(d_tx_fifo_underrun),
+ AMKSTR(d_tx_op_exceeded),
+ AMKSTR(d_tx_timer_expiry),
+ AMKSTR(d_tx_desc_cfg_err),
+ AMKSTR(d_tx_data_underrun),
+ AMKSTR(d_tx_delim_underrun),
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
+
+void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+}
+
+int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_SSTATS_LEN;
+ return 0;
+}
+
+#define AWDATA(elem) \
+ do { \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].elem; \
+ } while (0)
+
+#define AWDATA_RX(elem) \
+ do { \
+ data[i++] = sc->debug.stats.rxstats.elem; \
+ } while (0)
+
+void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath_softc *sc = hw->priv;
+ int i = 0;
+
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
+ AWDATA_RX(rx_pkts_all);
+ AWDATA_RX(rx_bytes_all);
+
+ AWDATA(tx_pkts_all);
+ AWDATA(tx_bytes_all);
+ AWDATA(queued);
+ AWDATA(completed);
+ AWDATA(xretries);
+ AWDATA(a_aggr);
+ AWDATA(a_queued_hw);
+ AWDATA(a_queued_sw);
+ AWDATA(a_completed);
+ AWDATA(a_retries);
+ AWDATA(a_xretries);
+ AWDATA(fifo_underrun);
+ AWDATA(xtxop);
+ AWDATA(timer_exp);
+ AWDATA(desc_cfg_err);
+ AWDATA(data_underrun);
+ AWDATA(delim_underrun);
+
+ AWDATA_RX(decrypt_crc_err);
+ AWDATA_RX(phy_err);
+ AWDATA_RX(mic_err);
+ AWDATA_RX(pre_delim_crc_err);
+ AWDATA_RX(post_delim_crc_err);
+ AWDATA_RX(decrypt_busy_err);
+
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
+
+ WARN_ON(i != ATH9K_SSTATS_LEN);
+}
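
The ethtool hooks above depend on two parallel arrays staying in lockstep: AMKSTR() expands each name into four per-AC strings in BE/BK/VI/VO order, AWDATA()/AWDATA_RX() push the matching counters in exactly the same order, and the final WARN_ON() catches any drift between the two. A minimal standalone sketch of that pattern, with illustrative names only (not driver code):

    /*
     * Standalone sketch of the parallel string/stats arrays used above.
     * Build with: gcc -o et_sketch et_sketch.c
     */
    #include <assert.h>
    #include <stdio.h>

    #define ETH_GSTRING_LEN 32
    #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"

    static const char sketch_gstrings[][ETH_GSTRING_LEN] = {
            "tx_pkts_nic",
            AMKSTR(d_tx_pkts),              /* expands to four per-AC names */
    };
    #define SKETCH_SSTATS_LEN \
            (sizeof(sketch_gstrings) / sizeof(sketch_gstrings[0]))

    /* fake per-AC counters standing in for the driver's txstats[] */
    static unsigned long long tx_pkts[4] = { 10, 20, 30, 40 };

    static void sketch_get_et_stats(unsigned long long *data)
    {
            int i = 0, ac;

            data[i++] = tx_pkts[0] + tx_pkts[1] + tx_pkts[2] + tx_pkts[3];
            for (ac = 0; ac < 4; ac++)      /* same BE/BK/VI/VO order as AMKSTR */
                    data[i++] = tx_pkts[ac];

            assert(i == SKETCH_SSTATS_LEN); /* mirrors the WARN_ON() above */
    }

    int main(void)
    {
            unsigned long long data[SKETCH_SSTATS_LEN];
            size_t i;

            sketch_get_et_stats(data);
            for (i = 0; i < SKETCH_SSTATS_LEN; i++)
                    printf("%-20s %llu\n", sketch_gstrings[i], data[i]);
            return 0;
    }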
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1609,16 +1740,16 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_interrupt);
debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_xmit);
+ debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_queues);
debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[WME_AC_BK]);
+ &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[WME_AC_BE]);
+ &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[WME_AC_VI]);
+ &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
- &sc->tx.txq_max_pending[WME_AC_VO]);
- debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_stations);
+ &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_misc);
debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
@@ -1658,6 +1789,9 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_ant_diversity);
-
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
+ &fops_btcoex);
+#endif
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 2ed9785a38fa..6df2ab62dcb7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,6 +41,7 @@ enum ath_reset_type {
RESET_TYPE_PLL_HANG,
RESET_TYPE_MAC_HANG,
RESET_TYPE_BEACON_STUCK,
+ RESET_TYPE_MCI,
__RESET_TYPE_MAX
};
@@ -178,6 +179,21 @@ struct ath_tx_stats {
u32 txfailed;
};
+/*
+ * Various utility macros to print TX/Queue counters.
+ */
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
+#define TXSTATS sc->debug.stats.txstats
+#define PR(str, elem) \
+ do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
+ TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+ } while (0)
+
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
/**
@@ -200,7 +216,6 @@ struct ath_tx_stats {
* @rx_oom_err: No. of frames dropped due to OOM issues.
* @rx_rate_err: No. of frames dropped due to rate errors.
* @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
- * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
* @rx_beacons: No. of beacons received.
 * @rx_frags: No. of rx-fragments received.
*/
@@ -219,7 +234,6 @@ struct ath_rx_stats {
u32 rx_oom_err;
u32 rx_rate_err;
u32 rx_too_many_frags_err;
- u32 rx_drop_rxflush;
u32 rx_beacons;
u32 rx_frags;
};
@@ -291,7 +305,22 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
unsigned int flags);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
-
+int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
#else
#define RX_STAT_INC(c) /* NOP */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index ea2a6cf7ef23..24877b00cbf4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -42,10 +42,15 @@ struct radar_types {
#define MIN_PPB_THRESH 50
#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
+/* percentage of pulse width tolerance */
+#define WIDTH_TOLERANCE 5
+#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
+#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
{ \
- ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE), \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ (PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, \
}
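
WIDTH_LOWER()/WIDTH_UPPER() widen the detector's accepted pulse-width window by the ±5% tolerance, with the +50 term rounding the integer division to the nearest microsecond. A small worked example (standalone, not driver code; the 20/30 values are only illustrative inputs):

    /* Standalone check of the ±5% width rounding used above. */
    #include <stdio.h>

    #define WIDTH_TOLERANCE 5
    #define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
    #define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)

    int main(void)
    {
            /* a nominal 20-30us pulse becomes a 19-32us window:
             * WIDTH_LOWER(20) = (20*95  + 50)/100 = 19
             * WIDTH_UPPER(30) = (30*105 + 50)/100 = 32
             */
            printf("lower(20)=%d upper(30)=%d\n",
                   WIDTH_LOWER(20), WIDTH_UPPER(30));
            return 0;
    }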
@@ -274,7 +279,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
static struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
- .set_domain = dpd_set_domain,
+ .set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
.region = NL80211_DFS_UNSET,
};
@@ -291,10 +296,11 @@ dfs_pattern_detector_init(enum nl80211_dfs_regions region)
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
- if (dpd->set_domain(dpd, region))
+ if (dpd->set_dfs_domain(dpd, region))
return dpd;
pr_err("Could not set DFS domain to %d. ", region);
+ kfree(dpd);
return NULL;
}
EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
index fd0328a30995..cda52f39f28a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -62,7 +62,7 @@ struct radar_detector_specs {
/**
* struct dfs_pattern_detector - DFS pattern detector
* @exit(): destructor
- * @set_domain(): set DFS domain, resets detector lines upon domain changes
+ * @set_dfs_domain(): set DFS domain, resets detector lines upon domain changes
* @add_pulse(): add radar pulse to detector, returns true on detection
* @region: active DFS region, NL80211_DFS_UNSET until set
* @num_radar_types: number of different radar types
@@ -72,7 +72,7 @@ struct radar_detector_specs {
*/
struct dfs_pattern_detector {
void (*exit)(struct dfs_pattern_detector *dpd);
- bool (*set_domain)(struct dfs_pattern_detector *dpd,
+ bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd,
enum nl80211_dfs_regions region);
bool (*add_pulse)(struct dfs_pattern_detector *dpd,
struct pulse_event *pe);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 0512397a293c..971d770722cf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -113,9 +113,34 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
}
}
-bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
+static bool ath9k_hw_nvram_read_blob(struct ath_hw *ah, u32 off,
+ u16 *data)
{
- return common->bus_ops->eeprom_read(common, off, data);
+ u16 *blob_data;
+
+ if (off * sizeof(u16) > ah->eeprom_blob->size)
+ return false;
+
+ blob_data = (u16 *)ah->eeprom_blob->data;
+ *data = blob_data[off];
+ return true;
+}
+
+bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool ret;
+
+ if (ah->eeprom_blob)
+ ret = ath9k_hw_nvram_read_blob(ah, off, data);
+ else
+ ret = common->bus_ops->eeprom_read(common, off, data);
+
+ if (!ret)
+ ath_dbg(common, EEPROM,
+ "unable to read eeprom region at offset %u\n", off);
+
+ return ret;
}
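
ath9k_hw_nvram_read() now prefers a firmware-provided EEPROM blob when one has been loaded, treating the blob as an array of 16-bit words indexed by `off`, and falls back to the bus accessor otherwise. Strictly, reading word `off` touches bytes 2*off and 2*off+1, so the tight bound is `(off + 1) * sizeof(u16) <= size`; the standalone sketch below uses that stricter form and is illustrative only, not the driver's code:

    /* Standalone sketch of a bounds-checked word read from a raw EEPROM image. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static bool blob_read_u16(const uint8_t *blob, size_t size, uint32_t off,
                              uint16_t *data)
    {
            /* word "off" occupies bytes [2*off, 2*off + 1] */
            if ((off + 1) * sizeof(uint16_t) > size)
                    return false;

            memcpy(data, blob + off * sizeof(uint16_t), sizeof(uint16_t));
            return true;
    }

    int main(void)
    {
            uint8_t image[8] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a, 0xf0, 0xde };
            uint16_t word;
            uint32_t off;

            for (off = 0; off < 5; off++) {
                    if (blob_read_u16(image, sizeof(image), off, &word))
                            printf("off %u -> 0x%04x\n", off, word);
                    else
                            printf("off %u -> out of range\n", off);
            }
            return 0;
    }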
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 319c651fa6c5..40d4f62d0f16 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -663,7 +663,7 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
int16_t targetRight);
bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
u16 *indexL, u16 *indexR);
-bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data);
void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
int eep_start_loc, int size);
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 7d075105a85d..c2bfd748eed8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -32,16 +32,12 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
{
- struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.map4k;
int addr, eep_start_loc = 64;
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
- if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
- ath_dbg(common, EEPROM,
- "Unable to read eeprom region\n");
+ if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
return false;
- }
eep_data++;
}
@@ -196,7 +192,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
if (!ath9k_hw_use_flash(ah)) {
- if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
&magic)) {
ath_err(common, "Reading Magic # failed\n");
return false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index cd742fb944c2..3ae1f3df0637 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -33,18 +33,13 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
- struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data;
int addr, eep_start_loc = AR9287_EEP_START_LOC;
eep_data = (u16 *)eep;
for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
- if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
- eep_data)) {
- ath_dbg(common, EEPROM,
- "Unable to read eeprom region\n");
+ if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
return false;
- }
eep_data++;
}
@@ -190,7 +185,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
&magic)) {
ath_err(common, "Reading Magic # failed\n");
return false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index a8ac30a00720..1c25368b3836 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -91,17 +91,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
{
- struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.def;
int addr, ar5416_eep_start_loc = 0x100;
for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
- if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
- eep_data)) {
- ath_err(ath9k_hw_common(ah),
- "Unable to read eeprom region\n");
+ if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
+ eep_data))
return false;
- }
eep_data++;
}
return true;
@@ -271,7 +267,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
bool need_swap = false;
int i, addr, size;
- if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+ if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
ath_err(common, "Reading Magic # failed\n");
return false;
}
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index d9ed141a053e..4b412aaf4f36 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -187,6 +187,24 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
}
}
+static void ath_mci_ftp_adjust(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
+ (mci->num_pan || mci->num_other_acl))
+ ah->btcoex_hw.mci.stomp_ftp =
+ (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
+ else
+ ah->btcoex_hw.mci.stomp_ftp = false;
+ btcoex->bt_wait_time = 0;
+ sc->rx.num_pkts = 0;
+ }
+}
+
/*
* This is the master bt coex timer which runs for every
* 45ms, bt traffic will be given priority during 55% of this
@@ -197,41 +215,46 @@ static void ath_btcoex_period_timer(unsigned long data)
struct ath_softc *sc = (struct ath_softc *) data;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_mci_profile *mci = &btcoex->mci;
+ enum ath_stomp_type stomp_type;
u32 timer_period;
- bool is_btscan;
unsigned long flags;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+ btcoex->bt_wait_time += btcoex->btcoex_period;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
goto skip_hw_wakeup;
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ ath9k_mci_update_rssi(sc);
+
ath9k_ps_wakeup(sc);
+
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath_detect_bt_priority(sc);
- is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
- btcoex->bt_wait_time += btcoex->btcoex_period;
- if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
- if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
- (mci->num_pan || mci->num_other_acl))
- ah->btcoex_hw.mci.stomp_ftp =
- (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
- else
- ah->btcoex_hw.mci.stomp_ftp = false;
- btcoex->bt_wait_time = 0;
- sc->rx.num_pkts = 0;
- }
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ath_mci_ftp_adjust(sc);
spin_lock_bh(&btcoex->btcoex_lock);
- ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
- btcoex->bt_stomp_type);
+ stomp_type = btcoex->bt_stomp_type;
+ timer_period = btcoex->btcoex_no_stomp;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) {
+ if (test_bit(BT_OP_SCAN, &btcoex->op_flags)) {
+ stomp_type = ATH_BTCOEX_STOMP_ALL;
+ timer_period = btcoex->btscan_no_stomp;
+ }
+ } else if (btcoex->stomp_audio >= 5) {
+ stomp_type = ATH_BTCOEX_STOMP_AUDIO;
+ btcoex->stomp_audio = 0;
+ }
+ ath9k_hw_btcoex_bt_stomp(ah, stomp_type);
ath9k_hw_btcoex_enable(ah);
+
spin_unlock_bh(&btcoex->btcoex_lock);
/*
@@ -243,17 +266,16 @@ static void ath_btcoex_period_timer(unsigned long data)
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
- timer_period = is_btscan ? btcoex->btscan_no_stomp :
- btcoex->btcoex_no_stomp;
ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
timer_period * 10);
btcoex->hw_timer_enabled = true;
}
ath9k_ps_restore(sc);
+
skip_hw_wakeup:
- timer_period = btcoex->btcoex_period;
- mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
+ mod_timer(&btcoex->period_timer,
+ jiffies + msecs_to_jiffies(btcoex->btcoex_period));
}
/*
@@ -273,9 +295,10 @@ static void ath_btcoex_no_stomp_timer(void *arg)
spin_lock_bh(&btcoex->btcoex_lock);
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
- test_bit(BT_OP_SCAN, &btcoex->op_flags))
+ (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI) &&
+ test_bit(BT_OP_SCAN, &btcoex->op_flags)))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
ath9k_hw_btcoex_enable(ah);
@@ -451,7 +474,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
- txq = sc->tx.txq_map[WME_AC_BE];
+ txq = sc->tx.txq_map[IEEE80211_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
if (ath9k_hw_mci_is_enabled(ah)) {
@@ -474,4 +497,71 @@ int ath9k_init_btcoex(struct ath_softc *sc)
return 0;
}
+static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ u32 len = 0;
+ int i;
+
+ ATH_DUMP_BTCOEX("Total BT profiles", NUM_PROF(mci));
+ ATH_DUMP_BTCOEX("MGMT", mci->num_mgmt);
+ ATH_DUMP_BTCOEX("SCO", mci->num_sco);
+ ATH_DUMP_BTCOEX("A2DP", mci->num_a2dp);
+ ATH_DUMP_BTCOEX("HID", mci->num_hid);
+ ATH_DUMP_BTCOEX("PAN", mci->num_pan);
+ ATH_DUMP_BTCOEX("ACL", mci->num_other_acl);
+ ATH_DUMP_BTCOEX("BDR", mci->num_bdr);
+ ATH_DUMP_BTCOEX("Aggr. Limit", mci->aggr_limit);
+ ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+ ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+ ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+ ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+ ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
+ ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
+
+ len += snprintf(buf + len, size - len, "BT Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->bt_weight[i]);
+ len += snprintf(buf + len, size - len, "\n");
+ len += snprintf(buf + len, size - len, "WLAN Weights: ");
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->wlan_weight[i]);
+ len += snprintf(buf + len, size - len, "\n");
+ len += snprintf(buf + len, size - len, "Tx Priorities: ");
+ for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
+ len += snprintf(buf + len, size - len, "%08x ",
+ btcoex_hw->tx_prio[i]);
+
+ len += snprintf(buf + len, size - len, "\n");
+
+ return len;
+}
+
+static int ath9k_dump_legacy_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 len = 0;
+
+ ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
+ ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
+ ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
+ ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
+
+ return len;
+}
+
+int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
+{
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ return ath9k_dump_mci_btcoex(sc, buf, size);
+ else
+ return ath9k_dump_legacy_btcoex(sc, buf, size);
+}
+
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index b30596fcf73a..96bfb18078fa 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -331,7 +331,7 @@ struct ath_tx_stats {
u32 skb_success;
u32 skb_failed;
u32 cab_queued;
- u32 queue_stats[WME_NUM_AC];
+ u32 queue_stats[IEEE80211_NUM_ACS];
};
struct ath_rx_stats {
@@ -493,7 +493,7 @@ struct ath9k_htc_priv {
int beaconq;
int cabq;
- int hwq_map[WME_NUM_AC];
+ int hwq_map[IEEE80211_NUM_ACS];
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f42d2eb6af99..d0ce1f5bba10 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -33,7 +33,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
qi.tqi_cwmin = 0;
qi.tqi_cwmax = 0;
} else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
- int qnum = priv->hwq_map[WME_AC_BE];
+ int qnum = priv->hwq_map[IEEE80211_AC_BE];
ath9k_hw_get_txq_props(ah, qnum, &qi_be);
@@ -587,9 +587,9 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
(priv->num_sta_vif > 1) &&
(vif->type == NL80211_IFTYPE_STATION)) {
beacon_configured = false;
- ieee80211_iterate_active_interfaces_atomic(priv->hw,
- ath9k_htc_beacon_iter,
- &beacon_configured);
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_beacon_iter, &beacon_configured);
if (beacon_configured) {
ath_dbg(common, CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 3035deb7a0cd..87110de577ef 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -218,16 +218,16 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BE queued",
- priv->debug.tx_stats.queue_stats[WME_AC_BE]);
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "BK queued",
- priv->debug.tx_stats.queue_stats[WME_AC_BK]);
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VI queued",
- priv->debug.tx_stats.queue_stats[WME_AC_VI]);
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
len += snprintf(buf + len, sizeof(buf) - len,
"%20s : %10u\n", "VO queued",
- priv->debug.tx_stats.queue_stats[WME_AC_VO]);
+ priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
if (len > sizeof(buf))
len = sizeof(buf);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 0eacfc13c915..105582d6b714 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -207,7 +207,7 @@ void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
ath9k_hw_btcoex_init_3wire(priv->ah);
ath_htc_init_btcoex_work(priv);
- qnum = priv->hwq_map[WME_AC_BE];
+ qnum = priv->hwq_map[IEEE80211_AC_BE];
ath9k_hw_init_btcoex_hw(priv->ah, qnum);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d98255eb1b9a..05d5ba66cac3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -549,20 +549,20 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
goto err;
}
- if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BE)) {
ath_err(common, "Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BK)) {
ath_err(common, "Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VI)) {
ath_err(common, "Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
+ if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VO)) {
ath_err(common, "Unable to setup xmit queue for VO traffic\n");
goto err;
}
@@ -694,6 +694,20 @@ err_hw:
return ret;
}
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) },
+ { .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+};
+
static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw)
{
@@ -716,6 +730,9 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ca78e33ca23e..9c07a8fa5134 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,8 +127,9 @@ static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
priv->rearm_ani = false;
priv->reconfig_beacon = false;
- ieee80211_iterate_active_interfaces_atomic(priv->hw,
- ath9k_htc_vif_iter, priv);
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_vif_iter, priv);
if (priv->rearm_ani)
ath9k_htc_start_ani(priv);
@@ -165,8 +166,9 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
- ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
- &iter_data);
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_bssid_iter, &iter_data);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1036,26 +1038,6 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
- if (priv->num_ibss_vif ||
- (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
- ath_err(common, "IBSS coexistence with other modes is not allowed\n");
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
- if (((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC)) &&
- ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
- ath_err(common, "Max. number of beaconing interfaces reached\n");
- mutex_unlock(&priv->mutex);
- return -ENOBUFS;
- }
-
ath9k_htc_ps_wakeup(priv);
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1164,8 +1146,9 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
*/
if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
priv->rearm_ani = false;
- ieee80211_iterate_active_interfaces_atomic(priv->hw,
- ath9k_htc_vif_iter, priv);
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_vif_iter, priv);
if (!priv->rearm_ani)
ath9k_htc_stop_ani(priv);
}
@@ -1366,7 +1349,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
struct ath9k_tx_queue_info qi;
int ret = 0, qnum;
- if (queue >= WME_NUM_AC)
+ if (queue >= IEEE80211_NUM_ACS)
return 0;
mutex_lock(&priv->mutex);
@@ -1393,7 +1376,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
}
if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
- (qnum == priv->hwq_map[WME_AC_BE]))
+ (qnum == priv->hwq_map[IEEE80211_AC_BE]))
ath9k_htc_beaconq_config(priv);
out:
ath9k_htc_ps_restore(priv);
@@ -1486,8 +1469,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
{
if (priv->num_sta_assoc_vif == 1) {
- ieee80211_iterate_active_interfaces_atomic(priv->hw,
- ath9k_htc_bss_iter, priv);
+ ieee80211_iterate_active_interfaces_atomic(
+ priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_htc_bss_iter, priv);
ath9k_htc_set_bssid(priv);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 06cdcb772d78..b6a5a08810b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -21,10 +21,10 @@
/******/
static const int subtype_txq_to_hwq[] = {
- [WME_AC_BE] = ATH_TXQ_AC_BE,
- [WME_AC_BK] = ATH_TXQ_AC_BK,
- [WME_AC_VI] = ATH_TXQ_AC_VI,
- [WME_AC_VO] = ATH_TXQ_AC_VO,
+ [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+ [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+ [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+ [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
};
#define ATH9K_HTC_INIT_TXQ(subtype) do { \
@@ -41,15 +41,15 @@ int get_hw_qnum(u16 queue, int *hwq_map)
{
switch (queue) {
case 0:
- return hwq_map[WME_AC_VO];
+ return hwq_map[IEEE80211_AC_VO];
case 1:
- return hwq_map[WME_AC_VI];
+ return hwq_map[IEEE80211_AC_VI];
case 2:
- return hwq_map[WME_AC_BE];
+ return hwq_map[IEEE80211_AC_BE];
case 3:
- return hwq_map[WME_AC_BK];
+ return hwq_map[IEEE80211_AC_BK];
default:
- return hwq_map[WME_AC_BE];
+ return hwq_map[IEEE80211_AC_BE];
}
}
@@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
switch (qnum) {
case 0:
- TX_QSTAT_INC(WME_AC_VO);
+ TX_QSTAT_INC(IEEE80211_AC_VO);
epid = priv->data_vo_ep;
break;
case 1:
- TX_QSTAT_INC(WME_AC_VI);
+ TX_QSTAT_INC(IEEE80211_AC_VI);
epid = priv->data_vi_ep;
break;
case 2:
- TX_QSTAT_INC(WME_AC_BE);
+ TX_QSTAT_INC(IEEE80211_AC_BE);
epid = priv->data_be_ep;
break;
case 3:
default:
- TX_QSTAT_INC(WME_AC_BK);
+ TX_QSTAT_INC(IEEE80211_AC_BK);
epid = priv->data_bk_ep;
break;
}
@@ -1082,7 +1082,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
rx_status->antenna = rxbuf->rxstatus.rs_antenna;
- rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 4a9570dfba72..aac4a406a513 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
skb, htc_hdr->endpoint_id,
txok);
+ } else {
+ kfree_skb(skb);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1829b445d0b0..7cb787065913 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2153,9 +2153,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
AR_RTC_FORCE_WAKE_EN);
udelay(50);
- if (ath9k_hw_mci_is_enabled(ah))
- ar9003_mci_set_power_awake(ah);
-
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
if (val == AR_RTC_STATUS_ON)
@@ -2171,6 +2168,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
return false;
}
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_power_awake(ah);
+
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
return true;
@@ -2561,11 +2561,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
}
- if (AR_SREV_9485_10(ah)) {
- pCap->pcie_lcr_extsync_en = true;
- pCap->pcie_lcr_offset = 0x80;
- }
-
if (ath9k_hw_dfs_tested(ah))
pCap->hw_caps |= ATH9K_HW_CAP_DFS;
@@ -2604,6 +2599,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
}
+ if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbc1b7a4cbfd..9d26fc56ca56 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -20,6 +20,7 @@
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/firmware.h>
#include "mac.h"
#include "ani.h"
@@ -247,6 +248,7 @@ enum ath9k_hw_caps {
ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18),
ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
+ ATH9K_HW_CAP_PAPRD = BIT(20),
};
/*
@@ -273,8 +275,6 @@ struct ath9k_hw_capabilities {
u8 rx_status_len;
u8 tx_desc_len;
u8 txs_len;
- u16 pcie_lcr_offset;
- bool pcie_lcr_extsync_en;
};
struct ath9k_ops_config {
@@ -401,6 +401,7 @@ enum ath9k_int {
struct ath9k_hw_cal_data {
u16 channel;
u32 channelFlags;
+ u32 chanmode;
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
@@ -834,6 +835,7 @@ struct ath_hw {
int coarse_low[5];
int firpwr[5];
enum ath9k_ani_cmd ani_function;
+ u32 ani_skip_count;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex_hw btcoex_hw;
@@ -875,7 +877,6 @@ struct ath_hw {
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
- struct ar5416IniArray ini_japan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
@@ -921,6 +922,8 @@ struct ath_hw {
bool is_clk_25mhz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
+
+ const struct firmware *eeprom_blob;
};
struct ath_bus_ops {
@@ -928,7 +931,6 @@ struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
- void (*extn_synch_en)(struct ath_common *common);
void (*aspm_init)(struct ath_common *common);
};
@@ -1060,9 +1062,11 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
int chain);
int ar9003_paprd_create_curve(struct ath_hw *ah,
struct ath9k_hw_cal_data *caldata, int chain);
-int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
int ar9003_paprd_init_table(struct ath_hw *ah);
bool ar9003_paprd_is_done(struct ath_hw *ah);
+bool ar9003_is_paprd_enabled(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
/* Hardware family op attach helpers */
void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fad3ccd5cd91..f69ef5d48c7b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -23,6 +23,11 @@
#include "ath9k.h"
+struct ath9k_eeprom_ctx {
+ struct completion complete;
+ struct ath_hw *ah;
+};
+
static char *dev_info = "ath9k";
MODULE_AUTHOR("Atheros Communications");
@@ -435,7 +440,7 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- for (i = 0; i < WME_NUM_AC; i++) {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
@@ -506,6 +511,51 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}
+static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
+ void *ctx)
+{
+ struct ath9k_eeprom_ctx *ec = ctx;
+
+ if (eeprom_blob)
+ ec->ah->eeprom_blob = eeprom_blob;
+
+ complete(&ec->complete);
+}
+
+static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
+{
+ struct ath9k_eeprom_ctx ec;
+ struct ath_hw *ah = sc->sc_ah;
+ int err;
+
+ /* try to load the EEPROM content asynchronously */
+ init_completion(&ec.complete);
+ ec.ah = sc->sc_ah;
+
+ err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
+ &ec, ath9k_eeprom_request_cb);
+ if (err < 0) {
+ ath_err(ath9k_hw_common(ah),
+ "EEPROM request failed\n");
+ return err;
+ }
+
+ wait_for_completion(&ec.complete);
+
+ if (!ah->eeprom_blob) {
+ ath_err(ath9k_hw_common(ah),
+ "Unable to load EEPROM file %s\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
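
The blob is only requested when the platform data carries an eeprom_name (see the pdata check added in ath9k_init_softc below), so SoC boards can supply calibration data as a firmware file instead of a physical EEPROM. A hedged fragment of how board-support code might set this up — the header path and file name are assumptions for illustration, other ath9k_platform_data fields are omitted, and attaching the data to the device is platform-specific:

    /* Illustrative board-support fragment; only the field used by this
     * change is shown, and the file name is hypothetical. */
    #include <linux/ath9k_platform.h>

    static struct ath9k_platform_data board_ath9k_pdata = {
            .eeprom_name = "ath9k-board-eeprom.bin",
    };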
+
+static void ath9k_eeprom_release(struct ath_softc *sc)
+{
+ release_firmware(sc->sc_ah->eeprom_blob);
+}
+
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
@@ -563,10 +613,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
-#ifdef CONFIG_ATH9K_DEBUGFS
- spin_lock_init(&sc->nodes_lock);
- INIT_LIST_HEAD(&sc->nodes);
-#endif
#ifdef CONFIG_ATH9K_MAC_DEBUG
spin_lock_init(&sc->debug.samp_lock);
#endif
@@ -587,6 +633,12 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
+ if (pdata && pdata->eeprom_name) {
+ ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
+ if (ret)
+ goto err_eeprom;
+ }
+
/* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
if (ret)
@@ -623,7 +675,8 @@ err_btcoex:
err_queues:
ath9k_hw_deinit(ah);
err_hw:
-
+ ath9k_eeprom_release(sc);
+err_eeprom:
kfree(ah);
sc->sc_ah = NULL;
@@ -687,6 +740,7 @@ static const struct ieee80211_iface_combination if_comb = {
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
+ .beacon_int_infra_match = true,
};
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
@@ -885,6 +939,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
if (sc->dfs_detector != NULL)
sc->dfs_detector->exit(sc->dfs_detector);
+ ath9k_eeprom_release(sc);
kfree(sc->sc_ah);
sc->sc_ah = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7b88b9c39ccd..ade3afb21f91 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -27,9 +27,6 @@ void ath_tx_complete_poll_work(struct work_struct *work)
struct ath_txq *txq;
int i;
bool needreset = false;
-#ifdef CONFIG_ATH9K_DEBUGFS
- sc->tx_complete_poll_work_seen++;
-#endif
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
@@ -182,13 +179,15 @@ void ath_rx_poll(unsigned long data)
static void ath_paprd_activate(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
int chain;
- if (!caldata || !caldata->paprd_done)
+ if (!caldata || !caldata->paprd_done) {
+ ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
return;
+ }
- ath9k_ps_wakeup(sc);
ar9003_paprd_enable(ah, false);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
if (!(ah->txchainmask & BIT(chain)))
@@ -197,8 +196,8 @@ static void ath_paprd_activate(struct ath_softc *sc)
ar9003_paprd_populate_single_table(ah, caldata, chain);
}
+ ath_dbg(common, CALIBRATE, "Activating PAPRD\n");
ar9003_paprd_enable(ah, true);
- ath9k_ps_restore(sc);
}
static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
@@ -211,7 +210,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
int time_left;
memset(&txctl, 0, sizeof(txctl));
- txctl.txq = sc->tx.txq_map[WME_AC_BE];
+ txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
memset(tx_info, 0, sizeof(*tx_info));
tx_info->band = hw->conf.channel->band;
@@ -256,8 +255,10 @@ void ath_paprd_calibrate(struct work_struct *work)
int len = 1800;
int ret;
- if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done)
+ if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+ ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
return;
+ }
ath9k_ps_wakeup(sc);
@@ -350,8 +351,18 @@ void ath_ani_calibrate(unsigned long data)
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
/* Only calibrate if awake */
- if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
+ if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
goto set_timer;
+ }
+ ah->ani_skip_count = 0;
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags &= ~PS_WAIT_FOR_ANI;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_wakeup(sc);
@@ -423,11 +434,15 @@ set_timer:
cal_interval = min(cal_interval, (u32)short_cal_interval);
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD) && ah->caldata) {
- if (!ah->caldata->paprd_done)
+
+ if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
+ if (!ah->caldata->paprd_done) {
ieee80211_queue_work(sc->hw, &sc->paprd_work);
- else if (!ah->paprd_table_write_done)
+ } else if (!ah->paprd_table_write_done) {
+ ath9k_ps_wakeup(sc);
ath_paprd_activate(sc);
+ ath9k_ps_restore(sc);
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dd45edfa6bae..dd91f8fdc01c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -131,7 +131,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
- PS_WAIT_FOR_TX_ACK))) {
+ PS_WAIT_FOR_TX_ACK |
+ PS_WAIT_FOR_ANI))) {
mode = ATH9K_PM_NETWORK_SLEEP;
if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
ath9k_btcoex_stop_gen_timer(sc);
@@ -181,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)
ath_start_ani(sc);
}
-static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
bool ret = true;
@@ -201,14 +202,6 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
if (!ath_drain_all_txq(sc, retry_tx))
ret = false;
- if (!flush) {
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- } else {
- ath_flushrecv(sc);
- }
-
return ret;
}
@@ -261,11 +254,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = NULL;
bool fastcc = true;
- bool flush = false;
int r;
__ath_cancel_work(sc);
+ tasklet_disable(&sc->intr_tq);
spin_lock_bh(&sc->sc_pcu_lock);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
@@ -275,11 +268,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
if (!hchan) {
fastcc = false;
- flush = true;
hchan = ah->curchan;
}
- if (!ath_prepare_reset(sc, retry_tx, flush))
+ if (!ath_prepare_reset(sc, retry_tx))
fastcc = false;
ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
@@ -292,11 +284,17 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
goto out;
}
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
+ (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ ath9k_mci_set_txpower(sc, true, false);
+
if (!ath_complete_reset(sc, true))
r = -EIO;
out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->intr_tq);
+
return r;
}
@@ -326,11 +324,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
u8 density;
an = (struct ath_node *)sta->drv_priv;
-#ifdef CONFIG_ATH9K_DEBUGFS
- spin_lock(&sc->nodes_lock);
- list_add(&an->list, &sc->nodes);
- spin_unlock(&sc->nodes_lock);
-#endif
+ an->sc = sc;
an->sta = sta;
an->vif = vif;
@@ -347,13 +341,6 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
-#ifdef CONFIG_ATH9K_DEBUGFS
- spin_lock(&sc->nodes_lock);
- list_del(&an->list);
- spin_unlock(&sc->nodes_lock);
- an->sta = NULL;
-#endif
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ath_tx_node_cleanup(sc, an);
}
@@ -489,17 +476,6 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & SCHED_INTR)
sched = true;
-#ifdef CONFIG_PM_SLEEP
- if (status & ATH9K_INT_BMISS) {
- if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
- ath_dbg(common, ANY, "during WoW we got a BMISS\n");
- atomic_inc(&sc->wow_got_bmiss_intr);
- atomic_dec(&sc->wow_sleep_proc_intr);
- }
- ath_dbg(common, INTERRUPT, "beacon miss interrupt\n");
- }
-#endif
-
/*
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
@@ -518,7 +494,15 @@ irqreturn_t ath_isr(int irq, void *dev)
goto chip_reset;
}
-
+#ifdef CONFIG_PM_SLEEP
+ if (status & ATH9K_INT_BMISS) {
+ if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
+ ath_dbg(common, ANY, "during WoW we got a BMISS\n");
+ atomic_inc(&sc->wow_got_bmiss_intr);
+ atomic_dec(&sc->wow_sleep_proc_intr);
+ }
+ }
+#endif
if (status & ATH9K_INT_SWBA)
tasklet_schedule(&sc->bcon_tasklet);
@@ -681,9 +665,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
- if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
- common->bus_ops->extn_synch_en(common);
-
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
@@ -816,7 +797,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
- ath_prepare_reset(sc, false, true);
+ ath_prepare_reset(sc, false);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
@@ -919,8 +900,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
ath9k_vif_iter(iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
- iter_data);
+ ieee80211_iterate_active_interfaces_atomic(
+ sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_vif_iter, iter_data);
}
/* Called with sc->mutex held. */
@@ -970,8 +952,9 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
if (ah->opmode == NL80211_IFTYPE_STATION &&
old_opmode == NL80211_IFTYPE_AP &&
test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- ieee80211_iterate_active_interfaces_atomic(sc->hw,
- ath9k_sta_vif_iter, sc);
+ ieee80211_iterate_active_interfaces_atomic(
+ sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_sta_vif_iter, sc);
}
}
@@ -1324,7 +1307,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
struct ath9k_tx_queue_info qi;
int ret = 0;
- if (queue >= WME_NUM_AC)
+ if (queue >= IEEE80211_NUM_ACS)
return 0;
txq = sc->tx.txq_map[queue];
@@ -1449,6 +1432,9 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, false);
+
ath_dbg(common, CONFIG,
"Primary Station interface: %pM, BSSID: %pM\n",
vif->addr, common->curbssid);
@@ -1497,14 +1483,17 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
- ieee80211_iterate_active_interfaces_atomic(sc->hw,
- ath9k_bss_assoc_iter, sc);
+ ieee80211_iterate_active_interfaces_atomic(
+ sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath9k_bss_assoc_iter, sc);
if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
ah->opmode == NL80211_IFTYPE_STATION) {
memset(common->curbssid, 0, ETH_ALEN);
common->curaid = 0;
ath9k_hw_write_associd(sc->sc_ah);
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ ath9k_mci_update_wlan_channels(sc, true);
}
}
@@ -1837,6 +1826,9 @@ static u32 fill_chainmask(u32 cap, u32 new)
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return true;
+
switch (val & 0x7) {
case 0x1:
case 0x3:
@@ -1887,134 +1879,6 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-#ifdef CONFIG_ATH9K_DEBUGFS
-
-/* Ethtool support for get-stats */
-
-#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
-static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
- "tx_pkts_nic",
- "tx_bytes_nic",
- "rx_pkts_nic",
- "rx_bytes_nic",
- AMKSTR(d_tx_pkts),
- AMKSTR(d_tx_bytes),
- AMKSTR(d_tx_mpdus_queued),
- AMKSTR(d_tx_mpdus_completed),
- AMKSTR(d_tx_mpdu_xretries),
- AMKSTR(d_tx_aggregates),
- AMKSTR(d_tx_ampdus_queued_hw),
- AMKSTR(d_tx_ampdus_queued_sw),
- AMKSTR(d_tx_ampdus_completed),
- AMKSTR(d_tx_ampdu_retries),
- AMKSTR(d_tx_ampdu_xretries),
- AMKSTR(d_tx_fifo_underrun),
- AMKSTR(d_tx_op_exceeded),
- AMKSTR(d_tx_timer_expiry),
- AMKSTR(d_tx_desc_cfg_err),
- AMKSTR(d_tx_data_underrun),
- AMKSTR(d_tx_delim_underrun),
-
- "d_rx_decrypt_crc_err",
- "d_rx_phy_err",
- "d_rx_mic_err",
- "d_rx_pre_delim_crc_err",
- "d_rx_post_delim_crc_err",
- "d_rx_decrypt_busy_err",
-
- "d_rx_phyerr_radar",
- "d_rx_phyerr_ofdm_timing",
- "d_rx_phyerr_cck_timing",
-
-};
-#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
-
-static void ath9k_get_et_strings(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u32 sset, u8 *data)
-{
- if (sset == ETH_SS_STATS)
- memcpy(data, *ath9k_gstrings_stats,
- sizeof(ath9k_gstrings_stats));
-}
-
-static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int sset)
-{
- if (sset == ETH_SS_STATS)
- return ATH9K_SSTATS_LEN;
- return 0;
-}
-
-#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
-#define AWDATA(elem) \
- do { \
- data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
- data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
- data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
- data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
- } while (0)
-
-#define AWDATA_RX(elem) \
- do { \
- data[i++] = sc->debug.stats.rxstats.elem; \
- } while (0)
-
-static void ath9k_get_et_stats(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ath_softc *sc = hw->priv;
- int i = 0;
-
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
- data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
- sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
- AWDATA_RX(rx_pkts_all);
- AWDATA_RX(rx_bytes_all);
-
- AWDATA(tx_pkts_all);
- AWDATA(tx_bytes_all);
- AWDATA(queued);
- AWDATA(completed);
- AWDATA(xretries);
- AWDATA(a_aggr);
- AWDATA(a_queued_hw);
- AWDATA(a_queued_sw);
- AWDATA(a_completed);
- AWDATA(a_retries);
- AWDATA(a_xretries);
- AWDATA(fifo_underrun);
- AWDATA(xtxop);
- AWDATA(timer_exp);
- AWDATA(desc_cfg_err);
- AWDATA(data_underrun);
- AWDATA(delim_underrun);
-
- AWDATA_RX(decrypt_crc_err);
- AWDATA_RX(phy_err);
- AWDATA_RX(mic_err);
- AWDATA_RX(pre_delim_crc_err);
- AWDATA_RX(post_delim_crc_err);
- AWDATA_RX(decrypt_busy_err);
-
- AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
- AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
- AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
-
- WARN_ON(i != ATH9K_SSTATS_LEN);
-}
-
-/* End of ethtool get-stats functions */
-
-#endif
-
-
#ifdef CONFIG_PM_SLEEP
static void ath9k_wow_map_triggers(struct ath_softc *sc,
@@ -2408,7 +2272,12 @@ struct ieee80211_ops ath9k_ops = {
#ifdef CONFIG_ATH9K_DEBUGFS
.get_et_sset_count = ath9k_get_et_sset_count,
- .get_et_stats = ath9k_get_et_stats,
- .get_et_strings = ath9k_get_et_strings,
+ .get_et_stats = ath9k_get_et_stats,
+ .get_et_strings = ath9k_get_et_strings,
+#endif
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+ .sta_add_debugfs = ath9k_sta_add_debugfs,
+ .sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index ec2d7c807567..5c02702f21e7 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,6 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
+ u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))
@@ -59,6 +60,12 @@ static bool ath_mci_add_profile(struct ath_common *common,
memcpy(entry, info, 10);
INC_PROF(mci, info);
list_add_tail(&entry->list, &mci->info);
+ if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
+ if (info->voice_type < sizeof(voice_priority))
+ mci->voice_priority = voice_priority[info->voice_type];
+ else
+ mci->voice_priority = 110;
+ }
return true;
}
@@ -150,7 +157,7 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
* For single PAN/FTP profile, allocate 35% for BT
* to improve WLAN throughput.
*/
- btcoex->duty_cycle = 35;
+ btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
btcoex->btcoex_period = 53;
ath_dbg(common, MCI,
"Single PAN/FTP bt period %d ms dutycycle %d\n",
@@ -200,23 +207,6 @@ skip_tuning:
ath9k_btcoex_timer_resume(sc);
}
-static void ath_mci_wait_btcal_done(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- /* Stop tx & rx */
- ieee80211_stop_queues(sc->hw);
- ath_stoprecv(sc);
- ath_drain_all_txq(sc, false);
-
- /* Wait for cal done */
- ar9003_mci_start_reset(ah, ah->curchan);
-
- /* Resume tx & rx */
- ath_startrecv(sc);
- ieee80211_wake_queues(sc->hw);
-}
-
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
@@ -228,7 +218,7 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
case MCI_GPM_BT_CAL_REQ:
if (mci_hw->bt_state == MCI_BT_AWAKE) {
mci_hw->bt_state = MCI_BT_CAL_START;
- ath_mci_wait_btcal_done(sc);
+ ath9k_queue_reset(sc, RESET_TYPE_MCI);
}
ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
break;
@@ -250,6 +240,58 @@ static void ath9k_mci_work(struct work_struct *work)
ath_mci_update_scheme(sc);
}
+static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
+{
+ if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
+ stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
+
+ if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
+ stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
+
+ if ((cur_txprio > ATH_MCI_HI_PRIO) &&
+ (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
+ stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
+}
+
+static void ath_mci_set_concur_txprio(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX];
+
+ memset(stomp_txprio, 0, sizeof(stomp_txprio));
+ if (mci->num_mgmt) {
+ stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
+ if (!mci->num_pan && !mci->num_other_acl)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
+ ATH_MCI_INQUIRY_PRIO;
+ } else {
+ u8 prof_prio[] = { 50, 90, 94, 52 };/* RFCOMM, A2DP, HID, PAN */
+
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
+
+ if (mci->num_sco)
+ ath_mci_update_stomp_txprio(mci->voice_priority,
+ stomp_txprio);
+ if (mci->num_other_acl)
+ ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
+ if (mci->num_a2dp)
+ ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
+ if (mci->num_hid)
+ ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
+ if (mci->num_pan)
+ ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
+
+ if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
+ stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
+ }
+ ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
+}
+
static u8 ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
@@ -281,6 +323,7 @@ static u8 ath_mci_process_profile(struct ath_softc *sc,
} else
ath_mci_del_profile(common, mci, entry);
+ ath_mci_set_concur_txprio(sc);
return 1;
}
@@ -314,6 +357,7 @@ static u8 ath_mci_process_status(struct ath_softc *sc,
mci->num_mgmt++;
} while (++i < ATH_MCI_MAX_PROFILE);
+ ath_mci_set_concur_txprio(sc);
if (old_num_mgmt != mci->num_mgmt)
return 1;
@@ -518,6 +562,8 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ return;
pgpm = mci->gpm_buf.bf_addr;
offset = ar9003_mci_get_next_gpm_offset(ah, false,
@@ -600,3 +646,130 @@ void ath_mci_enable(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
sc->sc_ah->imask |= ATH9K_INT_MCI;
}
+
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ struct ath9k_channel *chan = ah->curchan;
+ u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+ int i;
+ s16 chan_start, chan_end;
+ u16 wlan_chan;
+
+ if (!chan || !IS_CHAN_2GHZ(chan))
+ return;
+
+ if (allow_all)
+ goto send_wlan_chan;
+
+ wlan_chan = chan->channel - 2402;
+
+ chan_start = wlan_chan - 10;
+ chan_end = wlan_chan + 10;
+
+ if (chan->chanmode == CHANNEL_G_HT40PLUS)
+ chan_end += 20;
+ else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+ chan_start -= 20;
+
+ /* adjust side band */
+ chan_start -= 7;
+ chan_end += 7;
+
+ if (chan_start <= 0)
+ chan_start = 0;
+ if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
+ chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
+
+ ath_dbg(ath9k_hw_common(ah), MCI,
+ "WLAN current channel %d mask BT channel %d - %d\n",
+ wlan_chan, chan_start, chan_end);
+
+ for (i = chan_start; i < chan_end; i++)
+ MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
+
+send_wlan_chan:
+ /* update and send wlan channels info to BT */
+ for (i = 0; i < 4; i++)
+ mci->wlan_channels[i] = channelmap[i];
+ ar9003_mci_send_wlan_channels(ah);
+ ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
+}
+
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+ bool old_concur_tx = mci_hw->concur_tx;
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
+ mci_hw->concur_tx = false;
+ return;
+ }
+
+ if (!IS_CHAN_2GHZ(ah->curchan))
+ return;
+
+ if (setchannel) {
+ struct ath9k_hw_cal_data *caldata = &sc->caldata;
+ if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+ (ah->curchan->channel > caldata->channel) &&
+ (ah->curchan->channel <= caldata->channel + 20))
+ return;
+ if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+ (ah->curchan->channel < caldata->channel) &&
+ (ah->curchan->channel >= caldata->channel - 20))
+ return;
+ mci_hw->concur_tx = false;
+ } else
+ mci_hw->concur_tx = concur_tx;
+
+ if (old_concur_tx != mci_hw->concur_tx)
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+}
+
+static void ath9k_mci_stomp_audio(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+
+ if (!mci->num_sco && !mci->num_a2dp)
+ return;
+
+ if (ah->stats.avgbrssi > 25) {
+ btcoex->stomp_audio = 0;
+ return;
+ }
+
+ btcoex->stomp_audio++;
+}
+void ath9k_mci_update_rssi(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
+
+ ath9k_mci_stomp_audio(sc);
+
+ if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
+ return;
+
+ if (ah->stats.avgbrssi >= 40) {
+ if (btcoex->rssi_count < 0)
+ btcoex->rssi_count = 0;
+ if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, true);
+ }
+ } else {
+ if (btcoex->rssi_count > 0)
+ btcoex->rssi_count = 0;
+ if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
+ btcoex->rssi_count = 0;
+ ath9k_mci_set_txpower(sc, false, false);
+ }
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index fc14eea034eb..06958837620c 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -32,6 +32,27 @@
#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
ATH_MCI_MAX_SCO_PROFILE)
+#define ATH_MCI_INQUIRY_PRIO 62
+#define ATH_MCI_HI_PRIO 60
+#define ATH_MCI_NUM_BT_CHANNELS 79
+#define ATH_MCI_CONCUR_TX_SWITCH 5
+
+#define MCI_GPM_SET_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) |= (1 << (_bt_chan & 7)); \
+ } \
+ } while (0)
+
+#define MCI_GPM_CLR_CHANNEL_BIT(_p_gpm, _bt_chan) \
+ do { \
+ if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
+ (_bt_chan / 8)) &= ~(1 << (_bt_chan & 7));\
+ } \
+ } while (0)
+
#define INC_PROF(_mci, _info) do { \
switch (_info->type) { \
case MCI_GPM_COEX_PROFILE_RFCOMM:\
@@ -49,6 +70,7 @@
_mci->num_pan++; \
break; \
case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
_mci->num_sco++; \
break; \
default: \
@@ -73,6 +95,7 @@
_mci->num_pan--; \
break; \
case MCI_GPM_COEX_PROFILE_VOICE: \
+ case MCI_GPM_COEX_PROFILE_A2DPVO:\
_mci->num_sco--; \
break; \
default: \
@@ -113,6 +136,7 @@ struct ath_mci_profile {
u8 num_pan;
u8 num_other_acl;
u8 num_bdr;
+ u8 voice_priority;
};
struct ath_mci_buf {
@@ -130,13 +154,25 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
+void ath9k_mci_update_rssi(struct ath_softc *sc);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
void ath_mci_enable(struct ath_softc *sc);
+void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all);
+void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx);
#else
static inline void ath_mci_enable(struct ath_softc *sc)
{
}
+static inline void ath9k_mci_update_wlan_channels(struct ath_softc *sc,
+ bool allow_all)
+{
+}
+static inline void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
+ bool concur_tx)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f088f4bf9a26..7ae73fbd9136 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,17 +96,6 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
return true;
}
-static void ath_pci_extn_synch_enable(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
- u8 lnkctl;
-
- pci_read_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, &lnkctl);
- lnkctl |= PCI_EXP_LNKCTL_ES;
- pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
-}
-
/* Need to be called after we discover btcoex capabilities */
static void ath_pci_aspm_init(struct ath_common *common)
{
@@ -125,23 +114,23 @@ static void ath_pci_aspm_init(struct ath_common *common)
if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
(AR_SREV_9285(ah))) {
- /* Bluetooth coexistance requires disabling ASPM. */
+ /* Bluetooth coexistence requires disabling ASPM. */
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
- PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
/*
* Both upstream and downstream PCIe components should
* have the same ASPM settings.
*/
pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
- PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1);
ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
return;
}
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
- if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
+ if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
@@ -153,7 +142,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
- .extn_synch_en = ath_pci_extn_synch_enable,
.aspm_init = ath_pci_aspm_init,
};
@@ -299,7 +287,7 @@ static void ath_pci_remove(struct pci_dev *pdev)
pci_release_region(pdev, 0);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ath_pci_suspend(struct device *device)
{
@@ -345,22 +333,15 @@ static int ath_pci_resume(struct device *device)
return 0;
}
-static const struct dev_pm_ops ath9k_pm_ops = {
- .suspend = ath_pci_suspend,
- .resume = ath_pci_resume,
- .freeze = ath_pci_suspend,
- .thaw = ath_pci_resume,
- .poweroff = ath_pci_suspend,
- .restore = ath_pci_resume,
-};
+static SIMPLE_DEV_PM_OPS(ath9k_pm_ops, ath_pci_suspend, ath_pci_resume);
#define ATH9K_PM_OPS (&ath9k_pm_ops)
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_SLEEP */
#define ATH9K_PM_OPS NULL
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_SLEEP */
MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 27ed80b54881..714558d1ba78 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -982,16 +982,6 @@ static void ath_rc_update_per(struct ath_softc *sc,
}
}
-static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
- struct ath_rc_stats *stats = &rc->rcstats[rix];
-
- stats->xretries += xretries;
- stats->retries += retries;
- stats->per = per;
-}
-
static void ath_rc_update_ht(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
struct ieee80211_tx_info *tx_info,
@@ -1065,14 +1055,6 @@ static void ath_rc_update_ht(struct ath_softc *sc,
}
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
static void ath_rc_tx_status(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
struct sk_buff *skb)
@@ -1350,7 +1332,25 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
}
}
-#ifdef CONFIG_ATH9K_DEBUGFS
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+
+void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+ struct ath_rc_stats *stats;
+
+ stats = &rc->rcstats[final_rate];
+ stats->success++;
+}
+
+void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+ int xretries, int retries, u8 per)
+{
+ struct ath_rc_stats *stats = &rc->rcstats[rix];
+
+ stats->xretries += xretries;
+ stats->retries += retries;
+ stats->per = per;
+}
static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1428,10 +1428,17 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
struct ath_rate_priv *rc = priv_sta;
- debugfs_create_file("rc_stats", S_IRUGO, dir, rc, &fops_rcstat);
+ rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
+ dir, rc, &fops_rcstat);
+}
+
+static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
+{
+ struct ath_rate_priv *rc = priv_sta;
+ debugfs_remove(rc->debugfs_rcstats);
}
-#endif /* CONFIG_ATH9K_DEBUGFS */
+#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
@@ -1476,8 +1483,10 @@ static struct rate_control_ops ath_rate_ops = {
.free = ath_rate_free,
.alloc_sta = ath_rate_alloc_sta,
.free_sta = ath_rate_free_sta,
-#ifdef CONFIG_ATH9K_DEBUGFS
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
.add_sta_debugfs = ath_rate_add_sta_debugfs,
+ .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 268e67dc5fb2..267dbfcfaa96 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -211,10 +211,26 @@ struct ath_rate_priv {
struct ath_rateset neg_ht_rates;
const struct ath_rate_table *rate_table;
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
struct dentry *debugfs_rcstats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
+#endif
};
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
+void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
+void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+ int xretries, int retries, u8 per);
+#else
+static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+}
+static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
+ int xretries, int retries, u8 per)
+{
+}
+#endif
+
#ifdef CONFIG_ATH9K_RATE_CONTROL
int ath_rate_control_register(void);
void ath_rate_control_unregister(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 83d16e7ed272..90752f246970 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -254,8 +254,6 @@ rx_init_fail:
static void ath_edma_start_recv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxbuflock);
-
ath9k_hw_rxena(sc->sc_ah);
ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
@@ -267,8 +265,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
-
- spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -285,8 +281,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
- spin_lock_init(&sc->rx.rxbuflock);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc->sc_ah->caps.rx_status_len;
@@ -447,7 +441,6 @@ int ath_startrecv(struct ath_softc *sc)
return 0;
}
- spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -468,26 +461,31 @@ start_recv:
ath_opmode_init(sc);
ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
- spin_unlock_bh(&sc->rx.rxbuflock);
-
return 0;
}
+static void ath_flushrecv(struct ath_softc *sc)
+{
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
+}
+
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool stopped, reset = false;
- spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah, &reset);
+ ath_flushrecv(sc);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
else
sc->rx.rxlink = NULL;
- spin_unlock_bh(&sc->rx.rxbuflock);
if (!(ah->ah_flags & AH_UNPLUGGED) &&
unlikely(!stopped)) {
@@ -499,15 +497,6 @@ bool ath_stoprecv(struct ath_softc *sc)
return stopped && !reset;
}
-void ath_flushrecv(struct ath_softc *sc)
-{
- set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
-}
-
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
@@ -744,6 +733,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
+ list_del(&bf->list);
if (!bf->bf_mpdu)
return bf;
@@ -976,7 +966,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = ah->noise + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
if (rx_stats->rs_moreaggr)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
@@ -1059,16 +1049,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
dma_type = DMA_FROM_DEVICE;
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
- spin_lock_bh(&sc->rx.rxbuflock);
tsf = ath9k_hw_gettsf64(ah);
tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
- /* If handling rx interrupt and flush is in progress => exit */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
- break;
memset(&rs, 0, sizeof(rs));
if (edma)
@@ -1105,17 +1091,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
rs.is_mybeacon = false;
- sc->rx.num_pkts++;
- ath_debug_stat_rx(sc, &rs);
+ if (ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control))
+ sc->rx.num_pkts++;
- /*
- * If we're asked to flush receive queue, directly
- * chain it back at the queue without processing it.
- */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
- RX_STAT_INC(rx_drop_rxflush);
- goto requeue_drop_frag;
- }
+ ath_debug_stat_rx(sc, &rs);
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
@@ -1251,19 +1231,18 @@ requeue_drop_frag:
sc->rx.frag = NULL;
}
requeue:
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ if (flush)
+ continue;
+
if (edma) {
- list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
} else {
- list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
- if (!flush)
- ath9k_hw_rxena(ah);
+ ath9k_hw_rxena(ah);
}
} while (1);
- spin_unlock_bh(&sc->rx.rxbuflock);
-
if (!(ah->imask & ATH9K_INT_RXEOL)) {
ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
ath9k_hw_set_interrupts(ah);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4e6760f8596d..ad3c82c09177 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -907,10 +907,6 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
-#define AR_SREV_9462_20_OR_LATER(_ah) \
- (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
- ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
-
#define AR_SREV_9565(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
@@ -2315,6 +2311,8 @@ enum {
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
+#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
#define AR_BTCOEX_CTRL2 0x1948
#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
@@ -2360,4 +2358,11 @@ enum {
#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
+#define AR_MCI_MISC 0x1a74
+#define AR_MCI_MISC_HW_FIX_EN 0x00000001
+#define AR_MCI_MISC_HW_FIX_EN_S 0
+#define AR_MCI_DBG_CNT_CTRL 0x1a78
+#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
+#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index a483d518758c..9f8563091bea 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -118,7 +118,7 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
(ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
- if (AR_SREV_9462_20_OR_LATER(ah)) {
+ if (AR_SREV_9462_20(ah)) {
/* AR9462 2.0 has an extra descriptor word (time based
* discard) compared to other chips */
REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 741918a2027b..90e48a0fafe5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -312,7 +312,6 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
}
bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- bf->bf_next = NULL;
list_del(&bf->list);
spin_unlock_bh(&sc->tx.txbuflock);
@@ -1263,7 +1262,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
- tidno < WME_NUM_TID; tidno++, tid++) {
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
if (!tid->sched)
continue;
@@ -1297,7 +1296,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
- tidno < WME_NUM_TID; tidno++, tid++) {
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
ac = tid->ac;
txq = ac->txq;
@@ -1354,10 +1353,10 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath9k_tx_queue_info qi;
static const int subtype_txq_to_hwq[] = {
- [WME_AC_BE] = ATH_TXQ_AC_BE,
- [WME_AC_BK] = ATH_TXQ_AC_BK,
- [WME_AC_VI] = ATH_TXQ_AC_VI,
- [WME_AC_VO] = ATH_TXQ_AC_VO,
+ [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
+ [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
+ [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
+ [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
};
int axq_qnum, i;
@@ -2319,6 +2318,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_txq_lock(sc, txq);
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
+
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
ath_txq_unlock(sc, txq);
return;
@@ -2446,7 +2447,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
int tidno, acno;
for (tidno = 0, tid = &an->tid[tidno];
- tidno < WME_NUM_TID;
+ tidno < IEEE80211_NUM_TIDS;
tidno++, tid++) {
tid->an = an;
tid->tidno = tidno;
@@ -2464,7 +2465,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
}
for (acno = 0, ac = &an->ac[acno];
- acno < WME_NUM_AC; acno++, ac++) {
+ acno < IEEE80211_NUM_ACS; acno++, ac++) {
ac->sched = false;
ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
@@ -2479,7 +2480,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
- tidno < WME_NUM_TID; tidno++, tid++) {
+ tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
ac = tid->ac;
txq = ac->txq;
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 267d5dcf82dc..13a204598766 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -1,6 +1,7 @@
config CARL9170
tristate "Linux Community AR9170 802.11n USB support"
depends on USB && MAC80211 && EXPERIMENTAL
+ select ATH_COMMON
select FW_LOADER
select CRC32
help
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 24ac2876a733..63fd9af3fd39 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -28,11 +28,6 @@
#include "fwcmd.h"
#include "version.h"
-#define MAKE_STR(symbol) #symbol
-#define TO_STR(symbol) MAKE_STR(symbol)
-#define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER)
-MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT);
-
static const u8 otus_magic[4] = { OTUS_MAGIC };
static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
@@ -341,8 +336,12 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
if_comb_types |=
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_P2P_GO);
+
+#ifdef CONFIG_MAC80211_MESH
+ if_comb_types |=
+ BIT(NL80211_IFTYPE_MESH_POINT);
+#endif /* CONFIG_MAC80211_MESH */
}
}
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index e3b1b6e87760..24d75ab94f0d 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -343,7 +343,24 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
break;
}
} else {
- mac_addr = NULL;
+ /*
+ * Enable monitor mode
+ *
+ * rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
+ * sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
+ *
+ * When the hardware is in SNIFFER_PROMISC mode,
+ * it generates spurious ACKs for every incoming
+ * frame. This confuses every peer in the
+ * vicinity and the network throughput will suffer
+ * badly.
+ *
+ * Hence, the hardware will be put into station
+ * mode and just the rx filters are disabled.
+ */
+ cam_mode |= AR9170_MAC_CAM_STA;
+ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
+ mac_addr = common->macaddr;
bssid = NULL;
}
rcu_read_unlock();
@@ -355,8 +372,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
if (ar->sniffer_enabled) {
- rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
- sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index a0b723078547..4684dd989496 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -164,9 +164,6 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
- if (carl9170_check_sequence(ar, cmd->hdr.seq))
- return;
-
if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
carl9170_cmd_callback(ar, len, buf);
@@ -663,6 +660,35 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
return false;
}
+static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *skb;
+
+ /* (driver) frame trap handler
+ *
+ * Because power-saving mode handling has to be implemented by
+ * the driver/firmware, we have to check each incoming beacon
+ * from the associated AP; if there's new data for us (either
+ * broadcast/multicast or unicast) we have to react quickly.
+ *
+ * So, if you want to add additional frame trap
+ * handlers, this would be the perfect place!
+ */
+
+ carl9170_ps_beacon(ar, buf, len);
+
+ carl9170_ba_check(ar, buf, len);
+
+ skb = carl9170_rx_copy_data(buf, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
+ ieee80211_rx(ar->hw, skb);
+ return 0;
+}
+
/*
* If the frame alignment is right (or the kernel has
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
@@ -672,14 +698,12 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
* mode, and we need to observe the proper ordering,
* this is non-trivial.
*/
-
-static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
+static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
{
struct ar9170_rx_head *head;
struct ar9170_rx_macstatus *mac;
struct ar9170_rx_phystatus *phy = NULL;
struct ieee80211_rx_status status;
- struct sk_buff *skb;
int mpdu_len;
u8 mac_status;
@@ -790,19 +814,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
if (phy)
carl9170_rx_phy_status(ar, phy, &status);
+ else
+ status.flag |= RX_FLAG_NO_SIGNAL_VAL;
- carl9170_ps_beacon(ar, buf, mpdu_len);
-
- carl9170_ba_check(ar, buf, mpdu_len);
-
- skb = carl9170_rx_copy_data(buf, mpdu_len);
- if (!skb)
+ if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
goto drop;
- memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
- ieee80211_rx(ar->hw, skb);
return;
-
drop:
ar->rx_dropped++;
}
@@ -820,6 +838,9 @@ static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
if (unlikely(i > resplen))
break;
+ if (carl9170_check_sequence(ar, cmd->hdr.seq))
+ break;
+
carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
}
@@ -851,7 +872,7 @@ static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
if (i == 12)
carl9170_rx_untie_cmds(ar, buf, len);
else
- carl9170_handle_mpdu(ar, buf, len);
+ carl9170_rx_untie_data(ar, buf, len);
}
static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 84377cf580e0..ef4ec0da6e49 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1485,6 +1485,13 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
}
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ /* to static code analyzers and reviewers:
+ * mac80211 guarantees that a valid "sta"
+ * reference is present if a frame is to
+ * be part of an ampdu. Hence any extra
+ * sta == NULL checks are redundant in this
+ * special case.
+ */
run = carl9170_tx_ampdu_queue(ar, sta, skb);
if (run)
carl9170_tx_ampdu(ar);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 888152ce3eca..307bc0ddff99 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -295,6 +295,13 @@ static void carl9170_usb_rx_irq_complete(struct urb *urb)
goto resubmit;
}
+ /*
+ * While the carl9170 firmware does not use this EP, the
+ * firmware loader in the EEPROM unfortunately does.
+ * Therefore we need to be ready to handle out-of-band
+ * responses and traps in case the firmware crashed and
+ * the loader took over again.
+ */
carl9170_handle_command_response(ar, urb->transfer_buffer,
urb->actual_length);
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 19befb331073..39e8a590d7fc 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -20,8 +20,8 @@
#include "ath.h"
#include "reg.h"
-#define REG_READ (common->ops->read)
-#define REG_WRITE (common->ops->write)
+#define REG_READ (common->ops->read)
+#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
/**
* ath_hw_set_bssid_mask - filter out bssids we listen
@@ -119,8 +119,8 @@ void ath_hw_setbssidmask(struct ath_common *common)
{
void *ah = common->ah;
- REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
- REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
+ REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
+ REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
}
EXPORT_SYMBOL(ath_hw_setbssidmask);
@@ -139,7 +139,7 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
void *ah = common->ah;
/* freeze */
- REG_WRITE(ah, AR_MIBC_FMC, AR_MIBC);
+ REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
/* read */
cycles = REG_READ(ah, AR_CCCNT);
@@ -148,13 +148,13 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
tx = REG_READ(ah, AR_TFCNT);
/* clear */
- REG_WRITE(ah, 0, AR_CCCNT);
- REG_WRITE(ah, 0, AR_RFCNT);
- REG_WRITE(ah, 0, AR_RCCNT);
- REG_WRITE(ah, 0, AR_TFCNT);
+ REG_WRITE(ah, AR_CCCNT, 0);
+ REG_WRITE(ah, AR_RFCNT, 0);
+ REG_WRITE(ah, AR_RCCNT, 0);
+ REG_WRITE(ah, AR_TFCNT, 0);
/* unfreeze */
- REG_WRITE(ah, 0, AR_MIBC);
+ REG_WRITE(ah, AR_MIBC, 0);
/* update all cycle counters here */
common->cc_ani.cycles += cycles;
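
As an aside on the REG_WRITE change above: the wrapper now takes (ah, reg, val) at the call site while still delegating to the ath_ops write callback, which keeps its (ah, val, reg) argument order. A minimal, hypothetical sketch (the "example_" names are not from the ath sources):

/* Illustration only: call sites read as (register, value) while the
 * underlying ->write(ah, val, reg) callback signature stays unchanged.
 */
struct example_ops {
	void (*write)(void *ah, unsigned int val, unsigned int reg);
};

#define EXAMPLE_REG_WRITE(_ops, _ah, _reg, _val) \
	((_ops)->write((_ah), (_val), (_reg)))

static void example_usage(struct example_ops *ops, void *ah)
{
	/* reads naturally as "write 0 to the cycle counter register" */
	EXAMPLE_REG_WRITE(ops, ah, 0x80f8 /* illustrative register offset */, 0);
}
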
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000000..bac3d98a0cfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,29 @@
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ depends on CFG80211
+ depends on PCI
+ default n
+ ---help---
+ This module adds support for wireless adapters based on the
+ wil6210 chip by Wilocity. It supports operation in the
+ 60 GHz band, covered by the IEEE 802.11ad standard.
+
+ http://wireless.kernel.org/en/users/Drivers/wil6210
+
+ If you choose to build it as a module, it will be called
+ wil6210.
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ ---help---
+ ISR registers on wil6210 chip may operate in either
+ COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+ For production code, use COR (say y); is default since
+ it saves extra target transaction;
+ For ISR debug, use W1C (say n); is allows to monitor ISR
+ registers with debugfs. If COR were used, ISR would
+ self-clear when accessed for debug purposes, it makes
+ such monitoring impossible.
+ Say y unless you debug interrupts
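
To make the COR/W1C distinction above concrete, here is a minimal, hypothetical acknowledge path; it is not taken from the wil6210 sources, and the register pointer and helper name are assumptions (linux/io.h accessors assumed available):

/* Hypothetical ISR acknowledge helper. In Clear-On-Read mode the read of
 * the cause register also clears it; in Write-1-to-Clear mode the pending
 * bits must be written back explicitly.
 */
static u32 example_isr_ack(void __iomem *icr)
{
	u32 cause = ioread32(icr);	/* COR: this read also clears the bits */

#ifndef CONFIG_WIL6210_ISR_COR
	iowrite32(cause, icr);		/* W1C: write the 1-bits back to clear */
#endif

	return cause;
}
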
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000000..9396dc9fe3c5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000000..116f4e807ae1
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) { \
+ .band = IEEE80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wmi_notify_req_cmd cmd = {
+ .cid = 0,
+ .interval_usec = 0,
+ };
+
+ if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+ return -ENOENT;
+
+ /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ if (rc)
+ return rc;
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.mcs = wil->stats.bf_mcs;
+ sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = 12; /* TODO: provide real value */
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (flags)
+ wil->monitor_flags = *flags;
+ else
+ wil->monitor_flags = 0;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+
+ if (wil->scan_request) {
+ wil_err(wil, "Already scanning\n");
+ return -EAGAIN;
+ }
+
+ /* check we are client side */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+
+ }
+
+ /* FW doesn't support scanning after a connection attempt */
+ if (test_bit(wil_status_dontscan, &wil->status)) {
+ wil_err(wil, "Scan after connect attempt not supported\n");
+ return -EBUSY;
+ }
+
+ wil->scan_request = request;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.num_channels = 0;
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+ if (rsn_eid) {
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ rc = -ERANGE;
+ wil_err(wil, "IE too large (%td bytes)\n",
+ sme->ie_len);
+ goto out;
+ }
+ /*
+ * For secure assoc, send:
+ * (1) WMI_DELETE_CIPHER_KEY_CMD
+ * (2) WMI_SET_APPIE_CMD
+ */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ goto out;
+ }
+ /* WMI_SET_APPIE_CMD */
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc) {
+ wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ goto out;
+ }
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
+ switch (bss->capability & 0x03) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ conn.network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ conn.network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ goto out;
+ }
+ if (rsn_eid) {
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ } else {
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ conn.channel = ch - 1;
+
+ memcpy(conn.bssid, bss->bssid, 6);
+ memcpy(conn.dst_mac, bss->bssid, 6);
+ /*
+ * FW doesn't support scanning after a connection attempt
+ */
+ set_bit(wil_status_dontscan, &wil->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+ if (rc == 0) {
+ /* Connect can take lots of time */
+ mod_timer(&wil->connect_timer,
+ jiffies + msecs_to_jiffies(2000));
+ }
+
+ out:
+ cfg80211_put_bss(bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wdev->preset_chandef = *chandef;
+
+ return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_add_cipher_key(wil, key_index, mac_addr,
+ params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Needs to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+ info->ssid, info->ssid_len);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_channel(wil, channel->hw_value);
+ if (rc)
+ return rc;
+
+ /* MAC address - prerequisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* IE's */
+ /* bcon 'head' IEs are not relevant for the 60 GHz band */
+ wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ bcon->beacon_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+
+ wil->secure_pcp = info->privacy;
+
+ rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ rc = wil_rx_init(wil);
+
+ netif_carrier_on(ndev);
+
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ /* To stop beaconing, set BI to 0 */
+ rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+ return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+ .scan = wil_cfg80211_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ /* TODO: set real value */
+ wiphy->max_scan_ssids = 10;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ /* TODO: enable P2P when integrated with supplicant:
+ * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+ */
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* TODO: figure this out */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+ int rc = 0;
+ struct wireless_dev *wdev;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+ sizeof(struct wil6210_priv));
+ if (!wdev->wiphy) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wil_wiphy_init(wdev->wiphy);
+
+ rc = wiphy_register(wdev->wiphy);
+ if (rc < 0)
+ goto out_failed_reg;
+
+ return wdev;
+
+out_failed_reg:
+ wiphy_free(wdev->wiphy);
+out:
+ kfree(wdev);
+
+ return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644
index 000000000000..6a315ba5aa7d
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
@@ -0,0 +1,30 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
+ __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ print_hex_dump(KERN_DEBUG, prefix_str, \
+ prefix_type, rowsize, groupsize, \
+ buf, len, ascii); \
+} while (0)
+
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ wil_dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
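
A brief usage sketch for the wrapper defined above; the function and buffer names are hypothetical, and with CONFIG_DYNAMIC_DEBUG the dump is printed only when the matching dynamic-debug site is enabled:

/* Hypothetical call site; "buf" and "len" stand for any byte buffer.
 * Argument order matches the macro above:
 * (prefix_str, prefix_type, rowsize, groupsize, buf, len, ascii)
 */
static void example_dump(const void *buf, size_t len)
{
	wil_print_hex_dump_debug("wmi rx ", DUMP_PREFIX_OFFSET, 16, 1,
				 buf, len, true);
}
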
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000000..65fc9683bfd8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better have per device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct vring *vring)
+{
+ void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+ seq_printf(s, "VRING %s = {\n", name);
+ seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " va = 0x%p\n", vring->va);
+ seq_printf(s, " size = %d\n", vring->size);
+ seq_printf(s, " swtail = %d\n", vring->swtail);
+ seq_printf(s, " swhead = %d\n", vring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ if (x)
+ seq_printf(s, "0x%08x\n", ioread32(x));
+ else
+ seq_printf(s, "???\n");
+
+ if (vring->va && (vring->size < 1025)) {
+ uint i;
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &vring->va[i].tx;
+ if ((i % 64) == 0 && (i != 0))
+ seq_printf(s, "\n");
+ seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+ "S" : (vring->ctx[i] ? "H" : "h"));
+ }
+ seq_printf(s, "\n");
+ }
+ seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ struct vring *vring = &(wil->vring_tx[i]);
+ if (vring->va) {
+ char name[10];
+ snprintf(name, sizeof(name), "tx_%2d", i);
+ wil_print_vring(s, wil, name, vring);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+ .open = wil_vring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+ * We just read a memory block from the NIC. This memory may be
+ * garbage; check its validity before using it.
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_printf(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ int n = 0;
+ unsigned char printbuf[16 * 3 + 2];
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * we already validated @d.addr while
+ * reading header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ while (n < len) {
+ int l = min(len - n, 16);
+ hex_dump_to_buffer(databuf + n, l,
+ 16, 1, printbuf,
+ sizeof(printbuf),
+ false);
+ seq_printf(s, " : %s\n", printbuf);
+ n += l;
+ }
+ }
+ } else {
+ seq_printf(s, "\n");
+ }
+ }
+ out:
+ seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+ .open = wil_mbox_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ iowrite32(val, (void __iomem *)data);
+ wmb(); /* make sure write propagated to HW */
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ *val = ioread32((void __iomem *)data);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void __iomem *value)
+{
+ return debugfs_create_file(name, mode, parent, (void * __force)value,
+ &fops_iomem_x32);
+}
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name,
+ struct dentry *parent, u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+ wil->csr + off);
+ wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 4);
+ wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 8);
+ wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+ wil->csr + off + 12);
+ wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 16);
+ wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+ wil->csr + off + 20);
+ wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+ wil->csr + off + 24);
+
+ return 0;
+}
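Editor's note: the ICC/ICR/ICM/ICS/IMV/IMS/IMC files above are created at fixed 4-byte offsets from the block base, while interrupt.c later in this patch reaches the same registers via offsetof(struct RGF_ICR, ...). For the two views to agree, the structure (defined in wil6210.h, which is not shown here) would have to be laid out roughly as in this sketch; the layout is inferred from the offsets, not copied from the header:

	struct RGF_ICR {	/* presumed layout, one 32-bit register per field */
		u32 ICC;	/* +0x00  cause control: COR vs W1C behaviour */
		u32 ICR;	/* +0x04  interrupt cause */
		u32 ICM;	/* +0x08  interrupt cause, masked view */
		u32 ICS;	/* +0x0c  interrupt cause set (write only) */
		u32 IMV;	/* +0x10  interrupt mask value */
		u32 IMS;	/* +0x14  interrupt mask set (write only) */
		u32 IMC;	/* +0x18  interrupt mask clear (write only) */
	} __packed;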
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+ wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+ return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+ .open = wil_memread_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ loff_t pos = *ppos;
+ size_t available = blob->size;
+ void *buf;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+ pos, count);
+
+ ret = copy_to_user(user_buf, buf, count);
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
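Editor's note: a short worked example of the copy_to_user() accounting above, to make the partial-copy case concrete (the numbers are illustrative only):

	/* illustrative only: suppose count = 100 and copy_to_user()
	 * fails on the last 3 bytes, returning ret = 3:
	 *   ret != count          -> not a total failure, no -EFAULT
	 *   count -= ret          -> 97 bytes were actually delivered
	 *   *ppos = pos + count   -> the next read resumes at pos + 97
	 * only when ret == count (nothing copied at all) is -EFAULT returned
	 */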
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = wil_default_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ /**
+ * BUG:
+ * this code does NOT sync device state with the rest of the system;
+ * use with care, debug only!!!
+ */
+ rtnl_lock();
+ dev_close(ndev);
+ ndev->flags &= ~IFF_UP;
+ rtnl_unlock();
+ wil_reset(wil);
+
+ return len;
+}
+
+static const struct file_operations fops_reset = {
+ .write = wil_write_file_reset,
+ .open = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct vring *vring = &(wil->vring_tx[0]);
+
+ if (!vring->va) {
+ seq_printf(s, "No Tx VRING\n");
+ return 0;
+ }
+
+ if (dbg_txdesc_index < vring->size) {
+ volatile struct vring_tx_desc *d =
+ &(vring->va[dbg_txdesc_index].tx);
+ volatile u32 *u = (volatile u32 *)d;
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+ seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = %p\n", skb);
+
+ if (skb) {
+ unsigned char printbuf[16 * 3 + 2];
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+
+ seq_printf(s, " len = %d\n", len);
+
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, " : %s\n", printbuf);
+ i += l;
+ }
+ }
+ seq_printf(s, "}\n");
+ } else {
+ seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
+ }
+
+ return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+ .open = wil_txdesc_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ seq_printf(s,
+ "TSF : 0x%016llx\n"
+ "TxMCS : %d\n"
+ "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+ wil->stats.tsf, wil->stats.bf_mcs,
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+ return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+ .open = wil_bf_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ if (*ppos != 0) {
+ wil_err(wil, "Unable to set SSID substring from [%d]\n",
+ (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(wdev->ssid)) {
+ wil_err(wil, "SSID too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+ if (netif_running(ndev)) {
+ wil_err(wil, "Unable to change SSID on running interface\n");
+ return -EINVAL;
+ }
+
+ wdev->ssid_len = count;
+ return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+ buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+ .read = wil_read_file_ssid,
+ .write = wil_write_file_ssid,
+ .open = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+ debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+ debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_txdesc_index);
+ debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+ debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+ debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+ &wil->secure_pcp);
+
+ wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+ HOSTADDR(RGF_USER_USER_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_TX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_RX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_MISC_ICR));
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+ debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+ debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
+ wil->rgf_blob.data = (void * __force)wil->csr + 0;
+ wil->rgf_blob.size = 0xa000;
+ wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+ wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+ wil->fw_code_blob.size = 0x40000;
+ wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+ &wil->fw_code_blob);
+
+ wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+ wil->fw_data_blob.size = 0x8000;
+ wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+ &wil->fw_data_blob);
+
+ wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+ wil->fw_peri_blob.size = 0x18000;
+ wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+ &wil->fw_peri_blob);
+
+ wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+ wil->uc_code_blob.size = 0x10000;
+ wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+ &wil->uc_code_blob);
+
+ wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+ wil->uc_data_blob.size = 0x4000;
+ wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+ &wil->uc_data_blob);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000000..38049da71049
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE
+ * Its bits represent the OR'ed bits from 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ *
+ */
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
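Editor's note: the helper above hides the difference between the two ICR modes described in the "Theory of operation" comment. A worked example, assuming a single pending bit in ICR (ICC, written in wil6210_enable_irq() below, selects which mode the hardware uses):

	/* example: ICR currently holds 0x00000200 (one pending bit)
	 *
	 * Clear-On-Read build (CONFIG_WIL6210_ISR_COR):
	 *   ioread32() returns 0x200 and the read itself clears ICR;
	 *   wil_icr_clear() is therefore an empty stub
	 *
	 * Write-1-to-Clear build:
	 *   ioread32() returns 0x200 and ICR still holds 0x200;
	 *   wil_icr_clear(0x200, addr) writes the value back, clearing
	 *   exactly the bits that were observed
	 */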
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+ clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_TX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_RX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_MISC, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ set_bit(wil_status_irqen, &wil->status);
+
+ iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx(wil);
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ wil_dbg_IRQ(wil, "RX done\n");
+ isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+ wil_rx_handle(wil);
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx(wil);
+
+ if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+ uint i;
+ wil_dbg_IRQ(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ for (i = 0; i < 24; i++) {
+ u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+ if (isr & mask) {
+ isr &= ~mask;
+ wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+ wil_tx_complete(wil, i);
+ }
+ }
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_misc(wil);
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+ /**
+ * Actual FW ready is indicated by the
+ * WMI_FW_READY_EVENTID event
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_IRQ(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_IRQ(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle bug in hardware that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the IRQ issue if it happens and print everything I can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ if (!test_bit(wil_status_irqen, &wil->status)) {
+ u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_rx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_tx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_misc = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
+ if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+ return IRQ_NONE;
+
+ /* FIXME: IRQ mask debug */
+ if (wil6210_debug_irq_mask(wil, pseudo_cause))
+ return IRQ_NONE;
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover real IRQ cause
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler called right here
+ * - threaded handler called later
+ *
+ * Hard IRQ handler reads and clears ISR.
+ *
+ * If the threaded handler is requested, the hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves the ISR register value
+ * for the threaded handler's use.
+ *
+ * voting for wake thread - need at least 1 vote
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
+
+ return rc;
+}
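Editor's note: a concrete example of the "at least 1 vote" rule above, with a hypothetical pseudo_cause value:

	/* example: pseudo_cause = RX | MISC
	 *   wil6210_irq_rx()   -> IRQ_HANDLED      (RX processed inline)
	 *   wil6210_irq_misc() -> IRQ_WAKE_THREAD  (MBOX event left for thread)
	 *   rc = IRQ_WAKE_THREAD, so the pseudo IRQ stays masked here and
	 *   wil6210_thread_irq() performs the final unmask
	 */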
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ /*
+ * IRQs are in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+
+ rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+ WIL_NAME"_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+ WIL_NAME"_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME"_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+ /* error branch */
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ if (rc)
+ return rc;
+
+ wil6210_enable_irq(wil);
+
+ return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil6210_disable_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000000..95fcd361322b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue,
+ * one has to read/write to/from the NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will
+ * not work on 64-bit platforms - they use 64-bit transactions
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms
+ *
+ * To avoid byte swapping on a big endian host, __raw_{read|write}l
+ * should be used - {read|write}l would swap bytes to present the
+ * little endian PCI value in host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+ /* size_t is unsigned; with a plain (count > 0; count -= 4) loop,
+  * a count that is not a multiple of 4 would wrap around and never
+  * reach 0, hence the offset-by-4 form below
+  */
+ for (count += 4; count > 4; count -= 4)
+ *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(*s++, d++);
+}
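Editor's note: a worked example of the offset-by-4 loop used in both helpers above (illustrative size):

	/* example: count = 10 bytes
	 *   the loop body runs with count = 14, 10, 6 -> three 32-bit words
	 *   copied, i.e. the transfer is rounded UP to 12 bytes (a whole
	 *   number of words), and the loop terminates even though count is
	 *   not a multiple of 4 - a plain "count > 0; count -= 4" loop
	 *   would wrap the unsigned count and never stop
	 */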
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ uint i;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ wil_link_off(wil);
+ clear_bit(wil_status_fwconnected, &wil->status);
+
+ switch (wdev->sme_state) {
+ case CFG80211_SME_CONNECTED:
+ cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ break;
+ case CFG80211_SME_CONNECTING:
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ default:
+ ;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+ wil_vring_fini_tx(wil, i);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, disconnect_worker);
+
+ _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg(wil, "Connect timeout\n");
+
+ /* reschedule to thread context - disconnect won't
+ * run from atomic context
+ */
+ schedule_work(&wil->disconnect_worker);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ wil_dbg(wil, "%s()\n", __func__);
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->wmi_mutex);
+
+ init_completion(&wil->wmi_ready);
+
+ wil->pending_connect_cid = -1;
+ setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+ INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+ INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ spin_lock_init(&wil->wmi_ev_lock);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+ if (!wil->wmi_wq_conn) {
+ destroy_workqueue(wil->wmi_wq);
+ return -EAGAIN;
+ }
+
+ /* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+ return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ del_timer_sync(&wil->connect_timer);
+ _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+ wmi_event_flush(wil);
+ destroy_workqueue(wil->wmi_wq_conn);
+ destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+ wil_dbg(wil, "Resetting...\n");
+
+ /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+ /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+ wil->csr + HOSTADDR(a))
+
+ /* hpal_perst_from_pad_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+ /* car_perst_rst_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+ W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
+ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ msleep(2000);
+
+ W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+ msleep(2000);
+
+ wil_dbg(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
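Editor's note: an expansion example of the two helper macros above (W is a plain register write, S is a read-modify-write OR):

	/* S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6)) expands to:
	 *
	 *   iowrite32(ioread32(wil->csr +
	 *                      HOSTADDR(RGF_USER_CLKS_CTL_SW_RST_MASK_0)) | BIT(6),
	 *             wil->csr + HOSTADDR(RGF_USER_CLKS_CTL_SW_RST_MASK_0));
	 *
	 * i.e. only bit 6 is set; all other bits of the register are preserved
	 */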
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(1000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+ if (0 == left) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ } else {
+ wil_dbg(wil, "FW ready after %d ms\n",
+ jiffies_to_msecs(to-left));
+ }
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+ int rc;
+
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wmi_wq);
+ flush_workqueue(wil->wmi_wq_conn);
+
+ wil6210_disable_irq(wil);
+ wil->status = 0;
+
+ /* TODO: put MAC in reset */
+ wil_target_reset(wil);
+
+ /* init after reset */
+ wil->pending_connect_cid = -1;
+ INIT_COMPLETION(wil->wmi_ready);
+
+ /* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+ /* TODO: release MAC reset */
+ wil6210_enable_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+
+ return rc;
+}
+
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg(wil, "%s()\n", __func__);
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+ int rc;
+ int bi;
+ u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
+ wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg(wil, "type: STATION\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg(wil, "type: AP\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg(wil, "type: P2P_CLIENT\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg(wil, "type: P2P_GO\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg(wil, "type: Monitor\n");
+ bi = 0;
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Apply profile in the following order: */
+ /* SSID and channel for the AP */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->ssid_len == 0) {
+ wil_err(wil, "SSID not set\n");
+ return -EINVAL;
+ }
+ wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+ if (channel)
+ wmi_set_channel(wil, channel->hw_value);
+ break;
+ default:
+ ;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* Set up beaconing if required. */
+ rc = wmi_set_bcon(wil, bi, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ wil_rx_init(wil);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ wil6210_disconnect(wil, NULL);
+ wil_rx_fini(wil);
+
+ return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000000..3068b5cb53a7
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_down(wil);
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
+{
+ static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ u16 rc;
+
+ skb->priority = cfg80211_classify8021d(skb);
+
+ rc = wil_1d_to_queue[skb->priority];
+
+ wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
+ (int)rc);
+
+ return rc;
+}
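Editor's note: the table above folds the eight 802.1d priorities into the four ACs listed in the comment before wil_select_queue(); spelled out:

	/* priority (from cfg80211_classify8021d) -> queue:
	 *   1, 2  (AC_BK) -> 0
	 *   0, 3  (AC_BE) -> 1
	 *   4, 5  (AC_VI) -> 2
	 *   6, 7  (AC_VO) -> 3
	 * matching wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }
	 */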
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_select_queue = wil_select_queue,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+ int rc = 0;
+
+ wdev = wil_cfg80211_init(dev);
+ if (IS_ERR(wdev)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wdev;
+ }
+
+ wil = wdev_to_wil(wdev);
+ wil->csr = csr;
+ wil->wdev = wdev;
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_wdev;
+ }
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+ /* default monitor channel */
+ ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+ ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+ if (!ndev) {
+ dev_err(dev, "alloc_netdev_mqs failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ wil_link_off(wil);
+
+ return wil;
+
+ out_priv:
+ wil_priv_deinit(wil);
+
+ out_wdev:
+ wil_wdev_free(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ if (!ndev)
+ return;
+
+ free_netdev(ndev);
+ wil_priv_deinit(wil);
+ wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+
+ rc = register_netdev(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ return rc;
+ }
+
+ wil_link_off(wil);
+
+ return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000000..0fc83edd6bad
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+ " Use MSI interrupt: "
+ "0 - don't, 1 - (default) - single, or 3");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+
+ pci_set_master(pdev);
+
+ /*
+ * how many MSI interrupts to request?
+ */
+ switch (use_msi) {
+ case 3:
+ case 1:
+ case 0:
+ break;
+ default:
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+ use_msi);
+ use_msi = 1;
+ }
+ wil->n_msi = use_msi;
+ if (wil->n_msi) {
+ wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ if (rc && (wil->n_msi == 3)) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ wil->n_msi = 1;
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ }
+ if (rc) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ wil->n_msi = 0;
+ }
+ } else {
+ wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto stop_master;
+
+ /* need reset here to obtain MAC */
+ rc = wil_reset(wil);
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ void __iomem *csr;
+ int rc;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+ dev_err(&pdev->dev, "Not " WIL_NAME "? "
+ "BAR0 size is %lu while expecting %lu\n",
+ (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ return -ENODEV;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+ /* rollback to err_disable_pdev */
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ csr = pci_ioremap_bar(pdev, 0);
+ if (!csr) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+ wil = wil_if_alloc(dev, csr);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ goto err_iounmap;
+ }
+ /* rollback to if_free */
+
+ pci_set_drvdata(pdev, wil);
+ wil->pdev = pdev;
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto if_free;
+ }
+ /* rollback to bus_disable */
+
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ wil6210_debugfs_init(wil);
+
+ /* check FW is alive */
+ wmi_echo(wil);
+
+ return 0;
+
+ bus_disable:
+ wil_if_pcie_disable(wil);
+ if_free:
+ wil_if_free(wil);
+ err_iounmap:
+ pci_iounmap(pdev, csr);
+ err_release_reg:
+ pci_release_region(pdev, 0);
+ err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil6210_debugfs_remove(wil);
+ wil_if_pcie_disable(wil);
+ wil_if_remove(wil);
+ wil_if_free(wil);
+ pci_iounmap(pdev, wil->csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+ { PCI_DEVICE(0x1ae9, 0x0301) },
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000000..f29c294413cf
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+ " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+ return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+ return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+ vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+ return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ int used = (vring->size + swhead - swtail) % vring->size;
+
+ return vring->size - used - 1;
+}
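Editor's note: a worked example for the free-space formula above (illustrative numbers):

	/* example: size = 8, swhead = 2, swtail = 6
	 *   used  = (8 + 2 - 6) % 8 = 4   descriptors in flight
	 *   avail = 8 - 4 - 1       = 3   descriptors may still be posted
	 * one slot is always kept empty so that a full ring
	 * (next_tail == swhead, see wil_vring_is_full) can be told apart
	 * from an empty one (swhead == swtail)
	 */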
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
+ vring->size);
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+ /*
+ * vring->va should be aligned on its size rounded up to a power of 2.
+ * This is guaranteed by dma_alloc_coherent.
+ */
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+ vring->size);
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+ /* initially, all descriptors are SW owned.
+ * For Tx and Rx, the ownership bit is at the same location, thus
+ * either descriptor layout can be used to set it
+ */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+ vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+ int tx)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ while (!wil_vring_is_empty(vring)) {
+ if (tx) {
+ volatile struct vring_tx_desc *d =
+ &vring->va[vring->swtail].tx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ }
+ vring->swtail = wil_vring_next_tail(vring);
+ } else { /* rx */
+ volatile struct vring_rx_desc *d =
+ &vring->va[vring->swtail].rx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ wil_vring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = RX_BUF_LEN;
+ volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+ dma_addr_t pa;
+
+ /* TODO align */
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = sz;
+ vring->ctx[i] = skb;
+
+ return 0;
+}
+
+/**
+ * Adds radiotap header
+ *
+ * Any error is indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ volatile struct vring_rx_desc *d)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct wil6210_rtap_vendor {
+ struct wil6210_rtap rtap;
+ /* vendor */
+ u8 vendor_oui[3] __aligned(2);
+ u8 vendor_ns;
+ __le16 vendor_skip;
+ u8 vendor_data[0];
+ } __packed;
+ struct wil6210_rtap_vendor *rtap_vendor;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ int phy_length = 0; /* phy info header size, bytes */
+ static char phy_data[128];
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ if (rtap_include_phy_info) {
+ rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+ /* calculate additional length */
+ if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+ /**
+ * PHY info starts at an 8-byte boundary.
+ * It consists of 8-byte lines; the last line may be partially
+ * written (HW bug), thus FW configures the last line
+ * to be excessive. The driver skips this last line.
+ */
+ int len = min_t(int, 8 + sizeof(phy_data),
+ wil_rxdesc_phy_length(d));
+ if (len > 8) {
+ void *p = skb_tail_pointer(skb);
+ void *pa = PTR_ALIGN(p, 8);
+ if (skb_tailroom(skb) >= len + (pa - p)) {
+ phy_length = len - 8;
+ memcpy(phy_data, pa, phy_length);
+ }
+ }
+ }
+ rtap_len += phy_length;
+ }
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap_vendor = (void *)skb_push(skb, rtap_len);
+ memset(rtap_vendor, 0, rtap_len);
+
+ rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+ (1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+ rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap_vendor->rtap.mcs_flags = 0;
+ rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+ if (rtap_include_phy_info) {
+ rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+ /* OUI for Wilocity 04:ce:14 */
+ rtap_vendor->vendor_oui[0] = 0x04;
+ rtap_vendor->vendor_oui[1] = 0xce;
+ rtap_vendor->vendor_oui[2] = 0x14;
+ rtap_vendor->vendor_ns = 1;
+ /* Rx descriptor + PHY data */
+ rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+ phy_length);
+ memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+ memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+ phy_length);
+ }
+}
+
+/*
+ * Fast in-place swap of two 16-bit values (XOR swap, no temporary)
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+ *a ^= *b;
+ *b ^= *a;
+ *a ^= *b;
+}
+
+static void wil_swap_ethaddr(void *data)
+{
+ struct ethhdr *eth = data;
+ u16 *s = (u16 *)eth->h_source;
+ u16 *d = (u16 *)eth->h_dest;
+
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s, d);
+}
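Editor's note: the XOR trick above swaps each 16-bit half of the two MAC addresses without a temporary; one step on illustrative values:

	/* example: *a = 0x1234, *b = 0xabcd
	 *   *a ^= *b  ->  *a = 0xb9f9
	 *   *b ^= *a  ->  *b = 0x1234   (original *a)
	 *   *a ^= *b  ->  *a = 0xabcd   (original *b)
	 * this relies on a and b never aliasing the same location, which
	 * holds here since h_source and h_dest are distinct fields of
	 * struct ethhdr
	 */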
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+ volatile struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int sz = RX_BUF_LEN;
+ u8 ftype;
+ u8 ds_bits;
+
+ if (wil_vring_is_empty(vring))
+ return NULL;
+
+ d = &(vring->va[vring->swhead].rx);
+ if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+ /* it is not an error; we just reached the end of the Rx done area */
+ return NULL;
+ }
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ skb_trim(skb, d->dma.length);
+
+ wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb, d);
+
+ wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+ wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_vring_advance_head(vring, 1);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+ /*
+ * Non-data frames may be delivered through the Rx DMA channel (ex: BAR).
+ * The driver should recognize them by the frame type found
+ * in the Rx descriptor. If the type is not data, it is an 802.11 frame as-is.
+ */
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (ftype != IEEE80211_FTYPE_DATA) {
+ wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ /* TODO: process it */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ wil_err(wil, "Short frame, len = %d\n", skb->len);
+ /* TODO: process it (i.e. BAR) */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ ds_bits = wil_rxdesc_ds_bits(d);
+ if (ds_bits == 1) {
+ /*
+ * HW bug - in ToDS mode, i.e. Rx on AP side,
+ * addresses get swapped
+ */
+ wil_swap_ethaddr(skb->data);
+ }
+
+ return skb;
+}
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
+ for (; next_tail = wil_vring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (rc) {
+ wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+ iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+ return rc;
+}
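Editor's note: the comma-operator for loop above is compact but easy to misread; a minimal equivalent sketch of the same refill control flow, restructured for readability only (error print omitted):

	/* equivalent control flow, for readability only:
	 *
	 *	for (;;) {
	 *		next_tail = wil_vring_next_tail(v);
	 *		if (next_tail == v->swhead || count-- <= 0)
	 *			break;
	 *		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
	 *		if (rc)
	 *			break;		// wil_err() omitted in this sketch
	 *		v->swtail = next_tail;
	 *	}
	 */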
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+ unsigned int len = skb->len;
+
+ if (in_interrupt())
+ rc = netif_rx(skb);
+ else
+ rc = netif_rx_ni(skb);
+
+ if (likely(rc == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+/**
+ * Process all completed skb's from the Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ struct sk_buff *skb;
+
+ if (!v->va) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+ while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+ wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ skb_orphan(skb);
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ } else {
+ skb->protocol = eth_type_trans(skb, ndev);
+ }
+
+ wil_netif_rx_any(skb, ndev);
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct vring *vring = &wil->vring_rx;
+ int rc;
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+
+ vring->size = WIL6210_RX_RING_SIZE;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ }
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ goto err_free;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring, 0);
+
+ return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+
+ if (vring->va) {
+ int rc;
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event cfg;
+ } __packed wmi_rx_cfg_reply;
+
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID,
+ &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
+ 100);
+ wil_vring_free(wil, vring, 0);
+ }
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid)
+{
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ },
+ .ringid = id,
+ .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 16,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+		rc = -EINVAL;
+		goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (!vring->va)
+ return;
+
+ wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v = &wil->vring_tx[0];
+
+ if (v->va)
+ return v;
+
+ return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+ dma_addr_t pa, u32 len)
+{
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ d->dma.ip_length = 0;
+ /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = len;
+ d->dma.d0 = 0;
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* use dst index 0 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+ (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ volatile struct vring_tx_desc *d;
+ u32 swhead = vring->swhead;
+ int avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f;
+ int vring_index = vring - wil->vring_tx;
+ uint i = swhead;
+ dma_addr_t pa;
+
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+
+ if (avail < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ if (avail < 1 + nr_frags) {
+ wil_err(wil, "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
+ return -ENOMEM;
+ }
+ d = &(vring->va[i].tx);
+
+ /* FIXME FW can accept only unicast frames for the peer */
+ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+ pa = dma_map_single(dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+ skb->data, (unsigned long long)pa);
+ wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ /* 1-st segment */
+ wil_tx_desc_map(d, pa, skb_headlen(skb));
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ /* middle segments */
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+ i = (swhead + f + 1) % vring->size;
+ d = &(vring->va[i].tx);
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+ wil_tx_desc_map(d, pa, len);
+ vring->ctx[i] = NULL;
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+ wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, nr_frags + 1);
+ wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i] = skb_get(skb);
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ /* Note: increment @f to operate with positive index */
+ for (f++; f > 0; f--) {
+ i = (swhead + f) % vring->size;
+ d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ if (vring->ctx[i])
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+
+ return -EINVAL;
+}
+
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct vring *vring;
+ int rc;
+
+ wil_dbg_TXRX(wil, "%s()\n", __func__);
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ goto drop;
+ }
+ if (!test_bit(wil_status_fwconnected, &wil->status)) {
+ wil_err(wil, "FW not connected\n");
+ goto drop;
+ }
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ rc = wmi_tx_eapol(wil, skb);
+ } else {
+ /* find vring */
+ vring = wil_find_tx_vring(wil, skb);
+ if (!vring) {
+ wil_err(wil, "No Tx VRING available\n");
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+ }
+ switch (rc) {
+ case 0:
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ return NETDEV_TX_BUSY;
+ default:
+ ; /* goto drop; */
+ break;
+ }
+ drop:
+ netif_tx_stop_all_queues(ndev);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct vring *vring = &wil->vring_tx[ringid];
+
+ if (!vring->va) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return;
+ }
+
+ wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+
+ while (!wil_vring_is_empty(vring)) {
+ volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+ dma_addr_t pa;
+ struct sk_buff *skb;
+ if (!(d->dma.status & TX_DMA_STATUS_DU))
+ break;
+
+ wil_dbg_TXRX(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, d->dma.length, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+ d->dma.addr_low = 0;
+ d->dma.addr_high = 0;
+ d->dma.length = 0;
+ d->dma.status = TX_DMA_STATUS_DU;
+ vring->swtail = wil_vring_next_tail(vring);
+ }
+ if (wil_vring_avail_tx(vring) > vring->size/4)
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000000..45a61f597c5c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN (2048)
+#define TX_BUF_LEN (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrup_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit 30 : reserved2:1
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5.. 7 : reserved0:3
+ * bit 8..13 : reserved1:6
+ * bit 14 : reserved2:1
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
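+/*
+ * Each field above maps to a MAC_CFG_DESC_TX_<dword>_<FIELD>_{POS,LEN,MSK}
+ * triplet below; e.g. dst_index_en (dword 1, bit 20) corresponds to
+ * MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS (20) and _MSK (0x100000)
+ */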
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+struct vring_tx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7; reserved */
+ u16 length;
+} __packed;
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : connection_id:3 :The Source index that was found during
+ * Parsing the TA. This field is used to define the source of the packet
+ * bit 7 : reserved:1
+ * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1
+ * bit 8.. 9 : ds_bits:2
+ * bit 10 : a_msdu_present:1 from qos header
+ * bit 11 : a_msdu_type:1 from qos header
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version
+ * bit 4 : fc_order:1 The FC Control (b15) -Order
+ * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8.. 9 : reserved:2
+ * bit 10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1
+ * [dword 3]
+ * [byte 12] error
+ * [byte 13] status
+ * bit 0 : du:1
+ * bit 1 : eop:1
+ * bit 2 : error:1
+ * bit 3 : mi:1
+ * bit 4 : l3_identified:1
+ * bit 5 : l4_identified:1
+ * bit 6 : phy_info_included:1
+ * bit 7 : reserved:1
+ * [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+
+struct vring_rx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ u16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+ struct vring_tx_desc tx;
+ struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000000..9bcfffa4006c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * @b0 must be <= @b1, otherwise the result is incorrect
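+ * e.g. WIL_GET_BITS(0x0000abcd, 4, 7) == 0xc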
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_TX_QUEUES (4)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures are placed in the host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr)	((fwaddr) - WIL6210_FW_HOST_OFF)
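+/* e.g. HOSTADDR(0x880b4c) == 0xb4c - the BAR0 offset of RGF_USER_USER_ICR */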
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt is controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+	#define BIT_DMA_EP_TX_ICR_TX_DONE_N(n)	BIT((n)+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT0 BIT(28)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT1 BIT(29)
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* popular locations */
+#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+ offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+struct wil6210_mbox_hdr_wmi {
+ u8 reserved0[2];
+ __le16 id;
+ __le16 info1; /* bits [0..3] - device_id, rest - unused */
+ u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+ dma_addr_t pa;
+ volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+ u16 size; /* number of vring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ void **ctx; /* void *ctx[size] - software context */
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0,
+ wil_status_fwconnected,
+ wil_status_dontscan,
+ wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+ u64 tsf;
+ u32 snr;
+ u16 last_mcs_rx;
+ u16 bf_mcs; /* last BF, used for Tx */
+ u16 my_rx_sector;
+ u16 my_tx_sector;
+ u16 peer_rx_sector;
+ u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ int n_msi;
+ struct wireless_dev *wdev;
+ void __iomem *csr;
+ ulong status;
+ /* profile */
+ u32 monitor_flags;
+ u32 secure_pcp; /* create secure PCP? */
+ int sinfo_gen;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+ struct work_struct wmi_connect_worker;
+ struct work_struct disconnect_worker;
+ struct timer_list connect_timer;
+ int pending_connect_cid;
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+ * - fill in IRQ from wil6210_irq_misc,
+ * - consumed in thread by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ /* DMA related */
+ struct vring vring_rx;
+ struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+ u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* statistics */
+ struct wil6210_stats stats;
+ /* debugfs */
+ struct dentry *debug;
+ struct debugfs_blob_wrapper fw_code_blob;
+ struct debugfs_blob_wrapper fw_data_blob;
+ struct debugfs_blob_wrapper fw_peri_blob;
+ struct debugfs_blob_wrapper uc_code_blob;
+ struct debugfs_blob_wrapper uc_data_blob;
+ struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+
+#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000000..12915f6e7617
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
+ * invoked for the misc IRQ, and it calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then, the work queue @wil->wmi_wq
+ * wakes up and handles the events within @wmi_event_worker. Every event gets
+ * detached from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, this second flow
+ * would never complete because the IRQ thread stays blocked.
+ */
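+
+/*
+ * In terms of the code below: the misc IRQ thread calls wmi_recv_cmd(),
+ * which appends events to @pending_wmi_ev and queues @wmi_event_worker;
+ * the worker pops each event via next_wmi_ev() and handles it through
+ * wmi_event_handle(), which dispatches via wmi_evt_call_handler().
+ */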
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ * - MAC CPU (ucode)
+ * - User CPU (firmware)
+ * - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible from the host. To read from these addresses, an alternative
+ * AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
+ * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
+ */
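+
+/*
+ * Worked example (illustrative, based on @fw_mapping below): linker address
+ * 0x800100 falls into the FW data RAM region, hence it remaps to AHB address
+ * 0x900100; the host accesses it at BAR0 offset HOSTADDR(0x900100) == 0x80100.
+ */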
+
+/**
+ * @fw_mapping provides the memory remapping table
+ */
+static const struct {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+ {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+ {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+ {0x880000, 0x88a000, 0x880000}, /* various RGF */
+ {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+ /*
+ * 920000..930000 ucode code RAM
+ * 930000..932000 ucode data RAM
+ */
+};
+
+/**
+ * return the AHB address for a given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * A valid buffer must be DWORD aligned
+ *
+ * return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .id = cpu_to_le16(cmdid),
+ .info1 = 0,
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+
+ if (sizeof(cmd) + len > r->entry_size) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ return -EAGAIN;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+ /* read Tx head till it is not busy */
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ return -EBUSY;
+ }
+	/* next head - advance by one descriptor, wrapping around the ring */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+	/* wait till FW finishes the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ r->tail = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ return -EBUSY;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ return -EINVAL;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ iowrite32(1, wil->csr + HOSTADDR(r->head) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* advance next ptr */
+ iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.head));
+
+ /* interrupt to FW */
+ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+ return 0;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_ready_event *evt = d;
+ u32 ver = le32_to_cpu(evt->sw_version);
+
+ wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+ memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+ }
+ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+ "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ wil_dbg_WMI(wil, "WMI: FW ready\n");
+
+ set_bit(wil_status_fwready, &wil->status);
+ /* reuse wmi_ready for the firmware ready indication */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int ch_no = data->info.channel+1;
+ u32 freq = ieee80211_channel_to_frequency(ch_no,
+ IEEE80211_BAND_60GHZ);
+ struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+ /* TODO convert LE to CPU */
+ s32 signal = 0; /* TODO */
+ __le16 fc = rx_mgmt_frame->frame_control;
+ u32 d_len = le32_to_cpu(data->info.len);
+ u16 d_status = le16_to_cpu(data->info.status);
+
+ wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+ data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+ le16_to_cpu(data->info.stype));
+ wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+
+ bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+ tsf, cap, bi, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_WMI(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss() failed\n");
+ }
+ }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ if (wil->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ bool aborted = (data->status != 0);
+
+ wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ cfg80211_scan_done(wil->scan_request, aborted);
+ wil->scan_request = NULL;
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ ch = evt->channel + 1;
+ wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
+ wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out IE's */
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ wil_err(wil, "Not in connecting state\n");
+ return;
+ }
+ del_timer_sync(&wil->connect_timer);
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo.assoc_req_ies = assoc_req_ie;
+ sinfo.assoc_req_ies_len = assoc_req_ielen;
+ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ }
+ set_bit(wil_status_fwconnected, &wil->status);
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+ wil->pending_connect_cid = evt->cid;
+ queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_disconnect_event *evt = d;
+
+ wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+ evt->bssid,
+ evt->protocol_reason_status, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ wil6210_disconnect(wil, evt->bssid);
+ clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_notify_req_done_event *evt = d;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Short NOTIFY event\n");
+ return;
+ }
+
+ wil->stats.tsf = le64_to_cpu(evt->tsf);
+ wil->stats.snr = le32_to_cpu(evt->snr_val);
+ wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+ wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+ wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+ wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+ wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+ wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+ "BF status 0x%08x SNR 0x%08x\n"
+ "Tx Tpt %d goodput %d Rx goodput %d\n"
+ "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+ wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+ wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * Firmware reports EAPOL frame using WME event.
+ * Reconstruct Ethernet frame and deliver it via normal Rx
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+
+ wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+ evt->src_mac);
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
+ eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_priv *wil, int eventid,
+ void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+};
+
+/*
+ * Run in IRQ context
+ * Extract the WMI event from the mailbox and queue it to
+ * @wil->pending_wmi_ev; it will eventually be handled by @wmi_event_worker
+ * in the context of the "wil6210_wmi" thread
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+
+ for (;;) {
+ u16 len;
+
+ r->head = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ return;
+
+ /* read cmd from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ return;
+ }
+
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ return;
+ }
+
+ len = le16_to_cpu(hdr.len);
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt) {
+ wil_err(wil, "kmalloc for WMI event (%d) failed\n",
+ len);
+ return;
+ }
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* indicate */
+ wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+ evt->event.wmi.id);
+ }
+ wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ iowrite32(r->tail, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ {
+ int q = queue_work(wil->wmi_wq,
+ &wil->wmi_event_worker);
+ wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+ }
+ }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+ int rc;
+ int remain;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ rc = __wmi_send(wil, cmdid, buf, len);
+ if (rc)
+ goto out;
+
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ remain = wait_for_completion_timeout(&wil->wmi_ready,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_WMI(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+ wil->reply_id = 0;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ out:
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+ struct wmi_set_mac_address_cmd cmd;
+
+ memcpy(cmd.mac, addr, ETH_ALEN);
+
+ wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+ struct wmi_bcon_ctrl_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ };
+
+ if (!wil->secure_pcp)
+ cmd.disable_sec = 1;
+
+ return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+ &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wmi_eapol_tx_cmd *cmd;
+ struct ethhdr *eth;
+ u16 eapol_len = skb->len - ETH_HLEN;
+ void *eapol = skb->data + ETH_HLEN;
+ uint i;
+ int rc;
+
+ skb_set_mac_header(skb, 0);
+ eth = eth_hdr(skb);
+ wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+ goto found_dest;
+ }
+
+ return -EINVAL;
+
+ found_dest:
+ /* find out eapol data & len */
+ cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+ if (!cmd)
+		return -ENOMEM;
+
+ memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+ cmd->eapol_len = cpu_to_le16(eapol_len);
+ memcpy(cmd->eapol, eapol, eapol_len);
+ rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr)
+{
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key)
+{
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_len = key_len,
+ };
+
+ if (!key || (key_len > sizeof(cmd.key)))
+ return -EINVAL;
+
+ memcpy(cmd.key, key, key_len);
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ wil_err(wil, "kmalloc(%d) failed\n", len);
+ return -ENOMEM;
+ }
+
+ cmd->mgmt_frm_type = type;
+ /* BUG: FW API defines ieLen as u8. Will fix FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+ kfree(cmd);
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_WMI(wil, "%s()\n", __func__);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(wil, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->id);
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ } else {
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ }
+ wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+ complete(&wil->wmi_ready);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_err(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_connect_worker);
+
+ if (wil->pending_connect_cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+ wil->pending_connect_cid);
+
+ rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+ wil->pending_connect_cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0)
+ wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000000..3bbf87572b07
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
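+
+/*
+ * Illustrative sketch, not part of the FW API definitions below: replies
+ * delivered by wmi_call() carry the struct wil6210_mbox_hdr_wmi header in
+ * front of the event payload, so callers pair the two in one packed reply
+ * buffer. For example, an echo round trip that also reads the response
+ * (wmi_echo() itself discards it) could look like:
+ *
+ *	struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678) };
+ *	struct {
+ *		struct wil6210_mbox_hdr_wmi wmi;
+ *		struct wmi_echo_event evt;
+ *	} __packed reply;
+ *	int rc = wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ *			  WMI_ECHO_RSP_EVENTID, &reply, sizeof(reply), 20);
+ */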
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_START_SCAN_CMDID = 0x0007,
+ WMI_SET_BSS_FILTER_CMDID = 0x0009,
+ WMI_SET_PROBED_SSID_CMDID = 0x000a,
+ WMI_SET_LISTEN_INT_CMDID = 0x000b,
+ WMI_BCON_CTRL_CMDID = 0x000f,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
+ WMI_SET_APPIE_CMDID = 0x003f,
+ WMI_GET_APPIE_CMDID = 0x0040,
+ WMI_SET_WSC_STATUS_CMDID = 0x0041,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
+ WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300,
+ WMI_MEM_READ_CMDID = 0x0800,
+ WMI_MEM_WR_CMDID = 0x0801,
+ WMI_ECHO_CMDID = 0x0803,
+ WMI_DEEP_ECHO_CMDID = 0x0804,
+ WMI_CONFIG_MAC_CMDID = 0x0805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
+ WMI_ADD_STATION_CMDID = 0x0807,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
+ WMI_FS_TUNE_CMDID = 0x080a,
+ WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_TEMP_SENSE_CMDID = 0x080e,
+ WMI_DC_CALIB_CMDID = 0x080f,
+ WMI_SEND_TONE_CMDID = 0x0810,
+ WMI_IQ_TX_CALIB_CMDID = 0x0811,
+ WMI_IQ_RX_CALIB_CMDID = 0x0812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x0813,
+ WMI_SET_WORK_MODE_CMDID = 0x0815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
+ WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
+ WMI_MARLON_R_READ_CMDID = 0x0818,
+ WMI_MARLON_R_WRITE_CMDID = 0x0819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_CFG_RX_CHAIN_CMDID = 0x0820,
+ WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_RX_ON_CMDID = 0x0822,
+ WMI_VRING_BA_EN_CMDID = 0x0823,
+ WMI_VRING_BA_DIS_CMDID = 0x0824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
+ WMI_RCP_DELBA_CMDID = 0x0826,
+ WMI_SET_SSID_CMDID = 0x0827,
+ WMI_GET_SSID_CMDID = 0x0828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
+ WMI_SW_TX_REQ_CMDID = 0x082b,
+ WMI_RX_OFF_CMDID = 0x082c,
+ WMI_READ_MAC_RXQ_CMDID = 0x0830,
+ WMI_READ_MAC_TXQ_CMDID = 0x0831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
+ WMI_MLME_PUSH_CMDID = 0x0835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x0837,
+ WMI_BF_SM_MGMT_CMDID = 0x0838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_SET_SECTORS_CMDID = 0x0849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x0851,
+ WMI_RS_MGMT_CMDID = 0x0852,
+ WMI_RF_MGMT_CMDID = 0x0853,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x0862,
+ WMI_NOTIFY_REQ_CMDID = 0x0863,
+ WMI_GET_STATUS_CMDID = 0x0864,
+ WMI_UNIT_TEST_CMDID = 0x0900,
+ WMI_HICCUP_CMDID = 0x0901,
+ WMI_FLASH_READ_CMDID = 0x0902,
+ WMI_FLASH_WRITE_CMDID = 0x0903,
+ WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+
+ WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
+ WMI_ABORT_SCAN_CMDID = 0xf007,
+ WMI_SET_PMK_CMDID = 0xf028,
+
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
+ WMI_GET_PMK_CMDID = 0xf048,
+ WMI_SET_PASSPHRASE_CMDID = 0xf049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
+ WMI_EAPOL_TX_CMDID = 0xf04c,
+ WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
+ WMI_FW_VER_CMDID = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_WEP = 0x02,
+ WMI_CRYPT_TKIP = 0x04,
+ WMI_CRYPT_AES = 0x08,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
+ WMI_CONNECT_SEND_REASSOC = 0x0002,
+ WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+ u8 channel; /* hint */
+ u8 reserved;
+ u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX (0)
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+#define WMI_PMK_LEN (32)
+
+struct wmi_set_pmk_cmd {
+ u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0,
+ WMI_KEY_USE_GROUP = 1,
+ WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ u8 key_usage; /* enum wmi_key_usage */
+ u8 key_len;
+ u8 key_rsc[8]; /* key replay sequence counter */
+ u8 key[WMI_MAX_KEY_LEN];
+ u8 key_op_ctrl; /* Additional Key Control information */
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ u8 reserved[8];
+ __le32 home_dwell_time; /* Max duration in the home channel (ms) */
+ __le32 force_scan_interval; /* Time interval between scans (ms) */
+ u8 scan_type; /* wmi_scan_type */
+ u8 num_channels; /* how many channels follow */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[0]; /* channel IDs */
+ /* 0 - 58320 MHz */
+ /* 1 - 60480 MHz */
+ /* 2 - 62640 MHz */
+} __packed;
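+
+/*
+ * Hypothetical usage sketch (n, chan[], i and rc are illustrative locals,
+ * not part of this patch): channel_list[] is a flexible array, so the
+ * command must be allocated with room for num_channels trailing entries:
+ *
+ *	struct wmi_start_scan_cmd *cmd;
+ *	size_t len = sizeof(*cmd) + n * sizeof(cmd->channel_list[0]);
+ *
+ *	cmd = kzalloc(len, GFP_KERNEL);
+ *	if (!cmd)
+ *		return -ENOMEM;
+ *	cmd->num_channels = n;
+ *	for (i = 0; i < n; i++)
+ *		cmd->channel_list[i].channel = chan[i];
+ *	rc = wmi_send(wil, WMI_START_SCAN_CMDID, cmd, len);
+ *	kfree(cmd);
+ */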
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX (15)
+
+enum wmi_ssid_flag {
+ WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
+ WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
+ WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+ u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 flag; /* enum wmi_ssid_flag */
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 reserved;
+ __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0,
+ WMI_RF_MGMT_W_ENABLE = 1,
+ WMI_RF_MGMT_GET_STATUS = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 reserved;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0,
+ WMI_VRING_DS_STATION = 1,
+ WMI_VRING_DS_AP = 2,
+ WMI_VRING_DS_ADDR4 = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0,
+ WMI_SCH_PRIO_HIGH = 1,
+};
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+ u8 mac_ctrl;
+
+ #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
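+
+/*
+ * Illustrative sketch: the CIDXTID_*_POS/_MSK pairs above describe how the
+ * connection id (bits 0..3) and TID (bits 4..7) are packed into cidxtid;
+ * with hypothetical locals cid and tid:
+ *
+ *	u8 cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ *		     ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ */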
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0,
+ WMI_VRING_CMD_MODIFY = 1,
+ WMI_VRING_CMD_DELETE = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+ u8 ringid;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+ u8 ringid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 reserved[3];
+ __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0,
+ WMI_SNIFFER_ON = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0,
+ WMI_SNIFFER_DP = 1,
+ WMI_SNIFFER_BOTH_PHYS = 2,
+};
+
+struct wmi_sniffer_cfg {
+ __le32 mode; /* enum wmi_sniffer_cfg_mode */
+ __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0,
+ WMI_RX_CHAIN_DEL = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ u8 l2_802_3_offload_ctrl;
+
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+ u8 l2_nwifi_offload_ctrl;
+
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+ u8 l3_l4_ctrl;
+
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+ u8 ring_ctrl;
+
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reserved[2];
+ struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 status_code;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as it was received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+
+/*
+* WMI_EAPOL_TX_CMDID
+*/
+struct wmi_eapol_tx_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_IMM_RSP_EVENTID = 0x0000,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100a,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
+ WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
+ WMI_ADD_STATION_DONE_EVENTID = 0x1807,
+ WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
+ WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180a,
+ WMI_CORR_MEASURE_DONE_EVENTID = 0x180b,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
+
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_RX_ON_DONE_EVENTID = 0x1822,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
+ WMI_RX_OFF_DONE_EVENTID = 0x182c,
+
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+
+ WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0,
+ WMI_RF_DISABLED_HW = 1,
+ WMI_RF_DISABLED_SW = 2,
+ WMI_RF_DISABLED_HW_SW = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+ u8 major;
+ u8 minor;
+ __le16 subminor;
+ __le16 build;
+} __packed;
+
+/*
+* WMI_MAC_ADDR_RESP_EVENTID
+*/
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/*
+* WMI_EAPOL_RX_EVENTID
+*/
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+* WMI_READY_EVENTID
+*/
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ WMI_11AD_CAPABILITY = 7,
+ WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ u8 phy_capability; /* enum wmi_phy_capability */
+ u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+ __le32 status;
+ __le64 tsf;
+ __le32 snr_val;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 reserved2[3];
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
+ WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
+ WMI_DIS_REASON_DISCONNECT_CMD = 3,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 4,
+ WMI_DIS_REASON_AUTH_FAILED = 5,
+ WMI_DIS_REASON_ASSOC_FAILED = 6,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 8,
+ WMI_DIS_REASON_INVALID_PROFILE = 10,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 12,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 13,
+ WMI_DIS_REASON_IBSS_MERGE = 14,
+};
+
+struct wmi_disconnect_event {
+ __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
+ u8 bssid[WMI_MAC_LEN]; /* set if known */
+ u8 disconnect_reason; /* see enum wmi_disconnect_reason */
+ u8 assoc_resp_len;
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0,
+ WMI_BA_NON_AGREED = 1,
+};
+
+struct wmi_vring_ba_status_event {
+ __le16 status;
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 from_initiator;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+ WMI_VRING_CFG_SUCCESS = 0,
+ WMI_VRING_CFG_FAILURE = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+ WMI_ADDBA_SUCCESS = 0,
+ WMI_ADDBA_FAIL = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset as it was received */
+ __le16 ba_timeout;
+ __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it was received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0,
+ WMI_WBE_REASON_RX_DISASSOC = 1,
+ WMI_WBE_REASON_BAD_PHY_LINK = 2,
+};
+
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
+ WMI_TX_SW_STATUS_FAILED_TX = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+ u8 status; /* enum wmi_sw_tx_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 snr;
+ __le16 range;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ u8 qid;
+ u8 mid;
+ u8 cid;
+ u8 channel; /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+ __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 4a4e98f71807..4374079dfc2a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2963,7 +2963,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
ssid_el_p[1] = priv->SSID_size;
memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
ssid_el_p[2 + priv->SSID_size] = WLAN_EID_SUPP_RATES;
- ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */
+ ssid_el_p[3 + priv->SSID_size] = 4; /* len of supported rates */
memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 51e33b53386e..c1b159ebcffe 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -45,11 +45,11 @@ static struct pci_driver atmel_driver = {
.name = "atmel",
.id_table = card_ids,
.probe = atmel_pci_probe,
- .remove = __devexit_p(atmel_pci_remove),
+ .remove = atmel_pci_remove,
};
-static int __devinit atmel_pci_probe(struct pci_dev *pdev,
+static int atmel_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
@@ -69,7 +69,7 @@ static int __devinit atmel_pci_probe(struct pci_dev *pdev,
return 0;
}
-static void __devexit atmel_pci_remove(struct pci_dev *pdev)
+static void atmel_pci_remove(struct pci_dev *pdev)
{
stop_atmel_card(pci_get_drvdata(pdev));
}
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b298e5d68be2..10e288d470e7 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -7,6 +7,7 @@
#include <linux/hw_random.h>
#include <linux/bcma/bcma.h>
#include <linux/ssb/ssb.h>
+#include <linux/completion.h>
#include <net/mac80211.h>
#include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
+ /* a completion event structure needed if this call is asynchronous */
+ struct completion fw_load_complete;
+ /* a pointer to the firmware object */
+ const struct firmware *blob;
/* The type of firmware to request. */
enum b43_firmware_file_type req_type;
/* Error messages for each firmware type. */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 777cd74921d7..38bc5a7997ff 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -409,7 +409,10 @@ static inline
struct b43_dmadesc_meta *meta)
{
if (meta->skb) {
- dev_kfree_skb_any(meta->skb);
+ if (ring->tx)
+ ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
+ else
+ dev_kfree_skb_any(meta->skb);
meta->skb = NULL;
}
}
@@ -1454,7 +1457,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev->wl->hw, skb);
err = 0;
goto out;
}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c5a99c8c8168..806e34c19281 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
b43warn(wl, text);
}
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct b43_request_fw_context *ctx = context;
+
+ ctx->blob = firmware;
+ complete(&ctx->fw_load_complete);
+}
+
int b43_do_request_fw(struct b43_request_fw_context *ctx,
const char *name,
- struct b43_firmware_file *fw)
+ struct b43_firmware_file *fw, bool async)
{
- const struct firmware *blob;
struct b43_fw_header *hdr;
u32 size;
int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
B43_WARN_ON(1);
return -ENOSYS;
}
- err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+ if (async) {
+ /* do this part asynchronously */
+ init_completion(&ctx->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+ ctx->dev->dev->dev, GFP_KERNEL,
+ ctx, b43_fw_cb);
+ if (err < 0) {
+ pr_err("Unable to load firmware\n");
+ return err;
+ }
+ /* stall here until fw ready */
+ wait_for_completion(&ctx->fw_load_complete);
+ if (ctx->blob)
+ goto fw_ready;
+ /* On some ARM systems, the async request will fail, but the next sync
+ * request works. For this reason, we fall through here.
+ */
+ }
+ err = request_firmware(&ctx->blob, ctx->fwname,
+ ctx->dev->dev->dev);
if (err == -ENOENT) {
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
- "Firmware file \"%s\" not found\n", ctx->fwname);
+ "Firmware file \"%s\" not found\n",
+ ctx->fwname);
return err;
} else if (err) {
snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
ctx->fwname, err);
return err;
}
- if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+ if (ctx->blob->size < sizeof(struct b43_fw_header))
goto err_format;
- hdr = (struct b43_fw_header *)(blob->data);
+ hdr = (struct b43_fw_header *)(ctx->blob->data);
switch (hdr->type) {
case B43_FW_TYPE_UCODE:
case B43_FW_TYPE_PCM:
size = be32_to_cpu(hdr->size);
- if (size != blob->size - sizeof(struct b43_fw_header))
+ if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
/* fallthrough */
case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
goto err_format;
}
- fw->data = blob;
+ fw->data = ctx->blob;
fw->filename = name;
fw->type = ctx->req_type;
@@ -2172,7 +2200,7 @@ err_format:
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
"Firmware file \"%s\" format error.\n", ctx->fwname);
- release_firmware(blob);
+ release_firmware(ctx->blob);
return -EPROTO;
}
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_ucode;
}
}
- err = b43_do_request_fw(ctx, filename, &fw->ucode);
+ err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
if (err)
goto err_load;
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_pcm;
fw->pcm_request_failed = false;
- err = b43_do_request_fw(ctx, filename, &fw->pcm);
+ err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
if (err)
goto err_load;
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
if (err)
goto err_load;
@@ -3397,7 +3425,7 @@ static void b43_tx_work(struct work_struct *work)
break;
}
if (unlikely(err))
- dev_kfree_skb(skb); /* Drop it */
+ ieee80211_free_txskb(wl->hw, skb);
err = 0;
}
@@ -3419,7 +3447,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
@@ -4229,8 +4257,12 @@ redo:
/* Drain all TX queues. */
for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
- while (skb_queue_len(&wl->tx_queue[queue_num]))
- dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ ieee80211_free_txskb(wl->hw, skb);
+ }
}
b43_mac_suspend(dev);
@@ -4652,7 +4684,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
- bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci,
+ bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
dev->dev->bdev, true);
break;
#endif
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 8c684cd33529..abac25ee958d 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
- const char *name,
- struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+ struct b43_firmware_file *fw, bool async);
void b43_do_release_fw(struct b43_firmware_file *fw);
#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 714cad649c45..f2ea2ceec8a9 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -60,7 +60,7 @@ static int b43_pcmcia_resume(struct pcmcia_device *dev)
# define b43_pcmcia_resume NULL
#endif /* CONFIG_PM */
-static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
+static int b43_pcmcia_probe(struct pcmcia_device *dev)
{
struct ssb_bus *ssb;
int err = -ENOMEM;
@@ -110,7 +110,7 @@ out_error:
return err;
}
-static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev)
+static void b43_pcmcia_remove(struct pcmcia_device *dev)
{
struct ssb_bus *ssb = dev->priv;
@@ -125,7 +125,7 @@ static struct pcmcia_driver b43_pcmcia_driver = {
.name = "b43-pcmcia",
.id_table = b43_pcmcia_tbl,
.probe = b43_pcmcia_probe,
- .remove = __devexit_p(b43_pcmcia_remove),
+ .remove = b43_pcmcia_remove,
.suspend = b43_pcmcia_suspend,
.resume = b43_pcmcia_resume,
};
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 3533ab86bd36..a73ff8c9deb5 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -196,7 +196,7 @@ static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
pack = &(q->packets[i]);
if (pack->skb) {
- dev_kfree_skb_any(pack->skb);
+ ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
pack->skb = NULL;
}
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev->wl->hw, skb);
err = 0;
goto out;
}
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index a54fb2d29089..59a521800694 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -93,7 +93,7 @@ void b43_sdio_free_irq(struct b43_wldev *dev)
sdio->irq_handler = NULL;
}
-static int __devinit b43_sdio_probe(struct sdio_func *func,
+static int b43_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct b43_sdio *sdio;
@@ -171,7 +171,7 @@ out:
return error;
}
-static void __devexit b43_sdio_remove(struct sdio_func *func)
+static void b43_sdio_remove(struct sdio_func *func)
{
struct b43_sdio *sdio = sdio_get_drvdata(func);
@@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = {
.name = "b43-sdio",
.id_table = b43_sdio_ids,
.probe = b43_sdio_probe,
- .remove = __devexit_p(b43_sdio_remove),
+ .remove = b43_sdio_remove,
};
int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 136510edf3cf..8cb206a89083 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -796,7 +796,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_MACTIME_MPDU;
+ status.flag |= RX_FLAG_MACTIME_START;
}
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index a29da674e69d..482476fdb1f3 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -13,6 +13,7 @@
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
+#include <linux/completion.h>
#include <net/mac80211.h>
@@ -733,6 +734,10 @@ struct b43legacy_wldev {
/* Firmware data */
struct b43legacy_firmware fw;
+ const struct firmware *fwp; /* needed to pass fw pointer */
+
+ /* completion struct for firmware loading */
+ struct completion fw_load_complete;
/* Devicelist in struct b43legacy_wl (all 802.11 cores) */
struct list_head list;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 18e208e3eca1..8c3f70e1a013 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1513,9 +1513,17 @@ static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
"and download the correct firmware (version 3).\n");
}
+static void b43legacy_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct b43legacy_wldev *dev = context;
+
+ dev->fwp = firmware;
+ complete(&dev->fw_load_complete);
+}
+
static int do_request_fw(struct b43legacy_wldev *dev,
const char *name,
- const struct firmware **fw)
+ const struct firmware **fw, bool async)
{
char path[sizeof(modparam_fwpostfix) + 32];
struct b43legacy_fw_header *hdr;
@@ -1528,7 +1536,24 @@ static int do_request_fw(struct b43legacy_wldev *dev,
snprintf(path, ARRAY_SIZE(path),
"b43legacy%s/%s.fw",
modparam_fwpostfix, name);
- err = request_firmware(fw, path, dev->dev->dev);
+ b43legacyinfo(dev->wl, "Loading firmware %s\n", path);
+ if (async) {
+ init_completion(&dev->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, path,
+ dev->dev->dev, GFP_KERNEL,
+ dev, b43legacy_fw_cb);
+ if (err) {
+ b43legacyerr(dev->wl, "Unable to load firmware\n");
+ return err;
+ }
+ /* stall here until fw ready */
+ wait_for_completion(&dev->fw_load_complete);
+ if (!dev->fwp)
+ err = -EINVAL;
+ *fw = dev->fwp;
+ } else {
+ err = request_firmware(fw, path, dev->dev->dev);
+ }
if (err) {
b43legacyerr(dev->wl, "Firmware file \"%s\" not found "
"or load failed.\n", path);
@@ -1580,7 +1605,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
filename = "ucode4";
else
filename = "ucode5";
- err = do_request_fw(dev, filename, &fw->ucode);
+ err = do_request_fw(dev, filename, &fw->ucode, true);
if (err)
goto err_load;
}
@@ -1589,7 +1614,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
filename = "pcm4";
else
filename = "pcm5";
- err = do_request_fw(dev, filename, &fw->pcm);
+ err = do_request_fw(dev, filename, &fw->pcm, false);
if (err)
goto err_load;
}
@@ -1607,7 +1632,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
default:
goto err_no_initvals;
}
- err = do_request_fw(dev, filename, &fw->initvals);
+ err = do_request_fw(dev, filename, &fw->initvals, false);
if (err)
goto err_load;
}
@@ -1627,7 +1652,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
default:
goto err_no_initvals;
}
- err = do_request_fw(dev, filename, &fw->initvals_band);
+ err = do_request_fw(dev, filename, &fw->initvals_band, false);
if (err)
goto err_load;
}
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index b8ffea6f5c64..849a28c80302 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -557,7 +557,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_MACTIME_MPDU;
+ status.flag |= RX_FLAG_MACTIME_START;
}
chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index c9d811eb6556..1d92d874ebb6 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,13 +55,16 @@ config BRCMFMAC_USB
IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
use the driver for a USB wireless card.
-config BRCMISCAN
- bool "Broadcom I-Scan (OBSOLETE)"
- depends on BRCMFMAC
+config BRCM_TRACING
+ bool "Broadcom device tracing"
+ depends on BRCMSMAC || BRCMFMAC
---help---
- This option enables the I-Scan method. By default fullmac uses the
- new E-Scan method which uses less memory in firmware and gives no
- limitation on the number of scan results.
+ If you say Y here, the Broadcom wireless drivers will register
+ with ftrace to dump event information into the trace ringbuffer.
+ Tracing can be enabled at runtime to aid in debugging wireless
+ issues. This option adds a small amount of overhead when tracing
+ is disabled. If unsure, say Y to allow developers to better help
+ you when wireless problems occur.
config BRCMDBG
bool "Broadcom driver debug functions"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 9d5170b6df50..1a6661a9f008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,8 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ fwil.o \
+ fweh.o \
dhd_cdc.o \
dhd_common.o \
dhd_linux.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3b2c4c20e7fc..be35a2f99b1c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -42,7 +42,8 @@
#ifdef CONFIG_BRCMFMAC_SDIO_OOB
static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
{
- struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id);
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(INTR, "oob intr triggered\n");
@@ -66,12 +67,11 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
u8 data;
unsigned long flags;
- brcmf_dbg(TRACE, "Entering\n");
+ brcmf_dbg(TRACE, "Entering: irq %d\n", sdiodev->irq);
- brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq);
ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
sdiodev->irq_flags, "brcmf_oob_intr",
- &sdiodev->func[1]->card->dev);
+ &sdiodev->func[1]->dev);
if (ret != 0)
return ret;
spin_lock_init(&sdiodev->irq_en_lock);
@@ -84,6 +84,8 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
return ret;
sdiodev->irq_wake = true;
+ sdio_claim_host(sdiodev->func[1]);
+
/* must configure SDIO_CCCR_IENx to enable irq */
data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
@@ -95,6 +97,8 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
data |= SDIO_SEPINT_ACT_HI;
brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
+ sdio_release_host(sdiodev->func[1]);
+
return 0;
}
@@ -102,14 +106,16 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
+ sdio_claim_host(sdiodev->func[1]);
brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ sdio_release_host(sdiodev->func[1]);
if (sdiodev->irq_wake) {
disable_irq_wake(sdiodev->irq);
sdiodev->irq_wake = false;
}
- free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev);
+ free_irq(sdiodev->irq, &sdiodev->func[1]->dev);
sdiodev->irq_en = false;
return 0;
@@ -117,7 +123,8 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
#else /* CONFIG_BRCMFMAC_SDIO_OOB */
static void brcmf_sdio_irqhandler(struct sdio_func *func)
{
- struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+ struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(INTR, "ib intr triggered\n");
@@ -176,7 +183,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (err) {
- brcmf_dbg(ERROR, "failed at addr:0x%0x\n",
+ brcmf_err("failed at addr:0x%0x\n",
SBSDIO_FUNC1_SBADDRLOW + i);
break;
}
@@ -238,7 +245,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret != 0)
- brcmf_dbg(ERROR, "failed with %d\n", ret);
+ brcmf_err("failed with %d\n", ret);
return ret;
}
@@ -249,9 +256,7 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
int retval;
brcmf_dbg(INFO, "addr:0x%08x\n", addr);
- sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
- sdio_release_host(sdiodev->func[1]);
brcmf_dbg(INFO, "data:0x%02x\n", data);
if (ret)
@@ -266,9 +271,7 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
int retval;
brcmf_dbg(INFO, "addr:0x%08x\n", addr);
- sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
- sdio_release_host(sdiodev->func[1]);
brcmf_dbg(INFO, "data:0x%08x\n", data);
if (ret)
@@ -283,9 +286,7 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
int retval;
brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
- sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
- sdio_release_host(sdiodev->func[1]);
if (ret)
*ret = retval;
@@ -297,9 +298,7 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
int retval;
brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
- sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
- sdio_release_host(sdiodev->func[1]);
if (ret)
*ret = retval;
@@ -340,7 +339,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
@@ -364,8 +363,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
- sdio_claim_host(sdiodev->func[1]);
-
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
@@ -376,8 +373,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
fn, addr, pkt);
done:
- sdio_release_host(sdiodev->func[1]);
-
return err;
}
@@ -391,8 +386,6 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
- sdio_claim_host(sdiodev->func[1]);
-
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
@@ -403,8 +396,6 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
pktq);
done:
- sdio_release_host(sdiodev->func[1]);
-
return err;
}
@@ -417,7 +408,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
@@ -446,8 +437,6 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
- sdio_claim_host(sdiodev->func[1]);
-
if (bar0 != sdiodev->sbwad) {
err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
if (err)
@@ -467,8 +456,6 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr, pkt);
done:
- sdio_release_host(sdiodev->func[1]);
-
return err;
}
@@ -484,7 +471,7 @@ int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
mypkt = brcmu_pkt_buf_get_skb(nbytes);
if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
return -EIO;
}
@@ -510,10 +497,8 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
brcmf_dbg(TRACE, "Enter\n");
/* issue abort cmd52 command through F0 */
- sdio_claim_host(sdiodev->func[1]);
brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
SDIO_CCCR_ABORT, &t_func);
- sdio_release_host(sdiodev->func[1]);
brcmf_dbg(TRACE, "Exit\n");
return 0;
@@ -530,13 +515,10 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
regs = SI_ENUM_BASE;
- /* Report the BAR, to fix if needed */
- sdiodev->sbwad = SI_ENUM_BASE;
-
/* try to attach to the target device */
sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
- brcmf_dbg(ERROR, "device attach failed\n");
+ brcmf_err("device attach failed\n");
ret = -ENODEV;
goto out;
}
@@ -551,6 +533,8 @@ EXPORT_SYMBOL(brcmf_sdio_probe);
int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
{
+ sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
if (sdiodev->bus) {
brcmf_sdbrcm_disconnect(sdiodev->bus);
sdiodev->bus = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3247d5b3c22..d33e5598611b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -107,15 +107,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
/* Enable Function 2 */
err_ret = sdio_enable_func(sdfunc);
if (err_ret)
- brcmf_dbg(ERROR,
- "enable F2 failed:%d\n",
+ brcmf_err("enable F2 failed:%d\n",
err_ret);
} else {
/* Disable Function 2 */
err_ret = sdio_disable_func(sdfunc);
if (err_ret)
- brcmf_dbg(ERROR,
- "Disable F2 failed:%d\n",
+ brcmf_err("Disable F2 failed:%d\n",
err_ret);
}
}
@@ -129,7 +127,7 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
kfree(sdfunc);
} else if (regaddr < 0xF0) {
- brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
+ brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
err_ret = -EPERM;
} else {
sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
@@ -166,7 +164,7 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
}
if (err_ret)
- brcmf_dbg(ERROR, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
rw ? "write" : "read", func, regaddr, *byte, err_ret);
return err_ret;
@@ -179,7 +177,7 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
int err_ret = -EIO;
if (func == 0) {
- brcmf_dbg(ERROR, "Only CMD52 allowed to F0\n");
+ brcmf_err("Only CMD52 allowed to F0\n");
return -EINVAL;
}
@@ -198,7 +196,7 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
addr, &err_ret);
else
- brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ brcmf_err("Invalid nbytes: %d\n", nbytes);
} else { /* CMD52 Read */
if (nbytes == 4)
*word = sdio_readl(sdiodev->func[func], addr, &err_ret);
@@ -206,11 +204,11 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
*word = sdio_readw(sdiodev->func[func], addr,
&err_ret) & 0xFFFF;
else
- brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ brcmf_err("Invalid nbytes: %d\n", nbytes);
}
if (err_ret)
- brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
+ brcmf_err("Failed to %s word, Err: 0x%08x\n",
rw ? "write" : "read", err_ret);
return err_ret;
@@ -270,7 +268,7 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (err_ret) {
- brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
@@ -315,7 +313,7 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
addr, pkt, pkt_len);
if (status) {
- brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
write ? "TX" : "RX", pkt, addr, pkt_len, status);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
@@ -336,7 +334,7 @@ static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
for (i = 0; i < 3; i++) {
regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
if (ret != 0)
- brcmf_dbg(ERROR, "Can't read!\n");
+ brcmf_err("Can't read!\n");
*ptr++ = (u8) regdata;
regaddr++;
@@ -372,11 +370,9 @@ static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
}
/* Enable Function 1 */
- sdio_claim_host(sdiodev->func[1]);
err_ret = sdio_enable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
if (err_ret)
- brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret);
+ brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
return false;
}
@@ -393,24 +389,23 @@ int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
sdiodev->num_funcs = 2;
sdio_claim_host(sdiodev->func[1]);
+
err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
- sdio_release_host(sdiodev->func[1]);
if (err_ret) {
- brcmf_dbg(ERROR, "Failed to set F1 blocksize\n");
+ brcmf_err("Failed to set F1 blocksize\n");
goto out;
}
- sdio_claim_host(sdiodev->func[2]);
err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
- sdio_release_host(sdiodev->func[2]);
if (err_ret) {
- brcmf_dbg(ERROR, "Failed to set F2 blocksize\n");
+ brcmf_err("Failed to set F2 blocksize\n");
goto out;
}
brcmf_sdioh_enablefuncs(sdiodev);
out:
+ sdio_release_host(sdiodev->func[1]);
brcmf_dbg(TRACE, "Done\n");
return err_ret;
}
@@ -437,7 +432,7 @@ static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
struct brcmf_sdio_oobirq *oobirq_entry;
if (list_empty(&oobirq_lh)) {
- brcmf_dbg(ERROR, "no valid oob irq resource\n");
+ brcmf_err("no valid oob irq resource\n");
return -ENXIO;
}
@@ -459,95 +454,106 @@ static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
static int brcmf_ops_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
+ const struct sdio_device_id *id)
{
- int ret = 0;
+ int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(TRACE, "func->class=%x\n", func->class);
- brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
- brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device);
- brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num);
-
- if (func->num == 1) {
- if (dev_get_drvdata(&func->card->dev)) {
- brcmf_dbg(ERROR, "card private drvdata occupied\n");
- return -ENXIO;
- }
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
- if (!bus_if)
- return -ENOMEM;
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev) {
- kfree(bus_if);
- return -ENOMEM;
- }
- sdiodev->func[0] = func;
- sdiodev->func[1] = func;
- sdiodev->bus_if = bus_if;
- bus_if->bus_priv.sdio = sdiodev;
- bus_if->type = SDIO_BUS;
- bus_if->align = BRCMF_SDALIGN;
- dev_set_drvdata(&func->card->dev, sdiodev);
-
- atomic_set(&sdiodev->suspend, false);
- init_waitqueue_head(&sdiodev->request_byte_wait);
- init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_chain_wait);
- init_waitqueue_head(&sdiodev->request_buffer_wait);
+ brcmf_dbg(TRACE, "Class=%x\n", func->class);
+ brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(TRACE, "Function#: %d\n", func->num);
+
+	/* Consume func num 1 but don't do anything with it. */
+ if (func->num == 1)
+ return 0;
+
+ /* Ignore anything but func 2 */
+ if (func->num != 2)
+ return -ENODEV;
+
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev) {
+ kfree(bus_if);
+ return -ENOMEM;
}
- if (func->num == 2) {
- sdiodev = dev_get_drvdata(&func->card->dev);
- if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
- return -ENODEV;
-
- ret = brcmf_sdio_getintrcfg(sdiodev);
- if (ret)
- return ret;
- sdiodev->func[2] = func;
+ sdiodev->func[0] = func->card->sdio_func[0];
+ sdiodev->func[1] = func->card->sdio_func[0];
+ sdiodev->func[2] = func;
- bus_if = sdiodev->bus_if;
- sdiodev->dev = &func->dev;
- dev_set_drvdata(&func->dev, bus_if);
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv.sdio = sdiodev;
+ bus_if->align = BRCMF_SDALIGN;
+ dev_set_drvdata(&func->dev, bus_if);
+ dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
+ sdiodev->dev = &sdiodev->func[1]->dev;
- brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
- ret = brcmf_sdio_probe(sdiodev);
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_byte_wait);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_chain_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+ err = brcmf_sdio_getintrcfg(sdiodev);
+ if (err)
+ goto fail;
+
+ brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
+ err = brcmf_sdio_probe(sdiodev);
+ if (err) {
+ brcmf_err("F2 error, probe failed %d...\n", err);
+ goto fail;
}
+ brcmf_dbg(TRACE, "F2 init completed...\n");
+ return 0;
- return ret;
+fail:
+ dev_set_drvdata(&func->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ kfree(sdiodev);
+ kfree(bus_if);
+ return err;
}
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
+
brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(INFO, "func->class=%x\n", func->class);
- brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor);
- brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device);
- brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
+ brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
+ brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
+ brcmf_dbg(TRACE, "Function: %d\n", func->num);
- if (func->num == 2) {
- bus_if = dev_get_drvdata(&func->dev);
+ if (func->num != 1 && func->num != 2)
+ return;
+
+ bus_if = dev_get_drvdata(&func->dev);
+ if (bus_if) {
sdiodev = bus_if->bus_priv.sdio;
- brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
- dev_set_drvdata(&func->card->dev, NULL);
- dev_set_drvdata(&func->dev, NULL);
+
+ dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
+ dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+
kfree(bus_if);
kfree(sdiodev);
}
+
+ brcmf_dbg(TRACE, "Exit\n");
}
#ifdef CONFIG_PM_SLEEP
static int brcmf_sdio_suspend(struct device *dev)
{
mmc_pm_flag_t sdio_flags;
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
int ret = 0;
brcmf_dbg(TRACE, "\n");
@@ -556,13 +562,13 @@ static int brcmf_sdio_suspend(struct device *dev)
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- brcmf_dbg(ERROR, "Host can't keep power while suspended\n");
+ brcmf_err("Host can't keep power while suspended\n");
return -EINVAL;
}
ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
if (ret) {
- brcmf_dbg(ERROR, "Failed to set pm_flags\n");
+ brcmf_err("Failed to set pm_flags\n");
return ret;
}
@@ -573,8 +579,8 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev)
{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
@@ -627,7 +633,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
ret = sdio_register_driver(&brcmf_sdmmc_driver);
if (ret)
- brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+ brcmf_err("sdio_register_driver failed: %d\n", ret);
return ret;
}
@@ -657,7 +663,7 @@ void brcmf_sdio_init(void)
ret = platform_driver_register(&brcmf_sdio_pd);
if (ret)
- brcmf_dbg(ERROR, "platform_driver_register failed: %d\n", ret);
+ brcmf_err("platform_driver_register failed: %d\n", ret);
}
#else
void brcmf_sdio_exit(void)
@@ -676,6 +682,6 @@ void brcmf_sdio_init(void)
ret = sdio_register_driver(&brcmf_sdmmc_driver);
if (ret)
- brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+ brcmf_err("sdio_register_driver failed: %d\n", ret);
}
#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
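Because probe (reworked earlier in this file) stores the same struct brcmf_bus pointer as driver data on both SDIO function devices, the suspend/resume hooks above can recover the bus state straight from the struct device they are handed instead of detouring through func->card. A small sketch of that lookup; example_get_sdiodev is an invented name and the struct definitions come from the headers changed elsewhere in this patch:

static struct brcmf_sdio_dev *example_get_sdiodev(struct device *dev)
{
	/* set with dev_set_drvdata() on func1 and func2 in brcmf_ops_sdio_probe */
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);

	return bus_if ? bus_if->bus_priv.sdio : NULL;
}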
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 17e7ae73e008..fd672bf53867 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -23,6 +23,8 @@
#define BRCMF_VERSION_STR "4.218.248.5"
+#include "fweh.h"
+
/*******************************************************************************
* IO codes that are interpreted by dongle firmware
******************************************************************************/
@@ -38,8 +40,11 @@
#define BRCMF_C_GET_SSID 25
#define BRCMF_C_SET_SSID 26
#define BRCMF_C_GET_CHANNEL 29
+#define BRCMF_C_SET_CHANNEL 30
#define BRCMF_C_GET_SRL 31
+#define BRCMF_C_SET_SRL 32
#define BRCMF_C_GET_LRL 33
+#define BRCMF_C_SET_LRL 34
#define BRCMF_C_GET_RADIO 37
#define BRCMF_C_SET_RADIO 38
#define BRCMF_C_GET_PHYTYPE 39
@@ -58,6 +63,7 @@
#define BRCMF_C_SET_COUNTRY 84
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_CURR_RATESET 114
#define BRCMF_C_GET_AP 117
#define BRCMF_C_SET_AP 118
#define BRCMF_C_GET_RSSI 127
@@ -65,6 +71,7 @@
#define BRCMF_C_SET_WSEC 134
#define BRCMF_C_GET_PHY_NOISE 135
#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_GET_PHYLIST 180
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
@@ -100,29 +107,8 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
-#define BRCMF_SCAN_ACTION_START 1
-#define BRCMF_SCAN_ACTION_CONTINUE 2
-#define WL_SCAN_ACTION_ABORT 3
-
-#define BRCMF_ISCAN_REQ_VERSION 1
-
-/* brcmf_iscan_results status values */
-#define BRCMF_SCAN_RESULTS_SUCCESS 0
-#define BRCMF_SCAN_RESULTS_PARTIAL 1
-#define BRCMF_SCAN_RESULTS_PENDING 2
-#define BRCMF_SCAN_RESULTS_ABORTED 3
-#define BRCMF_SCAN_RESULTS_NO_MEM 4
-
-/* Indicates this key is using soft encrypt */
-#define WL_SOFT_KEY (1 << 0)
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
-/* Reserved for backward compat */
-#define WL_KF_RES_4 (1 << 4)
-/* Reserved for backward compat */
-#define WL_KF_RES_5 (1 << 5)
-/* Indicates a group key for a IBSS PEER */
-#define WL_IBSS_PEER_GROUP_KEY (1 << 6)
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
@@ -130,10 +116,6 @@
#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
-#define BRCMF_EVENT_MSG_LINK 0x01
-#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
-#define BRCMF_EVENT_MSG_GROUP 0x04
-
#define BRCMF_ESCAN_REQ_VERSION 1
#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
@@ -141,108 +123,6 @@
#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
#define BRCMF_STA_ASSOC 0x10 /* Associated */
-struct brcmf_event_msg {
- __be16 version;
- __be16 flags;
- __be32 event_type;
- __be32 status;
- __be32 reason;
- __be32 auth_type;
- __be32 datalen;
- u8 addr[ETH_ALEN];
- char ifname[IFNAMSIZ];
- u8 ifidx;
- u8 bsscfgidx;
-} __packed;
-
-struct brcm_ethhdr {
- u16 subtype;
- u16 length;
- u8 version;
- u8 oui[3];
- u16 usr_subtype;
-} __packed;
-
-struct brcmf_event {
- struct ethhdr eth;
- struct brcm_ethhdr hdr;
- struct brcmf_event_msg msg;
-} __packed;
-
-/* event codes sent by the dongle to this driver */
-#define BRCMF_E_SET_SSID 0
-#define BRCMF_E_JOIN 1
-#define BRCMF_E_START 2
-#define BRCMF_E_AUTH 3
-#define BRCMF_E_AUTH_IND 4
-#define BRCMF_E_DEAUTH 5
-#define BRCMF_E_DEAUTH_IND 6
-#define BRCMF_E_ASSOC 7
-#define BRCMF_E_ASSOC_IND 8
-#define BRCMF_E_REASSOC 9
-#define BRCMF_E_REASSOC_IND 10
-#define BRCMF_E_DISASSOC 11
-#define BRCMF_E_DISASSOC_IND 12
-#define BRCMF_E_QUIET_START 13
-#define BRCMF_E_QUIET_END 14
-#define BRCMF_E_BEACON_RX 15
-#define BRCMF_E_LINK 16
-#define BRCMF_E_MIC_ERROR 17
-#define BRCMF_E_NDIS_LINK 18
-#define BRCMF_E_ROAM 19
-#define BRCMF_E_TXFAIL 20
-#define BRCMF_E_PMKID_CACHE 21
-#define BRCMF_E_RETROGRADE_TSF 22
-#define BRCMF_E_PRUNE 23
-#define BRCMF_E_AUTOAUTH 24
-#define BRCMF_E_EAPOL_MSG 25
-#define BRCMF_E_SCAN_COMPLETE 26
-#define BRCMF_E_ADDTS_IND 27
-#define BRCMF_E_DELTS_IND 28
-#define BRCMF_E_BCNSENT_IND 29
-#define BRCMF_E_BCNRX_MSG 30
-#define BRCMF_E_BCNLOST_MSG 31
-#define BRCMF_E_ROAM_PREP 32
-#define BRCMF_E_PFN_NET_FOUND 33
-#define BRCMF_E_PFN_NET_LOST 34
-#define BRCMF_E_RESET_COMPLETE 35
-#define BRCMF_E_JOIN_START 36
-#define BRCMF_E_ROAM_START 37
-#define BRCMF_E_ASSOC_START 38
-#define BRCMF_E_IBSS_ASSOC 39
-#define BRCMF_E_RADIO 40
-#define BRCMF_E_PSM_WATCHDOG 41
-#define BRCMF_E_PROBREQ_MSG 44
-#define BRCMF_E_SCAN_CONFIRM_IND 45
-#define BRCMF_E_PSK_SUP 46
-#define BRCMF_E_COUNTRY_CODE_CHANGED 47
-#define BRCMF_E_EXCEEDED_MEDIUM_TIME 48
-#define BRCMF_E_ICV_ERROR 49
-#define BRCMF_E_UNICAST_DECODE_ERROR 50
-#define BRCMF_E_MULTICAST_DECODE_ERROR 51
-#define BRCMF_E_TRACE 52
-#define BRCMF_E_IF 54
-#define BRCMF_E_RSSI 56
-#define BRCMF_E_PFN_SCAN_COMPLETE 57
-#define BRCMF_E_EXTLOG_MSG 58
-#define BRCMF_E_ACTION_FRAME 59
-#define BRCMF_E_ACTION_FRAME_COMPLETE 60
-#define BRCMF_E_PRE_ASSOC_IND 61
-#define BRCMF_E_PRE_REASSOC_IND 62
-#define BRCMF_E_CHANNEL_ADOPTED 63
-#define BRCMF_E_AP_STARTED 64
-#define BRCMF_E_DFS_AP_STOP 65
-#define BRCMF_E_DFS_AP_RESUME 66
-#define BRCMF_E_RESERVED1 67
-#define BRCMF_E_RESERVED2 68
-#define BRCMF_E_ESCAN_RESULT 69
-#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
-#define BRCMF_E_DCS_REQUEST 73
-
-#define BRCMF_E_FIFO_CREDIT_MAP 74
-
-#define BRCMF_E_LAST 75
-
#define BRCMF_E_STATUS_SUCCESS 0
#define BRCMF_E_STATUS_FAIL 1
#define BRCMF_E_STATUS_TIMEOUT 2
@@ -318,6 +198,12 @@ struct brcmf_event {
#define BRCMF_E_LINK_ASSOC_REC 3
#define BRCMF_E_LINK_BSSCFG_DIS 4
+/* Small, medium and maximum buffer size for dcmd
+ */
+#define BRCMF_DCMD_SMLEN 256
+#define BRCMF_DCMD_MEDLEN 1536
+#define BRCMF_DCMD_MAXLEN 8192
+
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -397,7 +283,7 @@ struct brcm_rateset_le {
/* # rates in this set */
__le32 count;
/* rates in 500kbps units w/hi bit set if basic */
- u8 rates[WL_NUMRATES];
+ u8 rates[BRCMF_MAXRATES_IN_SET];
};
struct brcmf_ssid {
@@ -446,14 +332,6 @@ struct brcmf_scan_params_le {
__le16 channel_list[1]; /* list of chanspecs */
};
-/* incremental scan struct */
-struct brcmf_iscan_params_le {
- __le32 version;
- __le16 action;
- __le16 scan_duration;
- struct brcmf_scan_params_le params_le;
-};
-
struct brcmf_scan_results {
u32 buflen;
u32 version;
@@ -461,12 +339,6 @@ struct brcmf_scan_results {
struct brcmf_bss_info_le bss_info_le[];
};
-struct brcmf_scan_results_le {
- __le32 buflen;
- __le32 version;
- __le32 count;
-};
-
struct brcmf_escan_params_le {
__le32 version;
__le16 action;
@@ -502,23 +374,6 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
-/* incremental scan results struct */
-struct brcmf_iscan_results {
- union {
- u32 status;
- __le32 status_le;
- };
- union {
- struct brcmf_scan_results results;
- struct brcmf_scan_results_le results_le;
- };
-};
-
-/* size of brcmf_iscan_results not including variable length array */
-#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
- (sizeof(struct brcmf_scan_results) + \
- offsetof(struct brcmf_iscan_results, results))
-
struct brcmf_wsec_key {
u32 index; /* key index */
u32 len; /* key length */
@@ -615,7 +470,6 @@ struct brcmf_pub {
struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
struct brcmf_cfg80211_info *config;
- struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
@@ -623,7 +477,6 @@ struct brcmf_pub {
u8 wme_dp; /* wme discard priority */
/* Dongle media info */
- bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
@@ -651,26 +504,26 @@ struct brcmf_pub {
int in_suspend; /* flag set to 1 when early suspend called */
int dtim_skip; /* dtim skip , default 0 means wake each dtim */
- /* Pkt filter defination */
- char *pktfilter[100];
- int pktfilter_count;
-
- u8 country_code[BRCM_CNTRY_BUF_SZ];
- char eventmask[BRCMF_EVENTING_MASK_LEN];
-
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
+ unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- struct work_struct setmacaddr_work;
- struct work_struct multicast_work;
u8 macvalue[ETH_ALEN];
atomic_t pend_8021x_cnt;
+ wait_queue_head_t pend_8021x_wait;
+
+ struct brcmf_fweh_info fweh;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
};
+struct bcmevent_name {
+ uint event;
+ const char *name;
+};
+
struct brcmf_if_event {
u8 ifidx;
u8 action;
@@ -678,47 +531,54 @@ struct brcmf_if_event {
u8 bssidx;
};
-struct bcmevent_name {
- uint event;
- const char *name;
+/* forward declaration */
+struct brcmf_cfg80211_vif;
+
+/**
+ * struct brcmf_if - interface control information.
+ *
+ * @drvr: points to device related information.
+ * @vif: points to cfg80211 specific interface information.
+ * @ndev: associated network device.
+ * @stats: interface specific network statistics.
+ * @idx: interface index in device firmware.
+ * @bssidx: index of bss associated with this interface.
+ * @mac_addr: assigned mac address.
+ */
+struct brcmf_if {
+ struct brcmf_pub *drvr;
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ int idx;
+ s32 bssidx;
+ u8 mac_addr[ETH_ALEN];
};
-extern const struct bcmevent_name bcmevent_names[];
+static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ return ifp->bssidx;
+}
-extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
- char *buf, uint len);
-extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
- char *buf, uint buflen, s32 bssidx);
+extern const struct bcmevent_name bcmevent_names[];
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
-extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
-
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Query dongle */
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
+extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len);
-#ifdef DEBUG
-extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
-#endif /* DEBUG */
-
-extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
-extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
- void *pktdata, struct brcmf_event_msg *,
- void **data_ptr);
-
+extern int brcmf_net_attach(struct brcmf_if *ifp);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
+ s32 bssidx, char *name, u8 *mac_addr);
extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
-extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
-extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
- int enable, int master_mode);
-
-#define BRCMF_DCMD_SMLEN 256 /* "small" cmd buffer required */
-#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
-#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
-
#endif /* _BRCMF_H_ */
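The new struct brcmf_if doubles as the net_device private area, which is what the brcmf_ndev_bssidx() helper above relies on. A short usage sketch, assuming ndev was set up by the (not shown) brcmf_add_if()/brcmf_net_attach() path:

	struct brcmf_if *ifp = netdev_priv(ndev);	/* per-interface state */

	brcmf_dbg(TRACE, "ifidx %d bssidx %d mac %pM\n",
		  ifp->idx, brcmf_ndev_bssidx(ndev), ifp->mac_addr);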
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 9b8ee19ea55d..dd38b78a9726 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -43,37 +43,90 @@ struct brcmf_bus_dcmd {
struct list_head list;
};
-/* interface structure between common and bus layer */
+/**
+ * struct brcmf_bus_ops - bus callback operations.
+ *
+ * @init: prepare for communication with dongle.
+ * @stop: clear pending frames, disable data flow.
+ * @txdata: send a data frame to the dongle (callee disposes skb).
+ * @txctl: transmit a control request message to dongle.
+ * @rxctl: receive a control response message from dongle.
+ *
+ * This structure provides an abstract interface towards the
+ * bus-specific driver. For control messages the common driver
+ * will ensure there is only one active transaction.
+ */
+struct brcmf_bus_ops {
+ int (*init)(struct device *dev);
+ void (*stop)(struct device *dev);
+ int (*txdata)(struct device *dev, struct sk_buff *skb);
+ int (*txctl)(struct device *dev, unsigned char *msg, uint len);
+ int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
+};
+
+/**
+ * struct brcmf_bus - interface structure between common and bus layer
+ *
+ * @bus_priv: pointer to private bus device.
+ * @dev: device pointer of bus device.
+ * @drvr: public driver information.
+ * @state: operational state of the bus interface.
+ * @maxctl: maximum size for rxctl request message.
+ * @drvr_up: indicates driver up/down status.
+ * @tx_realloc: number of tx packets realloced for headroom.
+ * @dstats: dongle-based statistical data.
+ * @align: alignment requirement for the bus.
+ * @dcmd_list: bus/device specific dongle initialization commands.
+ */
struct brcmf_bus {
- u8 type; /* bus type */
union {
struct brcmf_sdio_dev *sdio;
struct brcmf_usbdev *usb;
} bus_priv;
- struct brcmf_pub *drvr; /* pointer to driver pub structure brcmf_pub */
+ struct device *dev;
+ struct brcmf_pub *drvr;
enum brcmf_bus_state state;
- uint maxctl; /* Max size rxctl request from proto to bus */
- bool drvr_up; /* Status flag of driver up/down */
- unsigned long tx_realloc; /* Tx packets realloced for headroom */
- struct dngl_stats dstats; /* Stats for dongle-based data */
- u8 align; /* bus alignment requirement */
+ uint maxctl;
+ bool drvr_up;
+ unsigned long tx_realloc;
+ struct dngl_stats dstats;
+ u8 align;
struct list_head dcmd_list;
- /* interface functions pointers */
- /* Stop bus module: clear pending frames, disable data flow */
- void (*brcmf_bus_stop)(struct device *);
- /* Initialize bus module: prepare for communication w/dongle */
- int (*brcmf_bus_init)(struct device *);
- /* Send a data frame to the dongle. Callee disposes of txp. */
- int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
- /* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
- int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
- int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
+ struct brcmf_bus_ops *ops;
};
/*
+ * callback wrappers
+ */
+static inline int brcmf_bus_init(struct brcmf_bus *bus)
+{
+ return bus->ops->init(bus->dev);
+}
+
+static inline void brcmf_bus_stop(struct brcmf_bus *bus)
+{
+ bus->ops->stop(bus->dev);
+}
+
+static inline int brcmf_bus_txdata(struct brcmf_bus *bus, struct sk_buff *skb)
+{
+ return bus->ops->txdata(bus->dev, skb);
+}
+
+static inline
+int brcmf_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
+{
+ return bus->ops->txctl(bus->dev, msg, len);
+}
+
+static inline
+int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
+{
+ return bus->ops->rxctl(bus->dev, msg, len);
+}
+
+/*
* interface functions from common layer
*/
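With the function pointers moved out of struct brcmf_bus into struct brcmf_bus_ops, a bus back end (SDIO or USB) fills in one ops table and the common layer reaches it only through the inline wrappers above. A hedged sketch of what such a back end would supply; the example_* names are invented, the signatures are the ones declared in this hunk:

static int example_bus_init(struct device *dev) { return 0; }
static void example_bus_stop(struct device *dev) { }
static int example_bus_txdata(struct device *dev, struct sk_buff *skb) { return 0; }
static int example_bus_txctl(struct device *dev, unsigned char *msg, uint len) { return 0; }
static int example_bus_rxctl(struct device *dev, unsigned char *msg, uint len) { return 0; }

static struct brcmf_bus_ops example_bus_ops = {
	.init	= example_bus_init,
	.stop	= example_bus_stop,
	.txdata	= example_bus_txdata,
	.txctl	= example_bus_txctl,
	.rxctl	= example_bus_rxctl,
};

In its probe the back end would point bus_if->ops at this table and set bus_if->dev, after which the common layer only ever calls brcmf_bus_init(bus_if), brcmf_bus_txdata(bus_if, skb) and friends.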
@@ -85,7 +138,7 @@ extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct device *dev, int ifidx,
+extern void brcmf_rx_frame(struct device *dev, u8 ifidx,
struct sk_buff_head *rxlist);
static inline void brcmf_rx_packet(struct device *dev, int ifidx,
struct sk_buff *pkt)
@@ -111,9 +164,6 @@ extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
extern int brcmf_bus_start(struct device *dev);
-extern int brcmf_add_if(struct device *dev, int ifidx,
- char *name, u8 *mac_addr);
-
#ifdef CONFIG_BRCMFMAC_SDIO
extern void brcmf_sdio_exit(void);
extern void brcmf_sdio_init(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a5c15cac5e7d..83923553f1ac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -23,8 +23,6 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <defs.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -119,9 +117,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return drvr->bus_if->brcmf_bus_txctl(drvr->dev,
- (unsigned char *)&prot->msg,
- len);
+ return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&prot->msg, len);
}
static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -130,11 +126,10 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
struct brcmf_proto *prot = drvr->prot;
brcmf_dbg(TRACE, "Enter\n");
-
+ len += sizeof(struct brcmf_proto_cdc_dcmd);
do {
- ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev,
- (unsigned char *)&prot->msg,
- len + sizeof(struct brcmf_proto_cdc_dcmd));
+ ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
+ len);
if (ret < 0)
break;
} while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
@@ -181,7 +176,7 @@ brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
ret = brcmf_proto_cdc_msg(drvr);
if (ret < 0) {
- brcmf_dbg(ERROR, "brcmf_proto_cdc_msg failed w/status %d\n",
+ brcmf_err("brcmf_proto_cdc_msg failed w/status %d\n",
ret);
goto done;
}
@@ -198,7 +193,7 @@ retry:
if ((id < prot->reqid) && (++retries < RETRIES))
goto retry;
if (id != prot->reqid) {
- brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
brcmf_ifname(drvr, ifidx), id, prot->reqid);
ret = -EINVAL;
goto done;
@@ -260,7 +255,7 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
if (id != prot->reqid) {
- brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n",
+ brcmf_err("%s: unexpected request id %d (expected %d)\n",
brcmf_ifname(drvr, ifidx), id, prot->reqid);
ret = -EINVAL;
goto done;
@@ -277,76 +272,6 @@ done:
return ret;
}
-int
-brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
- int len)
-{
- struct brcmf_proto *prot = drvr->prot;
- int ret = -1;
-
- if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
- brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
- return ret;
- }
- mutex_lock(&drvr->proto_block);
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (len > BRCMF_DCMD_MAXLEN)
- goto done;
-
- if (prot->pending == true) {
- brcmf_dbg(TRACE, "CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
- dcmd->cmd, (unsigned long)dcmd->cmd, prot->lastcmd,
- (unsigned long)prot->lastcmd);
- if (dcmd->cmd == BRCMF_C_SET_VAR ||
- dcmd->cmd == BRCMF_C_GET_VAR)
- brcmf_dbg(TRACE, "iovar cmd=%s\n", (char *)dcmd->buf);
-
- goto done;
- }
-
- prot->pending = true;
- prot->lastcmd = dcmd->cmd;
- if (dcmd->set)
- ret = brcmf_proto_cdc_set_dcmd(drvr, ifidx, dcmd->cmd,
- dcmd->buf, len);
- else {
- ret = brcmf_proto_cdc_query_dcmd(drvr, ifidx, dcmd->cmd,
- dcmd->buf, len);
- if (ret > 0)
- dcmd->used = ret -
- sizeof(struct brcmf_proto_cdc_dcmd);
- }
-
- if (ret >= 0)
- ret = 0;
- else {
- struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
- /* len == needed when set/query fails from dongle */
- dcmd->needed = le32_to_cpu(msg->len);
- }
-
- /* Intercept the wme_dp dongle cmd here */
- if (!ret && dcmd->cmd == BRCMF_C_SET_VAR &&
- !strcmp(dcmd->buf, "wme_dp")) {
- int slen;
- __le32 val = 0;
-
- slen = strlen("wme_dp") + 1;
- if (len >= (int)(slen + sizeof(int)))
- memcpy(&val, (char *)dcmd->buf + slen, sizeof(int));
- drvr->wme_dp = (u8) le32_to_cpu(val);
- }
-
- prot->pending = false;
-
-done:
- mutex_unlock(&drvr->proto_block);
-
- return ret;
-}
-
static bool pkt_sum_needed(struct sk_buff *skb)
{
return skb->ip_summed == CHECKSUM_PARTIAL;
@@ -392,7 +317,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
/* Pop BDC header used to convey priority for buses that don't */
if (pktbuf->len < BDC_HEADER_LEN) {
- brcmf_dbg(ERROR, "rx data too short (%d < %d)\n",
+ brcmf_err("rx data too short (%d < %d)\n",
pktbuf->len, BDC_HEADER_LEN);
return -EBADE;
}
@@ -401,13 +326,13 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
*ifidx = BDC_GET_IF_IDX(h);
if (*ifidx >= BRCMF_MAX_IFS) {
- brcmf_dbg(ERROR, "rx data ifnum out of range (%d)\n", *ifidx);
+ brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
return -EBADE;
}
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
BDC_PROTO_VER) {
- brcmf_dbg(ERROR, "%s: non-BDC packet received, flags 0x%x\n",
+ brcmf_err("%s: non-BDC packet received, flags 0x%x\n",
brcmf_ifname(drvr, *ifidx), h->flags);
return -EBADE;
}
@@ -436,7 +361,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
/* ensure that the msg buf directly follows the cdc msg struct */
if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
- brcmf_dbg(ERROR, "struct brcmf_proto is not correctly defined\n");
+ brcmf_err("struct brcmf_proto is not correctly defined\n");
goto fail;
}
@@ -458,35 +383,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
drvr->prot = NULL;
}
-int brcmf_proto_init(struct brcmf_pub *drvr)
-{
- int ret = 0;
- char buf[128];
-
- brcmf_dbg(TRACE, "Enter\n");
-
- mutex_lock(&drvr->proto_block);
-
- /* Get the device MAC address */
- strcpy(buf, "cur_etheraddr");
- ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
- buf, sizeof(buf));
- if (ret < 0) {
- mutex_unlock(&drvr->proto_block);
- return ret;
- }
- memcpy(drvr->mac, buf, ETH_ALEN);
-
- mutex_unlock(&drvr->proto_block);
-
- ret = brcmf_c_preinit_dcmds(drvr);
-
- /* Always assumes wl for now */
- drvr->iswl = true;
-
- return ret;
-}
-
void brcmf_proto_stop(struct brcmf_pub *drvr)
{
/* Nothing to do for CDC */
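With brcmf_proto_dcmd() and brcmf_proto_init() removed, brcmf_proto_cdc_query_dcmd() and brcmf_proto_cdc_set_dcmd() are the remaining CDC entry points; in this series they are presumably driven by the new firmware-interface layer (fwil) rather than called directly by the network code. Purely as a shape sketch under that assumption, a query now amounts to:

	mutex_lock(&drvr->proto_block);		/* one control transaction at a time */
	err = brcmf_proto_cdc_query_dcmd(drvr, ifidx, BRCMF_C_GET_VAR,
					 drvr->proto_buf, sizeof(drvr->proto_buf));
	mutex_unlock(&drvr->proto_block);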
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 15c5db5752d1..f8b52e5b941a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -18,28 +18,21 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/sched.h>
#include <linux/netdevice.h>
-#include <asm/unaligned.h>
-#include <defs.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
+#include "fwil.h"
-#define BRCM_OUI "\x00\x10\x18"
-#define DOT11_OUI_LEN 3
-#define BCMILCP_BCM_SUBTYPE_EVENT 1
-#define PKTFILTER_BUF_SIZE 2048
+#define PKTFILTER_BUF_SIZE 128
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
-
-#define MSGTRACE_VERSION 1
-
-#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
-#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
- offsetof(struct brcmf_pkt_filter_pattern_le, mask_and_pattern)
+#define BRCMF_DEFAULT_BCN_TIMEOUT 3
+#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
+#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
+#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
#ifdef DEBUG
static const char brcmf_version[] =
@@ -50,89 +43,6 @@ static const char brcmf_version[] =
"Dongle Host Driver, version " BRCMF_VERSION_STR;
#endif
-/* Message trace header */
-struct msgtrace_hdr {
- u8 version;
- u8 spare;
- __be16 len; /* Len of the trace */
- __be32 seqnum; /* Sequence number of message. Useful
- * if the messsage has been lost
- * because of DMA error or a bus reset
- * (ex: SDIO Func2)
- */
- __be32 discarded_bytes; /* Number of discarded bytes because of
- trace overflow */
- __be32 discarded_printf; /* Number of discarded printf
- because of trace overflow */
-} __packed;
-
-
-uint
-brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
-{
- uint len;
-
- len = strlen(name) + 1;
-
- if ((len + datalen) > buflen)
- return 0;
-
- strncpy(buf, name, buflen);
-
- /* append data onto the end of the name string */
- if (data && datalen) {
- memcpy(&buf[len], data, datalen);
- len += datalen;
- }
-
- return len;
-}
-
-uint
-brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
- char *buf, uint buflen, s32 bssidx)
-{
- const s8 *prefix = "bsscfg:";
- s8 *p;
- u32 prefixlen;
- u32 namelen;
- u32 iolen;
- __le32 bssidx_le;
-
- if (bssidx == 0)
- return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
-
- prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
- namelen = (u32) strlen(name) + 1; /* lengh of iovar name + null */
- iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
-
- if (buflen < 0 || iolen > (u32)buflen) {
- brcmf_dbg(ERROR, "buffer is too short\n");
- return 0;
- }
-
- p = buf;
-
- /* copy prefix, no null */
- memcpy(p, prefix, prefixlen);
- p += prefixlen;
-
- /* copy iovar name including null */
- memcpy(p, name, namelen);
- p += namelen;
-
- /* bss config index as first data */
- bssidx_le = cpu_to_le32(bssidx);
- memcpy(p, &bssidx_le, sizeof(bssidx_le));
- p += sizeof(bssidx_le);
-
- /* parameter buffer follows */
- if (datalen)
- memcpy(p, data, datalen);
-
- return iolen;
-
-}
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
@@ -170,7 +80,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
brcmu_pktq_pdeq_tail(q, eprec);
if (p == NULL)
- brcmf_dbg(ERROR, "brcmu_pktq_penq() failed, oldest %d\n",
+ brcmf_err("brcmu_pktq_penq() failed, oldest %d\n",
discard_oldest);
brcmu_pkt_buf_free_skb(p);
@@ -179,415 +89,22 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
/* Enqueue */
p = brcmu_pktq_penq(q, prec, pkt);
if (p == NULL)
- brcmf_dbg(ERROR, "brcmu_pktq_penq() failed\n");
+ brcmf_err("brcmu_pktq_penq() failed\n");
return p != NULL;
}
-#ifdef DEBUG
-static void
-brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
-{
- uint i, status, reason;
- bool group = false, flush_txq = false, link = false;
- char *auth_str, *event_name;
- unsigned char *buf;
- char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
- static struct {
- uint event;
- char *event_name;
- } event_names[] = {
- {
- BRCMF_E_SET_SSID, "SET_SSID"}, {
- BRCMF_E_JOIN, "JOIN"}, {
- BRCMF_E_START, "START"}, {
- BRCMF_E_AUTH, "AUTH"}, {
- BRCMF_E_AUTH_IND, "AUTH_IND"}, {
- BRCMF_E_DEAUTH, "DEAUTH"}, {
- BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
- BRCMF_E_ASSOC, "ASSOC"}, {
- BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
- BRCMF_E_REASSOC, "REASSOC"}, {
- BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
- BRCMF_E_DISASSOC, "DISASSOC"}, {
- BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
- BRCMF_E_QUIET_START, "START_QUIET"}, {
- BRCMF_E_QUIET_END, "END_QUIET"}, {
- BRCMF_E_BEACON_RX, "BEACON_RX"}, {
- BRCMF_E_LINK, "LINK"}, {
- BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
- BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
- BRCMF_E_ROAM, "ROAM"}, {
- BRCMF_E_TXFAIL, "TXFAIL"}, {
- BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
- BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
- BRCMF_E_PRUNE, "PRUNE"}, {
- BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
- BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
- BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
- BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
- BRCMF_E_DELTS_IND, "DELTS_IND"}, {
- BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
- BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
- BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
- BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
- BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
- BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
- BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
- BRCMF_E_JOIN_START, "JOIN_START"}, {
- BRCMF_E_ROAM_START, "ROAM_START"}, {
- BRCMF_E_ASSOC_START, "ASSOC_START"}, {
- BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
- BRCMF_E_RADIO, "RADIO"}, {
- BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
- BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
- BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
- BRCMF_E_PSK_SUP, "PSK_SUP"}, {
- BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
- BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
- BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
- BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
- BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
- BRCMF_E_TRACE, "TRACE"}, {
- BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
- BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
- BRCMF_E_IF, "IF"}, {
- BRCMF_E_RSSI, "RSSI"}, {
- BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
- BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
- };
- uint event_type, flags, auth_type, datalen;
- static u32 seqnum_prev;
- struct msgtrace_hdr hdr;
- u32 nblost;
- char *s, *p;
-
- event_type = be32_to_cpu(event->event_type);
- flags = be16_to_cpu(event->flags);
- status = be32_to_cpu(event->status);
- reason = be32_to_cpu(event->reason);
- auth_type = be32_to_cpu(event->auth_type);
- datalen = be32_to_cpu(event->datalen);
- /* debug dump of event messages */
- sprintf(eabuf, "%pM", event->addr);
-
- event_name = "UNKNOWN";
- for (i = 0; i < ARRAY_SIZE(event_names); i++) {
- if (event_names[i].event == event_type)
- event_name = event_names[i].event_name;
- }
-
- brcmf_dbg(EVENT, "EVENT: %s, event ID = %d\n", event_name, event_type);
- brcmf_dbg(EVENT, "flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
- flags, status, reason, auth_type, eabuf);
-
- if (flags & BRCMF_EVENT_MSG_LINK)
- link = true;
- if (flags & BRCMF_EVENT_MSG_GROUP)
- group = true;
- if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
- flush_txq = true;
-
- switch (event_type) {
- case BRCMF_E_START:
- case BRCMF_E_DEAUTH:
- case BRCMF_E_DISASSOC:
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
- break;
-
- case BRCMF_E_ASSOC_IND:
- case BRCMF_E_REASSOC_IND:
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
- break;
-
- case BRCMF_E_ASSOC:
- case BRCMF_E_REASSOC:
- if (status == BRCMF_E_STATUS_SUCCESS)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, SUCCESS\n",
- event_name, eabuf);
- else if (status == BRCMF_E_STATUS_TIMEOUT)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, TIMEOUT\n",
- event_name, eabuf);
- else if (status == BRCMF_E_STATUS_FAIL)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
- event_name, eabuf, (int)reason);
- else
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, unexpected status %d\n",
- event_name, eabuf, (int)status);
- break;
-
- case BRCMF_E_DEAUTH_IND:
- case BRCMF_E_DISASSOC_IND:
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, reason %d\n",
- event_name, eabuf, (int)reason);
- break;
-
- case BRCMF_E_AUTH:
- case BRCMF_E_AUTH_IND:
- if (auth_type == WLAN_AUTH_OPEN)
- auth_str = "Open System";
- else if (auth_type == WLAN_AUTH_SHARED_KEY)
- auth_str = "Shared Key";
- else {
- sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
- auth_str = err_msg;
- }
- if (event_type == BRCMF_E_AUTH_IND)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s\n",
- event_name, eabuf, auth_str);
- else if (status == BRCMF_E_STATUS_SUCCESS)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, SUCCESS\n",
- event_name, eabuf, auth_str);
- else if (status == BRCMF_E_STATUS_TIMEOUT)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
- event_name, eabuf, auth_str);
- else if (status == BRCMF_E_STATUS_FAIL) {
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
- event_name, eabuf, auth_str, (int)reason);
- }
-
- break;
-
- case BRCMF_E_JOIN:
- case BRCMF_E_ROAM:
- case BRCMF_E_SET_SSID:
- if (status == BRCMF_E_STATUS_SUCCESS)
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n",
- event_name, eabuf);
- else if (status == BRCMF_E_STATUS_FAIL)
- brcmf_dbg(EVENT, "MACEVENT: %s, failed\n", event_name);
- else if (status == BRCMF_E_STATUS_NO_NETWORKS)
- brcmf_dbg(EVENT, "MACEVENT: %s, no networks found\n",
- event_name);
- else
- brcmf_dbg(EVENT, "MACEVENT: %s, unexpected status %d\n",
- event_name, (int)status);
- break;
-
- case BRCMF_E_BEACON_RX:
- if (status == BRCMF_E_STATUS_SUCCESS)
- brcmf_dbg(EVENT, "MACEVENT: %s, SUCCESS\n", event_name);
- else if (status == BRCMF_E_STATUS_FAIL)
- brcmf_dbg(EVENT, "MACEVENT: %s, FAIL\n", event_name);
- else
- brcmf_dbg(EVENT, "MACEVENT: %s, status %d\n",
- event_name, status);
- break;
-
- case BRCMF_E_LINK:
- brcmf_dbg(EVENT, "MACEVENT: %s %s\n",
- event_name, link ? "UP" : "DOWN");
- break;
-
- case BRCMF_E_MIC_ERROR:
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
- event_name, eabuf, group, flush_txq);
- break;
-
- case BRCMF_E_ICV_ERROR:
- case BRCMF_E_UNICAST_DECODE_ERROR:
- case BRCMF_E_MULTICAST_DECODE_ERROR:
- brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
- break;
-
- case BRCMF_E_TXFAIL:
- brcmf_dbg(EVENT, "MACEVENT: %s, RA %s\n", event_name, eabuf);
- break;
-
- case BRCMF_E_SCAN_COMPLETE:
- case BRCMF_E_PMKID_CACHE:
- brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
- break;
-
- case BRCMF_E_ESCAN_RESULT:
- brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
- datalen = 0;
- break;
-
- case BRCMF_E_PFN_NET_FOUND:
- case BRCMF_E_PFN_NET_LOST:
- case BRCMF_E_PFN_SCAN_COMPLETE:
- brcmf_dbg(EVENT, "PNOEVENT: %s\n", event_name);
- break;
-
- case BRCMF_E_PSK_SUP:
- case BRCMF_E_PRUNE:
- brcmf_dbg(EVENT, "MACEVENT: %s, status %d, reason %d\n",
- event_name, (int)status, (int)reason);
- break;
-
- case BRCMF_E_TRACE:
- buf = (unsigned char *) event_data;
- memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
-
- if (hdr.version != MSGTRACE_VERSION) {
- brcmf_dbg(ERROR,
- "MACEVENT: %s [unsupported version --> brcmf"
- " version:%d dongle version:%d]\n",
- event_name, MSGTRACE_VERSION, hdr.version);
- /* Reset datalen to avoid display below */
- datalen = 0;
- break;
- }
-
- /* There are 2 bytes available at the end of data */
- *(buf + sizeof(struct msgtrace_hdr)
- + be16_to_cpu(hdr.len)) = '\0';
-
- if (be32_to_cpu(hdr.discarded_bytes)
- || be32_to_cpu(hdr.discarded_printf))
- brcmf_dbg(ERROR,
- "WLC_E_TRACE: [Discarded traces in dongle -->"
- " discarded_bytes %d discarded_printf %d]\n",
- be32_to_cpu(hdr.discarded_bytes),
- be32_to_cpu(hdr.discarded_printf));
-
- nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
- if (nblost > 0)
- brcmf_dbg(ERROR, "WLC_E_TRACE: [Event lost --> seqnum "
- " %d nblost %d\n", be32_to_cpu(hdr.seqnum),
- nblost);
- seqnum_prev = be32_to_cpu(hdr.seqnum);
-
- /* Display the trace buffer. Advance from \n to \n to
- * avoid display big
- * printf (issue with Linux printk )
- */
- p = (char *)&buf[sizeof(struct msgtrace_hdr)];
- while ((s = strstr(p, "\n")) != NULL) {
- *s = '\0';
- pr_debug("%s\n", p);
- p = s + 1;
- }
- pr_debug("%s\n", p);
-
- /* Reset datalen to avoid display below */
- datalen = 0;
- break;
-
- case BRCMF_E_RSSI:
- brcmf_dbg(EVENT, "MACEVENT: %s %d\n",
- event_name, be32_to_cpu(*((__be32 *)event_data)));
- break;
-
- default:
- brcmf_dbg(EVENT,
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, "
- "auth %d\n", event_name, event_type, eabuf,
- (int)status, (int)reason, (int)auth_type);
- break;
- }
-
- /* show any appended data */
- brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
-}
-#endif /* DEBUG */
-
-int
-brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
- struct brcmf_event_msg *event, void **data_ptr)
-{
- /* check whether packet is a BRCM event pkt */
- struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
- struct brcmf_if_event *ifevent;
- char *event_data;
- u32 type, status;
- u16 flags;
- int evlen;
-
- if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
- brcmf_dbg(ERROR, "mismatched OUI, bailing\n");
- return -EBADE;
- }
-
- /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
- if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
- BCMILCP_BCM_SUBTYPE_EVENT) {
- brcmf_dbg(ERROR, "mismatched subtype, bailing\n");
- return -EBADE;
- }
-
- *data_ptr = &pvt_data[1];
- event_data = *data_ptr;
-
- /* memcpy since BRCM event pkt may be unaligned. */
- memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
-
- type = get_unaligned_be32(&event->event_type);
- flags = get_unaligned_be16(&event->flags);
- status = get_unaligned_be32(&event->status);
- evlen = get_unaligned_be32(&event->datalen) +
- sizeof(struct brcmf_event);
-
- switch (type) {
- case BRCMF_E_IF:
- ifevent = (struct brcmf_if_event *) event_data;
- brcmf_dbg(TRACE, "if event\n");
-
- if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
- if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr->dev, ifevent->ifidx,
- event->ifname,
- pvt_data->eth.h_dest);
- else
- brcmf_del_if(drvr, ifevent->ifidx);
- } else {
- brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
- ifevent->ifidx, event->ifname);
- }
-
- /* send up the if event: btamp user needs it */
- *ifidx = brcmf_ifname2idx(drvr, event->ifname);
- break;
-
- /* These are what external supplicant/authenticator wants */
- case BRCMF_E_LINK:
- case BRCMF_E_ASSOC_IND:
- case BRCMF_E_REASSOC_IND:
- case BRCMF_E_DISASSOC_IND:
- case BRCMF_E_MIC_ERROR:
- default:
- /* Fall through: this should get _everything_ */
-
- *ifidx = brcmf_ifname2idx(drvr, event->ifname);
- brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
- type, flags, status);
-
- /* put it back to BRCMF_E_NDIS_LINK */
- if (type == BRCMF_E_NDIS_LINK) {
- u32 temp1;
- __be32 temp2;
-
- temp1 = get_unaligned_be32(&event->event_type);
- brcmf_dbg(TRACE, "Converted to WLC_E_LINK type %d\n",
- temp1);
-
- temp2 = cpu_to_be32(BRCMF_E_NDIS_LINK);
- memcpy((void *)(&pvt_data->msg.event_type), &temp2,
- sizeof(pvt_data->msg.event_type));
- }
- break;
- }
-
-#ifdef DEBUG
- if (BRCMF_EVENT_ON())
- brcmf_c_show_host_event(event, event_data);
-#endif /* DEBUG */
-
- return 0;
-}
-
/* Convert user's input in hex pattern to byte-size mask */
static int brcmf_c_pattern_atoh(char *src, char *dst)
{
int i;
if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- brcmf_dbg(ERROR, "Mask invalid format. Needs to start with 0x\n");
+ brcmf_err("Mask invalid format. Needs to start with 0x\n");
return -EINVAL;
}
src = src + 2; /* Skip past 0x */
if (strlen(src) % 2 != 0) {
- brcmf_dbg(ERROR, "Mask invalid format. Length must be even.\n");
+ brcmf_err("Mask invalid format. Length must be even.\n");
return -EINVAL;
}
for (i = 0; *src != '\0'; i++) {
@@ -603,90 +120,57 @@ static int brcmf_c_pattern_atoh(char *src, char *dst)
return i;
}
-void
-brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
- int master_mode)
+static void
+brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
+ int master_mode)
{
unsigned long res;
- char *argv[8];
- int i = 0;
- const char *str;
- int buf_len;
- int str_len;
+ char *argv;
char *arg_save = NULL, *arg_org = NULL;
- int rc;
- char buf[128];
+ s32 err;
struct brcmf_pkt_filter_enable_le enable_parm;
- struct brcmf_pkt_filter_enable_le *pkt_filterp;
- __le32 mmode_le;
- arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
+ arg_save = kstrdup(arg, GFP_ATOMIC);
if (!arg_save)
goto fail;
arg_org = arg_save;
- memcpy(arg_save, arg, strlen(arg) + 1);
- argv[i] = strsep(&arg_save, " ");
+ argv = strsep(&arg_save, " ");
- i = 0;
- if (NULL == argv[i]) {
- brcmf_dbg(ERROR, "No args provided\n");
+ if (argv == NULL) {
+ brcmf_err("No args provided\n");
goto fail;
}
- str = "pkt_filter_enable";
- str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter_enable_le *) (buf + str_len + 1);
-
/* Parse packet filter id. */
enable_parm.id = 0;
- if (!kstrtoul(argv[i], 0, &res))
+ if (!kstrtoul(argv, 0, &res))
enable_parm.id = cpu_to_le32((u32)res);
- /* Parse enable/disable value. */
+ /* Enable/disable the specified filter. */
enable_parm.enable = cpu_to_le32(enable);
- buf_len += sizeof(enable_parm);
- memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
+ err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
+ sizeof(enable_parm));
+ if (err)
+ brcmf_err("Set pkt_filter_enable error (%d)\n", err);
- /* Enable/disable the specified filter. */
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
- else
- brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
-
- /* Contorl the master mode */
- mmode_le = cpu_to_le32(master_mode);
- brcmf_c_mkiovar("pkt_filter_mode", (char *)&mmode_le, 4, buf,
- sizeof(buf));
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf,
- sizeof(buf));
- rc = rc >= 0 ? 0 : rc;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
+ /* Control the master mode */
+ err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
+ if (err)
+ brcmf_err("Set pkt_filter_mode error (%d)\n", err);
fail:
kfree(arg_org);
}
-void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
+static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
{
- const char *str;
- struct brcmf_pkt_filter_le pkt_filter;
- struct brcmf_pkt_filter_le *pkt_filterp;
+ struct brcmf_pkt_filter_le *pkt_filter;
unsigned long res;
int buf_len;
- int str_len;
- int rc;
+ s32 err;
u32 mask_size;
u32 pattern_size;
char *argv[8], *buf = NULL;
@@ -704,104 +188,64 @@ void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
goto fail;
argv[i] = strsep(&arg_save, " ");
- while (argv[i++])
+ while (argv[i]) {
+ i++;
+ if (i >= 8) {
+ brcmf_err("Too many parameters\n");
+ goto fail;
+ }
argv[i] = strsep(&arg_save, " ");
+ }
- i = 0;
- if (NULL == argv[i]) {
- brcmf_dbg(ERROR, "No args provided\n");
+ if (i != 6) {
+ brcmf_err("Not enough args provided %d\n", i);
goto fail;
}
- str = "pkt_filter_add";
- strcpy(buf, str);
- str_len = strlen(str);
- buf_len = str_len + 1;
-
- pkt_filterp = (struct brcmf_pkt_filter_le *) (buf + str_len + 1);
+ pkt_filter = (struct brcmf_pkt_filter_le *)buf;
/* Parse packet filter id. */
- pkt_filter.id = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.id = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Polarity not provided\n");
- goto fail;
- }
+ pkt_filter->id = 0;
+ if (!kstrtoul(argv[0], 0, &res))
+ pkt_filter->id = cpu_to_le32((u32)res);
/* Parse filter polarity. */
- pkt_filter.negate_match = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.negate_match = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Filter type not provided\n");
- goto fail;
- }
+ pkt_filter->negate_match = 0;
+ if (!kstrtoul(argv[1], 0, &res))
+ pkt_filter->negate_match = cpu_to_le32((u32)res);
/* Parse filter type. */
- pkt_filter.type = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.type = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Offset not provided\n");
- goto fail;
- }
+ pkt_filter->type = 0;
+ if (!kstrtoul(argv[2], 0, &res))
+ pkt_filter->type = cpu_to_le32((u32)res);
/* Parse pattern filter offset. */
- pkt_filter.u.pattern.offset = 0;
- if (!kstrtoul(argv[i], 0, &res))
- pkt_filter.u.pattern.offset = cpu_to_le32((u32)res);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Bitmask not provided\n");
- goto fail;
- }
+ pkt_filter->u.pattern.offset = 0;
+ if (!kstrtoul(argv[3], 0, &res))
+ pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
/* Parse pattern filter mask. */
- mask_size =
- brcmf_c_pattern_atoh
- (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
-
- if (NULL == argv[++i]) {
- brcmf_dbg(ERROR, "Pattern not provided\n");
- goto fail;
- }
+ mask_size = brcmf_c_pattern_atoh(argv[4],
+ (char *)pkt_filter->u.pattern.mask_and_pattern);
/* Parse pattern filter pattern. */
- pattern_size =
- brcmf_c_pattern_atoh(argv[i],
- (char *)&pkt_filterp->u.pattern.
- mask_and_pattern[mask_size]);
+ pattern_size = brcmf_c_pattern_atoh(argv[5],
+ (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
if (mask_size != pattern_size) {
- brcmf_dbg(ERROR, "Mask and pattern not the same size\n");
+ brcmf_err("Mask and pattern not the same size\n");
goto fail;
}
- pkt_filter.u.pattern.size_bytes = cpu_to_le32(mask_size);
- buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
- buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
-
- /* Keep-alive attributes are set in local
- * variable (keep_alive_pkt), and
- ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
- ** guarantee that the buffer is properly aligned.
- */
- memcpy((char *)pkt_filterp,
- &pkt_filter,
- BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
-
- rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
- rc = rc >= 0 ? 0 : rc;
+ pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
+ buf_len = offsetof(struct brcmf_pkt_filter_le,
+ u.pattern.mask_and_pattern);
+ buf_len += mask_size + pattern_size;
- if (rc)
- brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
- arg, rc);
- else
- brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
+ err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
+ buf_len);
+ if (err)
+ brcmf_err("Set pkt_filter_add error (%d)\n", err);
fail:
kfree(arg_org);
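brcmf_c_pktfilter_offload_set() above now insists on exactly six space-separated fields: filter id, match polarity, filter type, pattern offset, mask and pattern, which is also the shape of the BRCMF_DEFAULT_PACKET_FILTER string defined earlier in this file. An illustrative call sequence using that default string; whether and where the driver actually enables the filter is outside this excerpt, and the comments just restate the parsing above:

	/* "100 0 0 0 0x01 0x00" means: id = 100, negate_match = 0, type = 0,
	 * u.pattern.offset = 0, mask = 0x01, pattern = 0x00 (mask and
	 * pattern must decode to the same number of bytes, here one each)
	 */
	brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
	brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
					 1, true);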
@@ -809,130 +253,125 @@ fail:
kfree(buf);
}
-static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
- char iovbuf[32];
- int retcode;
- __le32 arp_mode_le;
-
- arp_mode_le = cpu_to_le32(arp_mode);
- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
- sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, retcode = %d\n",
- arp_mode, retcode);
- else
- brcmf_dbg(TRACE, "successfully set ARP offload mode to 0x%x\n",
- arp_mode);
-}
-
-static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
-{
- char iovbuf[32];
- int retcode;
- __le32 arp_enable_le;
-
- arp_enable_le = cpu_to_le32(arp_enable);
-
- brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
- iovbuf, sizeof(iovbuf));
- retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
- retcode = retcode >= 0 ? 0 : retcode;
- if (retcode)
- brcmf_dbg(TRACE, "failed to enable ARP offload to %d, retcode = %d\n",
- arp_enable, retcode);
- else
- brcmf_dbg(TRACE, "successfully enabled ARP offload to %d\n",
- arp_enable);
-}
-
-int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
-{
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
- "event_msgs" + '\0' + bitvec */
- char buf[128], *ptr;
- __le32 roaming_le = cpu_to_le32(1);
- __le32 bcn_timeout_le = cpu_to_le32(3);
- __le32 scan_assoc_time_le = cpu_to_le32(40);
- __le32 scan_unassoc_time_le = cpu_to_le32(40);
- int i;
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ u8 buf[BRCMF_DCMD_SMLEN];
+ char *ptr;
+ s32 err;
struct brcmf_bus_dcmd *cmdlst;
struct list_head *cur, *q;
- mutex_lock(&drvr->proto_block);
-
- /* Set Country code */
- if (drvr->country_code[0] != 0) {
- if (brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_COUNTRY,
- drvr->country_code,
- sizeof(drvr->country_code)) < 0)
- brcmf_dbg(ERROR, "country code setting failed\n");
+	/* retrieve MAC address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+		brcmf_err("Retrieving cur_etheraddr failed, %d\n",
+ err);
+ goto done;
}
+ memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
- ptr = buf;
- brcmf_c_mkiovar("ver", NULL, 0, buf, sizeof(buf));
- brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
+ strcpy(buf, "ver");
+ err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
+ if (err < 0) {
+		brcmf_err("Retrieving version information failed, %d\n",
+ err);
+ goto done;
+ }
+ ptr = (char *)buf;
strsep(&ptr, "\n");
/* Print fw version info */
- brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
+ brcmf_err("Firmware version = %s\n", buf);
- /* Setup timeout if Beacons are lost and roam is off to report
- link down */
- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
+ /*
+ * Setup timeout if Beacons are lost and roam is off to report
+ * link down
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
+ BRCMF_DEFAULT_BCN_TIMEOUT);
+ if (err) {
+ brcmf_err("bcn_timeout error (%d)\n", err);
+ goto done;
+ }
/* Enable/Disable build-in roaming to allowed ext supplicant to take
- of romaing */
- brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* Setup event_msgs */
- brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
-
- /* Set and enable ARP offload feature */
- brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
- brcmf_c_arp_offload_enable(drvr, true);
-
- /* Set up pkt filter */
- for (i = 0; i < drvr->pktfilter_count; i++) {
- brcmf_c_pktfilter_offload_set(drvr, drvr->pktfilter[i]);
- brcmf_c_pktfilter_offload_enable(drvr, drvr->pktfilter[i],
- 0, true);
+	 * care of roaming
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (err) {
+ brcmf_err("roam_off error (%d)\n", err);
+ goto done;
+ }
+
+ /* Setup event_msgs, enable E_IF */
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Get event_msgs error (%d)\n", err);
+ goto done;
+ }
+ setbit(eventmask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ if (err) {
+ brcmf_err("Set event_msgs error (%d)\n", err);
+ goto done;
}
+ /* Setup default scan channel time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* Setup default scan unassoc time */
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
+ if (err) {
+ brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
+ err);
+ goto done;
+ }
+
+ /* Try to set and enable ARP offload feature, this may fail */
+ err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+ BRCMF_ARPOL_MODE, err);
+ err = 0;
+ } else {
+ err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
+ err);
+ err = 0;
+ } else
+ brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
+ BRCMF_ARPOL_MODE);
+ }
+
+ /* Setup packet filter */
+ brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
+ brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
+ 0, true);
+
/* set bus specific command if there is any */
- list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
+ list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
- brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
- cmdlst->param_len, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
+ brcmf_fil_iovar_data_set(ifp, cmdlst->name,
+ cmdlst->param,
+ cmdlst->param_len);
}
list_del(cur);
kfree(cmdlst);
}
-
- mutex_unlock(&drvr->proto_block);
-
- return 0;
+done:
+ return err;
}
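
For reference, the rewritten brcmf_c_preinit_dcmds() above follows one calling convention throughout: every firmware access goes through a fwil helper (brcmf_fil_iovar_data_get(), brcmf_fil_iovar_int_set(), brcmf_fil_cmd_int_set()) that takes the brcmf_if and returns 0 or a negative errno. A minimal sketch of that pattern, assuming only the helpers and macros visible in this patch; the function name and iovar choices are illustrative, not part of the change:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/etherdevice.h>

#include "dhd.h"
#include "dhd_dbg.h"
#include "fwil.h"

static int brcmf_example_query_and_set(struct brcmf_if *ifp)
{
	u8 mac[ETH_ALEN];
	s32 err;

	/* "get" helpers fill a caller-supplied buffer */
	err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", mac, sizeof(mac));
	if (err < 0) {
		brcmf_err("cur_etheraddr failed, %d\n", err);
		return err;
	}

	/* "set" helpers take the value directly; non-zero means failure */
	err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
	if (err)
		brcmf_err("roam_off error (%d)\n", err);

	return err;
}
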
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 7f89540b56da..57671eddf79d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -14,12 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/debugfs.h>
-#include <linux/if_ether.h>
-#include <linux/if.h>
-#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
#include <linux/module.h>
-#include <defs.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "dhd.h"
@@ -46,10 +43,12 @@ void brcmf_debugfs_exit(void)
int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
+ struct device *dev = drvr->bus_if->dev;
+
if (!root_folder)
return -ENODEV;
- drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
+ drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
return PTR_RET(drvr->dbgfs_dir);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index fb508c2256dd..f2ab01cd7966 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -18,7 +18,6 @@
#define _BRCMF_DBG_H_
/* message levels */
-#define BRCMF_ERROR_VAL 0x0001
#define BRCMF_TRACE_VAL 0x0002
#define BRCMF_INFO_VAL 0x0004
#define BRCMF_DATA_VAL 0x0008
@@ -27,27 +26,34 @@
#define BRCMF_HDRS_VAL 0x0040
#define BRCMF_BYTES_VAL 0x0080
#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0400
-#define BRCMF_EVENT_VAL 0x0800
-#define BRCMF_BTA_VAL 0x1000
-#define BRCMF_ISCAN_VAL 0x2000
+#define BRCMF_GLOM_VAL 0x0200
+#define BRCMF_EVENT_VAL 0x0400
+#define BRCMF_BTA_VAL 0x0800
+#define BRCMF_FIL_VAL 0x1000
+#define BRCMF_USB_VAL 0x2000
+#define BRCMF_SCAN_VAL 0x4000
+#define BRCMF_CONN_VAL 0x8000
+
+/* Macro for error messages. net_ratelimit() is used when driver
+ * debugging is not selected. When debugging the driver error
+ * debugging is not selected. When debugging the driver, error
+ */
+#ifdef CONFIG_BRCMDBG
+#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
+#else
+#define brcmf_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#endif
#if defined(DEBUG)
-#define brcmf_dbg(level, fmt, ...) \
-do { \
- if (BRCMF_ERROR_VAL == BRCMF_##level##_VAL) { \
- if (brcmf_msg_level & BRCMF_##level##_VAL) { \
- if (net_ratelimit()) \
- pr_debug("%s: " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } else { \
- if (brcmf_msg_level & BRCMF_##level##_VAL) { \
- pr_debug("%s: " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
+#define brcmf_dbg(level, fmt, ...) \
+do { \
+ if (brcmf_msg_level & BRCMF_##level##_VAL) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
@@ -56,6 +62,7 @@ do { \
#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
+#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
#else /* (defined DEBUG) || (defined DEBUG) */
@@ -67,6 +74,7 @@ do { \
#define BRCMF_BYTES_ON() 0
#define BRCMF_GLOM_ON() 0
#define BRCMF_EVENT_ON() 0
+#define BRCMF_FIL_ON() 0
#endif /* defined(DEBUG) */
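
A short usage sketch of the reworked logging macros (illustrative, not part of the patch): brcmf_err() is always compiled in and rate-limited via net_ratelimit() unless CONFIG_BRCMDBG is set, while brcmf_dbg(FIL, ...) only prints when DEBUG is defined and the BRCMF_FIL_VAL bit (0x1000) is set in the brcmf_msg_level module parameter.

/* Illustrative sketch only -- uses the macros defined in dhd_dbg.h above. */
#include "dhd_dbg.h"

static void brcmf_example_log(int err)
{
	/* unconditional error path, rate-limited in non-debug builds */
	if (err)
		brcmf_err("iovar failed, %d\n", err);

	/* emitted only when DEBUG is defined and
	 * (brcmf_msg_level & BRCMF_FIL_VAL) is non-zero,
	 * e.g. modprobe brcmfmac brcmf_msg_level=0x1000
	 */
	brcmf_dbg(FIL, "firmware interface call completed\n");
}
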
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d7c76ce9d8cb..74a616b4de8e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -16,27 +16,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/random.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/hardirq.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
-#include <defs.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -45,55 +29,29 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
+#include "fwil.h"
MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
+MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
MODULE_LICENSE("Dual BSD/GPL");
-
-/* Interface control information */
-struct brcmf_if {
- struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
- /* OS/stack specifics */
- struct net_device *ndev;
- struct net_device_stats stats;
- int idx; /* iface idx in dongle */
- u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
-};
+#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
/* Error bits */
-int brcmf_msg_level = BRCMF_ERROR_VAL;
+int brcmf_msg_level;
module_param(brcmf_msg_level, int, 0);
-int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
-{
- int i = BRCMF_MAX_IFS;
- struct brcmf_if *ifp;
-
- if (name == NULL || *name == '\0')
- return 0;
-
- while (--i > 0) {
- ifp = drvr->iflist[i];
- if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
- break;
- }
-
- brcmf_dbg(TRACE, "return idx %d for \"%s\"\n", i, name);
-
- return i; /* default - the primary interface */
-}
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
- brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
+ brcmf_err("ifidx %d out of range\n", ifidx);
return "<if_bad>";
}
if (drvr->iflist[ifidx] == NULL) {
- brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
+ brcmf_err("null i/f %d\n", ifidx);
return "<if_null>";
}
@@ -105,38 +63,33 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
static void _brcmf_set_multicast_list(struct work_struct *work)
{
+ struct brcmf_if *ifp;
struct net_device *ndev;
struct netdev_hw_addr *ha;
- u32 dcmd_value, cnt;
+ u32 cmd_value, cnt;
__le32 cnt_le;
- __le32 dcmd_le_value;
-
- struct brcmf_dcmd dcmd;
char *buf, *bufp;
- uint buflen;
- int ret;
+ u32 buflen;
+ s32 err;
- struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
- multicast_work);
+ brcmf_dbg(TRACE, "enter\n");
- ndev = drvr->iflist[0]->ndev;
- cnt = netdev_mc_count(ndev);
+ ifp = container_of(work, struct brcmf_if, multicast_work);
+ ndev = ifp->ndev;
/* Determine initial value of allmulti flag */
- dcmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
+ cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
/* Send down the multicast list first. */
-
- buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
- bufp = buf = kmalloc(buflen, GFP_ATOMIC);
- if (!bufp)
+ cnt = netdev_mc_count(ndev);
+ buflen = sizeof(cnt) + (cnt * ETH_ALEN);
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf)
return;
-
- strcpy(bufp, "mcast_list");
- bufp += strlen("mcast_list") + 1;
+ bufp = buf;
cnt_le = cpu_to_le32(cnt);
- memcpy(bufp, &cnt_le, sizeof(cnt));
+ memcpy(bufp, &cnt_le, sizeof(cnt_le));
bufp += sizeof(cnt_le);
netdev_for_each_mc_addr(ha, ndev) {
@@ -147,129 +100,66 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
cnt--;
}
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = BRCMF_C_SET_VAR;
- dcmd.buf = buf;
- dcmd.len = buflen;
- dcmd.set = true;
-
- ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
- if (ret < 0) {
- brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
- brcmf_ifname(drvr, 0), cnt);
- dcmd_value = cnt ? true : dcmd_value;
+ err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
+ if (err < 0) {
+ brcmf_err("Setting mcast_list failed, %d\n", err);
+ cmd_value = cnt ? true : cmd_value;
}
kfree(buf);
- /* Now send the allmulti setting. This is based on the setting in the
+ /*
+ * Now send the allmulti setting. This is based on the setting in the
* net_device flags, but might be modified above to be turned on if we
* were trying to set some addresses and dongle rejected it...
*/
+ err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
+ if (err < 0)
+ brcmf_err("Setting allmulti failed, %d\n", err);
- buflen = sizeof("allmulti") + sizeof(dcmd_value);
- buf = kmalloc(buflen, GFP_ATOMIC);
- if (!buf)
- return;
-
- dcmd_le_value = cpu_to_le32(dcmd_value);
-
- if (!brcmf_c_mkiovar
- ("allmulti", (void *)&dcmd_le_value,
- sizeof(dcmd_le_value), buf, buflen)) {
- brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
- brcmf_ifname(drvr, 0),
- (int)sizeof(dcmd_value), buflen);
- kfree(buf);
- return;
- }
-
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = BRCMF_C_SET_VAR;
- dcmd.buf = buf;
- dcmd.len = buflen;
- dcmd.set = true;
-
- ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
- if (ret < 0) {
- brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
- brcmf_ifname(drvr, 0),
- le32_to_cpu(dcmd_le_value));
- }
-
- kfree(buf);
-
- /* Finally, pick up the PROMISC flag as well, like the NIC
- driver does */
-
- dcmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
- dcmd_le_value = cpu_to_le32(dcmd_value);
-
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = BRCMF_C_SET_PROMISC;
- dcmd.buf = &dcmd_le_value;
- dcmd.len = sizeof(dcmd_le_value);
- dcmd.set = true;
-
- ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
- if (ret < 0) {
- brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
- brcmf_ifname(drvr, 0),
- le32_to_cpu(dcmd_le_value));
- }
+	/* Finally, pick up the PROMISC flag */
+ cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
+ if (err < 0)
+ brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
+ err);
}
static void
_brcmf_set_mac_address(struct work_struct *work)
{
- char buf[32];
- struct brcmf_dcmd dcmd;
- int ret;
-
- struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
- setmacaddr_work);
+ struct brcmf_if *ifp;
+ s32 err;
brcmf_dbg(TRACE, "enter\n");
- if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
- ETH_ALEN, buf, 32)) {
- brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
- brcmf_ifname(drvr, 0));
- return;
- }
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = BRCMF_C_SET_VAR;
- dcmd.buf = buf;
- dcmd.len = 32;
- dcmd.set = true;
-
- ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
- if (ret < 0)
- brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
- brcmf_ifname(drvr, 0));
- else
- memcpy(drvr->iflist[0]->ndev->dev_addr,
- drvr->macvalue, ETH_ALEN);
- return;
+ ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
+ ETH_ALEN);
+ if (err < 0) {
+ brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+ } else {
+ brcmf_dbg(TRACE, "MAC address updated to %pM\n",
+ ifp->mac_addr);
+ memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ }
}
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
struct sockaddr *sa = (struct sockaddr *)addr;
- memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
- schedule_work(&drvr->setmacaddr_work);
+ memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
+ schedule_work(&ifp->setmacaddr_work);
return 0;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
- schedule_work(&drvr->multicast_work);
+ schedule_work(&ifp->multicast_work);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -282,8 +172,8 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Reject if down */
if (!drvr->bus_if->drvr_up ||
- (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
- brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
+ (drvr->bus_if->state != BRCMF_BUS_DATA)) {
+ brcmf_err("xmit rejected drvup=%d state=%d\n",
drvr->bus_if->drvr_up,
drvr->bus_if->state);
netif_stop_queue(ndev);
@@ -291,7 +181,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
if (!drvr->iflist[ifp->idx]) {
- brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
+ brcmf_err("bad ifidx %d\n", ifp->idx);
netif_stop_queue(ndev);
return -ENODEV;
}
@@ -307,7 +197,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
- brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
+ brcmf_err("%s: skb_realloc_headroom failed\n",
brcmf_ifname(drvr, ifp->idx));
ret = -ENOMEM;
goto done;
@@ -329,7 +219,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
brcmf_proto_hdrpush(drvr, ifp->idx, skb);
/* Use bus module to send data frame */
- ret = drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
+ ret = brcmf_bus_txdata(drvr->bus_if, skb);
done:
if (ret)
@@ -360,32 +250,13 @@ void brcmf_txflowblock(struct device *dev, bool state)
}
}
-static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
- void *pktdata, struct brcmf_event_msg *event,
- void **data)
-{
- int bcmerror = 0;
-
- bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
- if (bcmerror != 0)
- return bcmerror;
-
- if (drvr->iflist[*ifidx]->ndev)
- brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
- event, *data);
-
- return bcmerror;
-}
-
-void brcmf_rx_frame(struct device *dev, int ifidx,
+void brcmf_rx_frame(struct device *dev, u8 ifidx,
struct sk_buff_head *skb_list)
{
unsigned char *eth;
uint len;
- void *data;
struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
- struct brcmf_event_msg event;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -432,10 +303,7 @@ void brcmf_rx_frame(struct device *dev, int ifidx,
skb_pull(skb, ETH_HLEN);
/* Process special event packets and then discard them */
- if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
- brcmf_host_event(drvr, &ifidx,
- skb_mac_header(skb),
- &event, &data);
+ brcmf_fweh_process_skb(drvr, skb, &ifidx);
if (drvr->iflist[ifidx]) {
ifp = drvr->iflist[ifidx];
@@ -471,9 +339,11 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
- if (type == ETH_P_PAE)
+ if (type == ETH_P_PAE) {
atomic_dec(&drvr->pend_8021x_cnt);
-
+ if (waitqueue_active(&drvr->pend_8021x_wait))
+ wake_up(&drvr->pend_8021x_wait);
+ }
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -497,83 +367,26 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
return &ifp->stats;
}
-/* Retrieve current toe component enables, which are kept
- as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
-{
- struct brcmf_dcmd dcmd;
- __le32 toe_le;
- char buf[32];
- int ret;
-
- memset(&dcmd, 0, sizeof(dcmd));
-
- dcmd.cmd = BRCMF_C_GET_VAR;
- dcmd.buf = buf;
- dcmd.len = (uint) sizeof(buf);
- dcmd.set = false;
-
- strcpy(buf, "toe_ol");
- ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
- if (ret < 0) {
- /* Check for older dongle image that doesn't support toe_ol */
- if (ret == -EIO) {
- brcmf_dbg(ERROR, "%s: toe not supported by device\n",
- brcmf_ifname(drvr, ifidx));
- return -EOPNOTSUPP;
- }
-
- brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
- brcmf_ifname(drvr, ifidx), ret);
- return ret;
- }
-
- memcpy(&toe_le, buf, sizeof(u32));
- *toe_ol = le32_to_cpu(toe_le);
- return 0;
-}
-
-/* Set current toe component enables in toe_ol iovar,
- and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
+/*
+ * Set current toe component enables in toe_ol iovar,
+ * and set toe global enable iovar
+ */
+static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
{
- struct brcmf_dcmd dcmd;
- char buf[32];
- int ret;
- __le32 toe_le = cpu_to_le32(toe_ol);
-
- memset(&dcmd, 0, sizeof(dcmd));
-
- dcmd.cmd = BRCMF_C_SET_VAR;
- dcmd.buf = buf;
- dcmd.len = (uint) sizeof(buf);
- dcmd.set = true;
-
- /* Set toe_ol as requested */
- strcpy(buf, "toe_ol");
- memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
+ s32 err;
- ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
- if (ret < 0) {
- brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
- brcmf_ifname(drvr, ifidx), ret);
- return ret;
+ err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
+ if (err < 0) {
+ brcmf_err("Setting toe_ol failed, %d\n", err);
+ return err;
}
- /* Enable toe globally only if any components are enabled. */
- toe_le = cpu_to_le32(toe_ol != 0);
-
- strcpy(buf, "toe");
- memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
+ err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
+ if (err < 0)
+ brcmf_err("Setting toe failed, %d\n", err);
- ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
- if (ret < 0) {
- brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
- brcmf_ifname(drvr, ifidx), ret);
- return ret;
- }
+ return err;
- return 0;
}
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
@@ -584,15 +397,16 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
sprintf(info->driver, KBUILD_MODNAME);
sprintf(info->version, "%lu", drvr->drv_version);
- sprintf(info->bus_info, "%s", dev_name(drvr->dev));
+ sprintf(info->bus_info, "%s", dev_name(drvr->bus_if->dev));
}
static const struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
-static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
{
+ struct brcmf_pub *drvr = ifp->drvr;
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
u32 cmd;
@@ -626,15 +440,12 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
/* otherwise, require dongle to be up */
else if (!drvr->bus_if->drvr_up) {
- brcmf_dbg(ERROR, "dongle is not up\n");
+ brcmf_err("dongle is not up\n");
return -ENODEV;
}
-
/* finally, report dongle driver type */
- else if (drvr->iswl)
- sprintf(info.driver, "wl");
else
- sprintf(info.driver, "xx");
+ sprintf(info.driver, "wl");
sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -646,7 +457,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
- ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
+ ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
if (ret < 0)
return ret;
@@ -667,7 +478,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
return -EFAULT;
/* Read the current settings, update and write back */
- ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
+ ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
if (ret < 0)
return ret;
@@ -679,18 +490,16 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
else
toe_cmpnt &= ~csum_dir;
- ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
+ ret = brcmf_toe_set(ifp, toe_cmpnt);
if (ret < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
- drvr->iflist[0]->ndev->features |=
- NETIF_F_IP_CSUM;
+ ifp->ndev->features |= NETIF_F_IP_CSUM;
else
- drvr->iflist[0]->ndev->features &=
- ~NETIF_F_IP_CSUM;
+ ifp->ndev->features &= ~NETIF_F_IP_CSUM;
}
break;
@@ -714,80 +523,23 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
return -1;
if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(drvr, ifr->ifr_data);
+ return brcmf_ethtool(ifp, ifr->ifr_data);
return -EOPNOTSUPP;
}
-/* called only from within this driver. Sends a command to the dongle. */
-s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
-{
- struct brcmf_dcmd dcmd;
- s32 err = 0;
- int buflen = 0;
- bool is_set_key_cmd;
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- memset(&dcmd, 0, sizeof(dcmd));
- dcmd.cmd = cmd;
- dcmd.buf = arg;
- dcmd.len = len;
-
- if (dcmd.buf != NULL)
- buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
-
- /* send to dongle (must be up, and wl) */
- if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
- brcmf_dbg(ERROR, "DONGLE_DOWN\n");
- err = -EIO;
- goto done;
- }
-
- if (!drvr->iswl) {
- err = -EIO;
- goto done;
- }
-
- /*
- * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
- * set key CMD to prevent M4 encryption.
- */
- is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
- ((dcmd.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("wsec_key", dcmd.buf, 9))) ||
- ((dcmd.cmd == BRCMF_C_SET_VAR) &&
- !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
- if (is_set_key_cmd)
- brcmf_netdev_wait_pend8021x(ndev);
-
- err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
-
-done:
- if (err > 0)
- err = 0;
-
- return err;
-}
-
-int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
-{
- brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
- dcmd->cmd, dcmd->buf, dcmd->len);
-
- return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
-}
-
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_cfg80211_down(drvr->config);
+
if (drvr->bus_if->drvr_up == 0)
return 0;
+ brcmf_cfg80211_down(ndev);
+
/* Set state and stop OS transmissions */
drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
@@ -802,39 +554,36 @@ static int brcmf_netdev_open(struct net_device *ndev)
struct brcmf_bus *bus_if = drvr->bus_if;
u32 toe_ol;
s32 ret = 0;
- uint up = 0;
brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
- if (ifp->idx == 0) { /* do it only for primary eth0 */
- /* If bus is not ready, can't continue */
- if (bus_if->state != BRCMF_BUS_DATA) {
- brcmf_dbg(ERROR, "failed bus is not ready\n");
- return -EAGAIN;
- }
+ /* If bus is not ready, can't continue */
+ if (bus_if->state != BRCMF_BUS_DATA) {
+ brcmf_err("failed bus is not ready\n");
+ return -EAGAIN;
+ }
- atomic_set(&drvr->pend_8021x_cnt, 0);
+ atomic_set(&drvr->pend_8021x_cnt, 0);
- memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+ memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
- /* Get current TOE mode from dongle */
- if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
- && (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr->iflist[ifp->idx]->ndev->features |=
- NETIF_F_IP_CSUM;
- else
- drvr->iflist[ifp->idx]->ndev->features &=
- ~NETIF_F_IP_CSUM;
- }
+ /* Get current TOE mode from dongle */
+ if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
+ && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ drvr->iflist[ifp->idx]->ndev->features |=
+ NETIF_F_IP_CSUM;
+ else
+ drvr->iflist[ifp->idx]->ndev->features &=
+ ~NETIF_F_IP_CSUM;
/* make sure RF is ready for work */
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
/* Allow transmit calls */
netif_start_queue(ndev);
drvr->bus_if->drvr_up = true;
- if (brcmf_cfg80211_up(drvr->config)) {
- brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
+ if (brcmf_cfg80211_up(ndev)) {
+ brcmf_err("failed to bring up cfg80211\n");
return -1;
}
@@ -851,51 +600,41 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-static int brcmf_net_attach(struct brcmf_if *ifp)
+static const struct net_device_ops brcmf_netdev_ops_virt = {
+ .ndo_open = brcmf_cfg80211_up,
+ .ndo_stop = brcmf_cfg80211_down,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
+int brcmf_net_attach(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
- u8 temp_addr[ETH_ALEN];
-
- brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
- ndev = drvr->iflist[ifp->idx]->ndev;
- ndev->netdev_ops = &brcmf_netdev_ops_pri;
+ brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+ ndev = ifp->ndev;
- /*
- * determine mac address to use
- */
- if (is_valid_ether_addr(ifp->mac_addr))
- memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
+ /* set appropriate operations */
+ if (!ifp->idx)
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
else
- memcpy(temp_addr, drvr->mac, ETH_ALEN);
-
- if (ifp->idx == 1) {
- brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
- /* ACCESSPOINT INTERFACE CASE */
- temp_addr[0] |= 0X02; /* set bit 2 ,
- - Locally Administered address */
+ ndev->netdev_ops = &brcmf_netdev_ops_virt;
- }
ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
drvr->rxsz = ndev->mtu + ndev->hard_header_len +
drvr->hdrlen;
- memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
-
- /* attach to cfg80211 for primary interface */
- if (!ifp->idx) {
- drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
- if (drvr->config == NULL) {
- brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
- goto fail;
- }
- }
+ /* set the mac address */
+ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
if (register_netdev(ndev) != 0) {
- brcmf_dbg(ERROR, "couldn't register the net device\n");
+ brcmf_err("couldn't register the net device\n");
goto fail;
}
@@ -908,13 +647,12 @@ fail:
return -EBADE;
}
-int
-brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
+ char *name, u8 *addr_mask)
{
struct brcmf_if *ifp;
struct net_device *ndev;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
+ int i;
brcmf_dbg(TRACE, "idx %d\n", ifidx);
@@ -924,19 +662,24 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
* in case we missed the BRCMF_E_IF_DEL event.
*/
if (ifp) {
- brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ brcmf_err("ERROR: netdev:%s already exists\n",
ifp->ndev->name);
- netif_stop_queue(ifp->ndev);
- unregister_netdev(ifp->ndev);
- free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
+ if (ifidx) {
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ } else {
+ brcmf_err("ignore IF event\n");
+ return ERR_PTR(-EINVAL);
+ }
}
/* Allocate netdev, including space for private structure */
ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
if (!ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- return -ENOMEM;
+ brcmf_err("OOM - alloc_netdev\n");
+ return ERR_PTR(-ENOMEM);
}
ifp = netdev_priv(ndev);
@@ -944,20 +687,19 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
ifp->drvr = drvr;
drvr->iflist[ifidx] = ifp;
ifp->idx = ifidx;
- if (mac_addr != NULL)
- memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
+ ifp->bssidx = bssidx;
- if (brcmf_net_attach(ifp)) {
- brcmf_dbg(ERROR, "brcmf_net_attach failed");
- free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
- return -EOPNOTSUPP;
- }
+ INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
- brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
- current->pid, ifp->ndev->name);
+ if (addr_mask != NULL)
+ for (i = 0; i < ETH_ALEN; i++)
+ ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
- return 0;
+ brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
+ current->pid, ifp->ndev->name, ifp->mac_addr);
+
+ return ifp;
}
void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
@@ -968,7 +710,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
ifp = drvr->iflist[ifidx];
if (!ifp) {
- brcmf_dbg(ERROR, "Null interface\n");
+ brcmf_err("Null interface\n");
return;
}
if (ifp->ndev) {
@@ -982,6 +724,9 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
netif_stop_queue(ifp->ndev);
}
+ cancel_work_sync(&ifp->setmacaddr_work);
+ cancel_work_sync(&ifp->multicast_work);
+
unregister_netdev(ifp->ndev);
drvr->iflist[ifidx] = NULL;
if (ifidx == 0)
@@ -1008,7 +753,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
drvr->hdrlen = bus_hdrlen;
drvr->bus_if = dev_get_drvdata(dev);
drvr->bus_if->drvr = drvr;
- drvr->dev = dev;
/* create device debugfs folder */
brcmf_debugfs_attach(drvr);
@@ -1016,15 +760,17 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
if (ret != 0) {
- brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
+ brcmf_err("brcmf_prot_attach failed\n");
goto fail;
}
- INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
+ /* attach firmware event handler */
+ brcmf_fweh_attach(drvr);
INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
+ init_waitqueue_head(&drvr->pend_8021x_wait);
+
return ret;
fail:
@@ -1036,63 +782,53 @@ fail:
int brcmf_bus_start(struct device *dev)
{
int ret = -1;
- /* Room for "event_msgs" + '\0' + bitvec */
- char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp;
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
- ret = bus_if->brcmf_bus_init(dev);
+ ret = brcmf_bus_init(bus_if);
if (ret != 0) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
+ brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
- brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
- sizeof(iovbuf));
- memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
- setbit(drvr->eventmask, BRCMF_E_SET_SSID);
- setbit(drvr->eventmask, BRCMF_E_PRUNE);
- setbit(drvr->eventmask, BRCMF_E_AUTH);
- setbit(drvr->eventmask, BRCMF_E_REASSOC);
- setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_DISASSOC);
- setbit(drvr->eventmask, BRCMF_E_JOIN);
- setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
- setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
- setbit(drvr->eventmask, BRCMF_E_LINK);
- setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
- setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
- setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
- setbit(drvr->eventmask, BRCMF_E_TXFAIL);
- setbit(drvr->eventmask, BRCMF_E_JOIN_START);
- setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
-
-/* enable dongle roaming event */
-
- drvr->pktfilter_count = 1;
- /* Setup filter to allow only unicast */
- drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
-
- /* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(drvr);
+ /* add primary networking interface */
+ ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
+ if (IS_ERR(ifp))
+ return PTR_ERR(ifp);
+
+ /* signal bus ready */
+ bus_if->state = BRCMF_BUS_DATA;
+
+ /* Bus is ready, do any initialization */
+ ret = brcmf_c_preinit_dcmds(ifp);
if (ret < 0)
- return ret;
+ goto fail;
- /* add primary networking interface */
- ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
+ drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
+ if (drvr->config == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = brcmf_fweh_activate_events(ifp);
if (ret < 0)
+ goto fail;
+
+ ret = brcmf_net_attach(ifp);
+fail:
+ if (ret < 0) {
+ brcmf_err("failed: %d\n", ret);
+ if (drvr->config)
+ brcmf_cfg80211_detach(drvr->config);
+ free_netdev(drvr->iflist[0]->ndev);
+ drvr->iflist[0] = NULL;
return ret;
+ }
- /* signal bus ready */
- bus_if->state = BRCMF_BUS_DATA;
return 0;
}
@@ -1105,7 +841,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
brcmf_proto_stop(drvr);
/* Stop the bus module */
- drvr->bus_if->brcmf_bus_stop(drvr->dev);
+ brcmf_bus_stop(drvr->bus_if);
}
}
@@ -1117,6 +853,11 @@ void brcmf_detach(struct device *dev)
brcmf_dbg(TRACE, "Enter\n");
+ if (drvr == NULL)
+ return;
+
+ /* stop firmware event handling */
+ brcmf_fweh_detach(drvr);
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
@@ -1126,8 +867,6 @@ void brcmf_detach(struct device *dev)
brcmf_bus_detach(drvr);
if (drvr->prot) {
- cancel_work_sync(&drvr->setmacaddr_work);
- cancel_work_sync(&drvr->multicast_work);
brcmf_proto_detach(drvr);
}
@@ -1141,63 +880,20 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
return atomic_read(&drvr->pend_8021x_cnt);
}
-#define MAX_WAIT_FOR_8021X_TX 10
-
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- int timeout = 10 * HZ / 1000;
- int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = brcmf_get_pend_8021x_cnt(drvr);
-
- while (ntimes && pend) {
- if (pend) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
- ntimes--;
- }
- pend = brcmf_get_pend_8021x_cnt(drvr);
- }
- return pend;
-}
+ int err;
-#ifdef DEBUG
-int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size)
-{
- int ret = 0;
- struct file *fp;
- mm_segment_t old_fs;
- loff_t pos = 0;
-
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- /* open file to write */
- fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
- if (!fp) {
- brcmf_dbg(ERROR, "open file error\n");
- ret = -1;
- goto exit;
- }
+ err = wait_event_timeout(drvr->pend_8021x_wait,
+ !brcmf_get_pend_8021x_cnt(drvr),
+ msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
- /* Write buf to file */
- fp->f_op->write(fp, (char __user *)buf, size, &pos);
+ WARN_ON(!err);
-exit:
- /* free buf before return */
- kfree(buf);
- /* close file before return */
- if (fp)
- filp_close(fp, NULL);
- /* restore previous address limit */
- set_fs(old_fs);
-
- return ret;
+ return !err;
}
-#endif /* DEBUG */
static void brcmf_driver_init(struct work_struct *work)
{
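
The dhd_linux.c changes above also replace the old 8021x polling loop with a waitqueue: brcmf_txcomplete() wakes drvr->pend_8021x_wait once the pending EAPOL counter drops, and brcmf_netdev_wait_pend8021x() blocks on it for at most MAX_WAIT_FOR_8021X_TX milliseconds. A generic sketch of that wait/wake pattern, with illustrative names rather than the driver's own:

/* Illustrative sketch only -- not part of this patch. */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_pend {
	atomic_t count;			/* outstanding frames */
	wait_queue_head_t wait;		/* waiter parks here */
};

static void example_pend_init(struct example_pend *p)
{
	atomic_set(&p->count, 0);
	init_waitqueue_head(&p->wait);
}

/* tx-complete path: drop the count and wake a sleeping waiter, if any */
static void example_pend_complete(struct example_pend *p)
{
	if (atomic_dec_and_test(&p->count) && waitqueue_active(&p->wait))
		wake_up(&p->wait);
}

/* waiter: 0 on success, -ETIMEDOUT if frames are still outstanding */
static int example_pend_wait(struct example_pend *p, unsigned int ms)
{
	long left = wait_event_timeout(p->wait, !atomic_read(&p->count),
				       msecs_to_jiffies(ms));

	return left ? 0 : -ETIMEDOUT;
}
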
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 6bc4425a8b0f..48fa70302192 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -27,11 +27,6 @@ extern int brcmf_proto_attach(struct brcmf_pub *drvr);
/* Unlink, frees allocated protocol memory (including brcmf_proto) */
extern void brcmf_proto_detach(struct brcmf_pub *drvr);
-/* Initialize protocol: sync w/dongle state.
- * Sets dongle media info (iswl, drv_version, mac address).
- */
-extern int brcmf_proto_init(struct brcmf_pub *drvr);
-
/* Stop protocol: sync w/dongle state. */
extern void brcmf_proto_stop(struct brcmf_pub *drvr);
@@ -41,13 +36,7 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
struct sk_buff *txp);
-/* Use protocol to issue command to dongle */
-extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
- struct brcmf_dcmd *dcmd, int len);
-
-extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
-
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len);
+/* Sets dongle media info (drv_version, mac address). */
+extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3564686add9a..cf857f1edf8c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -533,9 +533,11 @@ struct brcmf_sdio {
u8 *rxbuf; /* Buffer for receiving control packets */
uint rxblen; /* Allocated length of rxbuf */
u8 *rxctl; /* Aligned pointer into rxbuf */
+ u8 *rxctl_orig; /* pointer for freeing rxctl */
u8 *databuf; /* Buffer for receiving big glom packet */
u8 *dataptr; /* Aligned pointer into databuf */
uint rxlen; /* Length of valid data in buffer */
+ spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
u8 sdpcm_ver; /* Bus protocol reported by dongle */
@@ -582,8 +584,6 @@ struct brcmf_sdio {
struct list_head dpc_tsklst;
spinlock_t dpc_tl_lock;
- struct semaphore sdsem;
-
const struct firmware *firmware;
u32 fw_ptr;
@@ -614,6 +614,12 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
+enum brcmf_sdio_frmtype {
+ BRCMF_SDIO_FT_NORMAL,
+ BRCMF_SDIO_FT_SUPER,
+ BRCMF_SDIO_FT_SUB,
+};
+
static void pkt_align(struct sk_buff *p, int len, int align)
{
uint datalign;
@@ -683,7 +689,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
clkreq, &err);
if (err) {
- brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
@@ -691,7 +697,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
- brcmf_dbg(ERROR, "HT Avail read error: %d\n", err);
+ brcmf_err("HT Avail read error: %d\n", err);
return -EBADE;
}
@@ -701,7 +707,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
devctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
if (err) {
- brcmf_dbg(ERROR, "Devctl error setting CA: %d\n",
+ brcmf_err("Devctl error setting CA: %d\n",
err);
return -EBADE;
}
@@ -735,11 +741,11 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
usleep_range(5000, 10000);
}
if (err) {
- brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- brcmf_dbg(ERROR, "HT Avail timeout (%d): clkctl 0x%02x\n",
+ brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
PMU_MAX_TRANSITION_DLY, clkctl);
return -EBADE;
}
@@ -751,7 +757,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
#if defined(DEBUG)
if (!bus->alp_only) {
if (SBSDIO_ALPONLY(clkctl))
- brcmf_dbg(ERROR, "HT Clock should be on\n");
+ brcmf_err("HT Clock should be on\n");
}
#endif /* defined (DEBUG) */
@@ -773,7 +779,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq, &err);
brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
if (err) {
- brcmf_dbg(ERROR, "Failed access turning clock off: %d\n",
+ brcmf_err("Failed access turning clock off: %d\n",
err);
return -EBADE;
}
@@ -830,7 +836,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
else if (bus->clkstate == CLK_AVAIL)
brcmf_sdbrcm_htclk(bus, false, false);
else
- brcmf_dbg(ERROR, "request for %d -> %d\n",
+ brcmf_err("request for %d -> %d\n",
bus->clkstate, target);
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
break;
@@ -874,7 +880,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n",
bus->rx_seq);
if (!bus->rxskip)
- brcmf_dbg(ERROR, "unexpected NAKHANDLED!\n");
+ brcmf_err("unexpected NAKHANDLED!\n");
bus->rxskip = false;
intstatus |= I_HMB_FRAME_IND;
@@ -888,7 +894,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
(hmb_data & HMB_DATA_VERSION_MASK) >>
HMB_DATA_VERSION_SHIFT;
if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
- brcmf_dbg(ERROR, "Version mismatch, dongle reports %d, "
+ brcmf_err("Version mismatch, dongle reports %d, "
"expecting %d\n",
bus->sdpcm_ver, SDPCM_PROT_VERSION);
else
@@ -921,7 +927,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
HMB_DATA_FC |
HMB_DATA_FWREADY |
HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
- brcmf_dbg(ERROR, "Unknown mailbox data content: 0x%02x\n",
+ brcmf_err("Unknown mailbox data content: 0x%02x\n",
hmb_data);
return intstatus;
@@ -934,7 +940,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
u8 hi, lo;
int err;
- brcmf_dbg(ERROR, "%sterminate frame%s\n",
+ brcmf_err("%sterminate frame%s\n",
abort ? "abort command, " : "",
rtx ? ", send NAK" : "");
@@ -957,14 +963,14 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
break;
if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
- brcmf_dbg(ERROR, "count growing: last 0x%04x now 0x%04x\n",
+ brcmf_err("count growing: last 0x%04x now 0x%04x\n",
lastrbc, (hi << 8) + lo);
}
lastrbc = (hi << 8) + lo;
}
if (!retries)
- brcmf_dbg(ERROR, "count never zeroed: last 0x%04x\n", lastrbc);
+ brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
else
brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
@@ -1031,8 +1037,9 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
}
-static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
- struct brcmf_sdio_read *rd)
+static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_read *rd,
+ enum brcmf_sdio_frmtype type)
{
u16 len, checksum;
u8 rx_seq, fc, tx_seq_max;
@@ -1047,17 +1054,26 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
/* All zero means no more to read */
if (!(len | checksum)) {
bus->rxpending = false;
- return false;
+ return -ENODATA;
}
if ((u16)(~(len ^ checksum))) {
- brcmf_dbg(ERROR, "HW header checksum error\n");
+ brcmf_err("HW header checksum error\n");
bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
- return false;
+ return -EIO;
}
if (len < SDPCM_HDRLEN) {
- brcmf_dbg(ERROR, "HW header length error\n");
- return false;
+ brcmf_err("HW header length error\n");
+ return -EPROTO;
+ }
+ if (type == BRCMF_SDIO_FT_SUPER &&
+ (roundup(len, bus->blocksize) != rd->len)) {
+ brcmf_err("HW superframe header length error\n");
+ return -EPROTO;
+ }
+ if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
+ brcmf_err("HW subframe header length error\n");
+ return -EPROTO;
}
rd->len = len;
@@ -1071,35 +1087,56 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
* Byte 5: Maximum Sequence number allow for Tx
* Byte 6~7: Reserved
*/
+ if (type == BRCMF_SDIO_FT_SUPER &&
+ SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+ brcmf_err("Glom descriptor found in superframe head\n");
+ rd->len = 0;
+ return -EINVAL;
+ }
rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
- if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
- brcmf_dbg(ERROR, "HW header length too long\n");
+ if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
+ type != BRCMF_SDIO_FT_SUPER) {
+ brcmf_err("HW header length too long\n");
bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
rd->len = 0;
- return false;
+ return -EPROTO;
+ }
+ if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
+ brcmf_err("Wrong channel for superframe\n");
+ rd->len = 0;
+ return -EINVAL;
+ }
+ if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
+ rd->channel != SDPCM_EVENT_CHANNEL) {
+ brcmf_err("Wrong channel for subframe\n");
+ rd->len = 0;
+ return -EINVAL;
}
rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
- brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
+ brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
rd->len = 0;
- return false;
+ return -ENXIO;
}
if (rd->seq_num != rx_seq) {
- brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
+ brcmf_err("seq %d: sequence number error, expect %d\n",
rx_seq, rd->seq_num);
bus->sdcnt.rx_badseq++;
rd->seq_num = rx_seq;
}
+	/* no need to check the rest for subframe */
+ if (type == BRCMF_SDIO_FT_SUB)
+ return 0;
rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
/* only warm for NON glom packet */
if (rd->channel != SDPCM_GLOM_CHANNEL)
- brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
+ brcmf_err("seq %d: next length error\n", rx_seq);
rd->len_nxtfrm = 0;
}
fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
@@ -1113,12 +1150,12 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
}
tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
- brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
+ brcmf_err("seq %d: max tx seq number error\n", rx_seq);
tx_seq_max = bus->tx_seq + 2;
}
bus->tx_max = tx_seq_max;
- return true;
+ return 0;
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1126,16 +1163,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
u16 dlen, totlen;
u8 *dptr, num = 0;
- u16 sublen, check;
+ u16 sublen;
struct sk_buff *pfirst, *pnext;
int errcode;
- u8 chan, seq, doff, sfdoff;
- u8 txmax;
+ u8 doff, sfdoff;
int ifidx = 0;
bool usechain = bus->use_rxchain;
- u16 next_len;
+
+ struct brcmf_sdio_read rd_new;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
@@ -1149,7 +1186,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
- brcmf_dbg(ERROR, "bad glomd len(%d), ignore descriptor\n",
+ brcmf_err("bad glomd len(%d), ignore descriptor\n",
dlen);
dlen = 0;
}
@@ -1161,13 +1198,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
dptr += sizeof(u16);
if ((sublen < SDPCM_HDRLEN) ||
((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
- brcmf_dbg(ERROR, "descriptor len %d bad: %d\n",
+ brcmf_err("descriptor len %d bad: %d\n",
num, sublen);
pnext = NULL;
break;
}
if (sublen % BRCMF_SDALIGN) {
- brcmf_dbg(ERROR, "sublen %d not multiple of %d\n",
+ brcmf_err("sublen %d not multiple of %d\n",
sublen, BRCMF_SDALIGN);
usechain = false;
}
@@ -1184,7 +1221,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Allocate/chain packet for next subframe */
pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
if (pnext == NULL) {
- brcmf_dbg(ERROR, "bcm_pkt_buf_get_skb failed, num %d len %d\n",
+ brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
break;
}
@@ -1235,6 +1272,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* read directly into the chained packet, or allocate a large
* packet and and copy into the chain.
*/
+ sdio_claim_host(bus->sdiodev->func[1]);
if (usechain) {
errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
@@ -1246,24 +1284,26 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->dataptr, dlen);
sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
- brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
+ brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
dlen, sublen);
errcode = -1;
}
pnext = NULL;
} else {
- brcmf_dbg(ERROR, "COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
+ brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
dlen);
errcode = -1;
}
+ sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
- brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
+ brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
bus->sdiodev->bus_if->dstats.rx_errors++;
+ sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
brcmf_sdbrcm_rxfail(bus, true, true);
} else {
@@ -1272,6 +1312,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
+ sdio_release_host(bus->sdiodev->func[1]);
return 0;
}
@@ -1279,68 +1320,17 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->data, min_t(int, pfirst->len, 48),
"SUPERFRAME:\n");
- /* Validate the superframe header */
- dptr = (u8 *) (pfirst->data);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
-
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
- next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((next_len << 4) > MAX_RX_DATASZ) {
- brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
- next_len, seq);
- next_len = 0;
- }
- bus->cur_read.len = next_len << 4;
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-
- errcode = 0;
- if ((u16)~(sublen ^ check)) {
- brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
- sublen, check);
- errcode = -1;
- } else if (roundup(sublen, bus->blocksize) != dlen) {
- brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
- sublen, roundup(sublen, bus->blocksize),
- dlen);
- errcode = -1;
- } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
- SDPCM_GLOM_CHANNEL) {
- brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
- SDPCM_PACKET_CHANNEL(
- &dptr[SDPCM_FRAMETAG_LEN]));
- errcode = -1;
- } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
- brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) ||
- (doff > (pfirst->len - SDPCM_HDRLEN))) {
- brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
- doff, sublen, pfirst->len, SDPCM_HDRLEN);
- errcode = -1;
- }
-
- /* Check sequence number of superframe SW header */
- if (rxseq != seq) {
- brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
- seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
- txmax, bus->tx_seq);
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
+ rd_new.seq_num = rxseq;
+ rd_new.len = dlen;
+ sdio_claim_host(bus->sdiodev->func[1]);
+ errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
+ BRCMF_SDIO_FT_SUPER);
+ sdio_release_host(bus->sdiodev->func[1]);
+ bus->cur_read.len = rd_new.len_nxtfrm << 4;
/* Remove superframe header, remember offset */
- skb_pull(pfirst, doff);
- sfdoff = doff;
+ skb_pull(pfirst, rd_new.dat_offset);
+ sfdoff = rd_new.dat_offset;
num = 0;
/* Validate all the subframe headers */
@@ -1349,40 +1339,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode)
break;
- dptr = (u8 *) (pnext->data);
- dlen = (u16) (pnext->len);
- sublen = get_unaligned_le16(dptr);
- check = get_unaligned_le16(dptr + sizeof(u16));
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ rd_new.len = pnext->len;
+ rd_new.seq_num = rxseq++;
+ sdio_claim_host(bus->sdiodev->func[1]);
+ errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
+ BRCMF_SDIO_FT_SUB);
+ sdio_release_host(bus->sdiodev->func[1]);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
- dptr, 32, "subframe:\n");
+ pnext->data, 32, "subframe:\n");
- if ((u16)~(sublen ^ check)) {
- brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
- num, sublen, check);
- errcode = -1;
- } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
- brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
- num, sublen, dlen);
- errcode = -1;
- } else if ((chan != SDPCM_DATA_CHANNEL) &&
- (chan != SDPCM_EVENT_CHANNEL)) {
- brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
- num, chan);
- errcode = -1;
- } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
- brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
- num, doff, sublen, SDPCM_HDRLEN);
- errcode = -1;
- }
- /* increase the subframe count */
num++;
}
if (errcode) {
/* Terminate frame on error, request
a couple retries */
+ sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
skb_push(pfirst, sfdoff);
@@ -1393,6 +1365,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
+ sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = 0;
return 0;
}
@@ -1402,27 +1375,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
- chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
- num, pfirst, pfirst->data,
- pfirst->len, sublen, chan, seq);
-
- /* precondition: chan == SDPCM_DATA_CHANNEL ||
- chan == SDPCM_EVENT_CHANNEL */
-
- if (rxseq != seq) {
- brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
- seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
- rxseq++;
-
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
- dptr, dlen, "Rx Subframe Data:\n");
+ dptr, pfirst->len,
+ "Rx Subframe Data:\n");
__skb_trim(pfirst, sublen);
skb_pull(pfirst, doff);
@@ -1433,7 +1390,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
continue;
} else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
&ifidx, pfirst) != 0) {
- brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmf_err("rx protocol error\n");
bus->sdiodev->bus_if->dstats.rx_errors++;
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
@@ -1449,11 +1406,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->prev);
}
/* send any remaining packets up */
- if (bus->glom.qlen) {
- up(&bus->sdsem);
+ if (bus->glom.qlen)
brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
- down(&bus->sdsem);
- }
bus->sdcnt.rxglomframes++;
bus->sdcnt.rxglompkts += bus->glom.qlen;
@@ -1494,21 +1448,24 @@ static void
brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
-
+ u8 *buf = NULL, *rbuf;
int sdret;
brcmf_dbg(TRACE, "Enter\n");
- /* Set rxctl for frame (w/optional alignment) */
- bus->rxctl = bus->rxbuf;
- bus->rxctl += BRCMF_FIRSTREAD;
- pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
+ if (bus->rxblen)
+ buf = vzalloc(bus->rxblen);
+ if (!buf) {
+ brcmf_err("no memory for control frame\n");
+ goto done;
+ }
+ rbuf = bus->rxbuf;
+ pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
if (pad)
- bus->rxctl += (BRCMF_SDALIGN - pad);
- bus->rxctl -= BRCMF_FIRSTREAD;
+ rbuf += (BRCMF_SDALIGN - pad);
/* Copy the already-read portion over */
- memcpy(bus->rxctl, hdr, BRCMF_FIRSTREAD);
+ memcpy(buf, hdr, BRCMF_FIRSTREAD);
if (len <= BRCMF_FIRSTREAD)
goto gotpkt;
@@ -1529,7 +1486,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
/* Drop if the read is too big or it exceeds our maximum */
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
- brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
+ brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
@@ -1537,7 +1494,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
}
if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
- brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
@@ -1545,30 +1502,40 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
goto done;
}
- /* Read remainder of frame body into the rxctl buffer */
+ /* Read remainder of frame body */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
+ F2SYNC, rbuf, rdlen);
bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
if (sdret < 0) {
- brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
+ brcmf_err("read %d control bytes failed: %d\n",
rdlen, sdret);
bus->sdcnt.rxc_errors++;
brcmf_sdbrcm_rxfail(bus, true, true);
goto done;
- }
+ } else
+ memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
gotpkt:
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
- bus->rxctl, len, "RxCtrl:\n");
+ buf, len, "RxCtrl:\n");
/* Point to valid data and indicate its length */
- bus->rxctl += doff;
+ spin_lock_bh(&bus->rxctl_lock);
+ if (bus->rxctl) {
+ brcmf_err("last control frame is being processed.\n");
+ spin_unlock_bh(&bus->rxctl_lock);
+ vfree(buf);
+ goto done;
+ }
+ bus->rxctl = buf + doff;
+ bus->rxctl_orig = buf;
bus->rxlen = len - doff;
+ spin_unlock_bh(&bus->rxctl_lock);
done:
/* Awake any waiters */
@@ -1623,6 +1590,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_left = rd->len;
/* read header first for unknown frame length */
+ sdio_claim_host(bus->sdiodev->func[1]);
if (!rd->len) {
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
@@ -1631,10 +1599,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_FIRSTREAD);
bus->sdcnt.f2rxhdrs++;
if (sdret < 0) {
- brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
+ brcmf_err("RXHEADER FAILED: %d\n",
sdret);
bus->sdcnt.rx_hdrfail++;
brcmf_sdbrcm_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func[1]);
continue;
}
@@ -1642,7 +1611,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
- if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+ if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
+ BRCMF_SDIO_FT_NORMAL)) {
+ sdio_release_host(bus->sdiodev->func[1]);
if (!bus->rxpending)
break;
else
@@ -1658,6 +1629,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_nxtfrm = 0;
/* treat all packets as events if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
+ sdio_release_host(bus->sdiodev->func[1]);
continue;
}
rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1671,10 +1643,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_SDALIGN);
if (!pkt) {
/* Give up on data, request rtx of events */
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
+ brcmf_err("brcmu_pkt_buf_get_skb failed\n");
bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false,
RETRYCHAN(rd->channel));
+ sdio_release_host(bus->sdiodev->func[1]);
continue;
}
skb_pull(pkt, head_read);
@@ -1683,14 +1656,17 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->sdcnt.f2rxdata++;
+ sdio_release_host(bus->sdiodev->func[1]);
if (sdret < 0) {
- brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
+ brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, sdret);
brcmu_pkt_buf_free_skb(pkt);
bus->sdiodev->bus_if->dstats.rx_errors++;
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, true,
RETRYCHAN(rd->channel));
+ sdio_release_host(bus->sdiodev->func[1]);
continue;
}
@@ -1701,20 +1677,24 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
- if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
+ BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
}
bus->sdcnt.rx_readahead_cnt++;
if (rd->len != roundup(rd_new.len, 16)) {
- brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
+ brcmf_err("frame length mismatch:read %d, should be %d\n",
rd->len,
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
brcmf_sdbrcm_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
+ sdio_release_host(bus->sdiodev->func[1]);
rd->len_nxtfrm = rd_new.len_nxtfrm;
rd->channel = rd_new.channel;
rd->dat_offset = rd_new.dat_offset;
@@ -1726,11 +1706,13 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
"RxHdr:\n");
if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
- brcmf_dbg(ERROR, "readahead on control packet %d?\n",
+ brcmf_err("readahead on control packet %d?\n",
rd_new.seq_num);
/* Force retry w/normal header read */
rd->len = 0;
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, false, true);
+ sdio_release_host(bus->sdiodev->func[1]);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
@@ -1751,9 +1733,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
skb_pull(pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
} else {
- brcmf_dbg(ERROR, "%s: glom superframe w/o "
+ brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, false, false);
+ sdio_release_host(bus->sdiodev->func[1]);
}
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@@ -1778,16 +1762,13 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
continue;
} else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
pkt) != 0) {
- brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmf_err("rx protocol error\n");
brcmu_pkt_buf_free_skb(pkt);
bus->sdiodev->bus_if->dstats.rx_errors++;
continue;
}
- /* Unlock during rx call */
- up(&bus->sdsem);
brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
- down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@@ -1805,15 +1786,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
}
static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
-{
- up(&bus->sdsem);
- wait_event_interruptible_timeout(bus->ctrl_wait, !*lockvar, HZ * 2);
- down(&bus->sdsem);
- return;
-}
-
-static void
brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
@@ -1846,7 +1818,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
bus->sdiodev->bus_if->tx_realloc++;
new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
if (!new) {
- brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
+ brcmf_err("couldn't allocate new %d-byte packet\n",
pkt->len + BRCMF_SDALIGN);
ret = -ENOMEM;
goto done;
@@ -1914,6 +1886,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
+ sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->sdcnt.f2txdata++;
@@ -1941,15 +1914,14 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
}
}
+ sdio_release_host(bus->sdiodev->func[1]);
if (ret == 0)
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
- up(&bus->sdsem);
brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
- down(&bus->sdsem);
if (free_pkt)
brcmu_pkt_buf_free_skb(pkt);
@@ -1990,9 +1962,11 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
/* Check device status, signal pending interrupt */
+ sdio_claim_host(bus->sdiodev->func[1]);
ret = r_sdreg32(bus, &intstatus,
offsetof(struct sdpcmd_regs,
intstatus));
+ sdio_release_host(bus->sdiodev->func[1]);
bus->sdcnt.f2txdata++;
if (ret != 0)
break;
@@ -2029,7 +2003,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- down(&bus->sdsem);
+ sdio_claim_host(bus->sdiodev->func[1]);
/* Enable clock for device interrupts */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -2050,7 +2024,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
(saveclk | SBSDIO_FORCE_HT), &err);
}
if (err)
- brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+ brcmf_err("Failed to force clock for F2: err %d\n", err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
@@ -2063,6 +2037,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
/* Turn off the backplane clock (only) */
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+ sdio_release_host(bus->sdiodev->func[1]);
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2073,14 +2048,14 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
brcmf_sdbrcm_free_glom(bus);
/* Clear rx control and wake any waiters */
+ spin_lock_bh(&bus->rxctl_lock);
bus->rxlen = 0;
+ spin_unlock_bh(&bus->rxctl_lock);
brcmf_sdbrcm_dcmd_resp_wake(bus);
/* Reset some F2 state stuff */
bus->rxskip = false;
bus->tx_seq = bus->rx_seq = 0;
-
- up(&bus->sdsem);
}
#ifdef CONFIG_BRCMFMAC_SDIO_OOB
@@ -2164,7 +2139,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- down(&bus->sdsem);
+ sdio_claim_host(bus->sdiodev->func[1]);
/* If waiting for HTAVAIL, check status */
if (bus->clkstate == CLK_PENDING) {
@@ -2175,7 +2150,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
devctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
if (err) {
- brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
+ brcmf_err("error reading DEVCTL: %d\n", err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* DEBUG */
@@ -2184,7 +2159,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
clkctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
- brcmf_dbg(ERROR, "error reading CSR: %d\n",
+ brcmf_err("error reading CSR: %d\n",
err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
@@ -2196,7 +2171,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
devctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
if (err) {
- brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
+ brcmf_err("error reading DEVCTL: %d\n",
err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
@@ -2204,7 +2179,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
devctl, &err);
if (err) {
- brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
+ brcmf_err("error writing DEVCTL: %d\n",
err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
@@ -2218,9 +2193,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Pending interrupt indicates new device status */
if (atomic_read(&bus->ipend) > 0) {
atomic_set(&bus->ipend, 0);
- sdio_claim_host(bus->sdiodev->func[1]);
err = brcmf_sdio_intr_rstatus(bus);
- sdio_release_host(bus->sdiodev->func[1]);
}
/* Start with leftover status bits */
@@ -2249,19 +2222,21 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
intstatus |= brcmf_sdbrcm_hostmail(bus);
}
+ sdio_release_host(bus->sdiodev->func[1]);
+
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
- brcmf_dbg(ERROR, "Dongle reports WR_OOSYNC\n");
+ brcmf_err("Dongle reports WR_OOSYNC\n");
intstatus &= ~I_WR_OOSYNC;
}
if (intstatus & I_RD_OOSYNC) {
- brcmf_dbg(ERROR, "Dongle reports RD_OOSYNC\n");
+ brcmf_err("Dongle reports RD_OOSYNC\n");
intstatus &= ~I_RD_OOSYNC;
}
if (intstatus & I_SBINT) {
- brcmf_dbg(ERROR, "Dongle reports SBINT\n");
+ brcmf_err("Dongle reports SBINT\n");
intstatus &= ~I_SBINT;
}
@@ -2295,6 +2270,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
(bus->clkstate == CLK_AVAIL)) {
int i;
+ sdio_claim_host(bus->sdiodev->func[1]);
err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len);
@@ -2328,6 +2304,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
} else {
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
}
+ sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
brcmf_sdbrcm_wait_event_wakeup(bus);
}
@@ -2342,7 +2319,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
- brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
+ brcmf_err("failed backplane access over SDIO, halting operation\n");
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
atomic_set(&bus->intstatus, 0);
} else if (atomic_read(&bus->intstatus) ||
@@ -2357,10 +2334,10 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
if ((bus->clkstate != CLK_PENDING)
&& bus->idletime == BRCMF_IDLE_IMMEDIATE) {
bus->activity = false;
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
}
-
- up(&bus->sdsem);
}
static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2393,7 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
skb_pull(pkt, SDPCM_HDRLEN);
brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
brcmu_pkt_buf_free_skb(pkt);
- brcmf_dbg(ERROR, "out of bus->txq !!!\n");
+ brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
} else {
ret = 0;
@@ -2443,7 +2420,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
/* Set the backplane window to include the start address */
bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
if (bcmerror) {
- brcmf_dbg(ERROR, "window change failed\n");
+ brcmf_err("window change failed\n");
goto xfer_done;
}
@@ -2455,7 +2432,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
sdaddr, data, dsize);
if (bcmerror) {
- brcmf_dbg(ERROR, "membytes transfer failed\n");
+ brcmf_err("membytes transfer failed\n");
break;
}
@@ -2467,7 +2444,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
address);
if (bcmerror) {
- brcmf_dbg(ERROR, "window change failed\n");
+ brcmf_err("window change failed\n");
break;
}
sdaddr = 0;
@@ -2478,7 +2455,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
xfer_done:
/* Return the window to backplane enumeration space for core access */
if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
- brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
+ brcmf_err("FAILED to set window back to 0x%x\n",
bus->sdiodev->sbwad);
sdio_release_host(bus->sdiodev->func[1]);
@@ -2651,11 +2628,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
/* precondition: IS_ALIGNED((unsigned long)frame, 2) */
- /* Need to lock here to protect txseq and SDIO tx calls */
- down(&bus->sdsem);
-
/* Make sure backplane clock is on */
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ sdio_release_host(bus->sdiodev->func[1]);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
*(__le16 *) frame = cpu_to_le16((u16) msglen);
@@ -2678,7 +2654,9 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
bus->ctrl_frame_buf = frame;
bus->ctrl_frame_len = len;
- brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
+ wait_event_interruptible_timeout(bus->ctrl_wait,
+ !bus->ctrl_frame_stat,
+ msecs_to_jiffies(2000));
if (!bus->ctrl_frame_stat) {
brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
@@ -2697,7 +2675,9 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
frame, min_t(u16, len, 16), "TxHdr:\n");
do {
+ sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_tx_frame(bus, frame, len);
+ sdio_release_host(bus->sdiodev->func[1]);
} while (ret < 0 && retries++ < TXRETRIES);
}
@@ -2707,13 +2687,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
bus->activity = false;
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ sdio_release_host(bus->sdiodev->func[1]);
} else {
spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
- up(&bus->sdsem);
-
if (ret)
bus->sdcnt.tx_ctlerrs++;
else
@@ -2743,8 +2723,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* Read last word in socram to determine
* address of sdpcm_shared structure
*/
+ sdio_claim_host(bus->sdiodev->func[1]);
rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
(u8 *)&addr_le, 4);
+ sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2757,14 +2739,16 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* NVRAM length at the end of memory should have been overwritten.
*/
if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n",
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
addr);
return -EINVAL;
}
/* Read hndrte_shared structure */
+ sdio_claim_host(bus->sdiodev->func[1]);
rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
+ sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2778,8 +2762,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
- brcmf_dbg(ERROR,
- "sdpcm_shared version mismatch: dhd %d dongle %d\n",
+ brcmf_err("sdpcm_shared version mismatch: dhd %d dongle %d\n",
SDPCM_SHARED_VERSION,
sh->flags & SDPCM_SHARED_VERSION_MASK);
return -EPROTO;
@@ -2867,12 +2850,14 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
return 0;
+ sdio_claim_host(bus->sdiodev->func[1]);
error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
+ sdio_release_host(bus->sdiodev->func[1]);
if (nbytes < 0)
return nbytes;
@@ -2918,6 +2903,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
return 0;
}
+ sdio_claim_host(bus->sdiodev->func[1]);
if (sh->assert_file_addr != 0) {
error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
(u8 *)file, 80);
@@ -2930,6 +2916,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
if (error < 0)
return error;
}
+ sdio_release_host(bus->sdiodev->func[1]);
res = scnprintf(buf, sizeof(buf),
"dongle assert: %s:%d: assert(%s)\n",
@@ -2942,9 +2929,7 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
int error;
struct sdpcm_shared sh;
- down(&bus->sdsem);
error = brcmf_sdio_readshared(bus, &sh);
- up(&bus->sdsem);
if (error < 0)
return error;
@@ -2952,10 +2937,10 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
brcmf_dbg(INFO, "firmware not built with -assert\n");
else if (sh.flags & SDPCM_SHARED_ASSERT)
- brcmf_dbg(ERROR, "assertion in dongle\n");
+ brcmf_err("assertion in dongle\n");
if (sh.flags & SDPCM_SHARED_TRAP)
- brcmf_dbg(ERROR, "firmware trap in dongle\n");
+ brcmf_err("firmware trap in dongle\n");
return 0;
}
@@ -2971,7 +2956,6 @@ static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
if (pos != 0)
return 0;
- down(&bus->sdsem);
error = brcmf_sdio_readshared(bus, &sh);
if (error < 0)
goto done;
@@ -2988,7 +2972,6 @@ static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
error += nbytes;
*ppos += error;
done:
- up(&bus->sdsem);
return error;
}
@@ -3039,6 +3022,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
int timeleft;
uint rxlen = 0;
bool pending;
+ u8 *buf;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
@@ -3048,17 +3032,21 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
/* Wait until control frame is available */
timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
- down(&bus->sdsem);
+ spin_lock_bh(&bus->rxctl_lock);
rxlen = bus->rxlen;
memcpy(msg, bus->rxctl, min(msglen, rxlen));
+ bus->rxctl = NULL;
+ buf = bus->rxctl_orig;
+ bus->rxctl_orig = NULL;
bus->rxlen = 0;
- up(&bus->sdsem);
+ spin_unlock_bh(&bus->rxctl_lock);
+ vfree(buf);
if (rxlen) {
brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
rxlen, msglen);
} else if (timeleft == 0) {
- brcmf_dbg(ERROR, "resumed on timeout\n");
+ brcmf_err("resumed on timeout\n");
brcmf_sdbrcm_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
@@ -3109,14 +3097,14 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
nvram_ularray, bus->varsz);
if (bcmerror) {
- brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
+ brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
bcmerror, bus->varsz, varaddr);
}
/* Compare the org NVRAM with the one read from RAM */
if (memcmp(bus->vars, nvram_ularray, bus->varsz))
- brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
+ brcmf_err("Downloaded NVRAM image is corrupted\n");
else
- brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
+ brcmf_err("Download/Upload/Compare of NVRAM ok\n");
kfree(nvram_ularray);
#endif /* DEBUG */
@@ -3174,14 +3162,14 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
}
} else {
if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
- brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
+ brcmf_err("SOCRAM core is down after reset?\n");
bcmerror = -EBADE;
goto fail;
}
bcmerror = brcmf_sdbrcm_write_vars(bus);
if (bcmerror) {
- brcmf_dbg(ERROR, "no vars written to RAM\n");
+ brcmf_err("no vars written to RAM\n");
bcmerror = 0;
}
@@ -3221,7 +3209,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
- brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
+ brcmf_err("Fail to request firmware %d\n", ret);
return ret;
}
bus->fw_ptr = 0;
@@ -3240,7 +3228,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
if (ret) {
- brcmf_dbg(ERROR, "error %d on writing %d membytes at 0x%08x\n",
+ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
ret, MEMBLOCK, offset);
goto err;
}
@@ -3340,7 +3328,7 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
- brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
+ brcmf_err("Fail to request nvram %d\n", ret);
return ret;
}
@@ -3357,23 +3345,23 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
/* Keep arm in reset */
if (brcmf_sdbrcm_download_state(bus, true)) {
- brcmf_dbg(ERROR, "error placing ARM core in reset\n");
+ brcmf_err("error placing ARM core in reset\n");
goto err;
}
/* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
- brcmf_dbg(ERROR, "dongle image file download failed\n");
+ brcmf_err("dongle image file download failed\n");
goto err;
}
/* External nvram takes precedence if specified */
if (brcmf_sdbrcm_download_nvram(bus))
- brcmf_dbg(ERROR, "dongle nvram file download failed\n");
+ brcmf_err("dongle nvram file download failed\n");
/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {
- brcmf_dbg(ERROR, "error getting out of ARM core reset\n");
+ brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@@ -3388,13 +3376,16 @@ brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
- /* Download the firmware */
+ sdio_claim_host(bus->sdiodev->func[1]);
+
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+ sdio_release_host(bus->sdiodev->func[1]);
+
return ret;
}
@@ -3423,7 +3414,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
bus->sdcnt.tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
- down(&bus->sdsem);
+ sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -3438,7 +3429,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
(saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
- brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+ brcmf_err("Failed to force clock for F2: err %d\n", err);
goto exit;
}
@@ -3484,7 +3475,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
if (ret == 0) {
ret = brcmf_sdio_intr_register(bus->sdiodev);
if (ret != 0)
- brcmf_dbg(ERROR, "intr register failed:%d\n", ret);
+ brcmf_err("intr register failed:%d\n", ret);
}
/* If we didn't come up, turn off backplane clock */
@@ -3492,7 +3483,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
- up(&bus->sdsem);
+ sdio_release_host(bus->sdiodev->func[1]);
return ret;
}
@@ -3504,12 +3495,12 @@ void brcmf_sdbrcm_isr(void *arg)
brcmf_dbg(TRACE, "Enter\n");
if (!bus) {
- brcmf_dbg(ERROR, "bus is null pointer, exiting\n");
+ brcmf_err("bus is null pointer, exiting\n");
return;
}
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
- brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
+ brcmf_err("bus is down. we have nothing to do\n");
return;
}
/* Count the interrupt call */
@@ -3518,13 +3509,13 @@ void brcmf_sdbrcm_isr(void *arg)
atomic_set(&bus->ipend, 1);
else
if (brcmf_sdio_intr_rstatus(bus)) {
- brcmf_dbg(ERROR, "failed backplane access\n");
+ brcmf_err("failed backplane access\n");
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* Disable additional interrupts (is this needed now)? */
if (!bus->intr)
- brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
+ brcmf_err("isr w/o interrupt configured!\n");
brcmf_sdbrcm_adddpctsk(bus);
queue_work(bus->brcmf_wq, &bus->datawork);
@@ -3539,8 +3530,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
brcmf_dbg(TIMER, "Enter\n");
- down(&bus->sdsem);
-
/* Poll period: check device if appropriate. */
if (bus->poll && (++bus->polltick >= bus->pollrate)) {
u32 intstatus = 0;
@@ -3557,9 +3546,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
u8 devpend;
spin_unlock_irqrestore(&bus->dpc_tl_lock,
flags);
+ sdio_claim_host(bus->sdiodev->func[1]);
devpend = brcmf_sdio_regrb(bus->sdiodev,
SDIO_CCCR_INTx,
NULL);
+ sdio_release_host(bus->sdiodev->func[1]);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
@@ -3584,16 +3575,18 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
}
#ifdef DEBUG
/* Poll for console output periodically */
- if (bus_if->state == BRCMF_BUS_DATA &&
+ if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
+ sdio_claim_host(bus->sdiodev->func[1]);
/* Make sure backplane clock is on */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
if (brcmf_sdbrcm_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
+ sdio_release_host(bus->sdiodev->func[1]);
}
}
#endif /* DEBUG */
@@ -3606,13 +3599,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->activity = false;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
} else {
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
}
}
}
- up(&bus->sdsem);
-
return (atomic_read(&bus->ipend) > 0);
}
@@ -3707,6 +3700,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
bus->alp_only = true;
+ sdio_claim_host(bus->sdiodev->func[1]);
+
pr_debug("F1 signature read @0x18000000=0x%4x\n",
brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
@@ -3722,18 +3717,18 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
- brcmf_dbg(ERROR, "ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
err, BRCMF_INIT_CLKCTL1, clkctl);
goto fail;
}
if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
- brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
+ brcmf_err("brcmf_sdio_chip_attach failed!\n");
goto fail;
}
if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
- brcmf_dbg(ERROR, "unsupported chip: 0x%04x\n", bus->ci->chip);
+ brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
goto fail;
}
@@ -3743,7 +3738,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
if (!(bus->ramsize)) {
- brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
+ brcmf_err("failed to find SOCRAM memory!\n");
goto fail;
}
@@ -3754,6 +3749,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
+ sdio_release_host(bus->sdiodev->func[1]);
+
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
/* Locate an appropriately-aligned portion of hdrbuf */
@@ -3769,6 +3766,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
return true;
fail:
+ sdio_release_host(bus->sdiodev->func[1]);
return false;
}
@@ -3776,6 +3774,8 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
+ sdio_claim_host(bus->sdiodev->func[1]);
+
/* Disable F2 to clear any intermediate frame state on the dongle */
brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
@@ -3786,6 +3786,8 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
/* Done with backplane-dependent accesses, can drop clock... */
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ sdio_release_host(bus->sdiodev->func[1]);
+
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
bus->idletime = BRCMF_IDLE_INTERVAL;
@@ -3841,8 +3843,10 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
if (bus->ci) {
+ sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ sdio_release_host(bus->sdiodev->func[1]);
brcmf_sdio_chip_detach(&bus->ci);
if (bus->vars && bus->varsz)
kfree(bus->vars);
@@ -3862,7 +3866,8 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
brcmf_sdio_intr_unregister(bus->sdiodev);
cancel_work_sync(&bus->datawork);
- destroy_workqueue(bus->brcmf_wq);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
@@ -3877,6 +3882,14 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
+static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
+ .stop = brcmf_sdbrcm_bus_stop,
+ .init = brcmf_sdbrcm_bus_init,
+ .txdata = brcmf_sdbrcm_bus_txdata,
+ .txctl = brcmf_sdbrcm_bus_txctl,
+ .rxctl = brcmf_sdbrcm_bus_rxctl,
+};
+
void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
@@ -3904,31 +3917,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+ bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
+ if (bus->brcmf_wq == NULL) {
+ brcmf_err("insufficient memory to create txworkqueue\n");
+ goto fail;
+ }
+
/* attempt to attach to the dongle */
if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_attach failed\n");
+ brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
goto fail;
}
+ spin_lock_init(&bus->rxctl_lock);
spin_lock_init(&bus->txqlock);
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
- bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
- if (bus->brcmf_wq == NULL) {
- brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
- goto fail;
- }
- INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
-
/* Set up the watchdog timer */
init_timer(&bus->timer);
bus->timer.data = (unsigned long)bus;
bus->timer.function = brcmf_sdbrcm_watchdog;
- /* Initialize thread based operation and lock */
- sema_init(&bus->sdsem, 1);
-
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
@@ -3942,26 +3953,24 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
spin_lock_init(&bus->dpc_tl_lock);
/* Assign bus interface call back */
- bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
- bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init;
- bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata;
- bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
- bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
+ bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
+ bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+
/* Attach to the brcmf/OS/network interface */
ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
if (ret != 0) {
- brcmf_dbg(ERROR, "brcmf_attach failed\n");
+ brcmf_err("brcmf_attach failed\n");
goto fail;
}
/* Allocate buffers */
if (!(brcmf_sdbrcm_probe_malloc(bus))) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_malloc failed\n");
+ brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
goto fail;
}
if (!(brcmf_sdbrcm_probe_init(bus))) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_init failed\n");
+ brcmf_err("brcmf_sdbrcm_probe_init failed\n");
goto fail;
}
@@ -3991,10 +4000,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* if firmware path present try to download and bring up bus */
ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
- if (ret == -ENOLINK) {
- brcmf_dbg(ERROR, "dongle is not responding\n");
- goto fail;
- }
+ brcmf_err("dongle is not responding\n");
+ goto fail;
}
return bus;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
new file mode 100644
index 000000000000..ba0b22512f12
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/netdevice.h>
+
+#include "brcmu_wifi.h"
+#include "brcmu_utils.h"
+
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "fweh.h"
+#include "fwil.h"
+
+/**
+ * struct brcm_ethhdr - broadcom specific ether header.
+ *
+ * @subtype: subtype for this packet.
+ * @length: TODO: length of appended data.
+ * @version: version indication.
+ * @oui: OUI of this packet.
+ * @usr_subtype: subtype for this OUI.
+ */
+struct brcm_ethhdr {
+ __be16 subtype;
+ __be16 length;
+ u8 version;
+ u8 oui[3];
+ __be16 usr_subtype;
+} __packed;
+
+struct brcmf_event_msg_be {
+ __be16 version;
+ __be16 flags;
+ __be32 event_type;
+ __be32 status;
+ __be32 reason;
+ __be32 auth_type;
+ __be32 datalen;
+ u8 addr[ETH_ALEN];
+ char ifname[IFNAMSIZ];
+ u8 ifidx;
+ u8 bsscfgidx;
+} __packed;
+
+/**
+ * struct brcmf_event - contents of broadcom event packet.
+ *
+ * @eth: standard ether header.
+ * @hdr: broadcom specific ether header.
+ * @msg: common part of the actual event message.
+ */
+struct brcmf_event {
+ struct ethhdr eth;
+ struct brcm_ethhdr hdr;
+ struct brcmf_event_msg_be msg;
+} __packed;
+
+/**
+ * struct brcmf_fweh_queue_item - event item on event queue.
+ *
+ * @q: list element for queuing.
+ * @code: event code.
+ * @ifidx: interface index related to this event.
+ * @ifaddr: ethernet address for interface.
+ * @emsg: common parameters of the firmware event message.
+ * @data: event specific data part of the firmware event.
+ */
+struct brcmf_fweh_queue_item {
+ struct list_head q;
+ enum brcmf_fweh_event_code code;
+ u8 ifidx;
+ u8 ifaddr[ETH_ALEN];
+ struct brcmf_event_msg_be emsg;
+ u8 data[0];
+};
+
+/**
+ * struct brcmf_fweh_event_name - code, name mapping entry.
+ */
+struct brcmf_fweh_event_name {
+ enum brcmf_fweh_event_code code;
+ const char *name;
+};
+
+#ifdef DEBUG
+#define BRCMF_ENUM_DEF(id, val) \
+ { val, #id },
+
+/* array for mapping code to event name */
+static struct brcmf_fweh_event_name fweh_event_names[] = {
+ BRCMF_FWEH_EVENT_ENUM_DEFLIST
+};
+#undef BRCMF_ENUM_DEF
+
+/**
+ * brcmf_fweh_event_name() - returns name for given event code.
+ *
+ * @code: code to lookup.
+ */
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) {
+ if (fweh_event_names[i].code == code)
+ return fweh_event_names[i].name;
+ }
+ return "unknown";
+}
+#else
+static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
+{
+ return "nodebug";
+}
+#endif
+
+/**
+ * brcmf_fweh_queue_event() - create and queue event.
+ *
+ * @fweh: firmware event handling info.
+ * @event: event queue entry.
+ */
+static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
+ struct brcmf_fweh_queue_item *event)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&fweh->evt_q_lock, flags);
+ list_add_tail(&event->q, &fweh->event_q);
+ spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+ schedule_work(&fweh->event_work);
+}
+
+static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
+ enum brcmf_fweh_event_code code,
+ struct brcmf_event_msg *emsg,
+ void *data)
+{
+ struct brcmf_fweh_info *fweh;
+ int err = -EINVAL;
+
+ if (ifp) {
+ fweh = &ifp->drvr->fweh;
+
+ /* handle the event if valid interface and handler */
+ if (ifp->ndev && fweh->evt_handler[code])
+ err = fweh->evt_handler[code](ifp, emsg, data);
+ else
+ brcmf_err("unhandled event %d ignored\n", code);
+ } else {
+ brcmf_err("no interface object\n");
+ }
+ return err;
+}
+
+/**
+ * brcmf_fweh_handle_if_event() - handle IF event.
+ *
+ * @drvr: driver information object.
+ * @item: queue entry.
+ * @ifpp: interface object (may change upon ADD action).
+ */
+static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
+ struct brcmf_event_msg *emsg,
+ void *data)
+{
+ struct brcmf_if_event *ifevent = data;
+ struct brcmf_if *ifp;
+ int err = 0;
+
+ brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u\n",
+ ifevent->action, ifevent->ifidx,
+ ifevent->bssidx, ifevent->flags);
+
+ if (ifevent->ifidx >= BRCMF_MAX_IFS) {
+ brcmf_err("invalid interface index: %u\n",
+ ifevent->ifidx);
+ return;
+ }
+
+ ifp = drvr->iflist[ifevent->ifidx];
+
+ if (ifevent->action == BRCMF_E_IF_ADD) {
+ brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
+ emsg->addr);
+ ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+ emsg->ifname, emsg->addr);
+ if (IS_ERR(ifp))
+ return;
+
+ if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+ err = brcmf_net_attach(ifp);
+ }
+
+ err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
+
+ if (ifevent->action == BRCMF_E_IF_DEL)
+ brcmf_del_if(drvr, ifevent->ifidx);
+}
+
+/**
+ * brcmf_fweh_dequeue_event() - get event from the queue.
+ *
+ * @fweh: firmware event handling info.
+ */
+static struct brcmf_fweh_queue_item *
+brcmf_fweh_dequeue_event(struct brcmf_fweh_info *fweh)
+{
+ struct brcmf_fweh_queue_item *event = NULL;
+ ulong flags;
+
+ spin_lock_irqsave(&fweh->evt_q_lock, flags);
+ if (!list_empty(&fweh->event_q)) {
+ event = list_first_entry(&fweh->event_q,
+ struct brcmf_fweh_queue_item, q);
+ list_del(&event->q);
+ }
+ spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
+
+ return event;
+}
+
+/**
+ * brcmf_fweh_event_worker() - firmware event worker.
+ *
+ * @work: worker object.
+ */
+static void brcmf_fweh_event_worker(struct work_struct *work)
+{
+ struct brcmf_pub *drvr;
+ struct brcmf_if *ifp;
+ struct brcmf_fweh_info *fweh;
+ struct brcmf_fweh_queue_item *event;
+ int err = 0;
+ struct brcmf_event_msg_be *emsg_be;
+ struct brcmf_event_msg emsg;
+
+ fweh = container_of(work, struct brcmf_fweh_info, event_work);
+ drvr = container_of(fweh, struct brcmf_pub, fweh);
+
+ while ((event = brcmf_fweh_dequeue_event(fweh))) {
+ ifp = drvr->iflist[event->ifidx];
+
+ brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
+ brcmf_fweh_event_name(event->code), event->code,
+ event->emsg.ifidx, event->emsg.bsscfgidx,
+ event->emsg.addr);
+
+ /* convert event message */
+ emsg_be = &event->emsg;
+ emsg.version = be16_to_cpu(emsg_be->version);
+ emsg.flags = be16_to_cpu(emsg_be->flags);
+ emsg.event_code = event->code;
+ emsg.status = be32_to_cpu(emsg_be->status);
+ emsg.reason = be32_to_cpu(emsg_be->reason);
+ emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
+ emsg.datalen = be32_to_cpu(emsg_be->datalen);
+ memcpy(emsg.addr, emsg_be->addr, ETH_ALEN);
+ memcpy(emsg.ifname, emsg_be->ifname, sizeof(emsg.ifname));
+ emsg.ifidx = emsg_be->ifidx;
+ emsg.bsscfgidx = emsg_be->bsscfgidx;
+
+ brcmf_dbg(EVENT, " version %u flags %u status %u reason %u\n",
+ emsg.version, emsg.flags, emsg.status, emsg.reason);
+ brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
+ min_t(u32, emsg.datalen, 64),
+ "event payload, len=%d\n", emsg.datalen);
+
+ /* special handling of interface event */
+ if (event->code == BRCMF_E_IF) {
+ brcmf_fweh_handle_if_event(drvr, &emsg, event->data);
+ goto event_free;
+ }
+
+ err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
+ event->data);
+ if (err) {
+ brcmf_err("event handler failed (%d)\n",
+ event->code);
+ err = 0;
+ }
+event_free:
+ kfree(event);
+ }
+}
+
+/**
+ * brcmf_fweh_attach() - initialize firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_attach(struct brcmf_pub *drvr)
+{
+ struct brcmf_fweh_info *fweh = &drvr->fweh;
+ INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
+ spin_lock_init(&fweh->evt_q_lock);
+ INIT_LIST_HEAD(&fweh->event_q);
+}
+
+/**
+ * brcmf_fweh_detach() - cleanup firmware event handling.
+ *
+ * @drvr: driver information object.
+ */
+void brcmf_fweh_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_if *ifp = drvr->iflist[0];
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+ if (ifp) {
+ /* clear all events */
+ memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
+ (void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
+ eventmask,
+ BRCMF_EVENTING_MASK_LEN);
+ }
+ /* cancel the worker */
+ cancel_work_sync(&fweh->event_work);
+ WARN_ON(!list_empty(&fweh->event_q));
+ memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+}
+
+/**
+ * brcmf_fweh_register() - register handler for given event code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ * @handler: handler for the given event code.
+ */
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+ brcmf_fweh_handler_t handler)
+{
+ if (drvr->fweh.evt_handler[code]) {
+ brcmf_err("event code %d already registered\n", code);
+ return -ENOSPC;
+ }
+ drvr->fweh.evt_handler[code] = handler;
+ brcmf_dbg(TRACE, "event handler registered for %s\n",
+ brcmf_fweh_event_name(code));
+ return 0;
+}
+
+/**
+ * brcmf_fweh_unregister() - remove handler for given code.
+ *
+ * @drvr: driver information object.
+ * @code: event code.
+ */
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+ enum brcmf_fweh_event_code code)
+{
+ brcmf_dbg(TRACE, "event handler cleared for %s\n",
+ brcmf_fweh_event_name(code));
+ drvr->fweh.evt_handler[code] = NULL;
+}
+
+/**
+ * brcmf_fweh_activate_events() - enable registered firmware events.
+ *
+ * @ifp: primary interface object.
+ */
+int brcmf_fweh_activate_events(struct brcmf_if *ifp)
+{
+ int i, err;
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+
+ for (i = 0; i < BRCMF_E_LAST; i++) {
+ if (ifp->drvr->fweh.evt_handler[i]) {
+ brcmf_dbg(EVENT, "enable event %s\n",
+ brcmf_fweh_event_name(i));
+ setbit(eventmask, i);
+ }
+ }
+
+ /* want to handle IF event as well */
+ brcmf_dbg(EVENT, "enable event IF\n");
+ setbit(eventmask, BRCMF_E_IF);
+
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
+ eventmask, BRCMF_EVENTING_MASK_LEN);
+ if (err)
+ brcmf_err("Set event_msgs error (%d)\n", err);
+
+ return err;
+}
+
+/**
+ * brcmf_fweh_process_event() - process skb as firmware event.
+ *
+ * @drvr: driver information object.
+ * @event_packet: event packet to process.
+ * @ifidx: index of the firmware interface (may change).
+ *
+ * If the packet buffer contains a firmware event message, the event is
+ * dispatched to a registered handler (using a worker).
+ */
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+ struct brcmf_event *event_packet, u8 *ifidx)
+{
+ enum brcmf_fweh_event_code code;
+ struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_queue_item *event;
+ gfp_t alloc_flag = GFP_KERNEL;
+ void *data;
+ u32 datalen;
+
+ /* get event info */
+ code = get_unaligned_be32(&event_packet->msg.event_type);
+ datalen = get_unaligned_be32(&event_packet->msg.datalen);
+ *ifidx = event_packet->msg.ifidx;
+ data = &event_packet[1];
+
+ if (code >= BRCMF_E_LAST)
+ return;
+
+ if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ return;
+
+ if (in_interrupt())
+ alloc_flag = GFP_ATOMIC;
+
+ event = kzalloc(sizeof(*event) + datalen, alloc_flag);
+ if (!event)
+ return;
+
+ event->code = code;
+ event->ifidx = *ifidx;
+
+ /* use memcpy to get aligned event message */
+ memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
+ memcpy(event->data, data, datalen);
+ memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
+
+ brcmf_fweh_queue_event(fweh, event);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
new file mode 100644
index 000000000000..36901f76a3b5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWEH_H_
+#define FWEH_H_
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+
+/* forward declarations */
+struct brcmf_pub;
+struct brcmf_if;
+struct brcmf_cfg80211_info;
+struct brcmf_event;
+
+/* list of firmware events */
+#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
+ BRCMF_ENUM_DEF(SET_SSID, 0) \
+ BRCMF_ENUM_DEF(JOIN, 1) \
+ BRCMF_ENUM_DEF(START, 2) \
+ BRCMF_ENUM_DEF(AUTH, 3) \
+ BRCMF_ENUM_DEF(AUTH_IND, 4) \
+ BRCMF_ENUM_DEF(DEAUTH, 5) \
+ BRCMF_ENUM_DEF(DEAUTH_IND, 6) \
+ BRCMF_ENUM_DEF(ASSOC, 7) \
+ BRCMF_ENUM_DEF(ASSOC_IND, 8) \
+ BRCMF_ENUM_DEF(REASSOC, 9) \
+ BRCMF_ENUM_DEF(REASSOC_IND, 10) \
+ BRCMF_ENUM_DEF(DISASSOC, 11) \
+ BRCMF_ENUM_DEF(DISASSOC_IND, 12) \
+ BRCMF_ENUM_DEF(QUIET_START, 13) \
+ BRCMF_ENUM_DEF(QUIET_END, 14) \
+ BRCMF_ENUM_DEF(BEACON_RX, 15) \
+ BRCMF_ENUM_DEF(LINK, 16) \
+ BRCMF_ENUM_DEF(MIC_ERROR, 17) \
+ BRCMF_ENUM_DEF(NDIS_LINK, 18) \
+ BRCMF_ENUM_DEF(ROAM, 19) \
+ BRCMF_ENUM_DEF(TXFAIL, 20) \
+ BRCMF_ENUM_DEF(PMKID_CACHE, 21) \
+ BRCMF_ENUM_DEF(RETROGRADE_TSF, 22) \
+ BRCMF_ENUM_DEF(PRUNE, 23) \
+ BRCMF_ENUM_DEF(AUTOAUTH, 24) \
+ BRCMF_ENUM_DEF(EAPOL_MSG, 25) \
+ BRCMF_ENUM_DEF(SCAN_COMPLETE, 26) \
+ BRCMF_ENUM_DEF(ADDTS_IND, 27) \
+ BRCMF_ENUM_DEF(DELTS_IND, 28) \
+ BRCMF_ENUM_DEF(BCNSENT_IND, 29) \
+ BRCMF_ENUM_DEF(BCNRX_MSG, 30) \
+ BRCMF_ENUM_DEF(BCNLOST_MSG, 31) \
+ BRCMF_ENUM_DEF(ROAM_PREP, 32) \
+ BRCMF_ENUM_DEF(PFN_NET_FOUND, 33) \
+ BRCMF_ENUM_DEF(PFN_NET_LOST, 34) \
+ BRCMF_ENUM_DEF(RESET_COMPLETE, 35) \
+ BRCMF_ENUM_DEF(JOIN_START, 36) \
+ BRCMF_ENUM_DEF(ROAM_START, 37) \
+ BRCMF_ENUM_DEF(ASSOC_START, 38) \
+ BRCMF_ENUM_DEF(IBSS_ASSOC, 39) \
+ BRCMF_ENUM_DEF(RADIO, 40) \
+ BRCMF_ENUM_DEF(PSM_WATCHDOG, 41) \
+ BRCMF_ENUM_DEF(PROBREQ_MSG, 44) \
+ BRCMF_ENUM_DEF(SCAN_CONFIRM_IND, 45) \
+ BRCMF_ENUM_DEF(PSK_SUP, 46) \
+ BRCMF_ENUM_DEF(COUNTRY_CODE_CHANGED, 47) \
+ BRCMF_ENUM_DEF(EXCEEDED_MEDIUM_TIME, 48) \
+ BRCMF_ENUM_DEF(ICV_ERROR, 49) \
+ BRCMF_ENUM_DEF(UNICAST_DECODE_ERROR, 50) \
+ BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
+ BRCMF_ENUM_DEF(TRACE, 52) \
+ BRCMF_ENUM_DEF(IF, 54) \
+ BRCMF_ENUM_DEF(RSSI, 56) \
+ BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
+ BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
+ BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
+ BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
+ BRCMF_ENUM_DEF(PRE_ASSOC_IND, 61) \
+ BRCMF_ENUM_DEF(PRE_REASSOC_IND, 62) \
+ BRCMF_ENUM_DEF(CHANNEL_ADOPTED, 63) \
+ BRCMF_ENUM_DEF(AP_STARTED, 64) \
+ BRCMF_ENUM_DEF(DFS_AP_STOP, 65) \
+ BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
+ BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
+ BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+ BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
+ BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
+
+#define BRCMF_ENUM_DEF(id, val) \
+ BRCMF_E_##id = (val),
+
+/* firmware event codes sent by the dongle */
+enum brcmf_fweh_event_code {
+ BRCMF_FWEH_EVENT_ENUM_DEFLIST
+ BRCMF_E_LAST
+};
+#undef BRCMF_ENUM_DEF
+
+/* flags field values in struct brcmf_event_msg */
+#define BRCMF_EVENT_MSG_LINK 0x01
+#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
+#define BRCMF_EVENT_MSG_GROUP 0x04
+
+/**
+ * definitions for event packet validation.
+ */
+#define BRCMF_EVENT_OUI_OFFSET 19
+#define BRCM_OUI "\x00\x10\x18"
+#define DOT11_OUI_LEN 3
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+
+
+/**
+ * struct brcmf_event_msg - firmware event message.
+ *
+ * @version: version information.
+ * @flags: event flags.
+ * @event_code: firmware event code.
+ * @status: status information.
+ * @reason: reason code.
+ * @auth_type: authentication type.
+ * @datalen: length of event data buffer.
+ * @addr: ether address.
+ * @ifname: interface name.
+ * @ifidx: interface index.
+ * @bsscfgidx: bsscfg index.
+ */
+struct brcmf_event_msg {
+ u16 version;
+ u16 flags;
+ u32 event_code;
+ u32 status;
+ u32 reason;
+ s32 auth_type;
+ u32 datalen;
+ u8 addr[ETH_ALEN];
+ char ifname[IFNAMSIZ];
+ u8 ifidx;
+ u8 bsscfgidx;
+};
+
+typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *evtmsg,
+ void *data);
+
+/**
+ * struct brcmf_fweh_info - firmware event handling information.
+ *
+ * @event_work: event worker.
+ * @evt_q_lock: lock for event queue protection.
+ * @event_q: event queue.
+ * @evt_handler: registered event handlers.
+ */
+struct brcmf_fweh_info {
+ struct work_struct event_work;
+ spinlock_t evt_q_lock;
+ struct list_head event_q;
+ int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *evtmsg,
+ void *data);
+};
+
+void brcmf_fweh_attach(struct brcmf_pub *drvr);
+void brcmf_fweh_detach(struct brcmf_pub *drvr);
+int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
+ int (*handler)(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *evtmsg,
+ void *data));
+void brcmf_fweh_unregister(struct brcmf_pub *drvr,
+ enum brcmf_fweh_event_code code);
+int brcmf_fweh_activate_events(struct brcmf_if *ifp);
+void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+ struct brcmf_event *event_packet, u8 *ifidx);
+
+static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
+ struct sk_buff *skb, u8 *ifidx)
+{
+ struct brcmf_event *event_packet;
+ u8 *data;
+ u16 usr_stype;
+
+ /* only process events when protocol matches */
+ if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
+ return;
+
+ /* check for BRCM oui match */
+ event_packet = (struct brcmf_event *)skb_mac_header(skb);
+ data = (u8 *)event_packet;
+ data += BRCMF_EVENT_OUI_OFFSET;
+ if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN))
+ return;
+
+ /* final match on usr_subtype */
+ data += DOT11_OUI_LEN;
+ usr_stype = get_unaligned_be16(data);
+ if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
+ return;
+
+ brcmf_fweh_process_event(drvr, event_packet, ifidx);
+}
+
+#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
new file mode 100644
index 000000000000..d8d8b6549dc5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* FWIL is the Firmware Interface Layer. This module contains the support
+ * functions used to set variables in and get variables from the firmware.
+ */
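+
+/* Typical usage, as seen in the callers converted later in this patch:
+ * the per-interface brcmf_if is passed together with either a numeric
+ * dongle command or a named iovar, for example
+ *
+ *   brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
+ *   brcmf_fil_iovar_int_set(ifp, "mpc", 1);
+ *
+ * All helpers serialize firmware access through drvr->proto_block.
+ */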
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "fwil.h"
+
+
+#define MAX_HEX_DUMP_LEN 64
+
+
+static s32
+brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+
+ if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+ brcmf_err("bus is down. we have nothing to do.\n");
+ return -EIO;
+ }
+
+ if (data != NULL)
+ len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
+ if (set)
+ err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+ else
+ err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+ len);
+
+ if (err >= 0)
+ err = 0;
+ else
+ brcmf_err("Failed err=%d\n", err);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+ s32 err;
+
+ mutex_lock(&ifp->drvr->proto_block);
+
+ brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
+{
+ s32 err;
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
+
+ brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+
+s32
+brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
+ mutex_unlock(&ifp->drvr->proto_block);
+
+ return err;
+}
+
+s32
+brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ mutex_lock(&ifp->drvr->proto_block);
+ err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
+ mutex_unlock(&ifp->drvr->proto_block);
+ *data = le32_to_cpu(data_le);
+
+ return err;
+}
+
+static u32
+brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+{
+ u32 len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ memcpy(buf, name, len);
+
+ /* append data onto the end of the name string */
+ if (data && datalen)
+ memcpy(&buf[len], data, datalen);
+
+ return len + datalen;
+}
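+
+/* The buffer produced above is simply the NUL-terminated iovar name with
+ * the parameter data appended, e.g. setting the 32-bit "mpc" iovar yields
+ *
+ *   'm' 'p' 'c' '\0' <le32 value>
+ *
+ * which is then handed to the BRCMF_C_SET_VAR/BRCMF_C_GET_VAR commands.
+ */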
+
+
+s32
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+ sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+ buflen, true);
+ } else {
+ err = -EPERM;
+ brcmf_err("Creating iovar failed\n");
+ }
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
+ sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+ buflen, false);
+ if (err == 0)
+ memcpy(data, drvr->proto_buf, len);
+ } else {
+ err = -EPERM;
+ brcmf_err("Creating iovar failed\n");
+ }
+
+ brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+
+s32
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+static u32
+brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
+ u32 buflen)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+ __le32 bssidx_le;
+
+ if (bssidx == 0)
+ return brcmf_create_iovar(name, data, datalen, buf, buflen);
+
+ prefixlen = strlen(prefix);
+ namelen = strlen(name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+ if (buflen < iolen) {
+ brcmf_err("buffer is too short\n");
+ return 0;
+ }
+
+ p = buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, name, namelen);
+ p += namelen;
+
+ /* bss config index as first data */
+ bssidx_le = cpu_to_le32(bssidx);
+ memcpy(p, &bssidx_le, sizeof(bssidx_le));
+ p += sizeof(bssidx_le);
+
+ /* parameter buffer follows */
+ if (datalen)
+ memcpy(p, data, datalen);
+
+ return iolen;
+}
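+
+/* For bssidx != 0 the buffer assembled above therefore looks like
+ *
+ *   "bsscfg:" <name> '\0' <le32 bssidx> <data>
+ *
+ * i.e. the prefix without its terminator, the iovar name including its
+ * terminator, the bss configuration index as the first parameter and the
+ * caller's data last.
+ */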
+
+s32
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+ void *data, u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+ drvr->proto_buf, sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
+ buflen, true);
+ } else {
+ err = -EPERM;
+ brcmf_err("Creating bsscfg failed\n");
+ }
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+ void *data, u32 len)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ s32 err;
+ u32 buflen;
+
+ mutex_lock(&drvr->proto_block);
+
+ buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
+ drvr->proto_buf, sizeof(drvr->proto_buf));
+ if (buflen) {
+ err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
+ buflen, false);
+ if (err == 0)
+ memcpy(data, drvr->proto_buf, len);
+ } else {
+ err = -EPERM;
+ brcmf_err("Creating bsscfg failed\n");
+ }
+ brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
+ brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
+ min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
+
+ mutex_unlock(&drvr->proto_block);
+ return err;
+}
+
+s32
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+
+s32
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
new file mode 100644
index 000000000000..16eb8202fb1e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _fwil_h_
+#define _fwil_h_
+
+s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+ u32 len);
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+
+#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 9434440bbc65..b1bb46c49799 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -186,7 +186,7 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
CORE_SB(base, sbtmstatehigh),
NULL);
if (regdata & SSB_TMSHIGH_BUSY)
- brcmf_dbg(ERROR, "core state still busy\n");
+ brcmf_err("core state still busy\n");
regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
NULL);
@@ -438,7 +438,7 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->ramsize = 0x80000;
break;
default:
- brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ brcmf_err("chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
}
@@ -456,7 +456,7 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->resetcore = brcmf_sdio_ai_resetcore;
break;
default:
- brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+ brcmf_err("socitype %u not supported\n", ci->socitype);
return -ENODEV;
}
@@ -473,7 +473,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
- brcmf_dbg(ERROR, "error writing for HT off\n");
+ brcmf_err("error writing for HT off\n");
return err;
}
@@ -483,7 +483,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
- brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
clkset, clkval);
return -EACCES;
}
@@ -493,7 +493,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
- brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
clkval);
return -EBUSY;
}
@@ -618,7 +618,7 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
str_shift = 11;
break;
default:
- brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
brcmf_sdio_chip_name(ci->chip, chn, 8),
ci->chiprev, ci->pmurev);
break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 7a6dfdc67b6c..914c56fe6c5f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -14,24 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
-#include <net/cfg80211.h>
-#include <defs.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <dhd_bus.h>
@@ -42,14 +30,11 @@
#define IOCTL_RESP_TIMEOUT 2000
-#define BRCMF_USB_SYNC_TIMEOUT 300 /* ms */
-#define BRCMF_USB_DLIMAGE_SPINWAIT 100 /* in unit of ms */
-#define BRCMF_USB_DLIMAGE_LIMIT 500 /* spinwait limit (ms) */
+#define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
+#define BRCMF_USB_RESET_GETVER_LOOP_CNT 10
#define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle
has boot up */
-#define BRCMF_USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */
-
#define BRCMF_USB_NRXQ 50
#define BRCMF_USB_NTXQ 50
@@ -70,16 +55,6 @@
#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
-enum usbdev_suspend_state {
- USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
- suspend */
- USBOS_SUSPEND_STATE_SUSPEND_PENDING, /* Device is idle, can be
- * suspended. Wating PM to
- * suspend the device
- */
- USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
-};
-
struct brcmf_usb_image {
struct list_head list;
s8 *fwname;
@@ -100,10 +75,8 @@ struct brcmf_usbdev_info {
struct list_head rx_postq;
struct list_head tx_freeq;
struct list_head tx_postq;
- enum usbdev_suspend_state suspend_state;
uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
- bool activity;
int rx_low_watermark;
int tx_low_watermark;
int tx_high_watermark;
@@ -116,10 +89,6 @@ struct brcmf_usbdev_info {
u8 *image; /* buffer for combine fw and nvram */
int image_len;
- wait_queue_head_t wait;
- bool waitdone;
- int sync_urb_status;
-
struct usb_device *usbdev;
struct device *dev;
@@ -131,7 +100,6 @@ struct brcmf_usbdev_info {
int ctl_urb_status;
int ctl_completed;
wait_queue_head_t ioctl_resp_wait;
- wait_queue_head_t ctrl_wait;
ulong ctl_op;
struct urb *bulk_urb; /* used for FW download */
@@ -176,6 +144,7 @@ static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
static void
brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
{
+ brcmf_dbg(USB, "Enter, status=%d\n", status);
if (unlikely(devinfo == NULL))
return;
@@ -203,6 +172,7 @@ brcmf_usb_ctlread_complete(struct urb *urb)
struct brcmf_usbdev_info *devinfo =
(struct brcmf_usbdev_info *)urb->context;
+ brcmf_dbg(USB, "Enter\n");
devinfo->ctl_urb_actual_length = urb->actual_length;
brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
urb->status);
@@ -214,33 +184,22 @@ brcmf_usb_ctlwrite_complete(struct urb *urb)
struct brcmf_usbdev_info *devinfo =
(struct brcmf_usbdev_info *)urb->context;
+ brcmf_dbg(USB, "Enter\n");
brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
urb->status);
}
-static int brcmf_usb_pnp(struct brcmf_usbdev_info *devinfo, uint state)
-{
- return 0;
-}
-
static int
brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
{
int ret;
u16 size;
+ brcmf_dbg(USB, "Enter\n");
if (devinfo == NULL || buf == NULL ||
len == 0 || devinfo->ctl_urb == NULL)
return -EINVAL;
- /* If the USB/HSIC bus in sleep state, wake it up */
- if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED)
- if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
- brcmf_dbg(ERROR, "Could not Resume the bus!\n");
- return -EIO;
- }
-
- devinfo->activity = true;
size = len;
devinfo->ctl_write.wLength = cpu_to_le16p(&size);
devinfo->ctl_urb->transfer_buffer_length = size;
@@ -257,7 +216,7 @@ brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0)
- brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ brcmf_err("usb_submit_urb failed %d\n", ret);
return ret;
}
@@ -268,6 +227,7 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
int ret;
u16 size;
+ brcmf_dbg(USB, "Enter\n");
if ((devinfo == NULL) || (buf == NULL) || (len == 0)
|| (devinfo->ctl_urb == NULL))
return -EINVAL;
@@ -290,7 +250,7 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0)
- brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ brcmf_err("usb_submit_urb failed %d\n", ret);
return ret;
}
@@ -301,10 +261,9 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
- if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
- /* TODO: handle suspend/resume */
+ brcmf_dbg(USB, "Enter\n");
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
return -EIO;
- }
if (test_and_set_bit(0, &devinfo->ctl_op))
return -EIO;
@@ -312,14 +271,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
devinfo->ctl_completed = false;
err = brcmf_usb_send_ctl(devinfo, buf, len);
if (err) {
- brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
return err;
}
timeout = brcmf_usb_ioctl_resp_wait(devinfo);
clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
- brcmf_dbg(ERROR, "Txctl wait timed out\n");
+ brcmf_err("Txctl wait timed out\n");
err = -EIO;
}
return err;
@@ -331,17 +290,17 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
- if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
- /* TODO: handle suspend/resume */
+ brcmf_dbg(USB, "Enter\n");
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
return -EIO;
- }
+
if (test_and_set_bit(0, &devinfo->ctl_op))
return -EIO;
devinfo->ctl_completed = false;
err = brcmf_usb_recv_ctl(devinfo, buf, len);
if (err) {
- brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
return err;
}
@@ -349,7 +308,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
err = devinfo->ctl_urb_status;
clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
- brcmf_dbg(ERROR, "rxctl wait timed out\n");
+ brcmf_err("rxctl wait timed out\n");
err = -EIO;
}
if (!err)
@@ -397,7 +356,7 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
if (reqs == NULL) {
- brcmf_dbg(ERROR, "fail to allocate memory!\n");
+ brcmf_err("fail to allocate memory!\n");
return NULL;
}
req = reqs;
@@ -413,7 +372,7 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
}
return reqs;
fail:
- brcmf_dbg(ERROR, "fail!\n");
+ brcmf_err("fail!\n");
while (!list_empty(q)) {
req = list_entry(q->next, struct brcmf_usbreq, list);
if (req && req->urb)
@@ -430,7 +389,7 @@ static void brcmf_usb_free_q(struct list_head *q, bool pending)
int i = 0;
list_for_each_entry_safe(req, next, q, list) {
if (!req->urb) {
- brcmf_dbg(ERROR, "bad req\n");
+ brcmf_err("bad req\n");
break;
}
i++;
@@ -459,6 +418,8 @@ static void brcmf_usb_tx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
+ brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
+ req->skb);
brcmf_usb_del_fromq(devinfo, req);
if (urb->status == 0)
devinfo->bus_pub.bus->dstats.tx_packets++;
@@ -484,6 +445,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct sk_buff *skb;
int ifidx = 0;
+ brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
skb = req->skb;
req->skb = NULL;
@@ -497,10 +459,10 @@ static void brcmf_usb_rx_complete(struct urb *urb)
return;
}
- if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) {
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
skb_put(skb, urb->actual_length);
if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
- brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmf_err("rx protocol error\n");
brcmu_pkt_buf_free_skb(skb);
devinfo->bus_pub.bus->dstats.rx_errors++;
} else
@@ -550,8 +512,8 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
{
struct brcmf_usbreq *req;
- if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
- brcmf_dbg(ERROR, "bus is not up\n");
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ brcmf_err("bus is not up=%d\n", devinfo->bus_pub.state);
return;
}
while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
@@ -564,29 +526,24 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
int old_state;
+ brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
+ devinfo->bus_pub.state, state);
if (devinfo->bus_pub.state == state)
return;
old_state = devinfo->bus_pub.state;
- brcmf_dbg(TRACE, "dbus state change from %d to to %d\n",
- old_state, state);
-
- /* Don't update state if it's PnP firmware re-download */
- if (state != BCMFMAC_USB_STATE_PNP_FWDL) /* TODO */
- devinfo->bus_pub.state = state;
-
- if ((old_state == BCMFMAC_USB_STATE_SLEEP)
- && (state == BCMFMAC_USB_STATE_UP)) {
- brcmf_usb_rx_fill_all(devinfo);
- }
+ devinfo->bus_pub.state = state;
/* update state of upper layer */
- if (state == BCMFMAC_USB_STATE_DOWN) {
- brcmf_dbg(INFO, "DBUS is down\n");
+ if (state == BRCMFMAC_USB_STATE_DOWN) {
+ brcmf_dbg(USB, "DBUS is down\n");
bcmf_bus->state = BRCMF_BUS_DOWN;
+ } else if (state == BRCMFMAC_USB_STATE_UP) {
+ brcmf_dbg(USB, "DBUS is up\n");
+ bcmf_bus->state = BRCMF_BUS_DATA;
} else {
- brcmf_dbg(INFO, "DBUS current state=%d\n", state);
+ brcmf_dbg(USB, "DBUS current state=%d\n", state);
}
}
@@ -595,30 +552,32 @@ brcmf_usb_intr_complete(struct urb *urb)
{
struct brcmf_usbdev_info *devinfo =
(struct brcmf_usbdev_info *)urb->context;
- bool killed;
+ int err;
+
+ brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
if (devinfo == NULL)
return;
if (unlikely(urb->status)) {
- if (devinfo->suspend_state ==
- USBOS_SUSPEND_STATE_SUSPEND_PENDING)
- killed = true;
-
- if ((urb->status == -ENOENT && (!killed))
- || urb->status == -ESHUTDOWN ||
- urb->status == -ENODEV) {
- brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+ if (urb->status == -ENOENT ||
+ urb->status == -ESHUTDOWN ||
+ urb->status == -ENODEV) {
+ brcmf_usb_state_change(devinfo,
+ BRCMFMAC_USB_STATE_DOWN);
}
}
- if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN) {
- brcmf_dbg(ERROR, "intr cb when DBUS down, ignoring\n");
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
+ brcmf_err("intr cb when DBUS down, ignoring\n");
return;
}
- if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
- usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+ err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
+ if (err)
+ brcmf_err("usb_submit_urb, err=%d\n", err);
+ }
}
static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
@@ -627,16 +586,15 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
struct brcmf_usbreq *req;
int ret;
- if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
- /* TODO: handle suspend/resume */
+ brcmf_dbg(USB, "Enter, skb=%p\n", skb);
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
return -EIO;
- }
req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
&devinfo->tx_freecount);
if (!req) {
brcmu_pkt_buf_free_skb(skb);
- brcmf_dbg(ERROR, "no req to send\n");
+ brcmf_err("no req to send\n");
return -ENOMEM;
}
@@ -648,7 +606,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
ret = usb_submit_urb(req->urb, GFP_ATOMIC);
if (ret) {
- brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
+ brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
brcmf_usb_del_fromq(devinfo, req);
brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
@@ -670,25 +628,16 @@ static int brcmf_usb_up(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
u16 ifnum;
+ int ret;
- if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP)
+ brcmf_dbg(USB, "Enter\n");
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
return 0;
- /* If the USB/HSIC bus in sleep state, wake it up */
- if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED) {
- if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
- brcmf_dbg(ERROR, "Could not Resume the bus!\n");
- return -EIO;
- }
- }
- devinfo->activity = true;
-
/* Success, indicate devinfo is fully up */
- brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_UP);
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
if (devinfo->intr_urb) {
- int ret;
-
usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
devinfo->intr_pipe,
&devinfo->intr,
@@ -699,7 +648,7 @@ static int brcmf_usb_up(struct device *dev)
ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
if (ret) {
- brcmf_dbg(ERROR, "USB_SUBMIT_URB failed with status %d\n",
+ brcmf_err("USB_SUBMIT_URB failed with status %d\n",
ret);
return -EINVAL;
}
@@ -733,14 +682,14 @@ static void brcmf_usb_down(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ brcmf_dbg(USB, "Enter\n");
if (devinfo == NULL)
return;
- brcmf_dbg(TRACE, "enter\n");
- if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN)
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
return;
- brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
+ brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
if (devinfo->intr_urb)
usb_kill_urb(devinfo->intr_urb);
@@ -754,34 +703,14 @@ static void brcmf_usb_down(struct device *dev)
brcmf_usb_free_q(&devinfo->rx_postq, true);
}
-static int
-brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
-{
- int ret;
- int err = 0;
- int ms = time;
-
- ret = wait_event_interruptible_timeout(devinfo->wait,
- devinfo->waitdone == true, (ms * HZ / 1000));
-
- if ((devinfo->waitdone == false) || (devinfo->sync_urb_status)) {
- brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
- ret, devinfo->sync_urb_status);
- err = -EINVAL;
- }
- devinfo->waitdone = false;
- return err;
-}
-
static void
brcmf_usb_sync_complete(struct urb *urb)
{
struct brcmf_usbdev_info *devinfo =
(struct brcmf_usbdev_info *)urb->context;
- devinfo->waitdone = true;
- wake_up_interruptible(&devinfo->wait);
- devinfo->sync_urb_status = urb->status;
+ devinfo->ctl_completed = true;
+ brcmf_usb_ioctl_resp_wake(devinfo);
}
static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
@@ -813,18 +742,19 @@ static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
(void *) tmpbuf, size,
(usb_complete_t)brcmf_usb_sync_complete, devinfo);
+ devinfo->ctl_completed = false;
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0) {
- brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ brcmf_err("usb_submit_urb failed %d\n", ret);
kfree(tmpbuf);
return false;
}
- ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
+ ret = brcmf_usb_ioctl_resp_wait(devinfo);
memcpy(buffer, tmpbuf, buflen);
kfree(tmpbuf);
- return (ret == 0);
+ return ret;
}
static bool
@@ -833,27 +763,25 @@ brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
struct bootrom_id_le id;
u32 chipid, chiprev;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
if (devinfo == NULL)
return false;
/* Check if firmware downloaded already by querying runtime ID */
id.chip = cpu_to_le32(0xDEAD);
- brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
- sizeof(struct bootrom_id_le));
+ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
chipid = le32_to_cpu(id.chip);
chiprev = le32_to_cpu(id.chiprev);
if ((chipid & 0x4300) == 0x4300)
- brcmf_dbg(INFO, "chip %x rev 0x%x\n", chipid, chiprev);
+ brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
else
- brcmf_dbg(INFO, "chip %d rev 0x%x\n", chipid, chiprev);
+ brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
if (chipid == BRCMF_POSTBOOT_ID) {
- brcmf_dbg(INFO, "firmware already downloaded\n");
- brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
- sizeof(struct bootrom_id_le));
+ brcmf_dbg(USB, "firmware already downloaded\n");
+ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
return false;
} else {
devinfo->bus_pub.devid = chipid;
@@ -866,38 +794,29 @@ static int
brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
{
struct bootrom_id_le id;
- u16 wait = 0, wait_time;
-
- brcmf_dbg(TRACE, "enter\n");
+ u32 loop_cnt;
- if (devinfo == NULL)
- return -EINVAL;
+ brcmf_dbg(USB, "Enter\n");
- /* Give dongle chance to boot */
- wait_time = BRCMF_USB_DLIMAGE_SPINWAIT;
- while (wait < BRCMF_USB_DLIMAGE_LIMIT) {
- mdelay(wait_time);
- wait += wait_time;
+ loop_cnt = 0;
+ do {
+ mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
+ loop_cnt++;
id.chip = cpu_to_le32(0xDEAD); /* Get the ID */
- brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id,
- sizeof(struct bootrom_id_le));
+ brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
break;
- }
+ } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
- brcmf_dbg(INFO, "download done %d ms postboot chip 0x%x/rev 0x%x\n",
- wait, le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
-
- brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
- sizeof(struct bootrom_id_le));
+ brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
+ le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
- /* XXX this wait may not be necessary */
- mdelay(BRCMF_USB_RESETCFG_SPINWAIT);
+ brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
return 0;
} else {
- brcmf_dbg(ERROR, "Cannot talk to Dongle. Firmware is not UP, %d ms\n",
- wait);
+ brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n",
+ BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
return -EINVAL;
}
}
@@ -918,13 +837,14 @@ brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
+ devinfo->ctl_completed = false;
ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
if (ret) {
- brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret);
+ brcmf_err("usb_submit_urb failed %d\n", ret);
return ret;
}
- ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT);
- return ret;
+ ret = brcmf_usb_ioctl_resp_wait(devinfo);
+ return (ret == 0);
}
static int
@@ -935,7 +855,8 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
struct rdl_state_le state;
u32 rdlstate, rdlbytes;
int err = 0;
- brcmf_dbg(TRACE, "fw %p, len %d\n", fw, fwlen);
+
+ brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
if (bulkchunk == NULL) {
@@ -952,7 +873,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
/* 2) Check we are in the Waiting state */
if (rdlstate != DL_WAITING) {
- brcmf_dbg(ERROR, "Failed to DL_START\n");
+ brcmf_err("Failed to DL_START\n");
err = -EINVAL;
goto fail;
}
@@ -981,7 +902,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
memcpy(bulkchunk, dlpos, sendlen);
if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
sendlen)) {
- brcmf_dbg(ERROR, "send_bulk failed\n");
+ brcmf_err("send_bulk failed\n");
err = -EINVAL;
goto fail;
}
@@ -991,7 +912,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
}
if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
sizeof(struct rdl_state_le))) {
- brcmf_dbg(ERROR, "DL_GETSTATE Failed xxxx\n");
+ brcmf_err("DL_GETSTATE Failed xxxx\n");
err = -EINVAL;
goto fail;
}
@@ -1001,7 +922,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
/* restart if an error is reported */
if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
- brcmf_dbg(ERROR, "Bad Hdr or Bad CRC state %d\n",
+ brcmf_err("Bad Hdr or Bad CRC state %d\n",
rdlstate);
err = -EINVAL;
goto fail;
@@ -1010,7 +931,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
fail:
kfree(bulkchunk);
- brcmf_dbg(TRACE, "err=%d\n", err);
+ brcmf_dbg(USB, "Exit, err=%d\n", err);
return err;
}
@@ -1018,7 +939,7 @@ static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
{
int err;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
if (devinfo == NULL)
return -EINVAL;
@@ -1028,10 +949,10 @@ static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
err = brcmf_usb_dl_writeimage(devinfo, fw, len);
if (err == 0)
- devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_DONE;
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
else
- devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_PENDING;
- brcmf_dbg(TRACE, "exit: err=%d\n", err);
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
+ brcmf_dbg(USB, "Exit, err=%d\n", err);
return err;
}
@@ -1040,7 +961,7 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
{
struct rdl_state_le state;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
if (!devinfo)
return -EINVAL;
@@ -1060,10 +981,10 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
return -ENODEV;
/* The Dongle may go for re-enumeration. */
} else {
- brcmf_dbg(ERROR, "Dongle not runnable\n");
+ brcmf_err("Dongle not runnable\n");
return -EINVAL;
}
- brcmf_dbg(TRACE, "exit\n");
+ brcmf_dbg(USB, "Exit\n");
return 0;
}
@@ -1090,7 +1011,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
int devid, chiprev;
int err;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
if (devinfo == NULL)
return -ENODEV;
@@ -1098,13 +1019,13 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
chiprev = devinfo->bus_pub.chiprev;
if (!brcmf_usb_chip_support(devid, chiprev)) {
- brcmf_dbg(ERROR, "unsupported chip %d rev %d\n",
+ brcmf_err("unsupported chip %d rev %d\n",
devid, chiprev);
return -EINVAL;
}
if (!devinfo->image) {
- brcmf_dbg(ERROR, "No firmware!\n");
+ brcmf_err("No firmware!\n");
return -ENOENT;
}
@@ -1118,7 +1039,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
{
- brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
+ brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
/* free the URBS */
brcmf_usb_free_q(&devinfo->rx_freeq, false);
@@ -1153,6 +1074,7 @@ static int check_file(const u8 *headers)
struct trx_header_le *trx;
int actual_len = -1;
+ brcmf_dbg(USB, "Enter\n");
/* Extract trx header */
trx = (struct trx_header_le *) headers;
if (trx->magic != cpu_to_le32(TRX_MAGIC))
@@ -1174,6 +1096,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
struct brcmf_usb_image *fw_image;
int err;
+ brcmf_dbg(USB, "Enter\n");
switch (devinfo->bus_pub.devid) {
case 43143:
fwname = BRCMF_USB_43143_FW_NAME;
@@ -1190,7 +1113,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
return -EINVAL;
break;
}
-
+ brcmf_dbg(USB, "Loading FW %s\n", fwname);
list_for_each_entry(fw_image, &fw_image_list, list) {
if (fw_image->fwname == fwname) {
devinfo->image = fw_image->image;
@@ -1201,11 +1124,11 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
/* fw image not yet loaded. Load it now and add to list */
err = request_firmware(&fw, fwname, devinfo->dev);
if (!fw) {
- brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
+ brcmf_err("fail to request firmware %s\n", fwname);
return err;
}
if (check_file(fw->data) < 0) {
- brcmf_dbg(ERROR, "invalid firmware %s\n", fwname);
+ brcmf_err("invalid firmware %s\n", fwname);
return -EINVAL;
}
@@ -1235,10 +1158,13 @@ static
struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
int nrxq, int ntxq)
{
+ brcmf_dbg(USB, "Enter\n");
+
devinfo->bus_pub.nrxq = nrxq;
devinfo->rx_low_watermark = nrxq / 2;
devinfo->bus_pub.devinfo = devinfo;
devinfo->bus_pub.ntxq = ntxq;
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
/* flow control when too many tx urbs posted */
devinfo->tx_low_watermark = ntxq / 4;
@@ -1270,25 +1196,24 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->intr_urb) {
- brcmf_dbg(ERROR, "usb_alloc_urb (intr) failed\n");
+ brcmf_err("usb_alloc_urb (intr) failed\n");
goto error;
}
devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->ctl_urb) {
- brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
+ brcmf_err("usb_alloc_urb (ctl) failed\n");
goto error;
}
devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->bulk_urb) {
- brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
+ brcmf_err("usb_alloc_urb (bulk) failed\n");
goto error;
}
- init_waitqueue_head(&devinfo->wait);
if (!brcmf_usb_dlneeded(devinfo))
return &devinfo->bus_pub;
- brcmf_dbg(TRACE, "start fw downloading\n");
+ brcmf_dbg(USB, "Start fw downloading\n");
if (brcmf_usb_get_fw(devinfo))
goto error;
@@ -1298,19 +1223,27 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
return &devinfo->bus_pub;
error:
- brcmf_dbg(ERROR, "failed!\n");
+ brcmf_err("failed!\n");
brcmf_usb_detach(devinfo);
return NULL;
}
-static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
- const char *desc, u32 bustype, u32 hdrlen)
+static struct brcmf_bus_ops brcmf_usb_bus_ops = {
+ .txdata = brcmf_usb_tx,
+ .init = brcmf_usb_up,
+ .stop = brcmf_usb_down,
+ .txctl = brcmf_usb_tx_ctlpkt,
+ .rxctl = brcmf_usb_rx_ctlpkt,
+};
+
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
{
struct brcmf_bus *bus = NULL;
struct brcmf_usbdev *bus_pub = NULL;
int ret;
struct device *dev = devinfo->dev;
+ brcmf_dbg(USB, "Enter\n");
bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
if (!bus_pub)
return -ENODEV;
@@ -1321,26 +1254,22 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
goto fail;
}
+ bus->dev = dev;
bus_pub->bus = bus;
- bus->brcmf_bus_txdata = brcmf_usb_tx;
- bus->brcmf_bus_init = brcmf_usb_up;
- bus->brcmf_bus_stop = brcmf_usb_down;
- bus->brcmf_bus_txctl = brcmf_usb_tx_ctlpkt;
- bus->brcmf_bus_rxctl = brcmf_usb_rx_ctlpkt;
- bus->type = bustype;
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
+ bus->ops = &brcmf_usb_bus_ops;
/* Attach to the common driver interface */
- ret = brcmf_attach(hdrlen, dev);
+ ret = brcmf_attach(0, dev);
if (ret) {
- brcmf_dbg(ERROR, "dhd_attach failed\n");
+ brcmf_err("brcmf_attach failed\n");
goto fail;
}
ret = brcmf_bus_start(dev);
if (ret) {
- brcmf_dbg(ERROR, "dongle is not responding\n");
+ brcmf_err("dongle is not responding\n");
brcmf_detach(dev);
goto fail;
}
@@ -1358,7 +1287,7 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
{
if (!devinfo)
return;
- brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
+ brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
brcmf_detach(devinfo->dev);
kfree(devinfo->bus_pub.bus);
@@ -1376,7 +1305,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
u8 endpoint_num;
struct brcmf_usbdev_info *devinfo;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
if (devinfo == NULL)
@@ -1415,7 +1344,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) {
- brcmf_dbg(ERROR, "invalid control interface: class %d, subclass %d, proto %d\n",
+ brcmf_err("invalid control interface: class %d, subclass %d, proto %d\n",
IFDESC(usb, CONTROL_IF).bInterfaceClass,
IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
IFDESC(usb, CONTROL_IF).bInterfaceProtocol);
@@ -1427,7 +1356,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
!= USB_ENDPOINT_XFER_INT) {
- brcmf_dbg(ERROR, "invalid control endpoint %d\n",
+ brcmf_err("invalid control endpoint %d\n",
endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
ret = -1;
goto fail;
@@ -1446,7 +1375,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
endpoint = &IFEPDESC(usb, BULK_IF, ep);
if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
USB_ENDPOINT_XFER_BULK) {
- brcmf_dbg(ERROR, "invalid data endpoint %d\n", ep);
+ brcmf_err("invalid data endpoint %d\n", ep);
ret = -1;
goto fail;
}
@@ -1477,11 +1406,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
if (usb->speed == USB_SPEED_HIGH)
- brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
+ brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
else
- brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
+ brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
- ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
+ ret = brcmf_usb_probe_cb(devinfo);
if (ret)
goto fail;
@@ -1489,7 +1418,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return 0;
fail:
- brcmf_dbg(ERROR, "failed with errno %d\n", ret);
+ brcmf_err("failed with errno %d\n", ret);
kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1501,40 +1430,55 @@ brcmf_usb_disconnect(struct usb_interface *intf)
{
struct brcmf_usbdev_info *devinfo;
- brcmf_dbg(TRACE, "enter\n");
+ brcmf_dbg(USB, "Enter\n");
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
brcmf_usb_disconnect_cb(devinfo);
kfree(devinfo);
+ brcmf_dbg(USB, "Exit\n");
}
/*
- * only need to signal the bus being down and update the suspend state.
+ * only need to signal the bus being down and update the state.
*/
static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
- brcmf_dbg(TRACE, "enter\n");
- devinfo->bus_pub.state = BCMFMAC_USB_STATE_DOWN;
- devinfo->suspend_state = USBOS_SUSPEND_STATE_SUSPENDED;
+ brcmf_dbg(USB, "Enter\n");
+ devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
+ brcmf_detach(&usb->dev);
return 0;
}
/*
- * mark suspend state active and crank up the bus.
+ * (re-) start the bus.
*/
static int brcmf_usb_resume(struct usb_interface *intf)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
- brcmf_dbg(TRACE, "enter\n");
- devinfo->suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE;
- brcmf_bus_start(&usb->dev);
+ brcmf_dbg(USB, "Enter\n");
+ if (!brcmf_attach(0, devinfo->dev))
+ return brcmf_bus_start(&usb->dev);
+
return 0;
}
+static int brcmf_usb_reset_resume(struct usb_interface *intf)
+{
+ struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
+ brcmf_dbg(USB, "Enter\n");
+
+ if (!brcmf_usb_fw_download(devinfo))
+ return brcmf_usb_resume(intf);
+
+ return -EIO;
+}
+
#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
#define BRCMF_USB_DEVICE_ID_43236 0xbd17
@@ -1554,7 +1498,6 @@ MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
-/* TODO: suspend and resume entries */
static struct usb_driver brcmf_usbdrvr = {
.name = KBUILD_MODNAME,
.probe = brcmf_usb_probe,
@@ -1562,6 +1505,7 @@ static struct usb_driver brcmf_usbdrvr = {
.id_table = brcmf_usb_devid_table,
.suspend = brcmf_usb_suspend,
.resume = brcmf_usb_resume,
+ .reset_resume = brcmf_usb_reset_resume,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
@@ -1579,12 +1523,14 @@ static void brcmf_release_fw(struct list_head *q)
void brcmf_usb_exit(void)
{
+ brcmf_dbg(USB, "Enter\n");
usb_deregister(&brcmf_usbdrvr);
brcmf_release_fw(&fw_image_list);
}
void brcmf_usb_init(void)
{
+ brcmf_dbg(USB, "Enter\n");
INIT_LIST_HEAD(&fw_image_list);
usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
index acfa5e89872f..f483a8c9945b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -17,19 +17,11 @@
#define BRCMFMAC_USB_H
enum brcmf_usb_state {
- BCMFMAC_USB_STATE_DL_PENDING,
- BCMFMAC_USB_STATE_DL_DONE,
- BCMFMAC_USB_STATE_UP,
- BCMFMAC_USB_STATE_DOWN,
- BCMFMAC_USB_STATE_PNP_FWDL,
- BCMFMAC_USB_STATE_DISCONNECT,
- BCMFMAC_USB_STATE_SLEEP
-};
-
-enum brcmf_usb_pnp_state {
- BCMFMAC_USB_PNP_DISCONNECT,
- BCMFMAC_USB_PNP_SLEEP,
- BCMFMAC_USB_PNP_RESUME,
+ BRCMFMAC_USB_STATE_DOWN,
+ BRCMFMAC_USB_STATE_DL_FAIL,
+ BRCMFMAC_USB_STATE_DL_DONE,
+ BRCMFMAC_USB_STATE_UP,
+ BRCMFMAC_USB_STATE_SLEEP
};
struct brcmf_stats {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 481345c23ded..75464ad4fbd1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -19,14 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/if_arp.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/netdevice.h>
-#include <linux/bitops.h>
#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/uaccess.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
@@ -34,7 +27,9 @@
#include <defs.h>
#include <brcmu_wifi.h>
#include "dhd.h"
+#include "dhd_dbg.h"
#include "wl_cfg80211.h"
+#include "fwil.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define BRCMF_PNO_VERSION 2
@@ -48,6 +43,8 @@
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
+#define BRCMF_IFACE_MAX_CNT 2
+
#define TLV_LEN_OFF 1 /* length offset */
#define TLV_HDR_LEN 2 /* header length */
#define TLV_BODY_OFF 2 /* body offset */
@@ -91,16 +88,11 @@
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
-static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-
-static u32 brcmf_dbg_level = WL_DBG_ERR;
-
-static bool check_sys_up(struct wiphy *wiphy)
+static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- if (!test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("device is not ready : status (%d)\n",
- (int)cfg->status);
+ if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
+ brcmf_dbg(INFO, "device is not ready : status (%lu)\n",
+ vif->sme_state);
return false;
}
return true;
@@ -391,55 +383,29 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
-/* function for reading/writing a single u32 from/to the dongle */
-static int
-brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
-{
- int err;
- __le32 par_le = cpu_to_le32(*par);
-
- err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32));
- *par = le32_to_cpu(par_le);
-
- return err;
-}
-
-static s32
-brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
- void *param, s32 paramlen,
- void *buf, s32 buflen, s32 bssidx)
+static u16 channel_to_chanspec(struct ieee80211_channel *ch)
{
- s32 err = -ENOMEM;
- u32 len;
-
- len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
- buf, buflen, bssidx);
- BUG_ON(!len);
- if (len > 0)
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
+ u16 chanspec;
- return err;
-}
+ chanspec = ieee80211_frequency_to_channel(ch->center_freq);
+ chanspec &= WL_CHANSPEC_CHAN_MASK;
-static s32
-brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
- void *param, s32 paramlen,
- void *buf, s32 buflen, s32 bssidx)
-{
- s32 err = -ENOMEM;
- u32 len;
-
- len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
- buf, buflen, bssidx);
- BUG_ON(!len);
- if (len > 0)
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
+ if (ch->band == IEEE80211_BAND_2GHZ)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
- return err;
+ if (ch->flags & IEEE80211_CHAN_NO_HT40) {
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ } else {
+ chanspec |= WL_CHANSPEC_BW_40;
+ if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+ else
+ chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+ }
+ return chanspec;
}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -457,21 +423,20 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
}
static int
-send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
- struct net_device *ndev, struct brcmf_wsec_key *key)
+send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
{
int err;
struct brcmf_wsec_key_le key_le;
convert_key_from_CPU(key, &key_le);
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
- sizeof(key_le),
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ brcmf_netdev_wait_pend8021x(ndev);
+
+ err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le,
+ sizeof(key_le));
if (err)
- WL_ERR("wsec_key error (%d)\n", err);
+ brcmf_err("wsec_key error (%d)\n", err);
return err;
}
@@ -480,29 +445,30 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
s32 infra = 0;
s32 ap = 0;
s32 err = 0;
- WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
+ brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
switch (type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
- WL_ERR("type (%d) : currently we do not support this type\n",
- type);
+ brcmf_err("type (%d) : currently we do not support this type\n",
+ type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
- cfg->conf->mode = WL_MODE_IBSS;
+ vif->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
- cfg->conf->mode = WL_MODE_BSS;
+ vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
- cfg->conf->mode = WL_MODE_AP;
+ vif->mode = WL_MODE_AP;
ap = 1;
break;
default:
@@ -511,338 +477,41 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
if (ap) {
- set_bit(WL_STATUS_AP_CREATING, &cfg->status);
- if (!cfg->ap_info)
- cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
- GFP_KERNEL);
- if (!cfg->ap_info) {
- err = -ENOMEM;
- goto done;
- }
- WL_INFO("IF Type = AP\n");
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
+ brcmf_dbg(INFO, "IF Type = AP\n");
} else {
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
if (err) {
- WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+ brcmf_err("WLC_SET_INFRA error (%d)\n", err);
err = -EAGAIN;
goto done;
}
- WL_INFO("IF Type = %s\n",
- (cfg->conf->mode == WL_MODE_IBSS) ?
- "Adhoc" : "Infra");
+ brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
+ "Adhoc" : "Infra");
}
ndev->ieee80211_ptr->iftype = type;
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
-static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- u32 len;
- s32 err = 0;
- __le32 val_le;
-
- val_le = cpu_to_le32(val);
- len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf,
- sizeof(buf));
- BUG_ON(!len);
-
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
- if (err)
- WL_ERR("error (%d)\n", err);
-
- return err;
-}
-
-static s32
-brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
-{
- union {
- s8 buf[BRCMF_DCMD_SMLEN];
- __le32 val;
- } var;
- u32 len;
- u32 data_null;
- s32 err = 0;
-
- len =
- brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
- sizeof(var.buf));
- BUG_ON(!len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len);
- if (err)
- WL_ERR("error (%d)\n", err);
-
- *retval = le32_to_cpu(var.val);
-
- return err;
-}
-
-static s32
-brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
- s32 bssidx)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- __le32 val_le;
-
- val_le = cpu_to_le32(val);
-
- return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
- sizeof(val_le), buf, sizeof(buf),
- bssidx);
-}
-
-static s32
-brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
- s32 bssidx)
-{
- s8 buf[BRCMF_DCMD_SMLEN];
- s32 err;
- __le32 val_le;
-
- memset(buf, 0, sizeof(buf));
- err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
- sizeof(buf), bssidx);
- if (err == 0) {
- memcpy(&val_le, buf, sizeof(val_le));
- *val = le32_to_cpu(val_le);
- }
- return err;
-}
-
-
-/*
- * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
- * should return the ndev matching bssidx.
- */
-static s32
-brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
-{
- return 0;
-}
-
static void brcmf_set_mpc(struct net_device *ndev, int mpc)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- if (test_bit(WL_STATUS_READY, &cfg->status)) {
- err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
+ if (check_vif_up(ifp->vif)) {
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
if (err) {
- WL_ERR("fail to set mpc\n");
+ brcmf_err("fail to set mpc\n");
return;
}
- WL_INFO("MPC : %d\n", mpc);
- }
-}
-
-static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
- struct brcmf_ssid *ssid)
-{
- memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
- params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
- params_le->channel_num = 0;
- params_le->nprobes = cpu_to_le32(-1);
- params_le->active_time = cpu_to_le32(-1);
- params_le->passive_time = cpu_to_le32(-1);
- params_le->home_time = cpu_to_le32(-1);
- if (ssid && ssid->SSID_len) {
- params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
- memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
+ brcmf_dbg(INFO, "MPC : %d\n", mpc);
}
}
-static s32
-brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen);
-}
-
-static s32
-brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param,
- s32 paramlen, void *bufptr, s32 buflen)
-{
- s32 iolen;
-
- iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
- BUG_ON(!iolen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen);
-}
-
-static s32
-brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
- struct brcmf_ssid *ssid, u16 action)
-{
- s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
- offsetof(struct brcmf_iscan_params_le, params_le);
- struct brcmf_iscan_params_le *params;
- s32 err = 0;
-
- if (ssid && ssid->SSID_len)
- params_size += sizeof(struct brcmf_ssid);
- params = kzalloc(params_size, GFP_KERNEL);
- if (!params)
- return -ENOMEM;
- BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
-
- brcmf_iscan_prep(&params->params_le, ssid);
-
- params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
- params->action = cpu_to_le16(action);
- params->scan_duration = cpu_to_le16(0);
-
- err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size,
- iscan->dcmd_buf, BRCMF_DCMD_SMLEN);
- if (err) {
- if (err == -EBUSY)
- WL_INFO("system busy : iscan canceled\n");
- else
- WL_ERR("error (%d)\n", err);
- }
-
- kfree(params);
- return err;
-}
-
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
- struct net_device *ndev = cfg_to_ndev(cfg);
- struct brcmf_ssid ssid;
- __le32 passive_scan;
- s32 err = 0;
-
- /* Broadcast scan by default */
- memset(&ssid, 0, sizeof(ssid));
-
- iscan->state = WL_ISCAN_STATE_SCANING;
-
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (err) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- brcmf_set_mpc(ndev, 0);
- cfg->iscan_kickstart = true;
- err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
- if (err) {
- brcmf_set_mpc(ndev, 1);
- cfg->iscan_kickstart = false;
- return err;
- }
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
- return err;
-}
-
-static s32
-brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
-{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct cfg80211_ssid *ssids;
- struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
- __le32 passive_scan;
- bool iscan_req;
- bool spec_scan;
- s32 err = 0;
- u32 SSID_len;
-
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
- return -EAGAIN;
- }
- if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
- WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg->status);
- return -EAGAIN;
- }
- if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
- WL_ERR("Connecting : status (%lu)\n",
- cfg->status);
- return -EAGAIN;
- }
-
- iscan_req = false;
- spec_scan = false;
- if (request) {
- /* scan bss */
- ssids = request->ssids;
- if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
- iscan_req = true;
- } else {
- /* scan in ibss */
- /* we don't do iscan in ibss */
- ssids = this_ssid;
- }
-
- cfg->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg->status);
- if (iscan_req) {
- err = brcmf_do_iscan(cfg);
- if (!err)
- return err;
- else
- goto scan_out;
- } else {
- WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
- ssids->ssid, ssids->ssid_len);
- memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
- SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
- sr->ssid_le.SSID_len = cpu_to_le32(0);
- if (SSID_len) {
- memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
- sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
- spec_scan = true;
- } else {
- WL_SCAN("Broadcast scan\n");
- }
-
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (err) {
- WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
- goto scan_out;
- }
- brcmf_set_mpc(ndev, 0);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
- sizeof(sr->ssid_le));
- if (err) {
- if (err == -EBUSY)
- WL_INFO("system busy : scan for \"%s\" "
- "canceled\n", sr->ssid_le.SSID);
- else
- WL_ERR("WLC_SCAN error (%d)\n", err);
-
- brcmf_set_mpc(ndev, 1);
- goto scan_out;
- }
- }
-
- return 0;
-
-scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
- cfg->scan_request = NULL;
- return err;
-}
-
static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
struct cfg80211_scan_request *request)
{
@@ -851,12 +520,10 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
s32 i;
s32 offset;
u16 chanspec;
- u16 channel;
- struct ieee80211_channel *req_channel;
char *ptr;
struct brcmf_ssid_le ssid_le;
- memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+ memset(params_le->bssid, 0xFF, ETH_ALEN);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = 0;
params_le->channel_num = 0;
@@ -873,40 +540,20 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
n_ssids = request->n_ssids;
n_channels = request->n_channels;
/* Copy channel array if applicable */
- WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
+ brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+ n_channels);
if (n_channels > 0) {
for (i = 0; i < n_channels; i++) {
- chanspec = 0;
- req_channel = request->channels[i];
- channel = ieee80211_frequency_to_channel(
- req_channel->center_freq);
- if (req_channel->band == IEEE80211_BAND_2GHZ)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- } else {
- chanspec |= WL_CHANSPEC_BW_40;
- if (req_channel->flags &
- IEEE80211_CHAN_NO_HT40PLUS)
- chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
- else
- chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
- }
-
- chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
- WL_SCAN("Chan : %d, Channel spec: %x\n",
- channel, chanspec);
+ chanspec = channel_to_chanspec(request->channels[i]);
+ brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
+ request->channels[i]->hw_value, chanspec);
params_le->channel_list[i] = cpu_to_le16(chanspec);
}
} else {
- WL_SCAN("Scanning all channels\n");
+ brcmf_dbg(SCAN, "Scanning all channels\n");
}
/* Copy ssid array if applicable */
- WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
+ brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
if (n_ssids > 0) {
offset = offsetof(struct brcmf_scan_params_le, channel_list) +
n_channels * sizeof(u16);
@@ -919,18 +566,19 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
memcpy(ssid_le.SSID, request->ssids[i].ssid,
request->ssids[i].ssid_len);
if (!ssid_le.SSID_len)
- WL_SCAN("%d: Broadcast scan\n", i);
+ brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
else
- WL_SCAN("%d: scan for %s size =%d\n", i,
- ssid_le.SSID, ssid_le.SSID_len);
+ brcmf_dbg(SCAN, "%d: scan for %s size =%d\n",
+ i, ssid_le.SSID, ssid_le.SSID_len);
memcpy(ptr, &ssid_le, sizeof(ssid_le));
ptr += sizeof(ssid_le);
}
} else {
- WL_SCAN("Broadcast scan %p\n", request->ssids);
+ brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
if ((request->ssids) && request->ssids->ssid_len) {
- WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
- request->ssids->ssid_len);
+ brcmf_dbg(SCAN, "SSID %s len=%d\n",
+ params_le->ssid_le.SSID,
+ request->ssids->ssid_len);
params_le->ssid_le.SSID_len =
cpu_to_le32(request->ssids->ssid_len);
memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
@@ -952,7 +600,7 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
struct cfg80211_scan_request *scan_request;
s32 err = 0;
- WL_SCAN("Enter\n");
+ brcmf_dbg(SCAN, "Enter\n");
/* clear scan request, because the FW abort can cause a second call */
	/* to this function and might cause a double cfg80211_scan_done */
@@ -964,9 +612,9 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
- WL_SCAN("ABORT scan in firmware\n");
+ brcmf_dbg(SCAN, "ABORT scan in firmware\n");
memset(&params_le, 0, sizeof(params_le));
- memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(params_le.bssid, 0xFF, ETH_ALEN);
params_le.bss_type = DOT11_BSSTYPE_ANY;
params_le.scan_type = 0;
params_le.channel_num = cpu_to_le32(1);
@@ -977,29 +625,29 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
/* Scan is aborted by setting channel_list[0] to -1 */
params_le.channel_list[0] = cpu_to_le16(-1);
	/* E-Scan (or any other type) can be aborted by SCAN */
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
- sizeof(params_le));
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+ &params_le, sizeof(params_le));
if (err)
- WL_ERR("Scan abort failed\n");
+ brcmf_err("Scan abort failed\n");
}
/*
* e-scan can be initiated by scheduled scan
* which takes precedence.
*/
if (cfg->sched_escan) {
- WL_SCAN("scheduled scan completed\n");
+ brcmf_dbg(SCAN, "scheduled scan completed\n");
cfg->sched_escan = false;
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
brcmf_set_mpc(ndev, 1);
} else if (scan_request) {
- WL_SCAN("ESCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
cfg80211_scan_done(scan_request, aborted);
brcmf_set_mpc(ndev, 1);
}
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scan complete while device not scanning\n");
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ brcmf_err("Scan complete while device not scanning\n");
return -EPERM;
}
@@ -1015,7 +663,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
struct brcmf_escan_params_le *params;
s32 err = 0;
- WL_SCAN("E-SCAN START\n");
+ brcmf_dbg(SCAN, "E-SCAN START\n");
if (request != NULL) {
/* Allocate space for populating ssids in struct */
@@ -1036,13 +684,13 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
params->action = cpu_to_le16(action);
params->sync_id = cpu_to_le16(0x1234);
- err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
- cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+ err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan",
+ params, params_size);
if (err) {
if (err == -EBUSY)
- WL_INFO("system busy : escan canceled\n");
+ brcmf_dbg(INFO, "system busy : escan canceled\n");
else
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
}
kfree(params);
@@ -1055,18 +703,18 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
struct net_device *ndev, struct cfg80211_scan_request *request)
{
s32 err;
- __le32 passive_scan;
+ u32 passive_scan;
struct brcmf_scan_results *results;
- WL_SCAN("Enter\n");
+ brcmf_dbg(SCAN, "Enter\n");
cfg->escan_info.ndev = ndev;
cfg->escan_info.wiphy = wiphy;
cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan);
if (err) {
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
return err;
}
brcmf_set_mpc(ndev, 0);
@@ -1086,29 +734,29 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
struct cfg80211_ssid *ssids;
- struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
- __le32 passive_scan;
+ struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
+ u32 passive_scan;
bool escan_req;
bool spec_scan;
s32 err;
u32 SSID_len;
- WL_SCAN("START ESCAN\n");
+ brcmf_dbg(SCAN, "START ESCAN\n");
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
- WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
+ brcmf_err("Scanning being aborted: status (%lu)\n",
+ cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
- WL_ERR("Connecting : status (%lu)\n",
- cfg->status);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+ brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state);
return -EAGAIN;
}
@@ -1128,16 +776,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
}
cfg->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (escan_req) {
err = brcmf_do_escan(cfg, wiphy, ndev, request);
- if (!err)
- return err;
- else
+ if (err)
goto scan_out;
} else {
- WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
- ssids->ssid, ssids->ssid_len);
+ brcmf_dbg(SCAN, "ssid \"%s\", ssid_len (%d)\n",
+ ssids->ssid, ssids->ssid_len);
memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
sr->ssid_le.SSID_len = cpu_to_le32(0);
@@ -1147,24 +793,24 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
spec_scan = true;
} else
- WL_SCAN("Broadcast scan\n");
+ brcmf_dbg(SCAN, "Broadcast scan\n");
- passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan);
if (err) {
- WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+ brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
goto scan_out;
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
- sizeof(sr->ssid_le));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+ &sr->ssid_le, sizeof(sr->ssid_le));
if (err) {
if (err == -EBUSY)
- WL_INFO("BUSY: scan for \"%s\" canceled\n",
- sr->ssid_le.SSID);
+ brcmf_dbg(INFO, "BUSY: scan for \"%s\" canceled\n",
+ sr->ssid_le.SSID);
else
- WL_ERR("WLC_SCAN error (%d)\n", err);
+ brcmf_err("WLC_SCAN error (%d)\n", err);
brcmf_set_mpc(ndev, 1);
goto scan_out;
@@ -1174,7 +820,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
return 0;
scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (timer_pending(&cfg->escan_timeout))
del_timer_sync(&cfg->escan_timeout);
cfg->scan_request = NULL;
@@ -1182,27 +828,23 @@ scan_out:
}
static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request)
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct net_device *ndev = request->wdev->netdev;
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
s32 err = 0;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(container_of(request->wdev,
+ struct brcmf_cfg80211_vif, wdev)))
return -EIO;
- if (cfg->iscan_on)
- err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
- else if (cfg->escan_on)
- err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
+ err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
if (err)
- WL_ERR("scan error (%d)\n", err);
+ brcmf_err("scan error (%d)\n", err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1210,9 +852,10 @@ static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
{
s32 err = 0;
- err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
+ rts_threshold);
if (err)
- WL_ERR("Error (%d)\n", err);
+ brcmf_err("Error (%d)\n", err);
return err;
}
@@ -1221,9 +864,10 @@ static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
{
s32 err = 0;
- err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
+ frag_threshold);
if (err)
- WL_ERR("Error (%d)\n", err);
+ brcmf_err("Error (%d)\n", err);
return err;
}
@@ -1231,11 +875,11 @@ static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
{
s32 err = 0;
- u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
+ u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL);
- err = brcmf_exec_dcmd_u32(ndev, cmd, &retry);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
if (err) {
- WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
+ brcmf_err("cmd (%d) , error (%d)\n", cmd, err);
return err;
}
return err;
@@ -1245,10 +889,11 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
@@ -1281,7 +926,7 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1311,28 +956,26 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
join_params->params_le.chanspec_num = cpu_to_le32(1);
- WL_CONN("join_params->params.chanspec_list[0]= %#X,"
- "channel %d, chanspec %#X\n",
- chanspec, ch, chanspec);
+ brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
}
}
-static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
+static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
{
- struct net_device *ndev = NULL;
s32 err = 0;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
- if (cfg->link_up) {
- ndev = cfg_to_ndev(cfg);
- WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
- err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
+ brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
+ err = brcmf_fil_cmd_data_set(vif->ifp,
+ BRCMF_C_DISASSOC, NULL, 0);
if (err)
- WL_ERR("WLC_DISASSOC failed (%d)\n", err);
- cfg->link_up = false;
+ brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
}
- WL_TRACE("Exit\n");
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
+ brcmf_dbg(TRACE, "Exit\n");
}
static s32
@@ -1340,68 +983,71 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ibss_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_join_params join_params;
size_t join_params_size = 0;
s32 err = 0;
s32 wsec = 0;
s32 bcnprd;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (params->ssid)
- WL_CONN("SSID: %s\n", params->ssid);
+ brcmf_dbg(CONN, "SSID: %s\n", params->ssid);
else {
- WL_CONN("SSID: NULL, Not supported\n");
+ brcmf_dbg(CONN, "SSID: NULL, Not supported\n");
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (params->bssid)
- WL_CONN("BSSID: %pM\n", params->bssid);
+ brcmf_dbg(CONN, "BSSID: %pM\n", params->bssid);
else
- WL_CONN("No BSSID specified\n");
+ brcmf_dbg(CONN, "No BSSID specified\n");
- if (params->channel)
- WL_CONN("channel: %d\n", params->channel->center_freq);
+ if (params->chandef.chan)
+ brcmf_dbg(CONN, "channel: %d\n",
+ params->chandef.chan->center_freq);
else
- WL_CONN("no channel specified\n");
+ brcmf_dbg(CONN, "no channel specified\n");
if (params->channel_fixed)
- WL_CONN("fixed channel required\n");
+ brcmf_dbg(CONN, "fixed channel required\n");
else
- WL_CONN("no fixed channel required\n");
+ brcmf_dbg(CONN, "no fixed channel required\n");
if (params->ie && params->ie_len)
- WL_CONN("ie len: %d\n", params->ie_len);
+ brcmf_dbg(CONN, "ie len: %d\n", params->ie_len);
else
- WL_CONN("no ie specified\n");
+ brcmf_dbg(CONN, "no ie specified\n");
if (params->beacon_interval)
- WL_CONN("beacon interval: %d\n", params->beacon_interval);
+ brcmf_dbg(CONN, "beacon interval: %d\n",
+ params->beacon_interval);
else
- WL_CONN("no beacon interval specified\n");
+ brcmf_dbg(CONN, "no beacon interval specified\n");
if (params->basic_rates)
- WL_CONN("basic rates: %08X\n", params->basic_rates);
+ brcmf_dbg(CONN, "basic rates: %08X\n", params->basic_rates);
else
- WL_CONN("no basic rates specified\n");
+ brcmf_dbg(CONN, "no basic rates specified\n");
if (params->privacy)
- WL_CONN("privacy required\n");
+ brcmf_dbg(CONN, "privacy required\n");
else
- WL_CONN("no privacy required\n");
+ brcmf_dbg(CONN, "no privacy required\n");
/* Configure Privacy for starter */
if (params->privacy)
wsec |= WEP_ENABLED;
- err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
if (err) {
- WL_ERR("wsec failed (%d)\n", err);
+ brcmf_err("wsec failed (%d)\n", err);
goto done;
}
@@ -1411,9 +1057,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
else
bcnprd = 100;
- err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd);
if (err) {
- WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
+ brcmf_err("WLC_SET_BCNPRD failed (%d)\n", err);
goto done;
}
@@ -1434,17 +1080,17 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
BRCMF_ASSOC_PARAMS_FIXED_SIZE;
memcpy(profile->bssid, params->bssid, ETH_ALEN);
} else {
- memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
memset(profile->bssid, 0, ETH_ALEN);
}
/* Channel */
- if (params->channel) {
+ if (params->chandef.chan) {
u32 target_channel;
cfg->channel =
ieee80211_frequency_to_channel(
- params->channel->center_freq);
+ params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
brcmf_ch_to_chanspec(cfg->channel,
@@ -1453,10 +1099,10 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
/* set channel for starter */
target_channel = cfg->channel;
- err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
- &target_channel);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
+ target_channel);
if (err) {
- WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
+ brcmf_err("WLC_SET_CHANNEL failed (%d)\n", err);
goto done;
}
} else
@@ -1465,33 +1111,33 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
cfg->ibss_starter = false;
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
if (err) {
- WL_ERR("WLC_SET_SSID failed (%d)\n", err);
+ brcmf_err("WLC_SET_SSID failed (%d)\n", err);
goto done;
}
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- WL_TRACE("Exit\n");
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
static s32
brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
- brcmf_link_down(cfg);
+ brcmf_link_down(ifp->vif);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1499,8 +1145,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1511,10 +1156,10 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
else
val = WPA_AUTH_DISABLED;
- WL_CONN("setting wpa_auth to 0x%0x\n", val);
- err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
- WL_ERR("set wpa_auth failed (%d)\n", err);
+ brcmf_err("set wpa_auth failed (%d)\n", err);
return err;
}
sec = &profile->sec;
@@ -1525,8 +1170,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
static s32 brcmf_set_auth_type(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1534,27 +1178,27 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
switch (sme->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
val = 0;
- WL_CONN("open system\n");
+ brcmf_dbg(CONN, "open system\n");
break;
case NL80211_AUTHTYPE_SHARED_KEY:
val = 1;
- WL_CONN("shared key\n");
+ brcmf_dbg(CONN, "shared key\n");
break;
case NL80211_AUTHTYPE_AUTOMATIC:
val = 2;
- WL_CONN("automatic\n");
+ brcmf_dbg(CONN, "automatic\n");
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_CONN("network eap\n");
+ brcmf_dbg(CONN, "network eap\n");
default:
val = 2;
- WL_ERR("invalid auth type (%d)\n", sme->auth_type);
+ brcmf_err("invalid auth type (%d)\n", sme->auth_type);
break;
}
- err = brcmf_dev_intvar_set(ndev, "auth", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
if (err) {
- WL_ERR("set auth failed (%d)\n", err);
+ brcmf_err("set auth failed (%d)\n", err);
return err;
}
sec = &profile->sec;
@@ -1566,8 +1210,7 @@ static s32
brcmf_set_set_cipher(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
@@ -1589,8 +1232,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
pval = AES_ENABLED;
break;
default:
- WL_ERR("invalid cipher pairwise (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
+ brcmf_err("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
}
@@ -1610,16 +1253,16 @@ brcmf_set_set_cipher(struct net_device *ndev,
gval = AES_ENABLED;
break;
default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ brcmf_err("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
- WL_CONN("pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval);
+ brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
if (err) {
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
return err;
}
@@ -1633,16 +1276,16 @@ brcmf_set_set_cipher(struct net_device *ndev,
static s32
brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val);
+ err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
- WL_ERR("could not get wpa_auth (%d)\n", err);
+ brcmf_err("could not get wpa_auth (%d)\n", err);
return err;
}
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
@@ -1654,8 +1297,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ brcmf_err("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1667,16 +1310,17 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA2_AUTH_PSK;
break;
default:
- WL_ERR("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ brcmf_err("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
return -EINVAL;
}
}
- WL_CONN("setting wpa_auth to %d\n", val);
- err = brcmf_dev_intvar_set(ndev, "wpa_auth", val);
+ brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
+ err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
+ "wpa_auth", val);
if (err) {
- WL_ERR("could not set wpa_auth (%d)\n", err);
+ brcmf_err("could not set wpa_auth (%d)\n", err);
return err;
}
}
@@ -1690,22 +1334,20 @@ static s32
brcmf_set_sharedkey(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
struct brcmf_wsec_key key;
s32 val;
s32 err = 0;
- s32 bssidx;
- WL_CONN("key len (%d)\n", sme->key_len);
+ brcmf_dbg(CONN, "key len (%d)\n", sme->key_len);
if (sme->key_len == 0)
return 0;
sec = &profile->sec;
- WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
- sec->wpa_versions, sec->cipher_pairwise);
+ brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
+ sec->wpa_versions, sec->cipher_pairwise);
if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
return 0;
@@ -1718,7 +1360,7 @@ brcmf_set_sharedkey(struct net_device *ndev,
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (key.len > sizeof(key.data)) {
- WL_ERR("Too long key length (%u)\n", key.len);
+ brcmf_err("Too long key length (%u)\n", key.len);
return -EINVAL;
}
memcpy(key.data, sme->key, key.len);
@@ -1731,25 +1373,24 @@ brcmf_set_sharedkey(struct net_device *ndev,
key.algo = CRYPTO_ALGO_WEP128;
break;
default:
- WL_ERR("Invalid algorithm (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
+ brcmf_err("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
return -EINVAL;
}
/* Set the new key/index */
- WL_CONN("key length (%d) key index (%d) algo (%d)\n",
- key.len, key.index, key.algo);
- WL_CONN("key \"%s\"\n", key.data);
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo);
+ brcmf_dbg(CONN, "key \"%s\"\n", key.data);
+ err = send_key_to_dongle(ndev, &key);
if (err)
return err;
if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
- WL_CONN("set auth_type to shared key\n");
+ brcmf_dbg(CONN, "set auth_type to shared key\n");
val = WL_AUTH_SHARED_KEY; /* shared key */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
if (err)
- WL_ERR("set auth failed (%d)\n", err);
+ brcmf_err("set auth failed (%d)\n", err);
}
return err;
}
@@ -1759,7 +1400,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
@@ -1767,54 +1409,54 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (!sme->ssid) {
- WL_ERR("Invalid ssid\n");
+ brcmf_err("Invalid ssid\n");
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg->status);
+ set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (chan) {
cfg->channel =
ieee80211_frequency_to_channel(chan->center_freq);
- WL_CONN("channel (%d), center_req (%d)\n",
- cfg->channel, chan->center_freq);
+ brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
+ cfg->channel, chan->center_freq);
} else
cfg->channel = 0;
- WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
+ brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
err = brcmf_set_wpa_version(ndev, sme);
if (err) {
- WL_ERR("wl_set_wpa_version failed (%d)\n", err);
+ brcmf_err("wl_set_wpa_version failed (%d)\n", err);
goto done;
}
err = brcmf_set_auth_type(ndev, sme);
if (err) {
- WL_ERR("wl_set_auth_type failed (%d)\n", err);
+ brcmf_err("wl_set_auth_type failed (%d)\n", err);
goto done;
}
err = brcmf_set_set_cipher(ndev, sme);
if (err) {
- WL_ERR("wl_set_set_cipher failed (%d)\n", err);
+ brcmf_err("wl_set_set_cipher failed (%d)\n", err);
goto done;
}
err = brcmf_set_key_mgmt(ndev, sme);
if (err) {
- WL_ERR("wl_set_key_mgmt failed (%d)\n", err);
+ brcmf_err("wl_set_key_mgmt failed (%d)\n", err);
goto done;
}
err = brcmf_set_sharedkey(ndev, sme);
if (err) {
- WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
+ brcmf_err("brcmf_set_sharedkey failed (%d)\n", err);
goto done;
}
@@ -1827,23 +1469,23 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
- memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
- WL_CONN("ssid \"%s\", len (%d)\n",
- ssid.SSID, ssid.SSID_len);
+ brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
+ ssid.SSID, ssid.SSID_len);
brcmf_ch_to_chanspec(cfg->channel,
&join_params, &join_params_size);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
- &join_params, join_params_size);
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
if (err)
- WL_ERR("WLC_SET_SSID failed (%d)\n", err);
+ brcmf_err("WLC_SET_SSID failed (%d)\n", err);
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- WL_TRACE("Exit\n");
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1851,44 +1493,43 @@ static s32
brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
u16 reason_code)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_scb_val_le scbval;
s32 err = 0;
- WL_TRACE("Enter. Reason code = %d\n", reason_code);
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter. Reason code = %d\n", reason_code);
+ if (!check_vif_up(ifp->vif))
return -EIO;
- clear_bit(WL_STATUS_CONNECTED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
- sizeof(struct brcmf_scb_val_le));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
+ &scbval, sizeof(scbval));
if (err)
- WL_ERR("error (%d)\n", err);
-
- cfg->link_up = false;
+ brcmf_err("error (%d)\n", err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
static s32
-brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
+brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(ndev);
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
s32 dbm = MBM_TO_DBM(mbm);
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
switch (type) {
@@ -1897,7 +1538,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
- WL_ERR("TX_POWER_FIXED - dbm is negative\n");
+ brcmf_err("TX_POWER_FIXED - dbm is negative\n");
err = -EINVAL;
goto done;
}
@@ -1905,40 +1546,42 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
}
/* Make sure radio is off or on as far as software is concerned */
disable = WL_RADIO_SW_DISABLE << 16;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
if (err)
- WL_ERR("WLC_SET_RADIO error (%d)\n", err);
+ brcmf_err("WLC_SET_RADIO error (%d)\n", err);
if (dbm > 0xffff)
txpwrmw = 0xffff;
else
txpwrmw = (u16) dbm;
- err = brcmf_dev_intvar_set(ndev, "qtxpower",
- (s32) (brcmf_mw_to_qdbm(txpwrmw)));
+ err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
+ (s32)brcmf_mw_to_qdbm(txpwrmw));
if (err)
- WL_ERR("qtxpower error (%d)\n", err);
+ brcmf_err("qtxpower error (%d)\n", err);
cfg->conf->tx_power = dbm;
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
-static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
s32 txpwrdbm;
u8 result;
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
- err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+ err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm);
if (err) {
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
goto done;
}
@@ -1946,7 +1589,7 @@ static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
*dbm = (s32) brcmf_qdbm_to_mw(result);
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1954,34 +1597,32 @@ static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool unicast, bool multicast)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
u32 index;
u32 wsec;
s32 err = 0;
- s32 bssidx;
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+ if (!check_vif_up(ifp->vif))
return -EIO;
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- WL_ERR("WLC_GET_WSEC error (%d)\n", err);
+ brcmf_err("WLC_GET_WSEC error (%d)\n", err);
goto done;
}
if (wsec & WEP_ENABLED) {
/* Just select a new current key */
index = key_idx;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY,
- &index);
+ err = brcmf_fil_cmd_int_set(ifp,
+ BRCMF_C_SET_KEY_PRIMARY, index);
if (err)
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
}
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -1989,11 +1630,8 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_wsec_key key;
- struct brcmf_wsec_key_le key_le;
s32 err = 0;
- s32 bssidx;
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -2002,20 +1640,19 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
if (!is_multicast_ether_addr(mac_addr))
memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
key.len = (u32) params->key_len;
- bssidx = brcmf_find_bssidx(cfg, ndev);
/* check for key index change */
if (key.len == 0) {
/* key delete */
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err)
- WL_ERR("key delete error (%d)\n", err);
+ brcmf_err("key delete error (%d)\n", err);
} else {
if (key.len > sizeof(key.data)) {
- WL_ERR("Invalid key length (%d)\n", key.len);
+ brcmf_err("Invalid key length (%d)\n", key.len);
return -EINVAL;
}
- WL_CONN("Setting the key index %d\n", key.index);
+ brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
@@ -2039,37 +1676,31 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
key.algo = CRYPTO_ALGO_TKIP;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
- WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
- WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
+ brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
return -EINVAL;
}
- convert_key_from_CPU(&key, &key_le);
-
- brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
- sizeof(key_le),
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ err = send_key_to_dongle(ndev, &key);
if (err)
- WL_ERR("wsec_key error (%d)\n", err);
+ brcmf_err("wsec_key error (%d)\n", err);
}
return err;
}
@@ -2079,21 +1710,20 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 val;
s32 wsec;
s32 err = 0;
u8 keybuf[8];
- s32 bssidx;
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+ if (!check_vif_up(ifp->vif))
return -EIO;
if (mac_addr) {
- WL_TRACE("Exit");
+ brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
memset(&key, 0, sizeof(key));
@@ -2102,7 +1732,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
key.index = (u32) key_idx;
if (key.len > sizeof(key.data)) {
- WL_ERR("Too long key length (%u)\n", key.len);
+ brcmf_err("Too long key length (%u)\n", key.len);
err = -EINVAL;
goto done;
}
@@ -2113,59 +1743,58 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
val = WEP_ENABLED;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
val = WEP_ENABLED;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
- if (cfg->conf->mode != WL_MODE_AP) {
- WL_CONN("Swapping key\n");
+ if (ifp->vif->mode != WL_MODE_AP) {
+ brcmf_dbg(CONN, "Swapping key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
}
key.algo = CRYPTO_ALGO_TKIP;
val = TKIP_ENABLED;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
key.algo = CRYPTO_ALGO_AES_CCM;
val = AES_ENABLED;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
val = AES_ENABLED;
- WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
- WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
+ brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
err = -EINVAL;
goto done;
}
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ err = send_key_to_dongle(ndev, &key);
if (err)
goto done;
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- WL_ERR("get wsec error (%d)\n", err);
+ brcmf_err("get wsec error (%d)\n", err);
goto done;
}
wsec |= val;
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err) {
- WL_ERR("set wsec error (%d)\n", err);
+ brcmf_err("set wsec error (%d)\n", err);
goto done;
}
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2173,37 +1802,32 @@ static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
- s32 bssidx;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
+ if (key_idx >= DOT11_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ brcmf_err("invalid key index (%d)\n", key_idx);
+ return -EINVAL;
+ }
+
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
key.flags = BRCMF_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
- WL_CONN("key index (%d)\n", key_idx);
+ brcmf_dbg(CONN, "key index (%d)\n", key_idx);
/* Set the new key/index */
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = send_key_to_dongle(cfg, bssidx, ndev, &key);
- if (err) {
- if (err == -EINVAL) {
- if (key.index >= DOT11_MAX_DEFAULT_KEYS)
- /* we ignore this key index in this case */
- WL_ERR("invalid key index (%d)\n", key_idx);
- }
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- }
+ err = send_key_to_dongle(ndev, &key);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2213,24 +1837,22 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie, struct key_params * params))
{
struct key_params params;
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_security *sec;
s32 wsec;
s32 err = 0;
- s32 bssidx;
- WL_TRACE("Enter\n");
- WL_CONN("key index (%d)\n", key_idx);
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CONN, "key index (%d)\n", key_idx);
+ if (!check_vif_up(ifp->vif))
return -EIO;
memset(&params, 0, sizeof(params));
- bssidx = brcmf_find_bssidx(cfg, ndev);
- err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
if (err) {
- WL_ERR("WLC_GET_WSEC error (%d)\n", err);
+ brcmf_err("WLC_GET_WSEC error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
err = -EAGAIN;
goto done;
@@ -2240,29 +1862,29 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
- WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
params.cipher = WLAN_CIPHER_SUITE_WEP104;
- WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
break;
case TKIP_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_TKIP;
- WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
break;
case AES_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
- WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
+ brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
default:
- WL_ERR("Invalid algo (0x%x)\n", wsec);
+ brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
}
callback(cookie, &params);
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2270,7 +1892,7 @@ static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *ndev, u8 key_idx)
{
- WL_INFO("Not supported\n");
+ brcmf_dbg(INFO, "Not supported\n");
return -EOPNOTSUPP;
}
@@ -2279,73 +1901,73 @@ static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac, struct station_info *sinfo)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_scb_val_le scb_val;
int rssi;
s32 rate;
s32 err = 0;
u8 *bssid = profile->bssid;
- struct brcmf_sta_info_le *sta_info_le;
+ struct brcmf_sta_info_le sta_info_le;
- WL_TRACE("Enter, MAC %pM\n", mac);
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
+ if (!check_vif_up(ifp->vif))
return -EIO;
- if (cfg->conf->mode == WL_MODE_AP) {
- err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
- cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
+ if (ifp->vif->mode == WL_MODE_AP) {
+ memcpy(&sta_info_le, mac, ETH_ALEN);
+ err = brcmf_fil_iovar_data_get(ifp, "sta_info",
+ &sta_info_le,
+ sizeof(sta_info_le));
if (err < 0) {
- WL_ERR("GET STA INFO failed, %d\n", err);
+ brcmf_err("GET STA INFO failed, %d\n", err);
goto done;
}
- sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
-
sinfo->filled = STATION_INFO_INACTIVE_TIME;
- sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
- if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+ sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
+ if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
sinfo->filled |= STATION_INFO_CONNECTED_TIME;
- sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+ sinfo->connected_time = le32_to_cpu(sta_info_le.in);
}
- WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
- sinfo->inactive_time, sinfo->connected_time);
- } else if (cfg->conf->mode == WL_MODE_BSS) {
+ brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
+ sinfo->inactive_time, sinfo->connected_time);
+ } else if (ifp->vif->mode == WL_MODE_BSS) {
if (memcmp(mac, bssid, ETH_ALEN)) {
- WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
- mac, bssid);
+ brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+ mac, bssid);
err = -ENOENT;
goto done;
}
/* Report the current tx rate */
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err) {
- WL_ERR("Could not get rate (%d)\n", err);
+ brcmf_err("Could not get rate (%d)\n", err);
goto done;
} else {
sinfo->filled |= STATION_INFO_TX_BITRATE;
sinfo->txrate.legacy = rate * 5;
- WL_CONN("Rate %d Mbps\n", rate / 2);
+ brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2);
}
- if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
memset(&scb_val, 0, sizeof(scb_val));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
- sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
if (err) {
- WL_ERR("Could not get rssi (%d)\n", err);
+ brcmf_err("Could not get rssi (%d)\n", err);
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = rssi;
- WL_CONN("RSSI %d dBm\n", rssi);
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
}
} else
err = -EPERM;
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2356,8 +1978,9 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
s32 pm;
s32 err = 0;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
/*
* Powersave enable/disable request is coming from the
@@ -2367,24 +1990,24 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
* FW later while initializing the dongle
*/
cfg->pwr_save = enabled;
- if (!test_bit(WL_STATUS_READY, &cfg->status)) {
+ if (!check_vif_up(ifp->vif)) {
- WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
+ brcmf_dbg(INFO, "Device is not ready, storing the value in cfg_info struct\n");
goto done;
}
pm = enabled ? PM_FAST : PM_OFF;
- WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
+ brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled"));
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
if (err) {
if (err == -ENODEV)
- WL_ERR("net_device is not ready yet\n");
+ brcmf_err("net_device is not ready yet\n");
else
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
}
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2393,6 +2016,7 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcm_rateset_le rateset_le;
s32 rate;
s32 val;
@@ -2401,16 +2025,16 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
u32 legacy;
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
/* addr param is always NULL. ignore it */
/* Get current rateset */
- err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le,
- sizeof(rateset_le));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET,
+ &rateset_le, sizeof(rateset_le));
if (err) {
- WL_ERR("could not get current rateset (%d)\n", err);
+ brcmf_err("could not get current rateset (%d)\n", err);
goto done;
}
@@ -2428,22 +2052,23 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
/* Specified rate in bps */
rate = val / 500000;
- WL_CONN("rate %d mbps\n", rate / 2);
+ brcmf_dbg(CONN, "rate %d mbps\n", rate / 2);
/*
*
* Set rate override,
	 * Since this is a/b/g-blind, both a/bg_rate are enforced.
*/
- err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate);
- err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate);
+ err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
+ err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
if (err_bg && err_a) {
- WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
+ brcmf_err("could not set fixed rate (%d) (%d)\n", err_bg,
+ err_a);
err = err_bg | err_a;
}
done:
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -2464,7 +2089,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
s32 notify_signal;
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
- WL_ERR("Bss info is larger than buffer. Discarding\n");
+ brcmf_err("Bss info is larger than buffer. Discarding\n");
return 0;
}
@@ -2485,13 +2110,11 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
notify_ielen = le32_to_cpu(bi->ie_length);
notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
- WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- bi->BSSID[0], bi->BSSID[1], bi->BSSID[2],
- bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]);
- WL_CONN("Channel: %d(%d)\n", channel, freq);
- WL_CONN("Capability: %X\n", notify_capability);
- WL_CONN("Beacon interval: %d\n", notify_interval);
- WL_CONN("Signal: %d\n", notify_signal);
+ brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
+ brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
+ brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
+ brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
+ brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
0, notify_capability, notify_interval, notify_ie,
@@ -2522,13 +2145,14 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
int i;
bss_list = cfg->bss_list;
- if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
- WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
- bss_list->version);
+ if (bss_list->count != 0 &&
+ bss_list->version != BRCMF_BSS_INFO_VERSION) {
+ brcmf_err("Version %d != WL_BSS_INFO_VERSION\n",
+ bss_list->version);
return -EOPNOTSUPP;
}
- WL_SCAN("scanned AP count (%d)\n", bss_list->count);
- for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
+ brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count);
+ for (i = 0; i < bss_list->count; i++) {
bi = next_bss_le(bss_list, bi);
err = brcmf_inform_single_bss(cfg, bi);
if (err)
@@ -2555,7 +2179,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
size_t notify_ielen;
s32 notify_signal;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (buf == NULL) {
@@ -2565,9 +2189,10 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
+ buf, WL_BSS_INFO_MAX);
if (err) {
- WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
+ brcmf_err("WLC_GET_BSS_INFO failed: %d\n", err);
goto CleanUp;
}
@@ -2590,10 +2215,10 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
notify_ielen = le32_to_cpu(bi->ie_length);
notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
- WL_CONN("channel: %d(%d)\n", channel, freq);
- WL_CONN("capability: %X\n", notify_capability);
- WL_CONN("beacon interval: %d\n", notify_interval);
- WL_CONN("signal: %d\n", notify_signal);
+ brcmf_dbg(CONN, "channel: %d(%d)\n", channel, freq);
+ brcmf_dbg(CONN, "capability: %X\n", notify_capability);
+ brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
+ brcmf_dbg(CONN, "signal: %d\n", notify_signal);
bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
0, notify_capability, notify_interval,
@@ -2610,14 +2235,14 @@ CleanUp:
kfree(buf);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
{
- return cfg->conf->mode == WL_MODE_IBSS;
+ return vif->mode == WL_MODE_IBSS;
}
/*
@@ -2674,12 +2299,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
return false;
}
-struct brcmf_vs_tlv *
+static struct brcmf_vs_tlv *
brcmf_find_wpaie(u8 *parse, u32 len)
{
struct brcmf_tlv *ie;
- while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
@@ -2689,7 +2314,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2699,17 +2326,17 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
u8 *ie;
s32 err = 0;
- WL_TRACE("Enter\n");
- if (brcmf_is_ibssmode(cfg))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (brcmf_is_ibssmode(ifp->vif))
return err;
ssid = &profile->ssid;
*(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
- cfg->extra_buf, WL_EXTRA_BUF_MAX);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+ cfg->extra_buf, WL_EXTRA_BUF_MAX);
if (err) {
- WL_ERR("Could not get bss info %d\n", err);
+ brcmf_err("Could not get bss info %d\n", err);
goto update_bss_info_out;
}
@@ -2732,252 +2359,30 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
* so we specifically query dtim information from the dongle.
*/
u32 var;
- err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
- "dtim_assoc", &var);
+ err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
if (err) {
- WL_ERR("wl dtim_assoc failed (%d)\n", err);
+ brcmf_err("wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
}
dtim_period = (u8)var;
}
- profile->beacon_interval = beacon_interval;
- profile->dtim_period = dtim_period;
-
update_bss_info_out:
- WL_TRACE("Exit");
+ brcmf_dbg(TRACE, "Exit");
return err;
}
static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
struct escan_info *escan = &cfg->escan_info;
- struct brcmf_ssid ssid;
-
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
- if (cfg->iscan_on) {
- iscan->state = WL_ISCAN_STATE_IDLE;
-
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
-
- cancel_work_sync(&iscan->work);
- /* Abort iscan running in FW */
- memset(&ssid, 0, sizeof(ssid));
- brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
-
- if (cfg->scan_request) {
- /* Indidate scan abort to cfg80211 layer */
- WL_INFO("Terminating scan in progress\n");
- cfg80211_scan_done(cfg->scan_request, true);
- cfg->scan_request = NULL;
- }
- }
- if (cfg->escan_on && cfg->scan_request) {
+ set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
+ if (cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
}
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
-}
-
-static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
- bool aborted)
-{
- struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
- struct net_device *ndev = cfg_to_ndev(cfg);
-
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scan complete while device not scanning\n");
- return;
- }
- if (cfg->scan_request) {
- WL_SCAN("ISCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
- cfg80211_scan_done(cfg->scan_request, aborted);
- brcmf_set_mpc(ndev, 1);
- cfg->scan_request = NULL;
- }
- cfg->iscan_kickstart = false;
-}
-
-static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
-{
- if (iscan->state != WL_ISCAN_STATE_IDLE) {
- WL_SCAN("wake up iscan\n");
- schedule_work(&iscan->work);
- return 0;
- }
-
- return -EIO;
-}
-
-static s32
-brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
- struct brcmf_scan_results **bss_list)
-{
- struct brcmf_iscan_results list;
- struct brcmf_scan_results *results;
- struct brcmf_scan_results_le *results_le;
- struct brcmf_iscan_results *list_buf;
- s32 err = 0;
-
- memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
- list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
- results = &list_buf->results;
- results_le = &list_buf->results_le;
- results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
- err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
- BRCMF_ISCAN_RESULTS_FIXED_SIZE,
- iscan->scan_buf, WL_ISCAN_BUF_MAX);
- if (err) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- results->buflen = le32_to_cpu(results_le->buflen);
- results->version = le32_to_cpu(results_le->version);
- results->count = le32_to_cpu(results_le->count);
- WL_SCAN("results->count = %d\n", results_le->count);
- WL_SCAN("results->buflen = %d\n", results_le->buflen);
- *status = le32_to_cpu(list_buf->status_le);
- WL_SCAN("status = %d\n", *status);
- *bss_list = results;
-
- return err;
-}
-
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_IDLE;
- brcmf_inform_bss(cfg);
- brcmf_notify_iscan_complete(iscan, false);
-
- return err;
-}
-
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
- s32 err = 0;
-
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
- s32 err = 0;
-
- brcmf_inform_bss(cfg);
- brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_IDLE;
- brcmf_notify_iscan_complete(iscan, true);
-
- return err;
-}
-
-static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan =
- container_of(work, struct brcmf_cfg80211_iscan_ctrl,
- work);
- struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
- struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
- u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
-
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
-
- if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
- status = BRCMF_SCAN_RESULTS_ABORTED;
- WL_ERR("Abort iscan\n");
- }
-
- el->handler[status](cfg);
-}
-
-static void brcmf_iscan_timer(unsigned long data)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan =
- (struct brcmf_cfg80211_iscan_ctrl *)data;
-
- if (iscan) {
- iscan->timer_on = 0;
- WL_SCAN("timer expired\n");
- brcmf_wakeup_iscan(iscan);
- }
-}
-
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
-
- if (cfg->iscan_on) {
- iscan->state = WL_ISCAN_STATE_IDLE;
- INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
- }
-
- return 0;
-}
-
-static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
-{
- memset(el, 0, sizeof(*el));
- el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
- el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
- el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
- el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
- el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
-}
-
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
- int err = 0;
-
- if (cfg->iscan_on) {
- iscan->ndev = cfg_to_ndev(cfg);
- brcmf_init_iscan_eloop(&iscan->el);
- iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
- init_timer(&iscan->timer);
- iscan->timer.data = (unsigned long) iscan;
- iscan->timer.function = brcmf_iscan_timer;
- err = brcmf_invoke_iscan(cfg);
- if (!err)
- iscan->data = cfg;
- }
-
- return err;
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
+ clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
}
static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
@@ -2996,9 +2401,8 @@ static void brcmf_escan_timeout(unsigned long data)
(struct brcmf_cfg80211_info *)data;
if (cfg->scan_request) {
- WL_ERR("timer expired\n");
- if (cfg->escan_on)
- schedule_work(&cfg->escan_timeout_work);
+ brcmf_err("timer expired\n");
+ schedule_work(&cfg->escan_timeout_work);
}
}
@@ -3035,10 +2439,11 @@ brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
}
static s32
-brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct net_device *ndev = ifp->ndev;
s32 status;
s32 err = 0;
struct brcmf_escan_result_le *escan_result_le;
@@ -3049,31 +2454,29 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
u32 i;
bool aborted;
- status = be32_to_cpu(e->status);
+ status = e->status;
- if (!ndev || !cfg->escan_on ||
- !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
- ndev, cfg->escan_on,
- !test_bit(WL_STATUS_SCANNING, &cfg->status));
+ if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ brcmf_err("scan not ready ndev %p drv_status %x\n", ndev,
+ !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
return -EPERM;
}
if (status == BRCMF_E_STATUS_PARTIAL) {
- WL_SCAN("ESCAN Partial result\n");
+ brcmf_dbg(SCAN, "ESCAN Partial result\n");
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
- WL_ERR("Invalid escan result (NULL pointer)\n");
+ brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
if (!cfg->scan_request) {
- WL_SCAN("result without cfg80211 request\n");
+ brcmf_dbg(SCAN, "result without cfg80211 request\n");
goto exit;
}
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
- WL_ERR("Invalid bss_count %d: ignoring\n",
- escan_result_le->bss_count);
+ brcmf_err("Invalid bss_count %d: ignoring\n",
+ escan_result_le->bss_count);
goto exit;
}
bss_info_le = &escan_result_le->bss_info_le;
@@ -3081,8 +2484,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
bi_length = le32_to_cpu(bss_info_le->length);
if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
WL_ESCAN_RESULTS_FIXED_SIZE)) {
- WL_ERR("Invalid bss_info length %d: ignoring\n",
- bi_length);
+ brcmf_err("Invalid bss_info length %d: ignoring\n",
+ bi_length);
goto exit;
}
@@ -3090,7 +2493,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
BIT(NL80211_IFTYPE_ADHOC))) {
if (le16_to_cpu(bss_info_le->capability) &
WLAN_CAPABILITY_IBSS) {
- WL_ERR("Ignoring IBSS result\n");
+ brcmf_err("Ignoring IBSS result\n");
goto exit;
}
}
@@ -3098,7 +2501,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
- WL_ERR("Buffer is too small: ignoring\n");
+ brcmf_err("Buffer is too small: ignoring\n");
goto exit;
}
@@ -3124,7 +2527,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
brcmf_notify_escan_complete(cfg, ndev, aborted,
false);
} else
- WL_ERR("Unexpected scan result 0x%x\n", status);
+ brcmf_err("Unexpected scan result 0x%x\n", status);
}
exit:
return err;
@@ -3132,18 +2535,15 @@ exit:
static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
{
-
- if (cfg->escan_on) {
- cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
- brcmf_cfg80211_escan_handler;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- /* Init scan_timeout timer */
- init_timer(&cfg->escan_timeout);
- cfg->escan_timeout.data = (unsigned long) cfg;
- cfg->escan_timeout.function = brcmf_escan_timeout;
- INIT_WORK(&cfg->escan_timeout_work,
- brcmf_cfg80211_escan_timeout_worker);
- }
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ESCAN_RESULT,
+ brcmf_cfg80211_escan_handler);
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ /* Init scan_timeout timer */
+ init_timer(&cfg->escan_timeout);
+ cfg->escan_timeout.data = (unsigned long) cfg;
+ cfg->escan_timeout.function = brcmf_escan_timeout;
+ INIT_WORK(&cfg->escan_timeout_work,
+ brcmf_cfg80211_escan_timeout_worker);
}
static __always_inline void brcmf_delay(u32 ms)
@@ -3158,19 +2558,8 @@ static __always_inline void brcmf_delay(u32 ms)
static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-
- /*
- * Check for WL_STATUS_READY before any function call which
- * could result is bus access. Don't block the resume for
- * any driver error conditions
- */
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
- if (test_bit(WL_STATUS_READY, &cfg->status))
- brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
-
- WL_TRACE("Exit\n");
return 0;
}
@@ -3179,85 +2568,49 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_cfg80211_vif *vif;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
/*
- * Check for WL_STATUS_READY before any function call which
- * could result is bus access. Don't block the suspend for
- * any driver error conditions
+ * if the primary net_device is not READY there is nothing
+ * we can do but pray resume goes smoothly.
*/
+ vif = ((struct brcmf_if *)netdev_priv(ndev))->vif;
+ if (!check_vif_up(vif))
+ goto exit;
- /*
- * While going to suspend if associated with AP disassociate
- * from AP to save power while system is in suspended state
- */
- if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
- test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("Disassociating from AP"
- " while entering suspend state\n");
- brcmf_link_down(cfg);
-
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
+ continue;
/*
- * Make sure WPA_Supplicant receives all the event
+ * While going to suspend if associated with AP disassociate
+ * from AP to save power while system is in suspended state
+ */
+ brcmf_link_down(vif);
+
+ /* Make sure WPA_Supplicant receives all the events
* generated due to DISASSOC call to the fw to keep
* the fw state and WPA_Supplicant state consistent
*/
brcmf_delay(500);
}
- if (test_bit(WL_STATUS_READY, &cfg->status))
+ /* end any scanning */
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
brcmf_abort_scanning(cfg);
- else
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
/* Turn off watchdog timer */
- if (test_bit(WL_STATUS_READY, &cfg->status))
- brcmf_set_mpc(ndev, 1);
-
- WL_TRACE("Exit\n");
+ brcmf_set_mpc(ndev, 1);
+exit:
+ brcmf_dbg(TRACE, "Exit\n");
+ /* clear any scanning activity */
+ cfg->scan_status = 0;
return 0;
}
static __used s32
-brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
-{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- u32 buflen;
-
- buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- BUG_ON(!buflen);
-
- return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
- buflen);
-}
-
-static s32
-brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
- s32 buf_len)
-{
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- u32 len;
- s32 err = 0;
-
- len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- BUG_ON(!len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
- WL_DCMD_LEN_MAX);
- if (err) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- memcpy(buf, cfg->dcmd_buf, buf_len);
-
- return err;
-}
-
-static __used s32
brcmf_update_pmklist(struct net_device *ndev,
struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
{
@@ -3266,17 +2619,18 @@ brcmf_update_pmklist(struct net_device *ndev,
pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
- WL_CONN("No of elements %d\n", pmkid_len);
+ brcmf_dbg(CONN, "No of elements %d\n", pmkid_len);
for (i = 0; i < pmkid_len; i++) {
- WL_CONN("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].BSSID);
+ brcmf_dbg(CONN, "PMKID[%d]: %pM =\n", i,
+ &pmk_list->pmkids.pmkid[i].BSSID);
for (j = 0; j < WLAN_PMKID_LEN; j++)
- WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
+ brcmf_dbg(CONN, "%02x\n",
+ pmk_list->pmkids.pmkid[i].PMKID[j]);
}
if (!err)
- brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list));
+ brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info",
+ (char *)pmk_list, sizeof(*pmk_list));
return err;
}
@@ -3286,13 +2640,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
s32 err = 0;
int i;
int pmkid_len;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
pmkid_len = le32_to_cpu(pmkids->npmkid);
@@ -3309,14 +2664,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
} else
err = -EINVAL;
- WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- pmkids->pmkid[pmkid_len].BSSID);
+ brcmf_dbg(CONN, "set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ pmkids->pmkid[pmkid_len].BSSID);
for (i = 0; i < WLAN_PMKID_LEN; i++)
- WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+ brcmf_dbg(CONN, "%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -3325,21 +2680,22 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct pmkid_list pmkid;
s32 err = 0;
int i, pmkid_len;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
- WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
- &pmkid.pmkid[0].BSSID);
+ brcmf_dbg(CONN, "del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID);
for (i = 0; i < WLAN_PMKID_LEN; i++)
- WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
+ brcmf_dbg(CONN, "%02x\n", pmkid.pmkid[0].PMKID[i]);
pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
for (i = 0; i < pmkid_len; i++)
@@ -3366,7 +2722,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -3375,16 +2731,17 @@ static s32
brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
+ brcmf_dbg(TRACE, "Enter\n");
+ if (!check_vif_up(ifp->vif))
return -EIO;
memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -3398,10 +2755,11 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
* cfg80211_scan_request one out of the received PNO event.
*/
static s32
-brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct net_device *ndev = ifp->ndev;
struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
struct cfg80211_scan_request *request = NULL;
struct cfg80211_ssid *ssid = NULL;
@@ -3414,10 +2772,10 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
u32 result_count;
u32 status;
- WL_SCAN("Enter\n");
+ brcmf_dbg(SCAN, "Enter\n");
- if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
- WL_SCAN("PFN NET LOST event. Do Nothing\n");
+ if (e->event_code == BRCMF_E_PFN_NET_LOST) {
+ brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n");
return 0;
}
@@ -3430,7 +2788,7 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
* multiple NET_FOUND events. For now place a warning here.
*/
WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
- WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
+ brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
if (result_count > 0) {
int i;
@@ -3449,13 +2807,14 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (!netinfo) {
- WL_ERR("Invalid netinfo ptr. index: %d\n", i);
+ brcmf_err("Invalid netinfo ptr. index: %d\n",
+ i);
err = -EINVAL;
goto out_err;
}
- WL_SCAN("SSID:%s Channel:%d\n",
- netinfo->SSID, netinfo->channel);
+ brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
+ netinfo->SSID, netinfo->channel);
memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
ssid[i].ssid_len = netinfo->SSID_len;
request->n_ssids++;
@@ -3478,21 +2837,21 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
if (request->n_ssids)
request->ssids = &ssid[0];
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
- set_bit(WL_STATUS_SCANNING, &cfg->status);
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (err) {
- clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
goto out_err;
}
cfg->sched_escan = true;
cfg->scan_request = request;
} else {
- WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
+ brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
goto out_err;
}
@@ -3509,21 +2868,19 @@ out_err:
return err;
}
-#ifndef CONFIG_BRCMISCAN
static int brcmf_dev_pno_clean(struct net_device *ndev)
{
- char iovbuf[128];
int ret;
/* Disable pfn */
- ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+ ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
if (ret == 0) {
/* clear pfn */
- ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
- iovbuf, sizeof(iovbuf));
+ ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
+ NULL, 0);
}
if (ret < 0)
- WL_ERR("failed code %d\n", ret);
+ brcmf_err("failed code %d\n", ret);
return ret;
}
@@ -3531,7 +2888,6 @@ static int brcmf_dev_pno_clean(struct net_device *ndev)
static int brcmf_dev_pno_config(struct net_device *ndev)
{
struct brcmf_pno_param_le pfn_param;
- char iovbuf[128];
memset(&pfn_param, 0, sizeof(pfn_param));
pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
@@ -3544,9 +2900,8 @@ static int brcmf_dev_pno_config(struct net_device *ndev)
/* set up pno scan fr */
pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
- return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
- &pfn_param, sizeof(pfn_param),
- iovbuf, sizeof(iovbuf));
+ return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set",
+ &pfn_param, sizeof(pfn_param));
}
static int
@@ -3554,30 +2909,30 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_sched_scan_request *request)
{
- char iovbuf[128];
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_pno_net_param_le pfn;
int i;
int ret = 0;
- WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n",
- request->n_match_sets, request->n_ssids);
- if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
+ request->n_match_sets, request->n_ssids);
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
if (!request || !request->n_ssids || !request->n_match_sets) {
- WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
- request ? request->n_ssids : 0);
+ brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
+ request ? request->n_ssids : 0);
return -EINVAL;
}
if (request->n_ssids > 0) {
for (i = 0; i < request->n_ssids; i++) {
/* Active scan req for ssids */
- WL_SCAN(">>> Active scan req for ssid (%s)\n",
- request->ssids[i].ssid);
+ brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
+ request->ssids[i].ssid);
/*
* match_set ssids is a superset of the n_ssid list,
@@ -3590,14 +2945,14 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
/* clean up everything */
ret = brcmf_dev_pno_clean(ndev);
if (ret < 0) {
- WL_ERR("failed error=%d\n", ret);
+ brcmf_err("failed error=%d\n", ret);
return ret;
}
/* configure pno */
ret = brcmf_dev_pno_config(ndev);
if (ret < 0) {
- WL_ERR("PNO setup failed!! ret=%d\n", ret);
+ brcmf_err("PNO setup failed!! ret=%d\n", ret);
return -EINVAL;
}
@@ -3610,7 +2965,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
ssid_len = ssid->ssid_len;
if (!ssid_len) {
- WL_ERR("skip broadcast ssid\n");
+ brcmf_err("skip broadcast ssid\n");
continue;
}
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
@@ -3620,16 +2975,14 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
- ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
- &pfn, sizeof(pfn),
- iovbuf, sizeof(iovbuf));
- WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
- ret == 0 ? "set" : "failed",
- ssid->ssid);
+ ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
+ sizeof(pfn));
+ brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
+ ret == 0 ? "set" : "failed", ssid->ssid);
}
/* Enable the PNO */
- if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
- WL_ERR("PNO enable failed!! ret=%d\n", ret);
+ if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
+ brcmf_err("PNO enable failed!! ret=%d\n", ret);
return -EINVAL;
}
} else {
@@ -3644,24 +2997,31 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- WL_SCAN("enter\n");
+ brcmf_dbg(SCAN, "enter\n");
brcmf_dev_pno_clean(ndev);
if (cfg->sched_escan)
brcmf_notify_escan_complete(cfg, ndev, true, true);
return 0;
}
-#endif /* CONFIG_BRCMISCAN */
#ifdef CONFIG_NL80211_TESTMODE
static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg->wdev->netdev;
+ struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_dcmd *dcmd = data;
struct sk_buff *reply;
int ret;
- ret = brcmf_netlink_dcmd(ndev, dcmd);
+ brcmf_dbg(TRACE, "cmd %x set %d buf %p len %d\n", dcmd->cmd, dcmd->set,
+ dcmd->buf, dcmd->len);
+
+ if (dcmd->set)
+ ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), dcmd->cmd,
+ dcmd->buf, dcmd->len);
+ else
+ ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), dcmd->cmd,
+ dcmd->buf, dcmd->len);
if (ret == 0) {
reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
@@ -3673,25 +3033,25 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
/* set auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
if (err < 0) {
- WL_ERR("auth error %d\n", err);
+ brcmf_err("auth error %d\n", err);
return err;
}
/* set wsec */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
if (err < 0) {
- WL_ERR("wsec error %d\n", err);
+ brcmf_err("wsec error %d\n", err);
return err;
}
/* set upper-layer auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
- WPA_AUTH_NONE, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE);
if (err < 0) {
- WL_ERR("wpa_auth error %d\n", err);
+ brcmf_err("wpa_auth error %d\n", err);
return err;
}
@@ -3708,8 +3068,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
static s32
brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
- bool is_rsn_ie, s32 bssidx)
+ bool is_rsn_ie)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
@@ -3724,27 +3085,28 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
u16 rsn_cap;
u32 wme_bss_disable;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
if (wpa_ie == NULL)
goto exit;
len = wpa_ie->len + TLV_HDR_LEN;
data = (u8 *)wpa_ie;
- offset = 0;
+ offset = TLV_HDR_LEN;
if (!is_rsn_ie)
offset += VS_IE_FIXED_HDR_LEN;
- offset += WPA_IE_VERSION_LEN;
+ else
+ offset += WPA_IE_VERSION_LEN;
/* check for multicast cipher suite */
if (offset + WPA_IE_MIN_OUI_LEN > len) {
err = -EINVAL;
- WL_ERR("no multicast cipher suite\n");
+ brcmf_err("no multicast cipher suite\n");
goto exit;
}
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- WL_ERR("ivalid OUI\n");
+ brcmf_err("ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -3766,7 +3128,7 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
break;
default:
err = -EINVAL;
- WL_ERR("Invalid multi cast cipher info\n");
+ brcmf_err("Invalid multi cast cipher info\n");
goto exit;
}
@@ -3777,13 +3139,13 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
/* Check for unicast suite(s) */
if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
err = -EINVAL;
- WL_ERR("no unicast cipher suite\n");
+ brcmf_err("no unicast cipher suite\n");
goto exit;
}
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- WL_ERR("ivalid OUI\n");
+ brcmf_err("ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -3801,7 +3163,7 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
pval |= AES_ENABLED;
break;
default:
- WL_ERR("Ivalid unicast security info\n");
+ brcmf_err("Ivalid unicast security info\n");
}
offset++;
}
@@ -3811,33 +3173,33 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
/* Check for auth key management suite(s) */
if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
err = -EINVAL;
- WL_ERR("no auth key mgmt suite\n");
+ brcmf_err("no auth key mgmt suite\n");
goto exit;
}
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- WL_ERR("ivalid OUI\n");
+ brcmf_err("ivalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
switch (data[offset]) {
case RSN_AKM_NONE:
- WL_TRACE("RSN_AKM_NONE\n");
+ brcmf_dbg(TRACE, "RSN_AKM_NONE\n");
wpa_auth |= WPA_AUTH_NONE;
break;
case RSN_AKM_UNSPECIFIED:
- WL_TRACE("RSN_AKM_UNSPECIFIED\n");
+ brcmf_dbg(TRACE, "RSN_AKM_UNSPECIFIED\n");
is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
(wpa_auth |= WPA_AUTH_UNSPECIFIED);
break;
case RSN_AKM_PSK:
- WL_TRACE("RSN_AKM_PSK\n");
+ brcmf_dbg(TRACE, "RSN_AKM_PSK\n");
is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
(wpa_auth |= WPA_AUTH_PSK);
break;
default:
- WL_ERR("Ivalid key mgmt info\n");
+ brcmf_err("Ivalid key mgmt info\n");
}
offset++;
}
@@ -3850,10 +3212,10 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
wme_bss_disable = 0;
}
/* set wme_bss_disable to sync RSN Capabilities */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
- wme_bss_disable, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
+ wme_bss_disable);
if (err < 0) {
- WL_ERR("wme_bss_disable error %d\n", err);
+ brcmf_err("wme_bss_disable error %d\n", err);
goto exit;
}
}
@@ -3861,21 +3223,21 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
wsec = (pval | gval | SES_OW_ENABLED);
/* set auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
if (err < 0) {
- WL_ERR("auth error %d\n", err);
+ brcmf_err("auth error %d\n", err);
goto exit;
}
/* set wsec */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
if (err < 0) {
- WL_ERR("wsec error %d\n", err);
+ brcmf_err("wsec error %d\n", err);
goto exit;
}
/* set upper-layer auth */
- err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
if (err < 0) {
- WL_ERR("wpa_auth error %d\n", err);
+ brcmf_err("wpa_auth error %d\n", err);
goto exit;
}
@@ -3884,7 +3246,7 @@ exit:
}
static s32
-brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
+brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
struct parsed_vndr_ies *vndr_ies)
{
s32 err = 0;
@@ -3903,15 +3265,15 @@ brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
vndrie = (struct brcmf_vs_tlv *)ie;
/* len should be bigger than OUI length + one */
if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
- WL_ERR("invalid vndr ie. length is too small %d\n",
- vndrie->len);
+ brcmf_err("invalid vndr ie. length is too small %d\n",
+ vndrie->len);
goto next;
}
/* if wpa or wme ie, do not add ie */
if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
((vndrie->oui_type == WPA_OUI_TYPE) ||
(vndrie->oui_type == WME_OUI_TYPE))) {
- WL_TRACE("Found WPA/WME oui. Do not add it\n");
+ brcmf_dbg(TRACE, "Found WPA/WME oui. Do not add it\n");
goto next;
}
@@ -3924,20 +3286,21 @@ brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
vndr_ies->count++;
- WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
- parsed_info->vndrie.oui[0],
- parsed_info->vndrie.oui[1],
- parsed_info->vndrie.oui[2],
- parsed_info->vndrie.oui_type);
+ brcmf_dbg(TRACE, "** OUI %02x %02x %02x, type 0x%02x\n",
+ parsed_info->vndrie.oui[0],
+ parsed_info->vndrie.oui[1],
+ parsed_info->vndrie.oui[2],
+ parsed_info->vndrie.oui_type);
if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
break;
next:
- remaining_len -= ie->len;
- if (remaining_len <= 2)
+ remaining_len -= (ie->len + TLV_HDR_LEN);
+ if (remaining_len <= TLV_HDR_LEN)
ie = NULL;
else
- ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len);
+ ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len +
+ TLV_HDR_LEN);
}
return err;
}
@@ -3963,17 +3326,18 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
return ie_len + VNDR_IE_HDR_SIZE;
}
-s32
-brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev, s32 bssidx, s32 pktflag,
- u8 *vndr_ie_buf, u32 vndr_ie_len)
+static
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len)
{
+ struct brcmf_if *ifp;
+ struct vif_saved_ie *saved_ie;
s32 err = 0;
u8 *iovar_ie_buf;
u8 *curr_ie_buf;
u8 *mgmt_ie_buf = NULL;
int mgmt_ie_buf_len;
- u32 *mgmt_ie_len = 0;
+ u32 *mgmt_ie_len;
u32 del_add_ie_buf_len = 0;
u32 total_ie_buf_len = 0;
u32 parsed_ie_buf_len = 0;
@@ -3984,40 +3348,42 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
u8 *ptr;
int remained_buf_len;
- WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+ if (!vif)
+ return -ENODEV;
+ ifp = vif->ifp;
+ saved_ie = &vif->saved_ie;
+
+ brcmf_dbg(TRACE, "bssidx %d, pktflag : 0x%02X\n", ifp->bssidx, pktflag);
iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (!iovar_ie_buf)
return -ENOMEM;
curr_ie_buf = iovar_ie_buf;
- if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
- test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+ if (ifp->vif->mode == WL_MODE_AP) {
switch (pktflag) {
case VNDR_IE_PRBRSP_FLAG:
- mgmt_ie_buf = cfg->ap_info->probe_res_ie;
- mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
- mgmt_ie_buf_len =
- sizeof(cfg->ap_info->probe_res_ie);
+ mgmt_ie_buf = saved_ie->probe_res_ie;
+ mgmt_ie_len = &saved_ie->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
break;
case VNDR_IE_BEACON_FLAG:
- mgmt_ie_buf = cfg->ap_info->beacon_ie;
- mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+ mgmt_ie_buf = saved_ie->beacon_ie;
+ mgmt_ie_len = &saved_ie->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
break;
default:
err = -EPERM;
- WL_ERR("not suitable type\n");
+ brcmf_err("not suitable type\n");
goto exit;
}
- bssidx = 0;
} else {
err = -EPERM;
- WL_ERR("not suitable type\n");
+ brcmf_err("not suitable type\n");
goto exit;
}
if (vndr_ie_len > mgmt_ie_buf_len) {
err = -ENOMEM;
- WL_ERR("extra IE size too big\n");
+ brcmf_err("extra IE size too big\n");
goto exit;
}
@@ -4033,11 +3399,11 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
}
}
- if (mgmt_ie_buf != NULL) {
+ if (mgmt_ie_buf && *mgmt_ie_len) {
if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
(memcmp(mgmt_ie_buf, curr_ie_buf,
parsed_ie_buf_len) == 0)) {
- WL_TRACE("Previous mgmt IE is equals to current IE");
+ brcmf_dbg(TRACE, "Previous mgmt IE equals to current IE\n");
goto exit;
}
@@ -4048,12 +3414,12 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
for (i = 0; i < old_vndr_ies.count; i++) {
vndrie_info = &old_vndr_ies.ie_info[i];
- WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
- vndrie_info->vndrie.id,
- vndrie_info->vndrie.len,
- vndrie_info->vndrie.oui[0],
- vndrie_info->vndrie.oui[1],
- vndrie_info->vndrie.oui[2]);
+ brcmf_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]);
del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
vndrie_info->ie_ptr,
@@ -4075,24 +3441,27 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
for (i = 0; i < new_vndr_ies.count; i++) {
vndrie_info = &new_vndr_ies.ie_info[i];
- WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
- vndrie_info->vndrie.id,
- vndrie_info->vndrie.len,
- vndrie_info->vndrie.oui[0],
- vndrie_info->vndrie.oui[1],
- vndrie_info->vndrie.oui[2]);
+ /* verify remained buf size before copy data */
+ if (remained_buf_len < (vndrie_info->vndrie.len +
+ VNDR_IE_VSIE_OFFSET)) {
+ brcmf_err("no space in mgmt_ie_buf: len left %d",
+ remained_buf_len);
+ break;
+ }
+ remained_buf_len -= (vndrie_info->ie_len +
+ VNDR_IE_VSIE_OFFSET);
+
+ brcmf_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]);
del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
vndrie_info->ie_ptr,
vndrie_info->ie_len,
"add");
- /* verify remained buf size before copy data */
- remained_buf_len -= vndrie_info->ie_len;
- if (remained_buf_len < 0) {
- WL_ERR("no space in mgmt_ie_buf: len left %d",
- remained_buf_len);
- break;
- }
/* save the parsed IE in wl struct */
memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
@@ -4104,13 +3473,10 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
}
}
if (total_ie_buf_len) {
- err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
- iovar_ie_buf,
- total_ie_buf_len,
- cfg->extra_buf,
- WL_EXTRA_BUF_MAX, bssidx);
+ err = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
+ total_ie_buf_len);
if (err)
- WL_ERR("vndr ie set error : %d\n", err);
+ brcmf_err("vndr ie set error : %d\n", err);
}
exit:
@@ -4123,25 +3489,25 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
s32 ie_offset;
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
- s32 ioctl_value;
s32 err = -EPERM;
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
s32 bssidx = 0;
- WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
- settings->channel_type, settings->beacon_interval,
- settings->dtim_period);
- WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
- settings->ssid, settings->ssid_len, settings->auth_type,
- settings->inactivity_timeout);
+ brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
+ cfg80211_get_chandef_type(&settings->chandef),
+ settings->beacon_interval,
+ settings->dtim_period);
+ brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
+ settings->ssid, settings->ssid_len, settings->auth_type,
+ settings->inactivity_timeout);
- if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
- WL_ERR("Not in AP creation mode\n");
+ if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
+ brcmf_err("Not in AP creation mode\n");
return -EPERM;
}
@@ -4157,29 +3523,26 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
- WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
+ brcmf_dbg(TRACE, "SSID is (%s) in Head\n", ssid_le.SSID);
} else {
memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
}
brcmf_set_mpc(ndev, 0);
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
if (err < 0) {
- WL_ERR("BRCMF_C_DOWN error %d\n", err);
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
goto exit;
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- WL_ERR("SET INFRA error %d\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
if (err < 0) {
- WL_ERR("setting AP mode failed %d\n", err);
+ brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
@@ -4191,82 +3554,63 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
settings->beacon.tail_len);
- kfree(cfg->ap_info->rsn_ie);
- cfg->ap_info->rsn_ie = NULL;
- kfree(cfg->ap_info->wpa_ie);
- cfg->ap_info->wpa_ie = NULL;
-
if ((wpa_ie != NULL || rsn_ie != NULL)) {
- WL_TRACE("WPA(2) IE is found\n");
+ brcmf_dbg(TRACE, "WPA(2) IE is found\n");
if (wpa_ie != NULL) {
/* WPA IE */
- err = brcmf_configure_wpaie(ndev, wpa_ie, false,
- bssidx);
+ err = brcmf_configure_wpaie(ndev, wpa_ie, false);
if (err < 0)
goto exit;
- cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
- wpa_ie->len +
- TLV_HDR_LEN,
- GFP_KERNEL);
} else {
/* RSN IE */
err = brcmf_configure_wpaie(ndev,
- (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
+ (struct brcmf_vs_tlv *)rsn_ie, true);
if (err < 0)
goto exit;
- cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
- rsn_ie->len +
- TLV_HDR_LEN,
- GFP_KERNEL);
}
- cfg->ap_info->security_mode = true;
} else {
- WL_TRACE("No WPA(2) IEs found\n");
+ brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
brcmf_configure_opensecurity(ndev, bssidx);
- cfg->ap_info->security_mode = false;
}
/* Set Beacon IEs to FW */
- err = brcmf_set_management_ie(cfg, ndev, bssidx,
- VNDR_IE_BEACON_FLAG,
- (u8 *)settings->beacon.tail,
- settings->beacon.tail_len);
+ err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
+ VNDR_IE_BEACON_FLAG,
+ settings->beacon.tail,
+ settings->beacon.tail_len);
if (err)
- WL_ERR("Set Beacon IE Failed\n");
+ brcmf_err("Set Beacon IE Failed\n");
else
- WL_TRACE("Applied Vndr IEs for Beacon\n");
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
/* Set Probe Response IEs to FW */
- err = brcmf_set_management_ie(cfg, ndev, bssidx,
- VNDR_IE_PRBRSP_FLAG,
- (u8 *)settings->beacon.proberesp_ies,
- settings->beacon.proberesp_ies_len);
+ err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
+ VNDR_IE_PRBRSP_FLAG,
+ settings->beacon.proberesp_ies,
+ settings->beacon.proberesp_ies_len);
if (err)
- WL_ERR("Set Probe Resp IE Failed\n");
+ brcmf_err("Set Probe Resp IE Failed\n");
else
- WL_TRACE("Applied Vndr IEs for Probe Resp\n");
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
if (settings->beacon_interval) {
- ioctl_value = settings->beacon_interval;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
- &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
+ settings->beacon_interval);
if (err < 0) {
- WL_ERR("Beacon Interval Set Error, %d\n", err);
+ brcmf_err("Beacon Interval Set Error, %d\n", err);
goto exit;
}
}
if (settings->dtim_period) {
- ioctl_value = settings->dtim_period;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
- &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
+ settings->dtim_period);
if (err < 0) {
- WL_ERR("DTIM Interval Set Error, %d\n", err);
+ brcmf_err("DTIM Interval Set Error, %d\n", err);
goto exit;
}
}
- ioctl_value = 1;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
- WL_ERR("BRCMF_C_UP error (%d)\n", err);
+ brcmf_err("BRCMF_C_UP error (%d)\n", err);
goto exit;
}
@@ -4274,14 +3618,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* join parameters starts with ssid */
memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
/* create softap */
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
- sizeof(join_params));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
if (err < 0) {
- WL_ERR("SET SSID error (%d)\n", err);
+ brcmf_err("SET SSID error (%d)\n", err);
goto exit;
}
- clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
- set_bit(WL_STATUS_AP_CREATED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
exit:
if (err)
@@ -4291,31 +3635,28 @@ exit:
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- s32 ioctl_value;
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
- if (cfg->conf->mode == WL_MODE_AP) {
+ if (ifp->vif->mode == WL_MODE_AP) {
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
- ioctl_value = 0;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
if (err < 0) {
- WL_ERR("setting AP mode failed %d\n", err);
+ brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
- ioctl_value = 0;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
if (err < 0) {
- WL_ERR("BRCMF_C_UP error %d\n", err);
+ brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
brcmf_set_mpc(ndev, 1);
- clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
- clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
}
exit:
return err;
@@ -4326,24 +3667,25 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac)
{
struct brcmf_scb_val_le scbval;
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
if (!mac)
return -EFAULT;
- WL_TRACE("Enter %pM\n", mac);
+ brcmf_dbg(TRACE, "Enter %pM\n", mac);
- if (!check_sys_up(wiphy))
+ if (!check_vif_up(ifp->vif))
return -EIO;
memcpy(&scbval.ea, mac, ETH_ALEN);
scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
- &scbval, sizeof(scbval));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scbval));
if (err)
- WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
+ brcmf_err("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -4373,11 +3715,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.start_ap = brcmf_cfg80211_start_ap,
.stop_ap = brcmf_cfg80211_stop_ap,
.del_station = brcmf_cfg80211_del_station,
-#ifndef CONFIG_BRCMISCAN
- /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
-#endif
#ifdef CONFIG_NL80211_TESTMODE
.testmode_cmd = brcmf_cfg80211_testmode
#endif
@@ -4401,106 +3740,126 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
-#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-#endif
}
-static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
+static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
{
- struct wireless_dev *wdev;
+ struct wiphy *wiphy;
s32 err = 0;
- wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
- if (!wdev)
+ wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+ if (!wiphy) {
+ brcmf_err("Could not allocate wiphy device\n");
return ERR_PTR(-ENOMEM);
-
- wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
- sizeof(struct brcmf_cfg80211_info));
- if (!wdev->wiphy) {
- WL_ERR("Could not allocate wiphy device\n");
- err = -ENOMEM;
- goto wiphy_new_out;
- }
- set_wiphy_dev(wdev->wiphy, ndev);
- wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
- wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
+ }
+ set_wiphy_dev(wiphy, phydev);
+ wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
* This will be updated with
* 11n phy tables in
* "ifconfig up"
* if phy has 11n capability
*/
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wdev->wiphy->cipher_suites = __wl_cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
- wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->cipher_suites = __wl_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
* save mode
* by default
*/
- brcmf_wiphy_pno_params(wdev->wiphy);
- err = wiphy_register(wdev->wiphy);
+ brcmf_wiphy_pno_params(wiphy);
+ err = wiphy_register(wiphy);
if (err < 0) {
- WL_ERR("Could not register wiphy device (%d)\n", err);
- goto wiphy_register_out;
+ brcmf_err("Could not register wiphy device (%d)\n", err);
+ wiphy_free(wiphy);
+ return ERR_PTR(err);
}
- return wdev;
+ return wiphy;
+}
+
+static
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+ struct net_device *netdev,
+ s32 mode, bool pm_block)
+{
+ struct brcmf_cfg80211_vif *vif;
-wiphy_register_out:
- wiphy_free(wdev->wiphy);
+ if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
+ return ERR_PTR(-ENOSPC);
-wiphy_new_out:
- kfree(wdev);
+ vif = kzalloc(sizeof(*vif), GFP_KERNEL);
+ if (!vif)
+ return ERR_PTR(-ENOMEM);
+
+ vif->wdev.wiphy = cfg->wiphy;
+ vif->wdev.netdev = netdev;
+ vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
+
+ if (netdev) {
+ vif->ifp = netdev_priv(netdev);
+ netdev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
+ }
- return ERR_PTR(err);
+ vif->mode = mode;
+ vif->pm_block = pm_block;
+ vif->roam_off = -1;
+
+ brcmf_init_prof(&vif->profile);
+
+ list_add_tail(&vif->list, &cfg->vif_list);
+ cfg->vif_cnt++;
+ return vif;
}
-static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
+static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
- struct wireless_dev *wdev = cfg->wdev;
+ struct brcmf_cfg80211_info *cfg;
+ struct wiphy *wiphy;
- if (!wdev) {
- WL_ERR("wdev is invalid\n");
- return;
+ wiphy = vif->wdev.wiphy;
+ cfg = wiphy_priv(wiphy);
+ list_del(&vif->list);
+ cfg->vif_cnt--;
+
+ kfree(vif);
+ if (!cfg->vif_cnt) {
+ wiphy_unregister(wiphy);
+ wiphy_free(wiphy);
}
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- cfg->wdev = NULL;
}
-static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
- const struct brcmf_event_msg *e)
+static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
{
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
+ u32 event = e->event_code;
+ u32 status = e->status;
if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
- WL_CONN("Processing set ssid\n");
- cfg->link_up = true;
+ brcmf_dbg(CONN, "Processing set ssid\n");
return true;
}
return false;
}
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
- const struct brcmf_event_msg *e)
+static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
{
- u32 event = be32_to_cpu(e->event_type);
- u16 flags = be16_to_cpu(e->flags);
+ u32 event = e->event_code;
+ u16 flags = e->flags;
if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
- WL_CONN("Processing link down\n");
+ brcmf_dbg(CONN, "Processing link down\n");
return true;
}
return false;
@@ -4509,18 +3868,17 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
const struct brcmf_event_msg *e)
{
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
+ u32 event = e->event_code;
+ u32 status = e->status;
if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
- WL_CONN("Processing Link %s & no network found\n",
- be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ?
- "up" : "down");
+ brcmf_dbg(CONN, "Processing Link %s & no network found\n",
+ e->flags & BRCMF_EVENT_MSG_LINK ? "up" : "down");
return true;
}
if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
- WL_CONN("Processing connecting & no network found\n");
+ brcmf_dbg(CONN, "Processing connecting & no network found\n");
return true;
}
@@ -4541,7 +3899,7 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
{
- struct net_device *ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -4550,10 +3908,10 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
brcmf_clear_assoc_ies(cfg);
- err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
+ cfg->extra_buf, WL_ASSOC_INFO_MAX);
if (err) {
- WL_ERR("could not get assoc info (%d)\n", err);
+ brcmf_err("could not get assoc info (%d)\n", err);
return err;
}
assoc_info =
@@ -4561,11 +3919,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
req_len = le32_to_cpu(assoc_info->req_len);
resp_len = le32_to_cpu(assoc_info->resp_len);
if (req_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
- cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+ cfg->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (err) {
- WL_ERR("could not get assoc req (%d)\n", err);
+ brcmf_err("could not get assoc req (%d)\n", err);
return err;
}
conn_info->req_ie_len = req_len;
@@ -4577,11 +3935,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->req_ie = NULL;
}
if (resp_len) {
- err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
- cfg->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_fil_iovar_data_get(ifp, "assoc_resp_ies",
+ cfg->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (err) {
- WL_ERR("could not get assoc resp (%d)\n", err);
+ brcmf_err("could not get assoc resp (%d)\n", err);
return err;
}
conn_info->resp_ie_len = resp_len;
@@ -4592,8 +3950,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
}
- WL_CONN("req len (%d) resp len (%d)\n",
- conn_info->req_ie_len, conn_info->resp_ie_len);
+ brcmf_dbg(CONN, "req len (%d) resp len (%d)\n",
+ conn_info->req_ie_len, conn_info->resp_ie_len);
return err;
}
@@ -4603,7 +3961,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct ieee80211_channel *notify_channel = NULL;
@@ -4614,7 +3973,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
u32 target_channel;
u8 *buf;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
brcmf_get_assoc_ies(cfg);
memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4628,7 +3987,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
/* data sent to dongle has to be little endian */
*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
+ buf, WL_BSS_INFO_MAX);
if (err)
goto done;
@@ -4650,10 +4010,10 @@ done:
cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
- WL_CONN("Report roaming result\n");
+ brcmf_dbg(CONN, "Report roaming result\n");
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
- WL_TRACE("Exit\n");
+ set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -4662,13 +4022,15 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev, const struct brcmf_event_msg *e,
bool completed)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
s32 err = 0;
- WL_TRACE("Enter\n");
+ brcmf_dbg(TRACE, "Enter\n");
- if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+ if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state)) {
if (completed) {
brcmf_get_assoc_ies(cfg);
memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4684,11 +4046,12 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
if (completed)
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
- WL_CONN("Report connect result - connection %s\n",
- completed ? "succeeded" : "failed");
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
+ brcmf_dbg(CONN, "Report connect result - connection %s\n",
+ completed ? "succeeded" : "failed");
}
- WL_TRACE("Exit\n");
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
@@ -4698,14 +4061,14 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
const struct brcmf_event_msg *e, void *data)
{
s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- u32 reason = be32_to_cpu(e->reason);
- u32 len = be32_to_cpu(e->datalen);
+ u32 event = e->event_code;
+ u32 reason = e->reason;
+ u32 len = e->datalen;
static int generation;
struct station_info sinfo;
- WL_CONN("event %d, reason %d\n", event, reason);
+ brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
memset(&sinfo, 0, sizeof(sinfo));
sinfo.filled = 0;
@@ -4713,7 +4076,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
reason == BRCMF_E_STATUS_SUCCESS) {
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
if (!data) {
- WL_ERR("No IEs present in ASSOC/REASSOC_IND");
+ brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
}
sinfo.assoc_req_ies = data;
@@ -4732,45 +4095,43 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
}
static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+brcmf_notify_connect_status(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct net_device *ndev = ifp->ndev;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
s32 err = 0;
- if (cfg->conf->mode == WL_MODE_AP) {
+ if (ifp->vif->mode == WL_MODE_AP) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
- } else if (brcmf_is_linkup(cfg, e)) {
- WL_CONN("Linkup\n");
- if (brcmf_is_ibssmode(cfg)) {
+ } else if (brcmf_is_linkup(e)) {
+ brcmf_dbg(CONN, "Linkup\n");
+ if (brcmf_is_ibssmode(ifp->vif)) {
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- set_bit(WL_STATUS_CONNECTED, &cfg->status);
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
} else
brcmf_bss_connect_done(cfg, ndev, e, true);
- } else if (brcmf_is_linkdown(cfg, e)) {
- WL_CONN("Linkdown\n");
- if (brcmf_is_ibssmode(cfg)) {
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg->status))
- brcmf_link_down(cfg);
- } else {
+ } else if (brcmf_is_linkdown(e)) {
+ brcmf_dbg(CONN, "Linkdown\n");
+ if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
- if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg->status)) {
+ if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state))
cfg80211_disconnected(ndev, 0, NULL, 0,
- GFP_KERNEL);
- brcmf_link_down(cfg);
- }
+ GFP_KERNEL);
}
- brcmf_init_prof(cfg->profile);
+ brcmf_link_down(ifp->vif);
+ brcmf_init_prof(ndev_to_prof(ndev));
} else if (brcmf_is_nonetwork(cfg, e)) {
- if (brcmf_is_ibssmode(cfg))
- clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+ if (brcmf_is_ibssmode(ifp->vif))
+ clear_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &ifp->vif->sme_state);
else
brcmf_bss_connect_done(cfg, ndev, e, false);
}
@@ -4779,30 +4140,29 @@ brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
}
static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+brcmf_notify_roaming_status(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
+ u32 event = e->event_code;
+ u32 status = e->status;
if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
- if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
- brcmf_bss_roaming_done(cfg, ndev, e);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
+ brcmf_bss_roaming_done(cfg, ifp->ndev, e);
else
- brcmf_bss_connect_done(cfg, ndev, e, true);
+ brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
}
return err;
}
static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+brcmf_notify_mic_status(struct brcmf_if *ifp,
const struct brcmf_event_msg *e, void *data)
{
- u16 flags = be16_to_cpu(e->flags);
+ u16 flags = e->flags;
enum nl80211_key_type key_type;
if (flags & BRCMF_EVENT_MSG_GROUP)
@@ -4810,85 +4170,14 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
else
key_type = NL80211_KEYTYPE_PAIRWISE;
- cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+ cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1,
NULL, GFP_KERNEL);
return 0;
}
-static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- struct brcmf_channel_info_le channel_inform_le;
- struct brcmf_scan_results_le *bss_list_le;
- u32 len = WL_SCAN_BUF_MAX;
- s32 err = 0;
- bool scan_abort = false;
- u32 scan_channel;
-
- WL_TRACE("Enter\n");
-
- if (cfg->iscan_on && cfg->iscan_kickstart) {
- WL_TRACE("Exit\n");
- return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
- }
-
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
- WL_ERR("Scan complete while device not scanning\n");
- scan_abort = true;
- err = -EINVAL;
- goto scan_done_out;
- }
-
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le,
- sizeof(channel_inform_le));
- if (err) {
- WL_ERR("scan busy (%d)\n", err);
- scan_abort = true;
- goto scan_done_out;
- }
- scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
- if (scan_channel)
- WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
- cfg->bss_list = cfg->scan_results;
- bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
-
- memset(cfg->scan_results, 0, len);
- bss_list_le->buflen = cpu_to_le32(len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
- cfg->scan_results, len);
- if (err) {
- WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
- err = -EINVAL;
- scan_abort = true;
- goto scan_done_out;
- }
- cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
- cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
- cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
-
- err = brcmf_inform_bss(cfg);
- if (err)
- scan_abort = true;
-
-scan_done_out:
- if (cfg->scan_request) {
- WL_SCAN("calling cfg80211_scan_done\n");
- cfg80211_scan_done(cfg->scan_request, scan_abort);
- brcmf_set_mpc(ndev, 1);
- cfg->scan_request = NULL;
- }
-
- WL_TRACE("Exit\n");
-
- return err;
-}
-
static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
{
- conf->mode = (u32)-1;
conf->frag_threshold = (u32)-1;
conf->rts_threshold = (u32)-1;
conf->retry_short = (u32)-1;
@@ -4896,82 +4185,53 @@ static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
conf->tx_power = -1;
}
-static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
-{
- memset(el, 0, sizeof(*el));
- el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
- el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
- el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
- el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
- el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
+static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
+{
+ brcmf_fweh_register(cfg->pub, BRCMF_E_LINK,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH_IND,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_DISASSOC_IND,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ASSOC_IND,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_REASSOC_IND,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ROAM,
+ brcmf_notify_roaming_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_MIC_ERROR,
+ brcmf_notify_mic_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_SET_SSID,
+ brcmf_notify_connect_status);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
+ brcmf_notify_sched_scan_results);
}
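For reference, a minimal sketch of the handler shape the brcmf_fweh_register() calls above expect, inferred from the converted handlers elsewhere in this patch; the handler name below is illustrative only, not part of the change:

/* Illustrative only: firmware event handlers now receive the interface directly. */
static s32 brcmf_notify_example(struct brcmf_if *ifp,
				const struct brcmf_event_msg *e, void *data)
{
	brcmf_dbg(TRACE, "event %d status %d\n", e->event_code, e->status);
	return 0;
}
/* Registered per event code, e.g.:
 * brcmf_fweh_register(cfg->pub, BRCMF_E_LINK, brcmf_notify_example);
 */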
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
{
- kfree(cfg->scan_results);
- cfg->scan_results = NULL;
- kfree(cfg->bss_info);
- cfg->bss_info = NULL;
kfree(cfg->conf);
cfg->conf = NULL;
- kfree(cfg->profile);
- cfg->profile = NULL;
- kfree(cfg->scan_req_int);
- cfg->scan_req_int = NULL;
kfree(cfg->escan_ioctl_buf);
cfg->escan_ioctl_buf = NULL;
- kfree(cfg->dcmd_buf);
- cfg->dcmd_buf = NULL;
kfree(cfg->extra_buf);
cfg->extra_buf = NULL;
- kfree(cfg->iscan);
- cfg->iscan = NULL;
kfree(cfg->pmk_list);
cfg->pmk_list = NULL;
- if (cfg->ap_info) {
- kfree(cfg->ap_info->wpa_ie);
- kfree(cfg->ap_info->rsn_ie);
- kfree(cfg->ap_info);
- cfg->ap_info = NULL;
- }
}
static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
{
- cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
- if (!cfg->scan_results)
- goto init_priv_mem_out;
cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
if (!cfg->conf)
goto init_priv_mem_out;
- cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
- if (!cfg->profile)
- goto init_priv_mem_out;
- cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
- if (!cfg->bss_info)
- goto init_priv_mem_out;
- cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
- GFP_KERNEL);
- if (!cfg->scan_req_int)
- goto init_priv_mem_out;
cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
if (!cfg->escan_ioctl_buf)
goto init_priv_mem_out;
- cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
- if (!cfg->dcmd_buf)
- goto init_priv_mem_out;
cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (!cfg->extra_buf)
goto init_priv_mem_out;
- cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
- if (!cfg->iscan)
- goto init_priv_mem_out;
cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
if (!cfg->pmk_list)
goto init_priv_mem_out;
@@ -4984,298 +4244,107 @@ init_priv_mem_out:
return -ENOMEM;
}
-/*
-* retrieve first queued event from head
-*/
-
-static struct brcmf_cfg80211_event_q *brcmf_deq_event(
- struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_event_q *e = NULL;
-
- spin_lock_irq(&cfg->evt_q_lock);
- if (!list_empty(&cfg->evt_q_list)) {
- e = list_first_entry(&cfg->evt_q_list,
- struct brcmf_cfg80211_event_q, evt_q_list);
- list_del(&e->evt_q_list);
- }
- spin_unlock_irq(&cfg->evt_q_lock);
-
- return e;
-}
-
-/*
-* push event to tail of the queue
-*
-* remark: this function may not sleep as it is called in atomic context.
-*/
-
-static s32
-brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
- const struct brcmf_event_msg *msg, void *data)
-{
- struct brcmf_cfg80211_event_q *e;
- s32 err = 0;
- ulong flags;
- u32 data_len;
- u32 total_len;
-
- total_len = sizeof(struct brcmf_cfg80211_event_q);
- if (data)
- data_len = be32_to_cpu(msg->datalen);
- else
- data_len = 0;
- total_len += data_len;
- e = kzalloc(total_len, GFP_ATOMIC);
- if (!e)
- return -ENOMEM;
-
- e->etype = event;
- memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
- if (data)
- memcpy(&e->edata, data, data_len);
-
- spin_lock_irqsave(&cfg->evt_q_lock, flags);
- list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
- spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
-
- return err;
-}
-
-static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
-{
- kfree(e);
-}
-
-static void brcmf_cfg80211_event_handler(struct work_struct *work)
-{
- struct brcmf_cfg80211_info *cfg =
- container_of(work, struct brcmf_cfg80211_info,
- event_work);
- struct brcmf_cfg80211_event_q *e;
-
- e = brcmf_deq_event(cfg);
- if (unlikely(!e)) {
- WL_ERR("event queue empty...\n");
- return;
- }
-
- do {
- WL_INFO("event type (%d)\n", e->etype);
- if (cfg->el.handler[e->etype])
- cfg->el.handler[e->etype](cfg,
- cfg_to_ndev(cfg),
- &e->emsg, e->edata);
- else
- WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
- brcmf_put_event(e);
- } while ((e = brcmf_deq_event(cfg)));
-
-}
-
-static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
-{
- spin_lock_init(&cfg->evt_q_lock);
- INIT_LIST_HEAD(&cfg->evt_q_list);
-}
-
-static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
-{
- struct brcmf_cfg80211_event_q *e;
-
- spin_lock_irq(&cfg->evt_q_lock);
- while (!list_empty(&cfg->evt_q_list)) {
- e = list_first_entry(&cfg->evt_q_list,
- struct brcmf_cfg80211_event_q, evt_q_list);
- list_del(&e->evt_q_list);
- kfree(e);
- }
- spin_unlock_irq(&cfg->evt_q_lock);
-}
-
static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
{
s32 err = 0;
cfg->scan_request = NULL;
cfg->pwr_save = true;
-#ifdef CONFIG_BRCMISCAN
- cfg->iscan_on = true; /* iscan on & off switch.
- we enable iscan per default */
- cfg->escan_on = false; /* escan on & off switch.
- we disable escan per default */
-#else
- cfg->iscan_on = false; /* iscan on & off switch.
- we disable iscan per default */
- cfg->escan_on = true; /* escan on & off switch.
- we enable escan per default */
-#endif
cfg->roam_on = true; /* roam on & off switch.
we enable roam per default */
-
- cfg->iscan_kickstart = false;
cfg->active_scan = true; /* we do active scan for
specific scan per default */
cfg->dongle_up = false; /* dongle is not up yet */
- brcmf_init_eq(cfg);
err = brcmf_init_priv_mem(cfg);
if (err)
return err;
- INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
- brcmf_init_eloop_handler(&cfg->el);
+ brcmf_register_event_handlers(cfg);
mutex_init(&cfg->usr_sync);
- err = brcmf_init_iscan(cfg);
- if (err)
- return err;
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
- brcmf_init_prof(cfg->profile);
- brcmf_link_down(cfg);
return err;
}
static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
{
- cancel_work_sync(&cfg->event_work);
cfg->dongle_up = false; /* dongle down */
- brcmf_flush_eq(cfg);
- brcmf_link_down(cfg);
brcmf_abort_scanning(cfg);
brcmf_deinit_priv_mem(cfg);
}
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- struct brcmf_pub *drvr)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ struct device *busdev)
{
- struct wireless_dev *wdev;
+ struct net_device *ndev = drvr->iflist[0]->ndev;
struct brcmf_cfg80211_info *cfg;
+ struct wiphy *wiphy;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_if *ifp;
s32 err = 0;
if (!ndev) {
- WL_ERR("ndev is invalid\n");
+ brcmf_err("ndev is invalid\n");
return NULL;
}
- wdev = brcmf_alloc_wdev(busdev);
- if (IS_ERR(wdev)) {
+ ifp = netdev_priv(ndev);
+ wiphy = brcmf_setup_wiphy(busdev);
+ if (IS_ERR(wiphy))
return NULL;
- }
- wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
- cfg = wdev_to_cfg(wdev);
- cfg->wdev = wdev;
+ cfg = wiphy_priv(wiphy);
+ cfg->wiphy = wiphy;
cfg->pub = drvr;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
+ INIT_LIST_HEAD(&cfg->vif_list);
+
+ vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+ if (IS_ERR(vif)) {
+ wiphy_free(wiphy);
+ return NULL;
+ }
+
err = wl_init_priv(cfg);
if (err) {
- WL_ERR("Failed to init iwm_priv (%d)\n", err);
+ brcmf_err("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
+ ifp->vif = vif;
return cfg;
cfg80211_attach_out:
- brcmf_free_wdev(cfg);
+ brcmf_free_vif(vif);
return NULL;
}
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
{
- wl_deinit_priv(cfg);
- brcmf_free_wdev(cfg);
-}
-
-void
-brcmf_cfg80211_event(struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data)
-{
- u32 event_type = be32_to_cpu(e->event_type);
- struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
-
- if (!brcmf_enq_event(cfg, event_type, e, data))
- schedule_work(&cfg->event_work);
-}
-
-static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
-{
- /* Room for "event_msgs" + '\0' + bitvec */
- s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- s32 err = 0;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_cfg80211_vif *tmp;
- WL_TRACE("Enter\n");
-
- /* Setup event_msgs */
- brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
- if (err) {
- WL_ERR("Get event_msgs error (%d)\n", err);
- goto dongle_eventmsg_out;
- }
- memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
-
- setbit(eventmask, BRCMF_E_SET_SSID);
- setbit(eventmask, BRCMF_E_ROAM);
- setbit(eventmask, BRCMF_E_PRUNE);
- setbit(eventmask, BRCMF_E_AUTH);
- setbit(eventmask, BRCMF_E_REASSOC);
- setbit(eventmask, BRCMF_E_REASSOC_IND);
- setbit(eventmask, BRCMF_E_DEAUTH_IND);
- setbit(eventmask, BRCMF_E_DISASSOC_IND);
- setbit(eventmask, BRCMF_E_DISASSOC);
- setbit(eventmask, BRCMF_E_JOIN);
- setbit(eventmask, BRCMF_E_ASSOC_IND);
- setbit(eventmask, BRCMF_E_PSK_SUP);
- setbit(eventmask, BRCMF_E_LINK);
- setbit(eventmask, BRCMF_E_NDIS_LINK);
- setbit(eventmask, BRCMF_E_MIC_ERROR);
- setbit(eventmask, BRCMF_E_PMKID_CACHE);
- setbit(eventmask, BRCMF_E_TXFAIL);
- setbit(eventmask, BRCMF_E_JOIN_START);
- setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
- setbit(eventmask, BRCMF_E_ESCAN_RESULT);
- setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
-
- brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
- if (err) {
- WL_ERR("Set event_msgs error (%d)\n", err);
- goto dongle_eventmsg_out;
+ wl_deinit_priv(cfg);
+ list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
+ brcmf_free_vif(vif);
}
-
-dongle_eventmsg_out:
- WL_TRACE("Exit\n");
- return err;
}
static s32
brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
{
- s8 iovbuf[32];
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
__le32 roamtrigger[2];
__le32 roam_delta[2];
- __le32 bcn_to_le;
- __le32 roamvar_le;
/*
* Setup timeout if Beacons are lost and roam is
* off to report link down
*/
if (roamvar) {
- bcn_to_le = cpu_to_le32(bcn_timeout);
- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le,
- sizeof(bcn_to_le), iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR,
- iovbuf, sizeof(iovbuf));
+ err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
if (err) {
- WL_ERR("bcn_timeout error (%d)\n", err);
+ brcmf_err("bcn_timeout error (%d)\n", err);
goto dongle_rom_out;
}
}
@@ -5284,31 +4353,28 @@ brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
* Enable/Disable built-in roaming to allow supplicant
* to take care of roaming
*/
- WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
- roamvar_le = cpu_to_le32(roamvar);
- brcmf_c_mkiovar("roam_off", (char *)&roamvar_le,
- sizeof(roamvar_le), iovbuf, sizeof(iovbuf));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
if (err) {
- WL_ERR("roam_off error (%d)\n", err);
+ brcmf_err("roam_off error (%d)\n", err);
goto dongle_rom_out;
}
roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER,
- (void *)roamtrigger, sizeof(roamtrigger));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
+ (void *)roamtrigger, sizeof(roamtrigger));
if (err) {
- WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
+ brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
goto dongle_rom_out;
}
roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA,
- (void *)roam_delta, sizeof(roam_delta));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
+ (void *)roam_delta, sizeof(roam_delta));
if (err) {
- WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
+ brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err);
goto dongle_rom_out;
}
@@ -5320,37 +4386,35 @@ static s32
brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
s32 scan_unassoc_time, s32 scan_passive_time)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
- __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time);
- __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time);
- __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
- &scan_assoc_tm_le, sizeof(scan_assoc_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ scan_assoc_time);
if (err) {
if (err == -EOPNOTSUPP)
- WL_INFO("Scan assoc time is not supported\n");
+ brcmf_dbg(INFO, "Scan assoc time is not supported\n");
else
- WL_ERR("Scan assoc time error (%d)\n", err);
+ brcmf_err("Scan assoc time error (%d)\n", err);
goto dongle_scantime_out;
}
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
- &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ scan_unassoc_time);
if (err) {
if (err == -EOPNOTSUPP)
- WL_INFO("Scan unassoc time is not supported\n");
+ brcmf_dbg(INFO, "Scan unassoc time is not supported\n");
else
- WL_ERR("Scan unassoc time error (%d)\n", err);
+ brcmf_err("Scan unassoc time error (%d)\n", err);
goto dongle_scantime_out;
}
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
- &scan_passive_tm_le, sizeof(scan_passive_tm_le));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+ scan_passive_time);
if (err) {
if (err == -EOPNOTSUPP)
- WL_INFO("Scan passive time is not supported\n");
+ brcmf_dbg(INFO, "Scan passive time is not supported\n");
else
- WL_ERR("Scan passive time error (%d)\n", err);
+ brcmf_err("Scan passive time error (%d)\n", err);
goto dongle_scantime_out;
}
@@ -5360,20 +4424,21 @@ dongle_scantime_out:
static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct wiphy *wiphy;
s32 phy_list;
s8 phy;
s32 err = 0;
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
- &phy_list, sizeof(phy_list));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
+ &phy_list, sizeof(phy_list));
if (err) {
- WL_ERR("error (%d)\n", err);
+ brcmf_err("error (%d)\n", err);
return err;
}
phy = ((char *)&phy_list)[0];
- WL_INFO("%c phy\n", phy);
+ brcmf_dbg(INFO, "%c phy\n", phy);
if (phy == 'n' || phy == 'a') {
wiphy = cfg_to_wiphy(cfg);
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
@@ -5403,16 +4468,13 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
- err = brcmf_dongle_eventmsg(ndev);
- if (err)
- goto default_conf_out;
-
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
+ err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
+ power_mode);
if (err)
goto default_conf_out;
- WL_INFO("power save set to %s\n",
- (power_mode ? "enabled" : "disabled"));
+ brcmf_dbg(INFO, "power save set to %s\n",
+ (power_mode ? "enabled" : "disabled"));
err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
WL_BEACON_TIMEOUT);
@@ -5436,68 +4498,25 @@ default_conf_out:
}
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
+static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
{
- char buf[10+IFNAMSIZ];
- struct dentry *fd;
- s32 err = 0;
-
- sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
- cfg->debugfsdir = debugfs_create_dir(buf,
- cfg_to_wiphy(cfg)->debugfsdir);
-
- fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
- (u16 *)&cfg->profile->beacon_interval);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
-
- fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
- (u8 *)&cfg->profile->dtim_period);
- if (!fd) {
- err = -ENOMEM;
- goto err_out;
- }
+ set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
+ if (ifp->idx)
+ return 0;
-err_out:
- return err;
+ return brcmf_config_dongle(ifp->drvr->config);
}
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
+static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
{
- debugfs_remove_recursive(cfg->debugfsdir);
- cfg->debugfsdir = NULL;
-}
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
-{
- s32 err = 0;
-
- set_bit(WL_STATUS_READY, &cfg->status);
-
- brcmf_debugfs_add_netdev_params(cfg);
-
- err = brcmf_config_dongle(cfg);
- if (err)
- return err;
-
- brcmf_invoke_iscan(cfg);
-
- return err;
-}
-
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
-{
/*
* While going down, if associated with AP disassociate
* from AP to save power
*/
- if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
- test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("Disassociating from AP");
- brcmf_link_down(cfg);
+ if (check_vif_up(ifp->vif)) {
+ brcmf_link_down(ifp->vif);
/* Make sure WPA_Supplicant receives all the event
generated due to DISASSOC call to the fw to keep
@@ -5507,30 +4526,32 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
}
brcmf_abort_scanning(cfg);
- clear_bit(WL_STATUS_READY, &cfg->status);
-
- brcmf_debugfs_remove_netdev(cfg);
+ clear_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
return 0;
}
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
+s32 brcmf_cfg80211_up(struct net_device *ndev)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err = 0;
mutex_lock(&cfg->usr_sync);
- err = __brcmf_cfg80211_up(cfg);
+ err = __brcmf_cfg80211_up(ifp);
mutex_unlock(&cfg->usr_sync);
return err;
}
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
+s32 brcmf_cfg80211_down(struct net_device *ndev)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err = 0;
mutex_lock(&cfg->usr_sync);
- err = __brcmf_cfg80211_down(cfg);
+ err = __brcmf_cfg80211_down(ifp);
mutex_unlock(&cfg->usr_sync);
return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 71ced174748a..e4d9cc7a8e63 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,98 +17,12 @@
#ifndef _wl_cfg80211_h_
#define _wl_cfg80211_h_
-#define WL_DBG_NONE 0
-#define WL_DBG_CONN (1 << 5)
-#define WL_DBG_SCAN (1 << 4)
-#define WL_DBG_TRACE (1 << 3)
-#define WL_DBG_INFO (1 << 1)
-#define WL_DBG_ERR (1 << 0)
-#define WL_DBG_MASK ((WL_DBG_INFO | WL_DBG_ERR | WL_DBG_TRACE) | \
- (WL_DBG_SCAN) | (WL_DBG_CONN))
-
-#define WL_ERR(fmt, ...) \
-do { \
- if (brcmf_dbg_level & WL_DBG_ERR) { \
- if (net_ratelimit()) { \
- pr_err("ERROR @%s : " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
-} while (0)
-
-#if (defined DEBUG)
-#define WL_INFO(fmt, ...) \
-do { \
- if (brcmf_dbg_level & WL_DBG_INFO) { \
- if (net_ratelimit()) { \
- pr_err("INFO @%s : " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
-} while (0)
-
-#define WL_TRACE(fmt, ...) \
-do { \
- if (brcmf_dbg_level & WL_DBG_TRACE) { \
- if (net_ratelimit()) { \
- pr_err("TRACE @%s : " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
-} while (0)
-
-#define WL_SCAN(fmt, ...) \
-do { \
- if (brcmf_dbg_level & WL_DBG_SCAN) { \
- if (net_ratelimit()) { \
- pr_err("SCAN @%s : " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
-} while (0)
-
-#define WL_CONN(fmt, ...) \
-do { \
- if (brcmf_dbg_level & WL_DBG_CONN) { \
- if (net_ratelimit()) { \
- pr_err("CONN @%s : " fmt, \
- __func__, ##__VA_ARGS__); \
- } \
- } \
-} while (0)
-
-#else /* (defined DEBUG) */
-#define WL_INFO(fmt, args...)
-#define WL_TRACE(fmt, args...)
-#define WL_SCAN(fmt, args...)
-#define WL_CONN(fmt, args...)
-#endif /* (defined DEBUG) */
-
-#define WL_NUM_SCAN_MAX 1
-#define WL_NUM_PMKIDS_MAX MAXPMKID /* will be used
- * for 2.6.33 kernel
- * or later
- */
-#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_NUM_SCAN_MAX 10
+#define WL_NUM_PMKIDS_MAX MAXPMKID
#define WL_TLV_INFO_MAX 1024
#define WL_BSS_INFO_MAX 2048
-#define WL_ASSOC_INFO_MAX 512 /*
- * needs to grab assoc info from dongle to
- * report it to cfg80211 through "connect"
- * event
- */
-#define WL_DCMD_LEN_MAX 1024
-#define WL_EXTRA_BUF_MAX 2048
-#define WL_ISCAN_BUF_MAX 2048 /*
- * the buf length can be BRCMF_DCMD_MAXLEN
- * to reduce iteration
- */
-#define WL_ISCAN_TIMER_INTERVAL_MS 3000
-#define WL_SCAN_ERSULTS_LAST (BRCMF_SCAN_RESULTS_NO_MEM+1)
-#define WL_AP_MAX 256 /* virtually unlimitted as long
- * as kernel memory allows
- */
-
+#define WL_ASSOC_INFO_MAX 512 /* assoc related fil max buf */
+#define WL_EXTRA_BUF_MAX 2048
#define WL_ROAM_TRIGGER_LEVEL -75
#define WL_ROAM_DELTA 20
#define WL_BEACON_TIMEOUT 3
@@ -127,15 +41,15 @@ do { \
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define IE_MAX_LEN 512
-/* dongle status */
-enum wl_status {
- WL_STATUS_READY,
- WL_STATUS_SCANNING,
- WL_STATUS_SCAN_ABORTING,
- WL_STATUS_CONNECTING,
- WL_STATUS_CONNECTED,
- WL_STATUS_AP_CREATING,
- WL_STATUS_AP_CREATED
+/**
+ * enum brcmf_scan_status - dongle scan status
+ *
+ * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
+ * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
+ */
+enum brcmf_scan_status {
+ BRCMF_SCAN_STATUS_BUSY,
+ BRCMF_SCAN_STATUS_ABORT,
};
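A minimal sketch of how these bits are meant to be used against the scan_status word of struct brcmf_cfg80211_info introduced further down; the usage shown is illustrative and not lifted from this patch:

/* Illustrative only: scan activity tracked with atomic bit operations. */
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (test_and_clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status))
	brcmf_dbg(SCAN, "scan aborted\n");
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);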
/* wi-fi mode */
@@ -145,28 +59,8 @@ enum wl_mode {
WL_MODE_AP
};
-/* dongle profile list */
-enum wl_prof_list {
- WL_PROF_MODE,
- WL_PROF_SSID,
- WL_PROF_SEC,
- WL_PROF_IBSS,
- WL_PROF_BAND,
- WL_PROF_BSSID,
- WL_PROF_ACT,
- WL_PROF_BEACONINT,
- WL_PROF_DTIMPERIOD
-};
-
-/* dongle iscan state */
-enum wl_iscan_state {
- WL_ISCAN_STATE_IDLE,
- WL_ISCAN_STATE_SCANING
-};
-
/* dongle configuration */
struct brcmf_cfg80211_conf {
- u32 mode; /* adhoc , infrastructure or ap */
u32 frag_threshold;
u32 rts_threshold;
u32 retry_short;
@@ -175,17 +69,6 @@ struct brcmf_cfg80211_conf {
struct ieee80211_channel channel;
};
-/* forward declaration */
-struct brcmf_cfg80211_info;
-
-/* cfg80211 main event loop */
-struct brcmf_cfg80211_event_loop {
- s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
- const struct brcmf_event_msg *e,
- void *data);
-};
-
/* basic structure of scan request */
struct brcmf_cfg80211_scan_req {
struct brcmf_ssid_le ssid_le;
@@ -197,14 +80,6 @@ struct brcmf_cfg80211_ie {
u8 buf[WL_TLV_INFO_MAX];
};
-/* event queue for cfg80211 main event */
-struct brcmf_cfg80211_event_q {
- struct list_head evt_q_list;
- u32 etype;
- struct brcmf_event_msg emsg;
- s8 edata[1];
-};
-
/* security information with currently associated ap */
struct brcmf_cfg80211_security {
u32 wpa_versions;
@@ -214,45 +89,73 @@ struct brcmf_cfg80211_security {
u32 wpa_auth;
};
-/* ibss information for currently joined ibss network */
-struct brcmf_cfg80211_ibss {
- u8 beacon_interval; /* in millisecond */
- u8 atim; /* in millisecond */
- s8 join_only;
- u8 band;
- u8 channel;
-};
-
-/* dongle profile */
+/**
+ * struct brcmf_cfg80211_profile - profile information.
+ *
+ * @ssid: ssid of associated/associating ap.
+ * @bssid: bssid of joined/joining ibss.
+ * @sec: security information.
+ */
struct brcmf_cfg80211_profile {
- u32 mode;
struct brcmf_ssid ssid;
u8 bssid[ETH_ALEN];
- u16 beacon_interval;
- u8 dtim_period;
struct brcmf_cfg80211_security sec;
- struct brcmf_cfg80211_ibss ibss;
- s32 band;
};
-/* dongle iscan event loop */
-struct brcmf_cfg80211_iscan_eloop {
- s32 (*handler[WL_SCAN_ERSULTS_LAST])
- (struct brcmf_cfg80211_info *cfg);
+/**
+ * enum brcmf_vif_status - bit indices for vif status.
+ *
+ * @BRCMF_VIF_STATUS_READY: ready for operation.
+ * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
+ * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.

+ * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
+ * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
+ */
+enum brcmf_vif_status {
+ BRCMF_VIF_STATUS_READY,
+ BRCMF_VIF_STATUS_CONNECTING,
+ BRCMF_VIF_STATUS_CONNECTED,
+ BRCMF_VIF_STATUS_AP_CREATING,
+ BRCMF_VIF_STATUS_AP_CREATED
+};
+
+/**
+ * struct vif_saved_ie - holds saved IEs for a virtual interface.
+ *
+ * @probe_res_ie: IE info for probe response.
+ * @beacon_ie: IE info for beacon frame.
+ * @probe_res_ie_len: IE info length for probe response.
+ * @beacon_ie_len: IE info length for beacon frame.
+ */
+struct vif_saved_ie {
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
};
-/* dongle iscan controller */
-struct brcmf_cfg80211_iscan_ctrl {
- struct net_device *ndev;
- struct timer_list timer;
- u32 timer_ms;
- u32 timer_on;
- s32 state;
- struct work_struct work;
- struct brcmf_cfg80211_iscan_eloop el;
- void *data;
- s8 dcmd_buf[BRCMF_DCMD_SMLEN];
- s8 scan_buf[WL_ISCAN_BUF_MAX];
+/**
+ * struct brcmf_cfg80211_vif - virtual interface specific information.
+ *
+ * @ifp: lower layer interface pointer
+ * @wdev: wireless device.
+ * @profile: profile information.
+ * @mode: operating mode.
+ * @roam_off: roaming state.
+ * @sme_state: SME state using enum brcmf_vif_status bits.
+ * @pm_block: power-management blocked.
+ * @list: linked list.
+ */
+struct brcmf_cfg80211_vif {
+ struct brcmf_if *ifp;
+ struct wireless_dev wdev;
+ struct brcmf_cfg80211_profile profile;
+ s32 mode;
+ s32 roam_off;
+ unsigned long sme_state;
+ bool pm_block;
+ struct vif_saved_ie saved_ie;
+ struct list_head list;
};
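Because the wireless_dev is embedded in the vif rather than allocated separately, a wdev pointer handed back by cfg80211 can be mapped to its owning vif with container_of(); the helper below is a hypothetical sketch for illustration and is not part of this patch:

/* Hypothetical helper, not in this patch: recover the vif from its wdev. */
static inline struct brcmf_cfg80211_vif *wdev_to_vif(struct wireless_dev *wdev)
{
	return container_of(wdev, struct brcmf_cfg80211_vif, wdev);
}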
/* association inform */
@@ -288,17 +191,6 @@ struct escan_info {
struct net_device *ndev;
};
-/* Structure to hold WPS, WPA IEs for a AP */
-struct ap_info {
- u8 probe_res_ie[IE_MAX_LEN];
- u8 beacon_ie[IE_MAX_LEN];
- u32 probe_res_ie_len;
- u32 beacon_ie_len;
- u8 *wpa_ie;
- u8 *rsn_ie;
- bool security_mode;
-};
-
/**
* struct brcmf_pno_param_le - PNO scan configuration parameters
*
@@ -383,32 +275,22 @@ struct brcmf_pno_scanresults_le {
/**
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
- * @wdev: representing wl cfg80211 device.
+ * @wiphy: wiphy object for cfg80211 interface.
* @conf: dongle configuration.
* @scan_request: cfg80211 scan request object.
- * @el: main event loop.
- * @evt_q_list: used for event queue.
- * @evt_q_lock: for event queue synchronization.
* @usr_sync: mainly for dongle up/down synchronization.
* @bss_list: bss_list holding scanned ap information.
- * @scan_results: results of the last scan.
* @scan_req_int: internal scan request object.
* @bss_info: bss information for cfg80211 layer.
* @ie: information element object for internal purpose.
- * @profile: holding dongle profile.
- * @iscan: iscan controller information.
* @conn_info: association info.
* @pmk_list: wpa2 pmk list.
- * @event_work: event handler work struct.
- * @status: current dongle status.
+ * @scan_status: scan activity on the dongle.
* @pub: common driver information.
* @channel: current channel.
- * @iscan_on: iscan on/off switch.
- * @iscan_kickstart: indicate iscan already started.
* @active_scan: current scan mode.
* @sched_escan: e-scan for scheduled scan support running.
* @ibss_starter: indicates this sta is ibss starter.
- * @link_up: link/connection up flag.
* @pwr_save: indicate whether dongle to support power save mode.
* @dongle_up: indicate whether dongle up or not.
* @roam_on: on/off switch for dongle self-roaming.
@@ -416,41 +298,30 @@ struct brcmf_pno_scanresults_le {
* @dcmd_buf: dcmd buffer.
* @extra_buf: mainly to grab assoc information.
* @debugfsdir: debugfs folder for this device.
- * @escan_on: escan on/off switch.
* @escan_info: escan information.
* @escan_timeout: Timer for catch scan timeout.
* @escan_timeout_work: scan timeout worker.
* @escan_ioctl_buf: dongle command buffer for escan commands.
- * @ap_info: host ap information.
- * @ci: used to link this structure to netdev private data.
+ * @vif_list: linked list of vif instances.
+ * @vif_cnt: number of vif instances.
*/
struct brcmf_cfg80211_info {
- struct wireless_dev *wdev;
+ struct wiphy *wiphy;
struct brcmf_cfg80211_conf *conf;
struct cfg80211_scan_request *scan_request;
- struct brcmf_cfg80211_event_loop el;
- struct list_head evt_q_list;
- spinlock_t evt_q_lock;
struct mutex usr_sync;
struct brcmf_scan_results *bss_list;
- struct brcmf_scan_results *scan_results;
- struct brcmf_cfg80211_scan_req *scan_req_int;
+ struct brcmf_cfg80211_scan_req scan_req_int;
struct wl_cfg80211_bss_info *bss_info;
struct brcmf_cfg80211_ie ie;
- struct brcmf_cfg80211_profile *profile;
- struct brcmf_cfg80211_iscan_ctrl *iscan;
struct brcmf_cfg80211_connect_info conn_info;
struct brcmf_cfg80211_pmk_list *pmk_list;
- struct work_struct event_work;
- unsigned long status;
+ unsigned long scan_status;
struct brcmf_pub *pub;
u32 channel;
- bool iscan_on;
- bool iscan_kickstart;
bool active_scan;
bool sched_escan;
bool ibss_starter;
- bool link_up;
bool pwr_save;
bool dongle_up;
bool roam_on;
@@ -458,17 +329,17 @@ struct brcmf_cfg80211_info {
u8 *dcmd_buf;
u8 *extra_buf;
struct dentry *debugfsdir;
- bool escan_on;
struct escan_info escan_info;
struct timer_list escan_timeout;
struct work_struct escan_timeout_work;
u8 *escan_ioctl_buf;
- struct ap_info *ap_info;
+ struct list_head vif_list;
+ u8 vif_cnt;
};
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
{
- return w->wdev->wiphy;
+ return cfg->wiphy;
}
static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
@@ -481,9 +352,12 @@ static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
}
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
+static inline
+struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
{
- return cfg->wdev->netdev;
+ struct brcmf_cfg80211_vif *vif;
+ vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list);
+ return vif->wdev.netdev;
}
static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
@@ -491,8 +365,17 @@ static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
return wdev_to_cfg(ndev->ieee80211_ptr);
}
-#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
-#define cfg_to_iscan(w) (w->iscan)
+static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
+{
+ struct brcmf_if *ifp = netdev_priv(nd);
+ return &ifp->vif->profile;
+}
+
+static inline struct brcmf_cfg80211_vif *ndev_to_vif(struct net_device *ndev)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ return ifp->vif;
+}
static inline struct
brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
@@ -500,15 +383,10 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
return &cfg->conn_info;
}
-struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- struct brcmf_pub *drvr);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ struct device *busdev);
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
-
-/* event handler from dongle */
-void brcmf_cfg80211_event(struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data);
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_up(struct net_device *ndev);
+s32 brcmf_cfg80211_down(struct net_device *ndev);
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index e227c4c68ef9..d3d4151c3eda 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -40,7 +40,8 @@ BRCMSMAC_OFILES := \
phy/phytbl_n.o \
phy/phy_qmath.o \
dma.o \
- brcms_trace_events.o
+ brcms_trace_events.o \
+ debug.o
MODULEPFX := brcmsmac
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index b89f1272b93f..f0888a9ee32e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -692,7 +692,7 @@ void ai_pci_up(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
}
/* Unconfigure and/or apply various WARs when going down */
@@ -703,7 +703,7 @@ void ai_pci_down(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
@@ -721,7 +721,7 @@ void ai_epa_4313war(struct si_pub *sih)
/* check if the device is removed */
bool ai_deviceremoved(struct si_pub *sih)
{
- u32 w;
+ u32 w = 0;
struct si_info *sii;
sii = container_of(sih, struct si_info, pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index be5bcfb9153b..1de94f30564f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -21,6 +21,8 @@
#include "antsel.h"
#include "main.h"
#include "ampdu.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU 32
@@ -40,8 +42,6 @@
#define AMPDU_DEF_RETRY_LIMIT 5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT 2
-/* default weight of ampdu in txfifo */
-#define AMPDU_DEF_TXPKT_WEIGHT 2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD 2048
/* # of inis to be freed on detach */
@@ -114,7 +114,6 @@ struct brcms_fifo_info {
* mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
* max_pdu: max pdus allowed in ampdu
* dur: max duration of an ampdu (in msec)
- * txpkt_weight: weight of ampdu in txfifo; reduces rate lag
* rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
* ffpld_rsvd: number of bytes to reserve for preload
* max_txlen: max size of ampdu per mcs, bw and sgi
@@ -136,7 +135,6 @@ struct ampdu_info {
u8 mpdu_density;
s8 max_pdu;
u8 dur;
- u8 txpkt_weight;
u8 rx_factor;
u32 ffpld_rsvd;
u32 max_txlen[MCS_TABLE_SIZE][2][2];
@@ -183,18 +181,19 @@ static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
struct brcms_c_info *wlc = ampdu->wlc;
+ struct bcma_device *core = wlc->hw->d11core;
wlc->pub->_ampdu = false;
if (on) {
if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
- wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not "
- "nmode enabled\n", wlc->pub->unit);
+ brcms_err(core, "wl%d: driver not nmode enabled\n",
+ wlc->pub->unit);
return -ENOTSUPP;
}
if (!brcms_c_ampdu_cap(ampdu)) {
- wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
- "ampdu capable\n", wlc->pub->unit);
+ brcms_err(core, "wl%d: device not ampdu capable\n",
+ wlc->pub->unit);
return -ENOTSUPP;
}
wlc->pub->_ampdu = on;
@@ -247,7 +246,6 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
ampdu->max_pdu = AUTO;
ampdu->dur = AMPDU_MAX_DUR;
- ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
/*
@@ -374,7 +372,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
offsetof(struct macstat, txfunfl[fid]));
new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
if (new_txunfl == 0) {
- BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n");
+ brcms_dbg_ht(wlc->hw->d11core,
+ "TX status FRAG set but no tx underflows\n");
return -1;
}
fifo->prev_txfunfl = cur_txunfl;
@@ -396,8 +395,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
if (fifo->accum_txfunfl < 10)
return 0;
- BCMMSG(wlc->wiphy, "ampdu_count %d tx_underflows %d\n",
- current_ampdu_cnt, fifo->accum_txfunfl);
+ brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n",
+ current_ampdu_cnt, fifo->accum_txfunfl);
/*
compute the current ratio of tx unfl per ampdu.
@@ -450,9 +449,10 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
(max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
/ (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
- BCMMSG(wlc->wiphy, "DMA estimated transfer rate %d; "
- "pre-load size %d\n",
- fifo->dmaxferrate, fifo->ampdu_pld_size);
+ brcms_dbg_ht(wlc->hw->d11core,
+ "DMA estimated transfer rate %d; "
+ "pre-load size %d\n",
+ fifo->dmaxferrate, fifo->ampdu_pld_size);
} else {
/* decrease ampdu size */
@@ -486,7 +486,7 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
scb_ampdu = &scb->scb_ampdu;
if (!ampdu->ini_enable[tid]) {
- wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
+ brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
__func__, tid);
return;
}
@@ -498,378 +498,324 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}
-int
-brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
- struct sk_buff **pdu, int prec)
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+ struct brcms_c_info *wlc)
{
- struct brcms_c_info *wlc;
- struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
- u8 tid, ndelim;
- int err = 0;
- u8 preamble_type = BRCMS_GF_PREAMBLE;
- u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
- u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
- u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
+ session->wlc = wlc;
+ skb_queue_head_init(&session->skb_list);
+ session->max_ampdu_len = 0; /* determined from first MPDU */
+ session->max_ampdu_frames = 0; /* determined from first MPDU */
+ session->ampdu_len = 0;
+ session->dma_len = 0;
+}
- bool rr = true, fbr = false;
- uint i, count = 0, fifo, seg_cnt = 0;
- u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
- u32 ampdu_len, max_ampdu_bytes = 0;
- struct d11txh *txh = NULL;
+/*
+ * Preps the given packet for AMPDU based on the session data. If the
+ * frame cannot be accommodated in the current session, -ENOSPC is
+ * returned.
+ */
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+ struct sk_buff *p)
+{
+ struct brcms_c_info *wlc = session->wlc;
+ struct ampdu_info *ampdu = wlc->ampdu;
+ struct scb *scb = &wlc->pri_scb;
+ struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
+ struct ieee80211_tx_rate *txrate = tx_info->status.rates;
+ struct d11txh *txh = (struct d11txh *)p->data;
+ unsigned ampdu_frames;
+ u8 ndelim, tid;
u8 *plcp;
- struct ieee80211_hdr *h;
- struct scb *scb;
- struct scb_ampdu *scb_ampdu;
- struct scb_ampdu_tid_ini *ini;
- u8 mcs = 0;
- bool use_rts = false, use_cts = false;
- u32 rspec = 0, rspec_fallback = 0;
- u32 rts_rspec = 0, rts_rspec_fallback = 0;
- u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
- struct ieee80211_rts *rts;
- u8 rr_retry_limit;
- struct brcms_fifo_info *f;
+ uint len;
+ u16 mcl;
bool fbr_iscck;
- struct ieee80211_tx_info *tx_info;
- u16 qlen;
- struct wiphy *wiphy;
-
- wlc = ampdu->wlc;
- wiphy = wlc->wiphy;
- p = *pdu;
-
- tid = (u8) (p->priority);
+ bool rr;
- f = ampdu->fifo_tb + prio2fifo[tid];
+ ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
+ plcp = (u8 *)(txh + 1);
+ fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
+ len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
+ BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+ len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;
- scb = &wlc->pri_scb;
- scb_ampdu = &scb->scb_ampdu;
- ini = &scb_ampdu->ini[tid];
+ ampdu_frames = skb_queue_len(&session->skb_list);
+ if (ampdu_frames != 0) {
+ struct sk_buff *first;
- /* Let pressure continue to build ... */
- qlen = pktq_plen(&qi->q, prec);
- if (ini->tx_in_transit > 0 &&
- qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
- /* Collect multiple MPDU's to be sent in the next AMPDU */
- return -EBUSY;
+ if (ampdu_frames + 1 > session->max_ampdu_frames ||
+ session->ampdu_len + len > session->max_ampdu_len)
+ return -ENOSPC;
- /* at this point we intend to transmit an AMPDU */
- rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
- ampdu_len = 0;
- dma_len = 0;
- while (p) {
- struct ieee80211_tx_rate *txrate;
-
- tx_info = IEEE80211_SKB_CB(p);
- txrate = tx_info->status.rates;
+ /*
+ * We aren't really out of space if the new frame is of
+ * a different priority, but we want the same behaviour
+ * so return -ENOSPC anyway.
+ *
+ * XXX: The old AMPDU code did this, but is it really
+ * necessary?
+ */
+ first = skb_peek(&session->skb_list);
+ if (p->priority != first->priority)
+ return -ENOSPC;
+ }
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- err = brcms_c_prep_pdu(wlc, p, &fifo);
- } else {
- wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__);
- *pdu = NULL;
- err = 0;
- break;
- }
+ /*
+ * Now that we're sure this frame can be accommodated, update the
+ * session information.
+ */
+ session->ampdu_len += len;
+ session->dma_len += p->len;
- if (err) {
- if (err == -EBUSY) {
- wiphy_err(wiphy, "wl%d: sendampdu: "
- "prep_xdu retry; seq 0x%x\n",
- wlc->pub->unit, seq);
- *pdu = p;
- break;
- }
+ tid = (u8)p->priority;
- /* error in the packet; reject it */
- wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu "
- "rejected; seq 0x%x\n", wlc->pub->unit, seq);
- *pdu = NULL;
- break;
- }
+ /* Handle retry limits */
+ if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
+ txrate[0].count++;
+ rr = true;
+ } else {
+ txrate[1].count++;
+ rr = false;
+ }
- /* pkt is good to be aggregated */
- txh = (struct d11txh *) p->data;
- plcp = (u8 *) (txh + 1);
- h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
- seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
- index = TX_SEQ_TO_INDEX(seq);
+ if (ampdu_frames == 0) {
+ u8 plcp0, plcp3, is40, sgi, mcs;
+ uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
+ struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];
- /* check mcl fields and test whether it can be agg'd */
- mcl = le16_to_cpu(txh->MacTxControlLow);
- mcl &= ~TXC_AMPDU_MASK;
- fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3);
- txh->PreloadSize = 0; /* always default to 0 */
-
- /* Handle retry limits */
- if (txrate[0].count <= rr_retry_limit) {
- txrate[0].count++;
- rr = true;
- fbr = false;
+ if (rr) {
+ plcp0 = plcp[0];
+ plcp3 = plcp[3];
} else {
- fbr = true;
- rr = false;
- txrate[1].count++;
- }
-
- /* extract the length info */
- len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
- : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
-
- /* retrieve null delimiter count */
- ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
- seg_cnt += 1;
+ plcp0 = txh->FragPLCPFallback[0];
+ plcp3 = txh->FragPLCPFallback[3];
- BCMMSG(wlc->wiphy, "wl%d: mpdu %d plcp_len %d\n",
- wlc->pub->unit, count, len);
-
- /*
- * aggregateable mpdu. For ucode/hw agg,
- * test whether need to break or change the epoch
- */
- if (count == 0) {
- mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
- /* refill the bits since might be a retx mpdu */
- mcl |= TXC_STARTMSDU;
- rts = (struct ieee80211_rts *)&txh->rts_frame;
-
- if (ieee80211_is_rts(rts->frame_control)) {
- mcl |= TXC_SENDRTS;
- use_rts = true;
- }
- if (ieee80211_is_cts(rts->frame_control)) {
- mcl |= TXC_SENDCTS;
- use_cts = true;
- }
- } else {
- mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
- mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
}
- len = roundup(len, 4);
- ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
+ /* Limit AMPDU size based on MCS */
+ is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
+ sgi = plcp3_issgi(plcp3) ? 1 : 0;
+ mcs = plcp0 & ~MIMO_PLCP_40MHZ;
+ session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
+ ampdu->max_txlen[mcs][is40][sgi]);
- dma_len += (u16) p->len;
+ session->max_ampdu_frames = scb_ampdu->max_pdu;
+ if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
+ session->max_ampdu_frames =
+ min_t(u16, f->mcs2ampdu_table[mcs],
+ session->max_ampdu_frames);
+ }
+ }
- BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
- " seg_cnt %d null delim %d\n",
- wlc->pub->unit, ampdu_len, seg_cnt, ndelim);
+ /*
+ * Treat all frames as "middle" frames of AMPDU here. First and
+ * last frames must be fixed up after all MPDUs have been prepped.
+ */
+ mcl = le16_to_cpu(txh->MacTxControlLow);
+ mcl &= ~TXC_AMPDU_MASK;
+ mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
+ mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
+ txh->MacTxControlLow = cpu_to_le16(mcl);
+ txh->PreloadSize = 0; /* always default to 0 */
- txh->MacTxControlLow = cpu_to_le16(mcl);
+ skb_queue_tail(&session->skb_list, p);
- /* this packet is added */
- pkt[count++] = p;
+ return 0;
+}
- /* patch the first MPDU */
- if (count == 1) {
- u8 plcp0, plcp3, is40, sgi;
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
+{
+ struct brcms_c_info *wlc = session->wlc;
+ struct ampdu_info *ampdu = wlc->ampdu;
+ struct sk_buff *first, *last;
+ struct d11txh *txh;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *txrate;
+ u8 ndelim;
+ u8 *plcp;
+ uint len;
+ uint fifo;
+ struct brcms_fifo_info *f;
+ u16 mcl;
+ bool fbr;
+ bool fbr_iscck;
+ struct ieee80211_rts *rts;
+ bool use_rts = false, use_cts = false;
+ u16 dma_len = session->dma_len;
+ u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
+ u32 rspec = 0, rspec_fallback = 0;
+ u32 rts_rspec = 0, rts_rspec_fallback = 0;
+ u8 plcp0, plcp3, is40, sgi, mcs;
+ u16 mch;
+ u8 preamble_type = BRCMS_GF_PREAMBLE;
+ u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
+ u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
+ u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
- if (rr) {
- plcp0 = plcp[0];
- plcp3 = plcp[3];
- } else {
- plcp0 = txh->FragPLCPFallback[0];
- plcp3 = txh->FragPLCPFallback[3];
+ if (skb_queue_empty(&session->skb_list))
+ return;
- }
- is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
- sgi = plcp3_issgi(plcp3) ? 1 : 0;
- mcs = plcp0 & ~MIMO_PLCP_40MHZ;
- max_ampdu_bytes =
- min(scb_ampdu->max_rx_ampdu_bytes,
- ampdu->max_txlen[mcs][is40][sgi]);
-
- if (is40)
- mimo_ctlchbw =
- CHSPEC_SB_UPPER(wlc_phy_chanspec_get(
- wlc->band->pi))
- ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
-
- /* rebuild the rspec and rspec_fallback */
- rspec = RSPEC_MIMORATE;
- rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
- if (plcp[0] & MIMO_PLCP_40MHZ)
- rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
-
- if (fbr_iscck) /* CCK */
- rspec_fallback = cck_rspec(cck_phy2mac_rate
- (txh->FragPLCPFallback[0]));
- else { /* MIMO */
- rspec_fallback = RSPEC_MIMORATE;
- rspec_fallback |=
- txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
- if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
- rspec_fallback |=
- (PHY_TXC1_BW_40MHZ <<
- RSPEC_BW_SHIFT);
- }
+ first = skb_peek(&session->skb_list);
+ last = skb_peek_tail(&session->skb_list);
+
+ /* Need to fix up last MPDU first to adjust AMPDU length */
+ txh = (struct d11txh *)last->data;
+ fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
+ f = &ampdu->fifo_tb[fifo];
+
+ mcl = le16_to_cpu(txh->MacTxControlLow);
+ mcl &= ~TXC_AMPDU_MASK;
+ mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
+ txh->MacTxControlLow = cpu_to_le16(mcl);
+
+ /* remove the null delimiter after last mpdu */
+ ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
+ txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
+ session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
+
+ /* remove the pad len from last mpdu */
+ fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
+ len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
+ BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+ session->ampdu_len -= roundup(len, 4) - len;
+
+ /* Now fix up the first MPDU */
+ tx_info = IEEE80211_SKB_CB(first);
+ txrate = tx_info->status.rates;
+ txh = (struct d11txh *)first->data;
+ plcp = (u8 *)(txh + 1);
+ rts = (struct ieee80211_rts *)&txh->rts_frame;
+
+ mcl = le16_to_cpu(txh->MacTxControlLow);
+ /* If only one MPDU leave it marked as last */
+ if (first != last) {
+ mcl &= ~TXC_AMPDU_MASK;
+ mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
+ }
+ mcl |= TXC_STARTMSDU;
+ if (ieee80211_is_rts(rts->frame_control)) {
+ mcl |= TXC_SENDRTS;
+ use_rts = true;
+ }
+ if (ieee80211_is_cts(rts->frame_control)) {
+ mcl |= TXC_SENDCTS;
+ use_cts = true;
+ }
+ txh->MacTxControlLow = cpu_to_le16(mcl);
- if (use_rts || use_cts) {
- rts_rspec =
- brcms_c_rspec_to_rts_rspec(wlc,
- rspec, false, mimo_ctlchbw);
- rts_rspec_fallback =
- brcms_c_rspec_to_rts_rspec(wlc,
- rspec_fallback, false, mimo_ctlchbw);
- }
- }
+ fbr = txrate[1].count > 0;
+ if (!fbr) {
+ plcp0 = plcp[0];
+ plcp3 = plcp[3];
+ } else {
+ plcp0 = txh->FragPLCPFallback[0];
+ plcp3 = txh->FragPLCPFallback[3];
+ }
+ is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
+ sgi = plcp3_issgi(plcp3) ? 1 : 0;
+ mcs = plcp0 & ~MIMO_PLCP_40MHZ;
+
+ if (is40) {
+ if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
+ mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
+ else
+ mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
+ }
- /* if (first mpdu for host agg) */
- /* test whether to add more */
- if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) &&
- (count == f->mcs2ampdu_table[mcs])) {
- BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping"
- " ampdu at %d for mcs %d\n",
- wlc->pub->unit, count, mcs);
- break;
- }
+ /* rebuild the rspec and rspec_fallback */
+ rspec = RSPEC_MIMORATE;
+ rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
+ if (plcp[0] & MIMO_PLCP_40MHZ)
+ rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
- if (count == scb_ampdu->max_pdu)
- break;
+ fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
+ if (fbr_iscck) {
+ rspec_fallback =
+ cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
+ } else {
+ rspec_fallback = RSPEC_MIMORATE;
+ rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
+ if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
+ rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
+ }
- /*
- * check to see if the next pkt is
- * a candidate for aggregation
- */
- p = pktq_ppeek(&qi->q, prec);
- if (p) {
- tx_info = IEEE80211_SKB_CB(p);
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- ((u8) (p->priority) == tid)) {
- plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
- plen = max(scb_ampdu->min_len, plen);
+ if (use_rts || use_cts) {
+ rts_rspec =
+ brcms_c_rspec_to_rts_rspec(wlc, rspec,
+ false, mimo_ctlchbw);
+ rts_rspec_fallback =
+ brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
+ false, mimo_ctlchbw);
+ }
- if ((plen + ampdu_len) > max_ampdu_bytes) {
- p = NULL;
- continue;
- }
+ BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
+ /* mark plcp to indicate ampdu */
+ BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
- /*
- * check if there are enough
- * descriptors available
- */
- if (*wlc->core->txavail[fifo] <= seg_cnt + 1) {
- wiphy_err(wiphy, "%s: No fifo space "
- "!!\n", __func__);
- p = NULL;
- continue;
- }
- /* next packet fit for aggregation so dequeue */
- p = brcmu_pktq_pdeq(&qi->q, prec);
- } else {
- p = NULL;
- }
- }
- } /* end while(p) */
+ /* reset the mixed mode header durations */
+ if (txh->MModeLen) {
+ u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
+ session->ampdu_len);
+ txh->MModeLen = cpu_to_le16(mmodelen);
+ preamble_type = BRCMS_MM_PREAMBLE;
+ }
+ if (txh->MModeFbrLen) {
+ u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
+ session->ampdu_len);
+ txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
+ fbr_preamble_type = BRCMS_MM_PREAMBLE;
+ }
- ini->tx_in_transit += count;
+ /* set the preload length */
+ if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
+ dma_len = min(dma_len, f->ampdu_pld_size);
+ txh->PreloadSize = cpu_to_le16(dma_len);
+ } else {
+ txh->PreloadSize = 0;
+ }
- if (count) {
- /* patch up the last txh */
- txh = (struct d11txh *) pkt[count - 1]->data;
- mcl = le16_to_cpu(txh->MacTxControlLow);
- mcl &= ~TXC_AMPDU_MASK;
- mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
- txh->MacTxControlLow = cpu_to_le16(mcl);
-
- /* remove the null delimiter after last mpdu */
- ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
- txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
- ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
-
- /* remove the pad len from last mpdu */
- fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
- len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
- : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
- ampdu_len -= roundup(len, 4) - len;
-
- /* patch up the first txh & plcp */
- txh = (struct d11txh *) pkt[0]->data;
- plcp = (u8 *) (txh + 1);
+ mch = le16_to_cpu(txh->MacTxControlHigh);
- BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
- /* mark plcp to indicate ampdu */
- BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
+ /* update RTS dur fields */
+ if (use_rts || use_cts) {
+ u16 durid;
+ if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
+ TXC_PREAMBLE_RTS_MAIN_SHORT)
+ rts_preamble_type = BRCMS_SHORT_PREAMBLE;
- /* reset the mixed mode header durations */
- if (txh->MModeLen) {
- u16 mmodelen =
- brcms_c_calc_lsig_len(wlc, rspec, ampdu_len);
- txh->MModeLen = cpu_to_le16(mmodelen);
- preamble_type = BRCMS_MM_PREAMBLE;
- }
- if (txh->MModeFbrLen) {
- u16 mmfbrlen =
- brcms_c_calc_lsig_len(wlc, rspec_fallback,
- ampdu_len);
- txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
- fbr_preamble_type = BRCMS_MM_PREAMBLE;
- }
+ if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
+ TXC_PREAMBLE_RTS_FB_SHORT)
+ rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
- /* set the preload length */
- if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
- dma_len = min(dma_len, f->ampdu_pld_size);
- txh->PreloadSize = cpu_to_le16(dma_len);
- } else
- txh->PreloadSize = 0;
-
- mch = le16_to_cpu(txh->MacTxControlHigh);
-
- /* update RTS dur fields */
- if (use_rts || use_cts) {
- u16 durid;
- rts = (struct ieee80211_rts *)&txh->rts_frame;
- if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
- TXC_PREAMBLE_RTS_MAIN_SHORT)
- rts_preamble_type = BRCMS_SHORT_PREAMBLE;
-
- if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
- TXC_PREAMBLE_RTS_FB_SHORT)
- rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
-
- durid =
- brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
rspec, rts_preamble_type,
- preamble_type, ampdu_len,
- true);
- rts->duration = cpu_to_le16(durid);
- durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
- rts_rspec_fallback,
- rspec_fallback,
- rts_fbr_preamble_type,
- fbr_preamble_type,
- ampdu_len, true);
- txh->RTSDurFallback = cpu_to_le16(durid);
- /* set TxFesTimeNormal */
- txh->TxFesTimeNormal = rts->duration;
- /* set fallback rate version of TxFesTimeNormal */
- txh->TxFesTimeFallback = txh->RTSDurFallback;
- }
-
- /* set flag and plcp for fallback rate */
- if (fbr) {
- mch |= TXC_AMPDU_FBR;
- txh->MacTxControlHigh = cpu_to_le16(mch);
- BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
- BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
- }
-
- BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n",
- wlc->pub->unit, count, ampdu_len);
-
- /* inform rate_sel if it this is a rate probe pkt */
- frameid = le16_to_cpu(txh->TxFrameID);
- if (frameid & TXFID_RATE_PROBE_MASK)
- wiphy_err(wiphy, "%s: XXX what to do with "
- "TXFID_RATE_PROBE_MASK!?\n", __func__);
-
- for (i = 0; i < count; i++)
- brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
- ampdu->txpkt_weight);
+ preamble_type,
+ session->ampdu_len, true);
+ rts->duration = cpu_to_le16(durid);
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
+ rts_rspec_fallback,
+ rspec_fallback,
+ rts_fbr_preamble_type,
+ fbr_preamble_type,
+ session->ampdu_len, true);
+ txh->RTSDurFallback = cpu_to_le16(durid);
+ /* set TxFesTimeNormal */
+ txh->TxFesTimeNormal = rts->duration;
+ /* set fallback rate version of TxFesTimeNormal */
+ txh->TxFesTimeFallback = txh->RTSDurFallback;
+ }
+ /* set flag and plcp for fallback rate */
+ if (fbr) {
+ mch |= TXC_AMPDU_FBR;
+ txh->MacTxControlHigh = cpu_to_le16(mch);
+ BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
+ BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
}
- /* endif (count) */
- return err;
+
+ brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
+ wlc->pub->unit, skb_queue_len(&session->skb_list),
+ session->ampdu_len);
}
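
The add_frame/finalize split above hinges on one detail: every MPDU is initially tagged as a "middle" frame, and finalize later retags only the head and tail of the session's skb list. A hypothetical helper restating just that MacTxControlLow bit manipulation (illustrative only, not code from this patch):

static void example_set_ampdu_position(struct d11txh *txh, u16 position)
{
	u16 mcl = le16_to_cpu(txh->MacTxControlLow);

	/* clear the old position bits, then set TXC_AMPDU_FIRST,
	 * TXC_AMPDU_MIDDLE or TXC_AMPDU_LAST as requested */
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (position << TXC_AMPDU_SHIFT);
	txh->MacTxControlLow = cpu_to_le16(mcl);
}
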
static void
@@ -909,7 +855,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
u8 antselid = 0;
u8 retry_limit, rr_retry_limit;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
- struct wiphy *wiphy = wlc->wiphy;
#ifdef DEBUG
u8 hole[AMPDU_MAX_MPDU];
@@ -955,13 +900,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (supr_status) {
update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
- wiphy_err(wiphy,
+ brcms_err(wlc->hw->d11core,
"%s: Pkt tx suppressed, illegal channel possibly %d\n",
__func__, CHSPEC_CHANNEL(
wlc->default_bss->chanspec));
} else {
if (supr_status != TX_STATUS_SUPR_FRAG)
- wiphy_err(wiphy, "%s: supr_status 0x%x\n",
+ brcms_err(wlc->hw->d11core,
+ "%s: supr_status 0x%x\n",
__func__, supr_status);
}
/* no need to retry for badch; will fail again */
@@ -977,20 +923,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
* if there were underflows, but pre-loading
* is not active, notify rate adaptation.
*/
- if (brcms_c_ffpld_check_txfunfl(wlc,
- prio2fifo[tid]) > 0)
+ if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
tx_error = true;
}
} else if (txs->phyerr) {
update_rate = false;
- wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n",
+ brcms_err(wlc->hw->d11core,
+ "%s: ampdu tx phy error (0x%x)\n",
__func__, txs->phyerr);
-
- if (brcm_msg_level & LOG_ERROR_VAL) {
- brcmu_prpkt("txpkt (AMPDU)", p);
- brcms_c_print_txdesc((struct d11txh *) p->data);
- }
- brcms_c_print_txstatus(txs);
}
}
@@ -1003,6 +943,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
+ trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
+
if (tot_mpdu == 0) {
mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
@@ -1012,10 +954,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ack_recd = false;
if (ba_recd) {
bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
- BCMMSG(wiphy,
- "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
- tid, seq, start_seq, bindex,
- isset(bitmap, bindex), index);
+ brcms_dbg_ht(wlc->hw->d11core,
+ "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
+ tid, seq, start_seq, bindex,
+ isset(bitmap, bindex), index);
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
@@ -1046,14 +988,16 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* either retransmit or send bar if ack not recd */
if (!ack_recd) {
if (retry && (ini->txretry[index] < (int)retry_limit)) {
+ int ret;
ini->txretry[index]++;
ini->tx_in_transit--;
+ ret = brcms_c_txfifo(wlc, queue, p);
/*
- * Use high prededence for retransmit to
- * give some punch
+ * We shouldn't be out of space in the DMA
+ * ring here since we're reinserting a frame
+ * that was just pulled out.
*/
- brcms_c_txq_enq(wlc, scb, p,
- BRCMS_PRIO_TO_HI_PREC(tid));
+ WARN_ONCE(ret, "queue %d out of txds\n", queue);
} else {
/* Retry timeout */
ini->tx_in_transit--;
@@ -1064,9 +1008,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
IEEE80211_TX_STAT_AMPDU_NO_BACK;
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
- BCMMSG(wiphy,
- "BA Timeout, seq %d, in_transit %d\n",
- seq, ini->tx_in_transit);
+ brcms_dbg_ht(wlc->hw->d11core,
+ "BA Timeout, seq %d, in_transit %d\n",
+ seq, ini->tx_in_transit);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
}
@@ -1080,12 +1024,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
}
- brcms_c_send_q(wlc);
/* update rate state */
antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
-
- brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
void
@@ -1133,6 +1074,8 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
while (p) {
tx_info = IEEE80211_SKB_CB(p);
txh = (struct d11txh *) p->data;
+ trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
+ sizeof(*txh));
mcl = le16_to_cpu(txh->MacTxControlLow);
brcmu_pkt_buf_free_skb(p);
/* break out if last packet of ampdu */
@@ -1142,7 +1085,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
p = dma_getnexttxp(wlc->hw->di[queue],
DMA_RANGE_TRANSMITTED);
}
- brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
}
@@ -1182,23 +1124,6 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
}
/*
- * callback function that helps flushing ampdu packets from a priority queue
- */
-static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
- struct cb_del_ampdu_pars *ampdu_pars =
- (struct cb_del_ampdu_pars *)arg_a;
- bool rc;
-
- rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
- rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
- tx_info->rate_driver_data[0] == ampdu_pars->sta);
- rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
- return rc;
-}
-
-/*
* callback function that helps invalidating ampdu packets in a DMA queue
*/
static void dma_cb_fn_ampdu(void *txi, void *arg_a)
@@ -1218,15 +1143,5 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
struct ieee80211_sta *sta, u16 tid)
{
- struct brcms_txq_info *qi = wlc->pkt_queue;
- struct pktq *pq = &qi->q;
- int prec;
- struct cb_del_ampdu_pars ampdu_pars;
-
- ampdu_pars.sta = sta;
- ampdu_pars.tid = tid;
- for (prec = 0; prec < pq->num_prec; prec++)
- brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
- (void *)&ampdu_pars);
brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 421f4ba7c63c..73d01e586109 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -17,11 +17,34 @@
#ifndef _BRCM_AMPDU_H_
#define _BRCM_AMPDU_H_
+/*
+ * Data structure representing an in-progress session for accumulating
+ * frames for AMPDU.
+ *
+ * wlc: pointer to common driver data
+ * skb_list: queue of skb's for AMPDU
+ * max_ampdu_len: maximum length for this AMPDU
+ * max_ampdu_frames: maximum number of frames for this AMPDU
+ * ampdu_len: total number of bytes accumulated for this AMPDU
+ * dma_len: DMA length of this AMPDU
+ */
+struct brcms_ampdu_session {
+ struct brcms_c_info *wlc;
+ struct sk_buff_head skb_list;
+ unsigned max_ampdu_len;
+ u16 max_ampdu_frames;
+ u16 ampdu_len;
+ u16 dma_len;
+};
+
+extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+ struct brcms_c_info *wlc);
+extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+ struct sk_buff *p);
+extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+
extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern int brcms_c_sendampdu(struct ampdu_info *ampdu,
- struct brcms_txq_info *qi,
- struct sk_buff **aggp, int prec);
extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct sk_buff *p, struct tx_status *txs);
extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
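
Taken together, the three new entry points define a simple accumulate-then-flush lifecycle. A minimal caller-side sketch, assuming a DMA-level user like the one added in dma.c below; the wrapper name is hypothetical:

static int example_queue_for_ampdu(struct brcms_ampdu_session *session,
				   struct brcms_c_info *wlc,
				   struct sk_buff *p)
{
	int err;

	/* -ENOSPC means the frame does not fit the current session */
	err = brcms_c_ampdu_add_frame(session, p);
	if (err == -ENOSPC) {
		/* Close out the in-progress AMPDU; a real caller would
		 * now drain session->skb_list into the DMA ring (see
		 * ampdu_finalize() in dma.c), then start over. */
		brcms_c_ampdu_finalize(session);
		brcms_c_ampdu_reset_session(session, wlc);
		err = brcms_c_ampdu_add_frame(session, p);
	}
	return err;
}
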
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index 55e12c327911..54c616919590 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -21,6 +21,7 @@
#include "main.h"
#include "phy_shim.h"
#include "antsel.h"
+#include "debug.h"
#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
@@ -137,7 +138,8 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
- wiphy_err(wlc->wiphy, "antsel_attach: 2o3 "
+ brcms_err(wlc->hw->d11core,
+ "antsel_attach: 2o3 "
"board cfg invalid\n");
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
index 27dd73eef56d..871781e6a713 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
@@ -14,22 +14,29 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM brcmsmac
-
#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_BRCMSMAC_H
+#include <linux/types.h>
+#include <linux/device.h>
#include <linux/tracepoint.h>
#include "mac80211_if.h"
-#ifndef CONFIG_BRCMDBG
+#ifndef CONFIG_BRCM_TRACING
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
#endif
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac
+
/*
* We define a tracepoint, its arguments, its printk format and its
* 'fast binary record' layout.
@@ -78,9 +85,165 @@ TRACE_EVENT(brcms_dpc,
)
);
+TRACE_EVENT(brcms_macintstatus,
+ TP_PROTO(const struct device *dev, int in_isr, u32 macintstatus,
+ u32 mask),
+ TP_ARGS(dev, in_isr, macintstatus, mask),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(int, in_isr)
+ __field(u32, macintstatus)
+ __field(u32, mask)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->in_isr = in_isr;
+ __entry->macintstatus = macintstatus;
+ __entry->mask = mask;
+ ),
+ TP_printk("[%s] in_isr=%d macintstatus=%#x mask=%#x", __get_str(dev),
+ __entry->in_isr, __entry->macintstatus, __entry->mask)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_tx
+
+TRACE_EVENT(brcms_txdesc,
+ TP_PROTO(const struct device *dev,
+ void *txh, size_t txh_len),
+ TP_ARGS(dev, txh, txh_len),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __dynamic_array(u8, txh, txh_len)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ memcpy(__get_dynamic_array(txh), txh, txh_len);
+ ),
+ TP_printk("[%s] txdesc", __get_str(dev))
+);
+
+TRACE_EVENT(brcms_txstatus,
+ TP_PROTO(const struct device *dev, u16 framelen, u16 frameid,
+ u16 status, u16 lasttxtime, u16 sequence, u16 phyerr,
+ u16 ackphyrxsh),
+ TP_ARGS(dev, framelen, frameid, status, lasttxtime, sequence, phyerr,
+ ackphyrxsh),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(u16, framelen)
+ __field(u16, frameid)
+ __field(u16, status)
+ __field(u16, lasttxtime)
+ __field(u16, sequence)
+ __field(u16, phyerr)
+ __field(u16, ackphyrxsh)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->framelen = framelen;
+ __entry->frameid = frameid;
+ __entry->status = status;
+ __entry->lasttxtime = lasttxtime;
+ __entry->sequence = sequence;
+ __entry->phyerr = phyerr;
+ __entry->ackphyrxsh = ackphyrxsh;
+ ),
+ TP_printk("[%s] FrameId %#04x TxStatus %#04x LastTxTime %#04x "
+ "Seq %#04x PHYTxStatus %#04x RxAck %#04x",
+ __get_str(dev), __entry->frameid, __entry->status,
+ __entry->lasttxtime, __entry->sequence, __entry->phyerr,
+ __entry->ackphyrxsh)
+);
+
+TRACE_EVENT(brcms_ampdu_session,
+ TP_PROTO(const struct device *dev, unsigned max_ampdu_len,
+ u16 max_ampdu_frames, u16 ampdu_len, u16 ampdu_frames,
+ u16 dma_len),
+ TP_ARGS(dev, max_ampdu_len, max_ampdu_frames, ampdu_len, ampdu_frames,
+ dma_len),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(unsigned, max_ampdu_len)
+ __field(u16, max_ampdu_frames)
+ __field(u16, ampdu_len)
+ __field(u16, ampdu_frames)
+ __field(u16, dma_len)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev));
+ __entry->max_ampdu_len = max_ampdu_len;
+ __entry->max_ampdu_frames = max_ampdu_frames;
+ __entry->ampdu_len = ampdu_len;
+ __entry->ampdu_frames = ampdu_frames;
+ __entry->dma_len = dma_len;
+ ),
+ TP_printk("[%s] ampdu session max_len=%u max_frames=%u len=%u frames=%u dma_len=%u",
+ __get_str(dev), __entry->max_ampdu_len,
+ __entry->max_ampdu_frames, __entry->ampdu_len,
+ __entry->ampdu_frames, __entry->dma_len)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmsmac_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(brcms_msg_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(brcms_msg_event, brcms_crit,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(brcms_dbg,
+ TP_PROTO(u32 level, const char *func, struct va_format *vaf),
+ TP_ARGS(level, func, vaf),
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __string(func, func)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ __assign_str(func, func);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
#endif /* __TRACE_BRCMSMAC_H */
-#ifdef CONFIG_BRCMDBG
+#ifdef CONFIG_BRCM_TRACING
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
@@ -89,4 +252,4 @@ TRACE_EVENT(brcms_dpc,
#include <trace/define_trace.h>
-#endif /* CONFIG_BRCMDBG */
+#endif /* CONFIG_BRCM_TRACING */
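
For context, each TRACE_EVENT() above generates an ordinary trace_<name>() call; with CONFIG_BRCM_TRACING unset these collapse to empty static inlines, so callers need no #ifdefs. A hedged example for the tx-status event, assuming struct tx_status exposes the usual brcmsmac field names:

static void example_trace_txstatus(struct brcms_c_info *wlc,
				   struct tx_status *txs)
{
	/* emitted from the tx completion path; compiles away when
	 * tracing support is not configured */
	trace_brcms_txstatus(&wlc->hw->d11core->dev, txs->framelen,
			     txs->frameid, txs->status, txs->lasttxtime,
			     txs->sequence, txs->phyerr, txs->ackphyrxsh);
}
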
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 64a48f06d68b..a90b72202ec5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -26,6 +26,7 @@
#include "stf.h"
#include "channel.h"
#include "mac80211_if.h"
+#include "debug.h"
/* QDB() macro takes a dB value and converts to a quarter dB value */
#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
@@ -336,8 +337,6 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
const char *ccode = sprom->alpha2;
int ccode_len = sizeof(sprom->alpha2);
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
if (wlc_cm == NULL)
return NULL;
@@ -615,8 +614,8 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
/* check the chanspec */
if (brcms_c_chspec_malformed(chspec)) {
- wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
- wlc->pub->unit, chspec);
+ brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n",
+ wlc->pub->unit, chspec);
return false;
}
@@ -738,7 +737,8 @@ static int brcms_reg_notifier(struct wiphy *wiphy,
mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
} else {
mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: no valid channel for \"%s\"\n",
wlc->pub->unit, __func__, request->alpha2);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
new file mode 100644
index 000000000000..9761deb46204
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012 Canonical Ltd.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/module.h>
+#include <net/mac80211.h>
+
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "types.h"
+#include "main.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
+
+static struct dentry *root_folder;
+
+void brcms_debugfs_init(void)
+{
+ root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(root_folder))
+ root_folder = NULL;
+}
+
+void brcms_debugfs_exit(void)
+{
+ if (!root_folder)
+ return;
+
+ debugfs_remove_recursive(root_folder);
+ root_folder = NULL;
+}
+
+int brcms_debugfs_attach(struct brcms_pub *drvr)
+{
+ if (!root_folder)
+ return -ENODEV;
+
+ drvr->dbgfs_dir = debugfs_create_dir(
+ dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
+ return PTR_RET(drvr->dbgfs_dir);
+}
+
+void brcms_debugfs_detach(struct brcms_pub *drvr)
+{
+ if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
+ debugfs_remove_recursive(drvr->dbgfs_dir);
+}
+
+struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
+{
+ return drvr->dbgfs_dir;
+}
+
+static
+ssize_t brcms_debugfs_hardware_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ char buf[128];
+ int res;
+ struct brcms_pub *drvr = f->private_data;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "board vendor: %x\n"
+ "board type: %x\n"
+ "board revision: %x\n"
+ "board flags: %x\n"
+ "board flags2: %x\n"
+ "firmware revision: %x\n",
+ drvr->wlc->hw->d11core->bus->boardinfo.vendor,
+ drvr->wlc->hw->d11core->bus->boardinfo.type,
+ drvr->wlc->hw->boardrev,
+ drvr->wlc->hw->boardflags,
+ drvr->wlc->hw->boardflags2,
+ drvr->wlc->ucode_rev
+ );
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcms_debugfs_hardware_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcms_debugfs_hardware_read
+};
+
+void brcms_debugfs_create_files(struct brcms_pub *drvr)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("hardware", S_IRUGO, dentry,
+ drvr, &brcms_debugfs_hardware_ops);
+}
+
+#define __brcms_fn(fn) \
+void __brcms_ ##fn(struct device *dev, const char *fmt, ...) \
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ dev_ ##fn(dev, "%pV", &vaf); \
+ trace_brcms_ ##fn(&vaf); \
+ va_end(args); \
+}
+
+__brcms_fn(info)
+__brcms_fn(warn)
+__brcms_fn(err)
+__brcms_fn(crit)
+
+#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+ const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+#ifdef CONFIG_BRCMDBG
+ if ((brcm_msg_level & level) && net_ratelimit())
+ dev_err(dev, "%s %pV", func, &vaf);
+#endif
+ trace_brcms_dbg(level, func, &vaf);
+ va_end(args);
+}
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
new file mode 100644
index 000000000000..822781cf15d4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012 Canonical Ltd.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCMS_DEBUG_H_
+#define _BRCMS_DEBUG_H_
+
+#include <linux/device.h>
+#include <linux/bcma/bcma.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include "main.h"
+#include "mac80211_if.h"
+
+__printf(2, 3)
+void __brcms_info(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_warn(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_err(struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void __brcms_crit(struct device *dev, const char *fmt, ...);
+
+#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
+__printf(4, 5)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+ const char *fmt, ...);
+#else
+static inline __printf(4, 5)
+void __brcms_dbg(struct device *dev, u32 level, const char *func,
+ const char *fmt, ...)
+{
+}
+#endif
+
+/*
+ * Debug macros cannot be used when wlc is uninitialized. Generally
+ * this means any code that could run before brcms_c_attach() has
+ * returned successfully probably shouldn't use the following macros.
+ */
+
+#define brcms_dbg(core, l, f, a...) __brcms_dbg(&(core)->dev, l, __func__, f, ##a)
+#define brcms_info(core, f, a...) __brcms_info(&(core)->dev, f, ##a)
+#define brcms_warn(core, f, a...) __brcms_warn(&(core)->dev, f, ##a)
+#define brcms_err(core, f, a...) __brcms_err(&(core)->dev, f, ##a)
+#define brcms_crit(core, f, a...) __brcms_crit(&(core)->dev, f, ##a)
+
+#define brcms_dbg_info(core, f, a...) brcms_dbg(core, BRCM_DL_INFO, f, ##a)
+#define brcms_dbg_mac80211(core, f, a...) brcms_dbg(core, BRCM_DL_MAC80211, f, ##a)
+#define brcms_dbg_rx(core, f, a...) brcms_dbg(core, BRCM_DL_RX, f, ##a)
+#define brcms_dbg_tx(core, f, a...) brcms_dbg(core, BRCM_DL_TX, f, ##a)
+#define brcms_dbg_int(core, f, a...) brcms_dbg(core, BRCM_DL_INT, f, ##a)
+#define brcms_dbg_dma(core, f, a...) brcms_dbg(core, BRCM_DL_DMA, f, ##a)
+#define brcms_dbg_ht(core, f, a...) brcms_dbg(core, BRCM_DL_HT, f, ##a)
+
+struct brcms_pub;
+void brcms_debugfs_init(void);
+void brcms_debugfs_exit(void);
+int brcms_debugfs_attach(struct brcms_pub *drvr);
+void brcms_debugfs_detach(struct brcms_pub *drvr);
+struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr);
+void brcms_debugfs_create_files(struct brcms_pub *drvr);
+
+#endif /* _BRCMS_DEBUG_H_ */
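
The rest of this patch converts call sites mechanically from the wiphy_* helpers to these macros; a sketch of the before/after pattern with a made-up message:

	/* before: messages were tied to the wiphy */
	wiphy_err(wlc->wiphy, "wl%d: example failure\n", wlc->pub->unit);

	/* after: messages are keyed off the d11 core device and are
	 * mirrored into the brcmsmac_msg trace events when enabled */
	brcms_err(wlc->hw->d11core, "wl%d: example failure\n", wlc->pub->unit);

	/* debug output is filtered per-area by brcm_msg_level */
	brcms_dbg_tx(wlc->hw->d11core, "wl%d: example debug\n", wlc->pub->unit);
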
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 5e53305bd9a9..1860c572b3c4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -14,17 +14,22 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
+#include "main.h"
#include "dma.h"
#include "soc.h"
+#include "scb.h"
+#include "ampdu.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
/*
* dma register field offset calculation
@@ -176,28 +181,6 @@
#define BCMEXTRAHDROOM 172
-/* debug/trace */
-#ifdef DEBUG
-#define DMA_ERROR(fmt, ...) \
-do { \
- if (*di->msg_level & 1) \
- pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-#define DMA_TRACE(fmt, ...) \
-do { \
- if (*di->msg_level & 2) \
- pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-#else
-#define DMA_ERROR(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-#define DMA_TRACE(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-#endif /* DEBUG */
-
-#define DMA_NONE(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-
#define MAXNAMEL 8 /* 8 char names */
/* macros to convert between byte offsets and indexes */
@@ -224,12 +207,14 @@ struct dma64desc {
/* dma engine software state */
struct dma_info {
struct dma_pub dma; /* exported structure */
- uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
struct bcma_device *core;
struct device *dmadev;
+ /* session information for AMPDU */
+ struct brcms_ampdu_session ampdu_session;
+
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
@@ -298,12 +283,6 @@ struct dma_info {
bool aligndesc_4k;
};
-/*
- * default dma message level (if input msg_level
- * pointer is null in dma_attach())
- */
-static uint dma_msg_level;
-
/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
@@ -353,7 +332,7 @@ static uint prevtxd(struct dma_info *di, uint i)
static uint nextrxd(struct dma_info *di, uint i)
{
- return txd(di, i + 1);
+ return rxd(di, i + 1);
}
static uint ntxdactive(struct dma_info *di, uint h, uint t)
@@ -370,10 +349,8 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
uint dmactrlflags;
- if (di == NULL) {
- DMA_ERROR("NULL dma handle\n");
+ if (di == NULL)
return 0;
- }
dmactrlflags = di->dma.dmactrlflags;
dmactrlflags &= ~mask;
@@ -423,13 +400,15 @@ static bool _dma_isaddrext(struct dma_info *di)
/* not all tx or rx channel are available */
if (di->d64txregbase != 0) {
if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
- DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA64 tx doesn't have AE set\n",
+ di->name);
return true;
} else if (di->d64rxregbase != 0) {
if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
- DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA64 rx doesn't have AE set\n",
+ di->name);
return true;
}
@@ -530,8 +509,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
- DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -544,8 +524,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
- DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -564,12 +545,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
return dma64_alloc(di, direction);
}
-struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- struct bcma_device *core,
+struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ uint nrxpost, uint rxoffset)
{
+ struct si_pub *sih = wlc->hw->sih;
+ struct bcma_device *core = wlc->hw->d11core;
struct dma_info *di;
u8 rev = core->id.rev;
uint size;
@@ -580,9 +562,6 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if (di == NULL)
return NULL;
- di->msg_level = msg_level ? msg_level : &dma_msg_level;
-
-
di->dma64 =
((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
@@ -598,11 +577,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "txregbase %u rxregbase %u\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
+ brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
@@ -664,8 +643,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
- DMA_NONE("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align);
+ brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@@ -703,21 +682,27 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
- di->name, (u32)di->txdpa);
+ brcms_dbg_dma(di->core,
+ "%s: txdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
- di->name, (u32)di->rxdpa);
+ brcms_dbg_dma(di->core,
+ "%s: rxdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->rxdpa);
goto fail;
}
}
- DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
- di->ddoffsetlow, di->ddoffsethigh,
- di->dataoffsetlow, di->dataoffsethigh,
- di->addrext);
+ /* Initialize AMPDU session */
+ brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);
+
+ brcms_dbg_dma(di->core,
+ "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+ di->ddoffsetlow, di->ddoffsethigh,
+ di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext);
return (struct dma_pub *) di;
@@ -763,7 +748,7 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
@@ -839,7 +824,7 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
control = D64_RC_RE | (bcma_read32(di->core,
DMA64RXREGOFFS(di, control)) &
@@ -859,7 +844,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->nrxd == 0)
return;
@@ -954,7 +939,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
return 0;
len = le16_to_cpu(*(__le16 *) (p->data));
- DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+ brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
dma_spin_for_len(len, p);
/* set actual length */
@@ -981,14 +966,15 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
DMA64RXREGOFFS(di, status0)) &
D64_RS0_CD_MASK) - di->rcvptrbase) &
D64_RS0_CD_MASK, struct dma64desc);
- DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur);
+ brcms_dbg_dma(di->core,
+ "rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur);
}
#endif /* DEBUG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR("%s: bad frame length (%d)\n",
- di->name, len);
+ brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
+ di->name, len);
skb_queue_walk_safe(&dma_frames, p, next) {
skb_unlink(p, &dma_frames);
brcmu_pkt_buf_free_skb(p);
@@ -1005,7 +991,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
static bool dma64_rxidle(struct dma_info *di)
{
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->nrxd == 0)
return true;
@@ -1016,6 +1002,17 @@ static bool dma64_rxidle(struct dma_info *di)
D64_RS0_CD_MASK));
}
+static bool dma64_txidle(struct dma_info *di)
+{
+ if (di->ntxd == 0)
+ return true;
+
+ return ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
+ D64_XS0_CD_MASK));
+}
+
/*
* post receive buffers
* return false if refill failed completely and the ring is empty; this will stall
@@ -1047,7 +1044,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
- DMA_TRACE("%s: post %d\n", di->name, n);
+ brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@@ -1060,9 +1057,11 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
- DMA_ERROR("%s: out of rxbufs\n", di->name);
+ brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
+ di->name);
if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR("%s: ring is empty !\n", di->name);
+ brcms_dbg_dma(di->core, "%s: ring is empty !\n",
+ di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@@ -1107,7 +1106,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@@ -1138,7 +1137,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1170,7 +1169,7 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1182,7 +1181,7 @@ void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1205,11 +1204,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE("%s: %s\n",
- di->name,
- range == DMA_RANGE_ALL ? "all" :
- range == DMA_RANGE_TRANSMITTED ? "transmitted" :
- "transferred");
+ brcms_dbg_dma(di->core, "%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->txin == di->txout)
return;
@@ -1264,39 +1263,25 @@ bool dma_rxreset(struct dma_pub *pub)
return status == D64_RS0_RS_DISABLED;
}
-/*
- * !! tx entry routine
- * WARNING: call must check the return value for error.
- * the error(toss frames) could be fatal and cause many subsequent hard
- * to debug problems
- */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
+static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{
- struct dma_info *di = (struct dma_info *)pub;
unsigned char *data;
uint len;
u16 txout;
u32 flags = 0;
dma_addr_t pa;
- DMA_TRACE("%s:\n", di->name);
-
txout = di->txout;
+ if (WARN_ON(nexttxd(di, txout) == di->txin))
+ return;
+
/*
* obtain and initialize transmit descriptor entry.
*/
data = p->data;
len = p->len;
- /* no use to transmit a zero length packet */
- if (len == 0)
- return 0;
-
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
-
/* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
@@ -1318,23 +1303,147 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
/* bump the tx descriptor index */
di->txout = txout;
+}
- /* kick the chip */
- if (commit)
- bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
- di->xmtptrbase + I2B(txout, struct dma64desc));
+static void ampdu_finalize(struct dma_info *di)
+{
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ struct sk_buff *p;
+
+ trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
+ session->max_ampdu_len,
+ session->max_ampdu_frames,
+ session->ampdu_len,
+ skb_queue_len(&session->skb_list),
+ session->dma_len);
+
+ if (WARN_ON(skb_queue_empty(&session->skb_list)))
+ return;
+
+ brcms_c_ampdu_finalize(session);
+
+ while (!skb_queue_empty(&session->skb_list)) {
+ p = skb_dequeue(&session->skb_list);
+ dma_txenq(di, p);
+ }
+
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+ di->xmtptrbase + I2B(di->txout, struct dma64desc));
+ brcms_c_ampdu_reset_session(session, session->wlc);
+}
+
+static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
+{
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ int ret;
+
+ ret = brcms_c_ampdu_add_frame(session, p);
+ if (ret == -ENOSPC) {
+ /*
+ * AMPDU cannot accommodate this frame. Close out the in-
+ * progress AMPDU session and start a new one.
+ */
+ ampdu_finalize(di);
+ ret = brcms_c_ampdu_add_frame(session, p);
+ }
+
+ WARN_ON(ret);
+}
+
+/* Update count of available tx descriptors based on current DMA state */
+static void dma_update_txavail(struct dma_info *di)
+{
+ /*
+ * Available space is number of descriptors less the number of
+ * active descriptors and the number of queued AMPDU frames.
+ */
+ di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
+ skb_queue_len(&di->ampdu_session.skb_list) - 1;
+}
+
+/*
+ * !! tx entry routine
+ * WARNING: the caller must check the return value for errors.
+ * The error (tossed frames) could be fatal and cause many subsequent
+ * hard-to-debug problems.
+ */
+int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
+ struct sk_buff *p)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ struct ieee80211_tx_info *tx_info;
+ bool is_ampdu;
+
+ /* no use to transmit a zero length packet */
+ if (p->len == 0)
+ return 0;
+
+ /* return nonzero if out of tx descriptors */
+ if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
+ goto outoftxd;
+
+ tx_info = IEEE80211_SKB_CB(p);
+ is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
+ if (is_ampdu)
+ prep_ampdu_frame(di, p);
+ else
+ dma_txenq(di, p);
/* tx flow control */
- di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+ dma_update_txavail(di);
+
+ /* kick the chip */
+ if (is_ampdu) {
+ /*
+ * Start sending data if we've got a full AMPDU, there's
+ * no more space in the DMA ring, or the ring isn't
+ * currently transmitting.
+ */
+ if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
+ di->dma.txavail == 0 || dma64_txidle(di))
+ ampdu_finalize(di);
+ } else {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+ di->xmtptrbase + I2B(di->txout, struct dma64desc));
+ }
return 0;
outoftxd:
- DMA_ERROR("%s: out of txds !!!\n", di->name);
+ brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
- return -1;
+ return -ENOSPC;
+}
+
+void dma_txflush(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+
+ if (!skb_queue_empty(&session->skb_list))
+ ampdu_finalize(di);
+}
+
+int dma_txpending(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ return ntxdactive(di, di->txin, di->txout);
+}
+
+/*
+ * If we have an active AMPDU session and are not transmitting,
+ * this function will force tx to start.
+ */
+void dma_kick_tx(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+
+ if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
+ ampdu_finalize(di);
}
/*
@@ -1354,11 +1463,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
- DMA_TRACE("%s: %s\n",
- di->name,
- range == DMA_RANGE_ALL ? "all" :
- range == DMA_RANGE_TRANSMITTED ? "transmitted" :
- "transferred");
+ brcms_dbg_dma(di->core, "%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->ntxd == 0)
return NULL;
@@ -1412,13 +1521,13 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
di->txin = i;
/* tx flow control */
- di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+ dma_update_txavail(di);
return txp;
bogus:
- DMA_NONE("bogus curr: start %d end %d txout %d\n",
- start, end, di->txout);
+ brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
+ start, end, di->txout);
return NULL;
}
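
Two notes on the DMA-side changes above. First, dma_update_txavail() now also subtracts frames parked in the AMPDU session: with, say, ntxd = 256, 40 active descriptors and 12 queued session frames, txavail = 256 - 40 - 12 - 1 = 203. Second, the new dma_txflush()/dma_kick_tx()/dma_txpending() entry points are meant to be driven from the MAC-layer flush path; a minimal sketch under that assumption, with a hypothetical helper name:

static bool example_fifo_drained(struct brcms_c_info *wlc, uint fifo)
{
	struct dma_pub *dma = wlc->hw->di[fifo];

	/* push out any frames still parked in the AMPDU session ... */
	dma_txflush(dma);
	/* ... and force tx to start if the engine happens to be idle */
	dma_kick_tx(dma);

	/* no descriptors outstanding means the fifo has drained */
	return dma_txpending(dma) == 0;
}
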
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index cc269ee5c499..ff5b80b09046 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -74,12 +74,11 @@ struct dma_pub {
uint txnobuf; /* tx out of dma descriptors */
};
-extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- struct bcma_device *d11core,
+extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
uint txregbase, uint rxregbase,
uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
+ uint nrxpost, uint rxoffset);
void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
@@ -87,7 +86,11 @@ bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit);
+int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
+ struct sk_buff *p0);
+void dma_txflush(struct dma_pub *pub);
+int dma_txpending(struct dma_pub *pub);
+void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a744ea5a9559..e5fd20994bec 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -33,8 +33,10 @@
#include "ucode_loader.h"
#include "mac80211_if.h"
#include "main.h"
+#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -92,16 +94,21 @@ MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
/* recognized BCMA Core IDs */
static struct bcma_device_id brcms_coreid_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 17, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
-#ifdef DEBUG
-static int msglevel = 0xdeadbeef;
-module_param(msglevel, int, 0);
-#endif /* DEBUG */
+#if defined(CONFIG_BRCMDBG)
+/*
+ * Module parameter for setting the debug message level. Available
+ * flags are specified by the BRCM_DL_* macros in
+ * drivers/net/wireless/brcm80211/include/defs.h.
+ */
+module_param_named(debug, brcm_msg_level, uint, S_IRUGO | S_IWUSR);
+#endif
static struct ieee80211_channel brcms_2ghz_chantable[] = {
CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
@@ -276,12 +283,12 @@ static void brcms_ops_tx(struct ieee80211_hw *hw,
spin_lock_bh(&wl->lock);
if (!wl->pub->up) {
- wiphy_err(wl->wiphy, "ops->tx called while down\n");
+ brcms_err(wl->wlc->hw->d11core, "ops->tx called while down\n");
kfree_skb(skb);
goto done;
}
- brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
- tx_info->rate_driver_data[0] = control->sta;
+ if (brcms_c_sendpkt_mac80211(wl->wlc, skb, hw))
+ tx_info->rate_driver_data[0] = control->sta;
done:
spin_unlock_bh(&wl->lock);
}
@@ -313,8 +320,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
spin_unlock_bh(&wl->lock);
if (err != 0)
- wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
- err);
+ brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
+ __func__, err);
return err;
}
@@ -332,7 +339,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
status = brcms_c_chipmatch(wl->wlc->hw->d11core);
spin_unlock_bh(&wl->lock);
if (!status) {
- wiphy_err(wl->wiphy,
+ brcms_err(wl->wlc->hw->d11core,
"wl: brcms_ops_stop: chipmatch failed\n");
return;
}
@@ -350,8 +357,9 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
/* Just STA for now */
if (vif->type != NL80211_IFTYPE_STATION) {
- wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
- " STA for now\n", __func__, vif->type);
+ brcms_err(wl->wlc->hw->d11core,
+ "%s: Attempt to add type %d, only STA for now\n",
+ __func__, vif->type);
return -EOPNOTSUPP;
}
@@ -370,9 +378,9 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
struct brcms_info *wl = hw->priv;
+ struct bcma_device *core = wl->wlc->hw->d11core;
int err = 0;
int new_int;
- struct wiphy *wiphy = hw->wiphy;
spin_lock_bh(&wl->lock);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
@@ -380,25 +388,26 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
- __func__, conf->flags & IEEE80211_CONF_MONITOR ?
- "true" : "false");
+ brcms_dbg_info(core, "%s: change monitor mode: %s\n",
+ __func__, conf->flags & IEEE80211_CONF_MONITOR ?
+ "true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
- wiphy_err(wiphy, "%s: change power-save mode: %s (implement)\n",
+ brcms_err(core, "%s: change power-save mode: %s (implement)\n",
__func__, conf->flags & IEEE80211_CONF_PS ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_POWER) {
err = brcms_c_set_tx_power(wl->wlc, conf->power_level);
if (err < 0) {
- wiphy_err(wiphy, "%s: Error setting power_level\n",
+ brcms_err(core, "%s: Error setting power_level\n",
__func__);
goto config_out;
}
new_int = brcms_c_get_tx_power(wl->wlc);
if (new_int != conf->power_level)
- wiphy_err(wiphy, "%s: Power level req != actual, %d %d"
- "\n", __func__, conf->power_level,
+ brcms_err(core,
+ "%s: Power level req != actual, %d %d\n",
+ __func__, conf->power_level,
new_int);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -425,13 +434,13 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info, u32 changed)
{
struct brcms_info *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
+ struct bcma_device *core = wl->wlc->hw->d11core;
if (changed & BSS_CHANGED_ASSOC) {
/* association status changed (associated/disassociated)
* also implies a change in the AID.
*/
- wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME,
+ brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
__func__, info->assoc ? "" : "dis");
spin_lock_bh(&wl->lock);
brcms_c_associate_upd(wl->wlc, info->assoc);
@@ -491,7 +500,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
error = brcms_c_set_rateset(wl->wlc, &rs);
spin_unlock_bh(&wl->lock);
if (error)
- wiphy_err(wiphy, "changing basic rates failed: %d\n",
+ brcms_err(core, "changing basic rates failed: %d\n",
error);
}
if (changed & BSS_CHANGED_BEACON_INT) {
@@ -508,30 +517,30 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON)
/* Beacon data changed, retrieve new beacon (beaconing modes) */
- wiphy_err(wiphy, "%s: beacon changed\n", __func__);
+ brcms_err(core, "%s: beacon changed\n", __func__);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
/* Beaconing should be enabled/disabled (beaconing modes) */
- wiphy_err(wiphy, "%s: Beacon enabled: %s\n", __func__,
+ brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
info->enable_beacon ? "true" : "false");
}
if (changed & BSS_CHANGED_CQM) {
/* Connection quality monitor config changed */
- wiphy_err(wiphy, "%s: cqm change: threshold %d, hys %d "
+ brcms_err(core, "%s: cqm change: threshold %d, hys %d "
" (implement)\n", __func__, info->cqm_rssi_thold,
info->cqm_rssi_hyst);
}
if (changed & BSS_CHANGED_IBSS) {
/* IBSS join status changed */
- wiphy_err(wiphy, "%s: IBSS joined: %s (implement)\n", __func__,
- info->ibss_joined ? "true" : "false");
+ brcms_err(core, "%s: IBSS joined: %s (implement)\n",
+ __func__, info->ibss_joined ? "true" : "false");
}
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
- wiphy_err(wiphy, "%s: arp filtering: enabled %s, count %d"
+ brcms_err(core, "%s: arp filtering: enabled %s, count %d"
" (implement)\n", __func__, info->arp_filter_enabled ?
"true" : "false", info->arp_addr_cnt);
}
@@ -541,8 +550,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
* QoS for this association was enabled/disabled.
* Note that it is only ever disabled for station mode.
*/
- wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__,
- info->qos ? "true" : "false");
+ brcms_err(core, "%s: qos enabled: %s (implement)\n",
+ __func__, info->qos ? "true" : "false");
}
return;
}
@@ -553,25 +562,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags, u64 multicast)
{
struct brcms_info *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
+ struct bcma_device *core = wl->wlc->hw->d11core;
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
+ brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
+ brcms_dbg_info(core, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
+ brcms_dbg_info(core, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
- wiphy_dbg(wiphy, "FIF_CONTROL\n");
+ brcms_dbg_info(core, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+ brcms_dbg_info(core, "FIF_OTHER_BSS\n");
if (changed_flags & FIF_PSPOLL)
- wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+ brcms_dbg_info(core, "FIF_PSPOLL\n");
if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
- wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+ brcms_dbg_info(core, "FIF_BCN_PRBRESP_PROMISC\n");
spin_lock_bh(&wl->lock);
brcms_c_mac_promisc(wl->wlc, *total_flags);
@@ -653,8 +662,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
status = brcms_c_aggregatable(wl->wlc, tid);
spin_unlock_bh(&wl->lock);
if (!status) {
- wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n",
- tid);
+ brcms_err(wl->wlc->hw->d11core,
+ "START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -681,8 +690,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
/* Power save wakeup */
break;
default:
- wiphy_err(wl->wiphy, "%s: Invalid command, ignoring\n",
- __func__);
+ brcms_err(wl->wlc->hw->d11core,
+ "%s: Invalid command, ignoring\n", __func__);
}
return 0;
@@ -700,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+ bool result;
+
+ spin_lock_bh(&wl->lock);
+ result = brcms_c_tx_flush_completed(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return result;
+}
+
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
+ int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
- /* wait for packet queue and dma fifos to run empty */
- spin_lock_bh(&wl->lock);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- spin_unlock_bh(&wl->lock);
+ ret = wait_event_timeout(wl->tx_flush_wq,
+ brcms_tx_flush_completed(wl),
+ msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+ brcms_dbg_mac80211(wl->wlc->hw->d11core,
+ "ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
@@ -764,6 +786,7 @@ void brcms_dpc(unsigned long data)
done:
spin_unlock_bh(&wl->lock);
+ wake_up(&wl->tx_flush_wq);
}
/*
@@ -839,8 +862,10 @@ static void brcms_free(struct brcms_info *wl)
/* kill dpc */
tasklet_kill(&wl->tasklet);
- if (wl->pub)
+ if (wl->pub) {
+ brcms_debugfs_detach(wl->pub);
brcms_c_module_unregister(wl->pub, "linux", wl);
+ }
/* free common resources */
if (wl->wlc) {
@@ -889,27 +914,22 @@ static void brcms_remove(struct bcma_device *pdev)
static irqreturn_t brcms_isr(int irq, void *dev_id)
{
struct brcms_info *wl;
- bool ours, wantdpc;
+ irqreturn_t ret = IRQ_NONE;
wl = (struct brcms_info *) dev_id;
spin_lock(&wl->isr_lock);
/* call common first level interrupt handler */
- ours = brcms_c_isr(wl->wlc, &wantdpc);
- if (ours) {
- /* if more to do... */
- if (wantdpc) {
-
- /* ...and call the second level interrupt handler */
- /* schedule dpc */
- tasklet_schedule(&wl->tasklet);
- }
+ if (brcms_c_isr(wl->wlc)) {
+ /* schedule second level handler */
+ tasklet_schedule(&wl->tasklet);
+ ret = IRQ_HANDLED;
}
spin_unlock(&wl->isr_lock);
- return IRQ_RETVAL(ours);
+ return ret;
}
/*
@@ -1015,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
atomic_set(&wl->callbacks, 0);
+ init_waitqueue_head(&wl->tx_flush_wq);
+
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
@@ -1075,6 +1097,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
+ brcms_debugfs_attach(wl->pub);
+ brcms_debugfs_create_files(wl->pub);
n_adapters_found++;
return wl;
@@ -1093,7 +1117,7 @@ fail:
*
* Perimeter lock is initialized in the course of this function.
*/
-static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
+static int brcms_bcma_probe(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
@@ -1144,14 +1168,13 @@ static int brcms_suspend(struct bcma_device *pdev)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- pr_debug("brcms_suspend ok\n");
+ brcms_dbg_info(wl->wlc->hw->d11core, "brcms_suspend ok\n");
return 0;
}
static int brcms_resume(struct bcma_device *pdev)
{
- pr_debug("brcms_resume ok\n");
return 0;
}
@@ -1160,7 +1183,7 @@ static struct bcma_driver brcms_bcma_driver = {
.probe = brcms_bcma_probe,
.suspend = brcms_suspend,
.resume = brcms_resume,
- .remove = __devexit_p(brcms_remove),
+ .remove = brcms_remove,
.id_table = brcms_coreid_table,
};
@@ -1184,10 +1207,7 @@ static DECLARE_WORK(brcms_driver_work, brcms_driver_init);
static int __init brcms_module_init(void)
{
-#ifdef DEBUG
- if (msglevel != 0xdeadbeef)
- brcm_msg_level = msglevel;
-#endif
+ brcms_debugfs_init();
if (!schedule_work(&brcms_driver_work))
return -EBUSY;
@@ -1205,6 +1225,7 @@ static void __exit brcms_module_exit(void)
{
cancel_work_sync(&brcms_driver_work);
bcma_driver_unregister(&brcms_bcma_driver);
+ brcms_debugfs_exit();
}
module_init(brcms_module_init);
@@ -1216,7 +1237,7 @@ module_exit(brcms_module_exit);
void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
bool state, int prio)
{
- wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__);
+ brcms_err(wl->wlc->hw->d11core, "Shouldn't be here %s\n", __func__);
}
/*
@@ -1224,7 +1245,8 @@ void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
*/
void brcms_init(struct brcms_info *wl)
{
- BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+ brcms_dbg_info(wl->wlc->hw->d11core, "Initializing wl%d\n",
+ wl->pub->unit);
brcms_reset(wl);
brcms_c_init(wl->wlc, wl->mute_tx);
}
@@ -1234,7 +1256,7 @@ void brcms_init(struct brcms_info *wl)
*/
uint brcms_reset(struct brcms_info *wl)
{
- BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
+ brcms_dbg_info(wl->wlc->hw->d11core, "Resetting wl%d\n", wl->pub->unit);
brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
@@ -1248,7 +1270,7 @@ uint brcms_reset(struct brcms_info *wl)
void brcms_fatal_error(struct brcms_info *wl)
{
- wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+ brcms_err(wl->wlc->hw->d11core, "wl%d: fatal error, reinitializing\n",
wl->wlc->pub->unit);
brcms_reset(wl);
ieee80211_restart_hw(wl->pub->ieee_hw);
@@ -1396,14 +1418,16 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
#ifdef DEBUG
if (t->set)
- wiphy_err(hw->wiphy, "%s: Already set. Name: %s, per %d\n",
- __func__, t->name, periodic);
+ brcms_dbg_info(t->wl->wlc->hw->d11core,
+ "%s: Already set. Name: %s, per %d\n",
+ __func__, t->name, periodic);
#endif
t->ms = ms;
t->periodic = (bool) periodic;
- t->set = true;
-
- atomic_inc(&t->wl->callbacks);
+ if (!t->set) {
+ t->set = true;
+ atomic_inc(&t->wl->callbacks);
+ }
ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
}
@@ -1486,8 +1510,8 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
}
}
}
- wiphy_err(wl->wiphy, "ERROR: ucode buf tag:%d can not be found!\n",
- idx);
+ brcms_err(wl->wlc->hw->d11core,
+ "ERROR: ucode buf tag:%d can not be found!\n", idx);
*pbuf = NULL;
fail:
return -ENODATA;
@@ -1510,7 +1534,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
if (le32_to_cpu(hdr->len) != 4) {
- wiphy_err(wl->wiphy,
+ brcms_err(wl->wlc->hw->d11core,
"ERROR: fw hdr len\n");
return -ENOMSG;
}
@@ -1519,7 +1543,8 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
}
}
}
- wiphy_err(wl->wiphy, "ERROR: ucode tag:%d can not be found!\n", idx);
+ brcms_err(wl->wlc->hw->d11core,
+ "ERROR: ucode tag:%d can not be found!\n", idx);
return -ENOMSG;
}
@@ -1560,8 +1585,8 @@ int brcms_check_firmwares(struct brcms_info *wl)
sizeof(struct firmware_hdr));
rc = -EBADF;
} else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
- wiphy_err(wl->wiphy, "%s: out of bounds fw file size "
- "%zu\n", __func__, fw->size);
+ wiphy_err(wl->wiphy, "%s: out of bounds fw file size %zu\n",
+ __func__, fw->size);
rc = -EBADF;
} else {
/* check if ucode section overruns firmware image */
@@ -1601,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
- spin_unlock_bh(&wl->lock);
- msleep(ms);
- spin_lock_bh(&wl->lock);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd35..947ccacf43e6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
+ /* tx flush */
+ wait_queue_head_t tx_flush_wq;
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);
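Taken together, the mac80211_if.c and mac80211_if.h changes above replace the old busy wait (brcms_msleep around brcms_c_wait_for_tx_completion) with a waitqueue: brcms_dpc() wakes tx_flush_wq after each pass, and the flush callback sleeps until brcms_c_tx_flush_completed() reports the FIFOs drained or BRCMS_FLUSH_TIMEOUT expires. A condensed sketch of that handshake is below; the waitqueue and helper names are taken from the patch, while the wrapper function and the hard-coded timeout are illustrative.

/*
 * Sketch of the flush handshake: the interrupt bottom half wakes the
 * waitqueue, the flush path sleeps on the drained condition.
 */
static void example_flush(struct brcms_info *wl)
{
	long left;

	left = wait_event_timeout(wl->tx_flush_wq,
				  brcms_tx_flush_completed(wl),
				  msecs_to_jiffies(500));
	if (!left)
		pr_debug("tx flush timed out\n");
}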
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 75086b37c817..8b5839008af3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -34,12 +34,9 @@
#include "ucode_loader.h"
#include "main.h"
#include "soc.h"
-
-/*
- * Indication for txflowcontrol that all priority bits in
- * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
- */
-#define ALLPRIO -1
+#include "dma.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
/* watchdog timer, in unit of ms */
#define TIMER_INTERVAL_WATCHDOG 1000
@@ -126,21 +123,6 @@
#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
-/* precedences numbers for wlc queues. These are twice as may levels as
- * 802.1D priorities.
- * Odd numbers are used for HI priority traffic at same precedence levels
- * These constants are used ONLY by wlc_prio2prec_map. Do not use them
- * elsewhere.
- */
-#define _BRCMS_PREC_NONE 0 /* None = - */
-#define _BRCMS_PREC_BK 2 /* BK - Background */
-#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
-#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
-#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
-#define _BRCMS_PREC_VI 10 /* Vi - Video */
-#define _BRCMS_PREC_VO 12 /* Vo - Voice */
-#define _BRCMS_PREC_NC 14 /* NC - Network Control */
-
/* synthpu_dly times in us */
#define SYNTHPU_DLY_APHY_US 3700
#define SYNTHPU_DLY_BPHY_US 1050
@@ -237,17 +219,17 @@
#define MAX_DMA_SEGS 4
-/* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD 256
+/* # of entries in Tx FIFO */
+#define NTXD 64
/* Max # of entries in Rx FIFO based on 4kb page size */
#define NRXD 256
+/* Amount of headroom to leave in Tx FIFO */
+#define TX_HEADROOM 4
+
/* try to keep this # rbufs posted to the chip */
#define NRXBUFPOST 32
-/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT 50
-
/* max # frames to process in brcms_c_recv() */
#define RXBND 8
/* max # tx status to process in wlc_txstatus() */
@@ -283,24 +265,8 @@ struct edcf_acparam {
u16 TXOP;
} __packed;
-const u8 prio2fifo[NUMPRIO] = {
- TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */
- TX_AC_BK_FIFO, /* 1 BK AC_BK Background */
- TX_AC_BK_FIFO, /* 2 -- AC_BK Background */
- TX_AC_BE_FIFO, /* 3 EE AC_BE Best Effort */
- TX_AC_VI_FIFO, /* 4 CL AC_VI Video */
- TX_AC_VI_FIFO, /* 5 VI AC_VI Video */
- TX_AC_VO_FIFO, /* 6 VO AC_VO Voice */
- TX_AC_VO_FIFO /* 7 NC AC_VO Voice */
-};
-
/* debug/trace */
-uint brcm_msg_level =
-#if defined(DEBUG)
- LOG_ERROR_VAL;
-#else
- 0;
-#endif /* DEBUG */
+uint brcm_msg_level;
/* TX FIFO number to WME/802.1E Access Category */
static const u8 wme_fifo2ac[] = {
@@ -320,18 +286,6 @@ static const u8 wme_ac2fifo[] = {
TX_AC_BK_FIFO
};
-/* 802.1D Priority to precedence queue mapping */
-const u8 wlc_prio2prec_map[] = {
- _BRCMS_PREC_BE, /* 0 BE - Best-effort */
- _BRCMS_PREC_BK, /* 1 BK - Background */
- _BRCMS_PREC_NONE, /* 2 None = - */
- _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */
- _BRCMS_PREC_CL, /* 4 CL - Controlled Load */
- _BRCMS_PREC_VI, /* 5 Vi - Video */
- _BRCMS_PREC_VO, /* 6 Vo - Voice */
- _BRCMS_PREC_NC, /* 7 NC - Network Control */
-};
-
static const u16 xmtfifo_sz[][NFIFO] = {
/* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
@@ -371,6 +325,36 @@ static const char fifo_names[6][0];
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
+/* Mapping of ieee80211 AC numbers to tx fifos */
+static const u8 ac_to_fifo_mapping[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = TX_AC_VO_FIFO,
+ [IEEE80211_AC_VI] = TX_AC_VI_FIFO,
+ [IEEE80211_AC_BE] = TX_AC_BE_FIFO,
+ [IEEE80211_AC_BK] = TX_AC_BK_FIFO,
+};
+
+/* Mapping of tx fifos to ieee80211 AC numbers */
+static const u8 fifo_to_ac_mapping[IEEE80211_NUM_ACS] = {
+ [TX_AC_BK_FIFO] = IEEE80211_AC_BK,
+ [TX_AC_BE_FIFO] = IEEE80211_AC_BE,
+ [TX_AC_VI_FIFO] = IEEE80211_AC_VI,
+ [TX_AC_VO_FIFO] = IEEE80211_AC_VO,
+};
+
+static u8 brcms_ac_to_fifo(u8 ac)
+{
+ if (ac >= ARRAY_SIZE(ac_to_fifo_mapping))
+ return TX_AC_BE_FIFO;
+ return ac_to_fifo_mapping[ac];
+}
+
+static u8 brcms_fifo_to_ac(u8 fifo)
+{
+ if (fifo >= ARRAY_SIZE(fifo_to_ac_mapping))
+ return IEEE80211_AC_BE;
+ return fifo_to_ac_mapping[fifo];
+}
+
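The prio2fifo and precedence tables are retired in favour of the two small AC/FIFO lookup helpers added above, each clamped to a best-effort default for out-of-range input. A brief usage sketch of the round trip from a frame's mac80211 queue to a tx FIFO and back follows; the wrapper function is hypothetical, and in the patch itself the reverse mapping is used when waking queues from the tx-status path.

/* Illustrative: map a frame's mac80211 AC to a FIFO and back again. */
static void example_queue_mapping(struct ieee80211_hw *hw,
				  struct sk_buff *skb)
{
	u8 fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
	u8 ac = brcms_fifo_to_ac(fifo);

	/* e.g. wake the same mac80211 queue once the FIFO drains */
	ieee80211_wake_queue(hw, ac);
}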
/* Find basic rate for a given rate */
static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
{
@@ -415,10 +399,15 @@ static bool brcms_deviceremoved(struct brcms_c_info *wlc)
}
/* sum the individual fifo tx pending packet counts */
-static s16 brcms_txpktpendtot(struct brcms_c_info *wlc)
+static int brcms_txpktpendtot(struct brcms_c_info *wlc)
{
- return wlc->core->txpktpend[0] + wlc->core->txpktpend[1] +
- wlc->core->txpktpend[2] + wlc->core->txpktpend[3];
+ int i;
+ int pending = 0;
+
+ for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
+ if (wlc->hw->di[i])
+ pending += dma_txpending(wlc->hw->di[i]);
+ return pending;
}
static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc)
@@ -626,14 +615,11 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
uint rate = rspec2rate(ratespec);
if (rate == 0) {
- wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n",
+ brcms_err(wlc->hw->d11core, "wl%d: WAR: using rate of 1 mbps\n",
wlc->pub->unit);
rate = BRCM_RATE_1M;
}
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
- wlc->pub->unit, ratespec, preamble_type, mac_len);
-
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -696,7 +682,7 @@ static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
u16 size;
u32 value;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
@@ -725,19 +711,19 @@ static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
{
- struct wiphy *wiphy = wlc_hw->wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
/* init microcode host flags */
brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
/* do band-specific ucode IHR, SHM, and SCR inits */
- if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16);
else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
- " %d\n", __func__, wlc_hw->unit,
+ brcms_err(wlc_hw->d11core,
+ "%s: wl%d: unsupported phy in corerev %d\n",
+ __func__, wlc_hw->unit,
wlc_hw->corerev);
} else {
if (D11REV_IS(wlc_hw->corerev, 24)) {
@@ -745,12 +731,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
brcms_c_write_inits(wlc_hw,
ucode->d11lcn0bsinitvals24);
else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in"
- " core rev %d\n", __func__,
- wlc_hw->unit, wlc_hw->corerev);
+ brcms_err(wlc_hw->d11core,
+ "%s: wl%d: unsupported phy in core rev %d\n",
+ __func__, wlc_hw->unit,
+ wlc_hw->corerev);
} else {
- wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
- __func__, wlc_hw->unit, wlc_hw->corerev);
+ brcms_err(wlc_hw->d11core,
+ "%s: wl%d: unsupported corerev %d\n",
+ __func__, wlc_hw->unit, wlc_hw->corerev);
}
}
}
@@ -765,7 +753,7 @@ static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d: clk %d\n", wlc_hw->unit, clk);
wlc_hw->phyclk = clk;
@@ -790,8 +778,8 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
/* low-level band switch utility routine */
static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- bandunit);
+ brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
+ bandunit);
wlc_hw->band = wlc_hw->bandstate[bandunit];
@@ -819,7 +807,7 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
u32 macintmask;
u32 macctrl;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_mac80211(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
macctrl = bcma_read32(wlc_hw->d11core,
D11REGOFFS(maccontrol));
WARN_ON((macctrl & MCTL_EN_MAC) != 0);
@@ -841,9 +829,10 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
static bool
brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
{
- struct sk_buff *p;
- uint queue;
- struct d11txh *txh;
+ struct sk_buff *p = NULL;
+ uint queue = NFIFO;
+ struct dma_pub *dma = NULL;
+ struct d11txh *txh = NULL;
struct scb *scb = NULL;
bool free_pdu;
int tx_rts, tx_frame_count, tx_rts_count;
@@ -854,6 +843,11 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *txrate;
int i;
+ bool fatal = true;
+
+ trace_brcms_txstatus(&wlc->hw->d11core->dev, txs->framelen,
+ txs->frameid, txs->status, txs->lasttxtime,
+ txs->sequence, txs->phyerr, txs->ackphyrxsh);
/* discard intermediate indications for ucode with one legitimate case:
* e.g. if "useRTS" is set. ucode did a successful rts/cts exchange,
@@ -862,34 +856,36 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
*/
if (!(txs->status & TX_STATUS_AMPDU)
&& (txs->status & TX_STATUS_INTERMEDIATE)) {
- BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
- return false;
+ brcms_dbg_tx(wlc->hw->d11core, "INTERMEDIATE but not AMPDU\n");
+ fatal = false;
+ goto out;
}
queue = txs->frameid & TXFID_QUEUE_MASK;
if (queue >= NFIFO) {
- p = NULL;
- goto fatal;
+ brcms_err(wlc->hw->d11core, "queue %u >= NFIFO\n", queue);
+ goto out;
}
+ dma = wlc->hw->di[queue];
+
p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
- if (p == NULL)
- goto fatal;
+ if (p == NULL) {
+ brcms_err(wlc->hw->d11core, "dma_getnexttxp returned null!\n");
+ goto out;
+ }
txh = (struct d11txh *) (p->data);
mcl = le16_to_cpu(txh->MacTxControlLow);
- if (txs->phyerr) {
- if (brcm_msg_level & LOG_ERROR_VAL) {
- wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n",
- txs->phyerr, txh->MainRates);
- brcms_c_print_txdesc(txh);
- }
- brcms_c_print_txstatus(txs);
- }
+ if (txs->phyerr)
+ brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
+ txs->phyerr, txh->MainRates);
- if (txs->frameid != le16_to_cpu(txh->TxFrameID))
- goto fatal;
+ if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
+ brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
+ goto out;
+ }
tx_info = IEEE80211_SKB_CB(p);
h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
@@ -898,14 +894,24 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
- return false;
+ fatal = false;
+ goto out;
}
+ /*
+ * brcms_c_ampdu_dotxstatus() will trace tx descriptors for AMPDU
+ * frames; this traces them for the rest.
+ */
+ trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
+
supr_status = txs->status & TX_STATUS_SUPR_MASK;
- if (supr_status == TX_STATUS_SUPR_BADCH)
- BCMMSG(wlc->wiphy,
- "%s: Pkt tx suppressed, possibly channel %d\n",
- __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
+ if (supr_status == TX_STATUS_SUPR_BADCH) {
+ unsigned xfts = le16_to_cpu(txh->XtraFrameTypes);
+ brcms_dbg_tx(wlc->hw->d11core,
+ "Pkt tx suppressed, dest chan %u, current %d\n",
+ (xfts >> XFTS_CHANNEL_SHIFT) & 0xff,
+ CHSPEC_CHANNEL(wlc->default_bss->chanspec));
+ }
tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
tx_frame_count =
@@ -916,7 +922,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
lastframe = !ieee80211_has_morefrags(h->frame_control);
if (!lastframe) {
- wiphy_err(wlc->wiphy, "Not last frame!\n");
+ brcms_err(wlc->hw->d11core, "Not last frame!\n");
} else {
/*
* Set information to be consumed by Minstrel ht.
@@ -982,26 +988,37 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
totlen = p->len;
free_pdu = true;
- brcms_c_txfifo_complete(wlc, queue, 1);
-
if (lastframe) {
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
} else {
- wiphy_err(wlc->wiphy, "%s: Not last frame => not calling "
- "tx_status\n", __func__);
+ brcms_err(wlc->hw->d11core,
+ "%s: Not last frame => not calling tx_status\n",
+ __func__);
}
- return false;
+ fatal = false;
- fatal:
- if (p)
- brcmu_pkt_buf_free_skb(p);
+ out:
+ if (fatal) {
+ if (txh)
+ trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
+ sizeof(*txh));
+ if (p)
+ brcmu_pkt_buf_free_skb(p);
+ }
- return true;
+ if (dma && queue < NFIFO) {
+ u16 ac_queue = brcms_fifo_to_ac(queue);
+ if (dma->txavail > TX_HEADROOM && queue < TX_BCMC_FIFO &&
+ ieee80211_queue_stopped(wlc->pub->ieee_hw, ac_queue))
+ ieee80211_wake_queue(wlc->pub->ieee_hw, ac_queue);
+ dma_kick_tx(dma);
+ }
+ return fatal;
}
/* process tx completion events in BMAC
@@ -1010,8 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
- bool morepending = false;
- struct brcms_c_info *wlc = wlc_hw->wlc;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@@ -1022,22 +1037,23 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
*/
uint max_tx_num = bound ? TXSBND : -1;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
- while (!(*fatal)
- && (s1 & TXS_V)) {
+ while (n < max_tx_num) {
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
- wlc_hw->unit, __func__);
- return morepending;
+ brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ __func__);
+ *fatal = true;
+ return false;
}
- s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+ /* only process when valid */
+ if (!(s1 & TXS_V))
+ break;
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1045,23 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
- /* !give others some time to run! */
- if (++n >= max_tx_num)
- break;
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+ if (*fatal == true)
+ return false;
+ n++;
}
- if (*fatal)
- return 0;
-
- if (n >= max_tx_num)
- morepending = true;
-
- if (!pktq_empty(&wlc->pkt_queue->q))
- brcms_c_send_q(wlc);
-
- return morepending;
+ return n >= max_tx_num;
}
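With the rewrite above, brcms_b_txstatus() consumes frmtxstatus words in a bounded loop and returns true when max_tx_num entries were processed with work possibly still pending, letting the caller yield and come back rather than spin. A hedged sketch of how a bottom-half caller might use that return value; the wrapper name is illustrative, and in the driver the real caller is the DPC.

/* Illustrative caller: reschedule the bottom half while work remains. */
static void example_handle_txstatus(struct brcms_hardware *hw,
				    struct tasklet_struct *tasklet)
{
	bool fatal;

	if (brcms_b_txstatus(hw, true, &fatal) && !fatal)
		tasklet_schedule(tasklet);	/* more status words pending */
}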
static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -1112,7 +1117,6 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
u16 pio_mhf2 = 0;
struct brcms_hardware *wlc_hw = wlc->hw;
uint unit = wlc_hw->unit;
- struct wiphy *wiphy = wlc->wiphy;
/* name and offsets for dma_attach */
snprintf(name, sizeof(name), "wl%d", unit);
@@ -1125,12 +1129,12 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ wlc_hw->di[0] = dma_attach(name, wlc,
(wme ? dmareg(DMA_TX, 0) : 0),
dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
- BRCMS_HWRXOFF, &brcm_msg_level);
+ BRCMS_HWRXOFF);
dma_attach_err |= (NULL == wlc_hw->di[0]);
/*
@@ -1139,10 +1143,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ wlc_hw->di[1] = dma_attach(name, wlc,
dmareg(DMA_TX, 1), 0,
- NTXD, 0, 0, -1, 0, 0,
- &brcm_msg_level);
+ NTXD, 0, 0, -1, 0, 0);
dma_attach_err |= (NULL == wlc_hw->di[1]);
/*
@@ -1150,26 +1153,26 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ wlc_hw->di[2] = dma_attach(name, wlc,
dmareg(DMA_TX, 2), 0,
- NTXD, 0, 0, -1, 0, 0,
- &brcm_msg_level);
+ NTXD, 0, 0, -1, 0, 0);
dma_attach_err |= (NULL == wlc_hw->di[2]);
/*
* FIFO 3
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ wlc_hw->di[3] = dma_attach(name, wlc,
dmareg(DMA_TX, 3),
0, NTXD, 0, 0, -1,
- 0, 0, &brcm_msg_level);
+ 0, 0);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
if (dma_attach_err) {
- wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed"
- "\n", unit);
+ brcms_err(wlc_hw->d11core,
+ "wl%d: wlc_attach: dma_attach failed\n",
+ unit);
return false;
}
@@ -1503,8 +1506,7 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
u16 mac_m;
u16 mac_h;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
- wlc_hw->unit);
+ brcms_dbg_rx(core, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit);
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
@@ -1527,7 +1529,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
__le32 word_le;
__be32 word_be;
bool be_bit;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
@@ -1700,8 +1702,8 @@ static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit);
+ brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc_hw->band->bandunit);
brcms_c_ucode_bsinit(wlc_hw);
@@ -1736,8 +1738,6 @@ static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
/* Perform a soft reset of the PHY PLL */
void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
~0, 0);
udelay(1);
@@ -1782,7 +1782,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
u32 phy_bw_clkbits;
bool phy_in_reset = false;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
if (pih == NULL)
return;
@@ -1916,7 +1916,7 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
/* power both the pll and external oscillator on/off */
static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d: want %d\n", wlc_hw->unit, want);
/*
* dont power down if plldown is false or
@@ -2005,7 +2005,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d: core reset\n", wlc_hw->unit);
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
@@ -2016,13 +2016,13 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
+ brcms_err(wlc_hw->d11core, "wl%d: %s: "
"dma_txreset[%d]: cannot stop dma\n",
wlc_hw->unit, __func__, i);
if ((wlc_hw->di[RX_FIFO])
&& (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset"
+ brcms_err(wlc_hw->d11core, "wl%d: %s: dma_rxreset"
"[%d]: cannot stop dma\n",
wlc_hw->unit, __func__, RX_FIFO);
}
@@ -2235,7 +2235,7 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
uint i;
uint count;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
count = (nbytes / sizeof(u32));
@@ -2257,14 +2257,14 @@ static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
if (wlc_hw->ucode_loaded)
return;
- if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band)) {
brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo,
ucode->bcm43xx_16_mimosz);
wlc_hw->ucode_loaded = true;
} else
- wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
- "corerev %d\n",
+ brcms_err(wlc_hw->d11core,
+ "%s: wl%d: unsupported phy in corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (BRCMS_ISLCNPHY(wlc_hw->band)) {
@@ -2272,8 +2272,8 @@ static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
ucode->bcm43xx_24_lcnsz);
wlc_hw->ucode_loaded = true;
} else {
- wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in "
- "corerev %d\n",
+ brcms_err(wlc_hw->d11core,
+ "%s: wl%d: unsupported phy in corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
}
}
@@ -2310,7 +2310,6 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
uint unit;
uint intstatus, idx;
struct bcma_device *core = wlc_hw->d11core;
- struct wiphy *wiphy = wlc_hw->wlc->wiphy;
unit = wlc_hw->unit;
@@ -2323,39 +2322,39 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
if (!intstatus)
continue;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n",
- unit, idx, intstatus);
+ brcms_dbg_int(core, "wl%d: intstatus%d 0x%x\n",
+ unit, idx, intstatus);
if (intstatus & I_RO) {
- wiphy_err(wiphy, "wl%d: fifo %d: receive fifo "
+ brcms_err(core, "wl%d: fifo %d: receive fifo "
"overflow\n", unit, idx);
fatal = true;
}
if (intstatus & I_PC) {
- wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n",
- unit, idx);
+ brcms_err(core, "wl%d: fifo %d: descriptor error\n",
+ unit, idx);
fatal = true;
}
if (intstatus & I_PD) {
- wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit,
+ brcms_err(core, "wl%d: fifo %d: data error\n", unit,
idx);
fatal = true;
}
if (intstatus & I_DE) {
- wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol "
+ brcms_err(core, "wl%d: fifo %d: descriptor protocol "
"error\n", unit, idx);
fatal = true;
}
if (intstatus & I_RU)
- wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor "
+ brcms_err(core, "wl%d: fifo %d: receive descriptor "
"underflow\n", idx, unit);
if (intstatus & I_XU) {
- wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo "
+ brcms_err(core, "wl%d: fifo %d: transmit fifo "
"underflow\n", idx, unit);
fatal = true;
}
@@ -2516,13 +2515,13 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
- u32 macintstatus;
+ u32 macintstatus, mask;
/* macintstatus includes a DMA interrupt summary bit */
macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
+ mask = in_isr ? wlc->macintmask : wlc->defmacintmask;
- BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
- macintstatus);
+ trace_brcms_macintstatus(&core->dev, in_isr, macintstatus, mask);
/* detect cardbus removed, in power down(suspend) and in reset */
if (brcms_deviceremoved(wlc))
@@ -2535,16 +2534,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
return 0;
/* defer unsolicited interrupts */
- macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask);
+ macintstatus &= mask;
/* if not for us */
if (macintstatus == 0)
return 0;
- /* interrupts are already turned off for CFE build
- * Caution: For CFE Turning off the interrupts again has some undesired
- * consequences
- */
/* turn off the interrupts */
bcma_write32(core, D11REGOFFS(macintmask), 0);
(void)bcma_read32(core, D11REGOFFS(macintmask));
@@ -2587,33 +2582,31 @@ bool brcms_c_intrsupd(struct brcms_c_info *wlc)
/*
* First-level interrupt processing.
- * Return true if this was our interrupt, false otherwise.
- * *wantdpc will be set to true if further brcms_c_dpc() processing is required,
+ * Return true if this was our interrupt
+ * and if further brcms_c_dpc() processing is required,
* false otherwise.
*/
-bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
+bool brcms_c_isr(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintstatus;
- *wantdpc = false;
-
if (!wlc_hw->up || !wlc->macintmask)
return false;
/* read and clear macintstatus and intstatus registers */
macintstatus = wlc_intstatus(wlc, true);
- if (macintstatus == 0xffffffff)
- wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code"
- " path\n");
+ if (macintstatus == 0xffffffff) {
+ brcms_err(wlc_hw->d11core,
+ "DEVICEREMOVED detected in the ISR code path\n");
+ return false;
+ }
/* it is not for us */
if (macintstatus == 0)
return false;
- *wantdpc = true;
-
/* save interrupt status bits */
wlc->macintstatus = macintstatus;
@@ -2626,10 +2619,9 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
- struct wiphy *wiphy = wlc->wiphy;
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc_hw->band->bandunit);
+ brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc_hw->band->bandunit);
/*
* Track overlapping suspend requests
@@ -2644,7 +2636,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
@@ -2655,7 +2647,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
@@ -2668,10 +2660,10 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
BRCMS_MAX_MAC_SUSPEND);
if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
- wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
+ brcms_err(core, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
- wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
+ brcms_err(core, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
bcma_read32(core, D11REGOFFS(psmdebug)),
bcma_read32(core, D11REGOFFS(phydebug)),
@@ -2680,7 +2672,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return;
@@ -2696,8 +2688,8 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
- wlc->band->bandunit);
+ brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
+ wlc->band->bandunit);
/*
* Track overlapping suspend requests
@@ -2740,8 +2732,6 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
- BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
-
/* Validate dchip register access */
bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
@@ -2802,7 +2792,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
tmp = 0;
@@ -2818,8 +2808,8 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
- wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
- " PLL failed\n", __func__);
+ brcms_err(core, "%s: turn on PHY PLL failed\n",
+ __func__);
} else {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
tmp | CCS_ERSRC_REQ_D11PLL |
@@ -2835,8 +2825,8 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
- wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on "
- "PHY PLL failed\n", __func__);
+ brcms_err(core, "%s: turn on PHY PLL failed\n",
+ __func__);
}
} else {
/*
@@ -2854,7 +2844,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d: disable core\n", wlc_hw->unit);
dev_gone = brcms_deviceremoved(wlc_hw->wlc);
@@ -2884,12 +2874,14 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
uint i;
/* free any posted tx packets */
- for (i = 0; i < NFIFO; i++)
+ for (i = 0; i < NFIFO; i++) {
if (wlc_hw->di[i]) {
dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
- wlc->core->txpktpend[i] = 0;
- BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i);
+ if (i < TX_BCMC_FIFO)
+ ieee80211_wake_queue(wlc->pub->ieee_hw,
+ brcms_fifo_to_ac(i));
}
+ }
/* free any posted rx packets */
dma_rxreclaim(wlc_hw->di[RX_FIFO]);
@@ -2921,7 +2913,7 @@ brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
if (offset & 2)
objoff += 2;
- bcma_write16(core, objoff, v);
+ bcma_wflush16(core, objoff, v);
}
/*
@@ -3109,7 +3101,7 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
/* check for rx fifo 0 overflow */
delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
if (delta)
- wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n",
+ brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
wlc->pub->unit, delta);
/* check for tx fifo underflows */
@@ -3118,8 +3110,9 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
(u16) (wlc->core->macstat_snapshot->txfunfl[i] -
txfunfl[i]);
if (delta)
- wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!"
- "\n", wlc->pub->unit, delta, i);
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %u tx fifo %d underflows!\n",
+ wlc->pub->unit, delta, i);
}
#endif /* DEBUG */
@@ -3132,8 +3125,6 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
static void brcms_b_reset(struct brcms_hardware *wlc_hw)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
/* reset the core */
if (!brcms_deviceremoved(wlc_hw->wlc))
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -3144,7 +3135,7 @@ static void brcms_b_reset(struct brcms_hardware *wlc_hw)
void brcms_c_reset(struct brcms_c_info *wlc)
{
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* slurp up hw mac counters before core reset */
brcms_c_statsupd(wlc);
@@ -3189,10 +3180,9 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
bool fifosz_fixup = false;
int err = 0;
u16 buf[NFIFO];
- struct wiphy *wiphy = wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(core, "wl%d: core init\n", wlc_hw->unit);
/* reset PSM */
brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
@@ -3212,29 +3202,29 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
MI_MACSSPNDD) == 0), 1000 * 1000);
if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
- wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
+ brcms_err(core, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
sflags = bcma_aread32(core, BCMA_IOST);
- if (D11REV_IS(wlc_hw->corerev, 23)) {
+ if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+ brcms_err(core, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
if (BRCMS_ISLCNPHY(wlc_hw->band))
brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
else
- wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
+ brcms_err(core, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
wlc_hw->corerev);
} else {
- wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n",
+ brcms_err(core, "%s: wl%d: unsupported corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
}
@@ -3276,7 +3266,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
err = -1;
}
if (err != 0)
- wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d"
+ brcms_err(core, "wlc_coreinit: txfifo mismatch: ucode size %d"
" driver size %d index %d\n", buf[i],
wlc_hw->xmtfifo_sz[i], i);
@@ -3359,8 +3349,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
@@ -3453,7 +3441,7 @@ static void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
rate = (rateset->rates[i] & BRCMS_RATE_MASK);
if (rate > BRCM_MAXRATE) {
- wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
+ brcms_err(wlc->hw->d11core, "brcms_c_rate_lookup_init: "
"invalid rate 0x%X in rate set\n",
rateset->rates[i]);
continue;
@@ -3529,7 +3517,6 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
uint parkband;
uint i, band_order[2];
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
/*
* We might have been bandlocked during down and the chip
* power-cycled (hibernate). Figure out the right band to park on
@@ -3710,8 +3697,8 @@ static void brcms_c_set_ratetable(struct brcms_c_info *wlc)
/* band-specific init */
static void brcms_c_bsinit(struct brcms_c_info *wlc)
{
- BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
- wlc->pub->unit, wlc->band->bandunit);
+ brcms_dbg_info(wlc->hw->d11core, "wl%d: bandunit %d\n",
+ wlc->pub->unit, wlc->band->bandunit);
/* write ucode ACK/CTS rate table */
brcms_c_set_ratetable(wlc);
@@ -3734,7 +3721,8 @@ brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
M_TX_IDLE_BUSY_RATIO_X_16_CCK;
if (duty_cycle > 100 || duty_cycle < 0) {
- wiphy_err(wlc->wiphy, "wl%d: duty cycle value off limit\n",
+ brcms_err(wlc->hw->d11core,
+ "wl%d: duty cycle value off limit\n",
wlc->pub->unit);
return -EINVAL;
}
@@ -3752,40 +3740,6 @@ brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
return 0;
}
-/*
- * Initialize the base precedence map for dequeueing
- * from txq based on WME settings
- */
-static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
-{
- wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
- memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
-
- wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
- wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
- wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
- wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
-}
-
-static void
-brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi, bool on, int prio)
-{
- /* transmit flowcontrol is not yet implemented */
-}
-
-static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
-{
- struct brcms_txq_info *qi;
-
- for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
- if (qi->stopped) {
- brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
- qi->stopped = 0;
- }
- }
-}
-
/* push sw hps and wake state through hardware */
static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
{
@@ -3795,7 +3749,8 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
hps = brcms_c_ps_allowed(wlc);
- BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
+ brcms_dbg_mac80211(wlc->hw->d11core, "wl%d: hps %d\n", wlc->pub->unit,
+ hps);
v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
@@ -3881,7 +3836,8 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
{
uint bandunit;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec);
+ brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: 0x%x\n", wlc_hw->unit,
+ chanspec);
wlc_hw->chanspec = chanspec;
@@ -3942,7 +3898,7 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
u16 old_chanspec = wlc->chanspec;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n",
+ brcms_err(wlc->hw->d11core, "wl%d: %s: Bad channel %d\n",
wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
return;
}
@@ -3953,8 +3909,8 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
switchband = true;
if (wlc->bandlocked) {
- wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d "
- "band is locked!\n",
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: chspec %d band is locked!\n",
wlc->pub->unit, __func__,
CHSPEC_CHANNEL(chanspec));
return;
@@ -4018,6 +3974,10 @@ void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
*/
void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
{
+ /*
+ * Cannot use brcms_dbg_* here because this function is called
+ * before wlc is sufficiently initialized.
+ */
BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
switch (idx) {
@@ -4090,8 +4050,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
/* Only apply params if the core is out of reset and has clocks */
if (!wlc->clk) {
- wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit,
- __func__);
+ brcms_err(wlc->hw->d11core, "wl%d: %s : no-clock\n",
+ wlc->pub->unit, __func__);
return;
}
@@ -4109,7 +4069,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
if (acp_shm.aifs < EDCF_AIFSN_MIN
|| acp_shm.aifs > EDCF_AIFSN_MAX) {
- wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
+ brcms_err(wlc->hw->d11core, "wl%d: edcf_setparams: bad "
"aifs %d\n", wlc->pub->unit, acp_shm.aifs);
} else {
acp_shm.cwmin = params->cw_min;
@@ -4224,8 +4184,8 @@ static void brcms_c_radio_timer(void *arg)
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
if (brcms_deviceremoved(wlc)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__);
+ brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
+ wlc->pub->unit, __func__);
brcms_down(wlc->wl);
return;
}
@@ -4238,8 +4198,6 @@ static void brcms_b_watchdog(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
if (!wlc_hw->up)
return;
@@ -4258,14 +4216,14 @@ static void brcms_b_watchdog(struct brcms_c_info *wlc)
/* common watchdog code */
static void brcms_c_watchdog(struct brcms_c_info *wlc)
{
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
if (!wlc->pub->up)
return;
if (brcms_deviceremoved(wlc)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
- __func__);
+ brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
+ wlc->pub->unit, __func__);
brcms_down(wlc->wl);
return;
}
@@ -4437,13 +4395,13 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
struct ssb_sprom *sprom = &core->bus->sprom;
if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
- pcidev->vendor,
- pcidev->device);
+ brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
else
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
- core->bus->boardinfo.vendor,
- core->bus->boardinfo.type);
+ brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ core->bus->boardinfo.vendor,
+ core->bus->boardinfo.type);
wme = true;
@@ -4535,7 +4493,8 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
/* check device id(srom, nvram etc.) to set bands */
if (wlc_hw->deviceid == BCM43224_D11N_ID ||
- wlc_hw->deviceid == BCM43224_D11N_ID_VEN1)
+ wlc_hw->deviceid == BCM43224_D11N_ID_VEN1 ||
+ wlc_hw->deviceid == BCM43224_CHIP_ID)
/* Dualband boards */
wlc_hw->_nbands = 2;
else
@@ -4715,8 +4674,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
goto fail;
}
- BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x\n",
- wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih));
+ brcms_dbg_info(wlc_hw->d11core, "deviceid 0x%x nbands %d board 0x%x\n",
+ wlc_hw->deviceid, wlc_hw->_nbands,
+ ai_get_boardtype(wlc_hw->sih));
return err;
@@ -4836,56 +4796,6 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
bi->flags |= BRCMS_BSS_HT;
}
-static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
-{
- struct brcms_txq_info *qi, *p;
-
- qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
- if (qi != NULL) {
- /*
- * Have enough room for control packets along with HI watermark
- * Also, add room to txq for total psq packets if all the SCBs
- * leave PS mode. The watermark for flowcontrol to OS packets
- * will remain the same
- */
- brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
- 2 * BRCMS_DATAHIWAT + PKTQ_LEN_DEFAULT);
-
- /* add this queue to the the global list */
- p = wlc->tx_queues;
- if (p == NULL) {
- wlc->tx_queues = qi;
- } else {
- while (p->next != NULL)
- p = p->next;
- p->next = qi;
- }
- }
- return qi;
-}
-
-static void brcms_c_txq_free(struct brcms_c_info *wlc,
- struct brcms_txq_info *qi)
-{
- struct brcms_txq_info *p;
-
- if (qi == NULL)
- return;
-
- /* remove the queue from the linked list */
- p = wlc->tx_queues;
- if (p == qi)
- wlc->tx_queues = p->next;
- else {
- while (p != NULL && p->next != qi)
- p = p->next;
- if (p != NULL)
- p->next = p->next->next;
- }
-
- kfree(qi);
-}
-
static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
{
uint i;
@@ -4991,8 +4901,6 @@ uint brcms_c_detach(struct brcms_c_info *wlc)
if (wlc == NULL)
return 0;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
callbacks += brcms_b_detach(wlc);
/* delete software timers */
@@ -5005,10 +4913,6 @@ uint brcms_c_detach(struct brcms_c_info *wlc)
brcms_c_detach_module(wlc);
-
- while (wlc->tx_queues != NULL)
- brcms_c_txq_free(wlc, wlc->tx_queues);
-
brcms_c_detach_mfree(wlc);
return callbacks;
}
@@ -5026,7 +4930,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
if (wlc_hw->wlc->pub->hw_up)
return;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
/*
* Enable pll and xtal, initialize the power control registers,
@@ -5063,7 +4967,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
+ brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
/*
* Enable pll and xtal, initialize the power control registers,
@@ -5077,7 +4981,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
- bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core,
+ bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
true);
/*
@@ -5102,8 +5006,6 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
{
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
wlc_hw->up = true;
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
@@ -5135,7 +5037,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
{
struct ieee80211_channel *ch;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* HW is turned off so don't try to access it */
if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
@@ -5176,8 +5078,8 @@ int brcms_c_up(struct brcms_c_info *wlc)
WL_RADIO_HW_DISABLE);
if (bsscfg->enable && bsscfg->BSS)
- wiphy_err(wlc->wiphy, "wl%d: up"
- ": rfdisable -> "
+ brcms_err(wlc->hw->d11core,
+ "wl%d: up: rfdisable -> "
"bsscfg_disable()\n",
wlc->pub->unit);
}
@@ -5237,8 +5139,6 @@ static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
bool dev_gone;
uint callbacks = 0;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
if (!wlc_hw->up)
return callbacks;
@@ -5265,8 +5165,6 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
uint callbacks = 0;
bool dev_gone;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
if (!wlc_hw->up)
return callbacks;
@@ -5314,14 +5212,14 @@ uint brcms_c_down(struct brcms_c_info *wlc)
uint callbacks = 0;
int i;
bool dev_gone = false;
- struct brcms_txq_info *qi;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
/* check if we are already in the going down path */
if (wlc->going_down) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return"
- "\n", wlc->pub->unit, __func__);
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: Driver going down so return\n",
+ wlc->pub->unit, __func__);
return 0;
}
if (!wlc->pub->up)
@@ -5353,13 +5251,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
- /* clear txq flow control */
- brcms_c_txflowcontrol_reset(wlc);
-
- /* flush tx queues */
- for (qi = wlc->tx_queues; qi != NULL; qi = qi->next)
- brcmu_pktq_flush(&qi->q, true, NULL, NULL);
-
callbacks += brcms_b_down_finish(wlc->hw);
/* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
@@ -5441,7 +5332,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
default:
/* Error */
- wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n",
+ brcms_err(wlc->hw->d11core, "wl%d: %s: invalid gmode %d\n",
wlc->pub->unit, __func__, gmode);
return -ENOTSUPP;
}
@@ -5745,45 +5636,6 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
return -ENODATA;
}
-void brcms_c_print_txstatus(struct tx_status *txs)
-{
- pr_debug("\ntxpkt (MPDU) Complete\n");
-
- pr_debug("FrameID: %04x TxStatus: %04x\n", txs->frameid, txs->status);
-
- pr_debug("[15:12] %d frame attempts\n",
- (txs->status & TX_STATUS_FRM_RTX_MASK) >>
- TX_STATUS_FRM_RTX_SHIFT);
- pr_debug(" [11:8] %d rts attempts\n",
- (txs->status & TX_STATUS_RTS_RTX_MASK) >>
- TX_STATUS_RTS_RTX_SHIFT);
- pr_debug(" [7] %d PM mode indicated\n",
- txs->status & TX_STATUS_PMINDCTD ? 1 : 0);
- pr_debug(" [6] %d intermediate status\n",
- txs->status & TX_STATUS_INTERMEDIATE ? 1 : 0);
- pr_debug(" [5] %d AMPDU\n",
- txs->status & TX_STATUS_AMPDU ? 1 : 0);
- pr_debug(" [4:2] %d Frame Suppressed Reason (%s)\n",
- (txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT,
- (const char *[]) {
- "None",
- "PMQ Entry",
- "Flush request",
- "Previous frag failure",
- "Channel mismatch",
- "Lifetime Expiry",
- "Underflow"
- } [(txs->status & TX_STATUS_SUPR_MASK) >>
- TX_STATUS_SUPR_SHIFT]);
- pr_debug(" [1] %d acked\n",
- txs->status & TX_STATUS_ACK_RCV ? 1 : 0);
-
- pr_debug("LastTxTime: %04x Seq: %04x PHYTxStatus: %04x RxAckRSSI: %04x RxAckSQ: %04x\n",
- txs->lasttxtime, txs->sequence, txs->phyerr,
- (txs->ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT,
- (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
-}
-
static bool brcms_c_chipmatch_pci(struct bcma_device *core)
{
struct pci_dev *pcidev = core->bus->host_pci;
@@ -5795,7 +5647,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
return false;
}
- if (device == BCM43224_D11N_ID_VEN1)
+ if (device == BCM43224_D11N_ID_VEN1 || device == BCM43224_CHIP_ID)
return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
@@ -5832,184 +5684,6 @@ bool brcms_c_chipmatch(struct bcma_device *core)
}
}
-#if defined(DEBUG)
-void brcms_c_print_txdesc(struct d11txh *txh)
-{
- u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
- u16 mtch = le16_to_cpu(txh->MacTxControlHigh);
- u16 mfc = le16_to_cpu(txh->MacFrameControl);
- u16 tfest = le16_to_cpu(txh->TxFesTimeNormal);
- u16 ptcw = le16_to_cpu(txh->PhyTxControlWord);
- u16 ptcw_1 = le16_to_cpu(txh->PhyTxControlWord_1);
- u16 ptcw_1_Fbr = le16_to_cpu(txh->PhyTxControlWord_1_Fbr);
- u16 ptcw_1_Rts = le16_to_cpu(txh->PhyTxControlWord_1_Rts);
- u16 ptcw_1_FbrRts = le16_to_cpu(txh->PhyTxControlWord_1_FbrRts);
- u16 mainrates = le16_to_cpu(txh->MainRates);
- u16 xtraft = le16_to_cpu(txh->XtraFrameTypes);
- u8 *iv = txh->IV;
- u8 *ra = txh->TxFrameRA;
- u16 tfestfb = le16_to_cpu(txh->TxFesTimeFallback);
- u8 *rtspfb = txh->RTSPLCPFallback;
- u16 rtsdfb = le16_to_cpu(txh->RTSDurFallback);
- u8 *fragpfb = txh->FragPLCPFallback;
- u16 fragdfb = le16_to_cpu(txh->FragDurFallback);
- u16 mmodelen = le16_to_cpu(txh->MModeLen);
- u16 mmodefbrlen = le16_to_cpu(txh->MModeFbrLen);
- u16 tfid = le16_to_cpu(txh->TxFrameID);
- u16 txs = le16_to_cpu(txh->TxStatus);
- u16 mnmpdu = le16_to_cpu(txh->MaxNMpdus);
- u16 mabyte = le16_to_cpu(txh->MaxABytes_MRT);
- u16 mabyte_f = le16_to_cpu(txh->MaxABytes_FBR);
- u16 mmbyte = le16_to_cpu(txh->MinMBytes);
-
- u8 *rtsph = txh->RTSPhyHeader;
- struct ieee80211_rts rts = txh->rts_frame;
-
- /* add plcp header along with txh descriptor */
- brcmu_dbg_hex_dump(txh, sizeof(struct d11txh) + 48,
- "Raw TxDesc + plcp header:\n");
-
- pr_debug("TxCtlLow: %04x ", mtcl);
- pr_debug("TxCtlHigh: %04x ", mtch);
- pr_debug("FC: %04x ", mfc);
- pr_debug("FES Time: %04x\n", tfest);
- pr_debug("PhyCtl: %04x%s ", ptcw,
- (ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
- pr_debug("PhyCtl_1: %04x ", ptcw_1);
- pr_debug("PhyCtl_1_Fbr: %04x\n", ptcw_1_Fbr);
- pr_debug("PhyCtl_1_Rts: %04x ", ptcw_1_Rts);
- pr_debug("PhyCtl_1_Fbr_Rts: %04x\n", ptcw_1_FbrRts);
- pr_debug("MainRates: %04x ", mainrates);
- pr_debug("XtraFrameTypes: %04x ", xtraft);
- pr_debug("\n");
-
- print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
- print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
- ra, sizeof(txh->TxFrameRA));
-
- pr_debug("Fb FES Time: %04x ", tfestfb);
- print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
- rtspfb, sizeof(txh->RTSPLCPFallback));
- pr_debug("RTS DUR: %04x ", rtsdfb);
- print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
- fragpfb, sizeof(txh->FragPLCPFallback));
- pr_debug("DUR: %04x", fragdfb);
- pr_debug("\n");
-
- pr_debug("MModeLen: %04x ", mmodelen);
- pr_debug("MModeFbrLen: %04x\n", mmodefbrlen);
-
- pr_debug("FrameID: %04x\n", tfid);
- pr_debug("TxStatus: %04x\n", txs);
-
- pr_debug("MaxNumMpdu: %04x\n", mnmpdu);
- pr_debug("MaxAggbyte: %04x\n", mabyte);
- pr_debug("MaxAggbyte_fb: %04x\n", mabyte_f);
- pr_debug("MinByte: %04x\n", mmbyte);
-
- print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
- rtsph, sizeof(txh->RTSPhyHeader));
- print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
- (u8 *)&rts, sizeof(txh->rts_frame));
- pr_debug("\n");
-}
-#endif /* defined(DEBUG) */
-
-#if defined(DEBUG)
-static int
-brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
- int len)
-{
- int i;
- char *p = buf;
- char hexstr[16];
- int slen = 0, nlen = 0;
- u32 bit;
- const char *name;
-
- if (len < 2 || !buf)
- return 0;
-
- buf[0] = '\0';
-
- for (i = 0; flags != 0; i++) {
- bit = bd[i].bit;
- name = bd[i].name;
- if (bit == 0 && flags != 0) {
- /* print any unnamed bits */
- snprintf(hexstr, 16, "0x%X", flags);
- name = hexstr;
- flags = 0; /* exit loop */
- } else if ((flags & bit) == 0)
- continue;
- flags &= ~bit;
- nlen = strlen(name);
- slen += nlen;
- /* count btwn flag space */
- if (flags != 0)
- slen += 1;
- /* need NULL char as well */
- if (len <= slen)
- break;
- /* copy NULL char but don't count it */
- strncpy(p, name, nlen + 1);
- p += nlen;
- /* copy btwn flag space and NULL char */
- if (flags != 0)
- p += snprintf(p, 2, " ");
- len -= slen;
- }
-
- /* indicate the str was too short */
- if (flags != 0) {
- if (len < 2)
- p -= 2 - len; /* overwrite last char */
- p += snprintf(p, 2, ">");
- }
-
- return (int)(p - buf);
-}
-#endif /* defined(DEBUG) */
-
-#if defined(DEBUG)
-void brcms_c_print_rxh(struct d11rxhdr *rxh)
-{
- u16 len = rxh->RxFrameSize;
- u16 phystatus_0 = rxh->PhyRxStatus_0;
- u16 phystatus_1 = rxh->PhyRxStatus_1;
- u16 phystatus_2 = rxh->PhyRxStatus_2;
- u16 phystatus_3 = rxh->PhyRxStatus_3;
- u16 macstatus1 = rxh->RxStatus1;
- u16 macstatus2 = rxh->RxStatus2;
- char flagstr[64];
- char lenbuf[20];
- static const struct brcms_c_bit_desc macstat_flags[] = {
- {RXS_FCSERR, "FCSErr"},
- {RXS_RESPFRAMETX, "Reply"},
- {RXS_PBPRES, "PADDING"},
- {RXS_DECATMPT, "DeCr"},
- {RXS_DECERR, "DeCrErr"},
- {RXS_BCNSENT, "Bcn"},
- {0, NULL}
- };
-
- brcmu_dbg_hex_dump(rxh, sizeof(struct d11rxhdr), "Raw RxDesc:\n");
-
- brcms_c_format_flags(macstat_flags, macstatus1, flagstr, 64);
-
- snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
-
- pr_debug("RxFrameSize: %6s (%d)%s\n", lenbuf, len,
- (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
- pr_debug("RxPHYStatus: %04x %04x %04x %04x\n",
- phystatus_0, phystatus_1, phystatus_2, phystatus_3);
- pr_debug("RxMACStatus: %x %s\n", macstatus1, flagstr);
- pr_debug("RXMACaggtype: %x\n",
- (macstatus2 & RXS_AGGTYPE_MASK));
- pr_debug("RxTSFTime: %04x\n", rxh->RxTSFTime);
-}
-#endif /* defined(DEBUG) */
-
u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
{
u16 table_ptr;
@@ -6033,86 +5707,6 @@ u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
}
-static bool
-brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
- struct sk_buff *pkt, int prec, bool head)
-{
- struct sk_buff *p;
- int eprec = -1; /* precedence to evict from */
-
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(q, prec))
- eprec = prec;
- else if (pktq_full(q)) {
- p = brcmu_pktq_peek_tail(q, &eprec);
- if (eprec > prec) {
- wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
- "\n", __func__, eprec, prec);
- return false;
- }
- }
-
- /* Evict if needed */
- if (eprec >= 0) {
- bool discard_oldest;
-
- discard_oldest = ac_bitmap_tst(0, eprec);
-
- /* Refuse newer packet unless configured to discard oldest */
- if (eprec == prec && !discard_oldest) {
- wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d"
- "\n", __func__, prec);
- return false;
- }
-
- /* Evict packet according to discard policy */
- p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
- brcmu_pktq_pdeq_tail(q, eprec);
- brcmu_pkt_buf_free_skb(p);
- }
-
- /* Enqueue */
- if (head)
- p = brcmu_pktq_penq_head(q, prec, pkt);
- else
- p = brcmu_pktq_penq(q, prec, pkt);
-
- return true;
-}
-
-/*
- * Attempts to queue a packet onto a multiple-precedence queue,
- * if necessary evicting a lower precedence packet from the queue.
- *
- * 'prec' is the precedence number that has already been mapped
- * from the packet priority.
- *
- * Returns true if packet consumed (queued), false if not.
- */
-static bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
- struct sk_buff *pkt, int prec)
-{
- return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
-}
-
-void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
- struct sk_buff *sdu, uint prec)
-{
- struct brcms_txq_info *qi = wlc->pkt_queue; /* Check me */
- struct pktq *q = &qi->q;
- int prio;
-
- prio = sdu->priority;
-
- if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
- /*
- * we might hit this condtion in case
- * packet flooding from mac80211 stack
- */
- brcmu_pkt_buf_free_skb(sdu);
- }
-}
-
/*
* bcmc_fid_generate:
* Generate frame ID for a BCMC packet. The frag field is not used
@@ -6140,8 +5734,6 @@ brcms_c_calc_ack_time(struct brcms_c_info *wlc, u32 rspec,
{
uint dur = 0;
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type);
/*
* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
* is less than or equal to the rate of the immediately previous
@@ -6159,8 +5751,6 @@ static uint
brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
u8 preamble_type)
{
- BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
- wlc->pub->unit, rspec, preamble_type);
return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
}
@@ -6168,8 +5758,6 @@ static uint
brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
u8 preamble_type)
{
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
- "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
/*
* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
* is less than or equal to the rate of the immediately previous
@@ -6223,9 +5811,6 @@ brcms_c_calc_frame_len(struct brcms_c_info *wlc, u32 ratespec,
uint nsyms, mac_len, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
- BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
- wlc->pub->unit, ratespec, preamble_type, dur);
-
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -6292,7 +5877,7 @@ static bool brcms_c_valid_rate(struct brcms_c_info *wlc, u32 rspec, int band,
return true;
error:
if (verbose)
- wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
+ brcms_err(wlc->hw->d11core, "wl%d: valid_rate: rate spec 0x%x "
"not in hw_rateset\n", wlc->pub->unit, rspec);
return false;
@@ -6302,6 +5887,7 @@ static u32
mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
u32 int_val)
{
+ struct bcma_device *core = wlc->hw->d11core;
u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
u8 rate = int_val & NRATE_RATE_MASK;
u32 rspec;
@@ -6318,7 +5904,7 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
/* mcs only allowed when nmode */
if (stf > PHY_TXC1_MODE_SDM) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
+ brcms_err(core, "wl%d: %s: Invalid stf\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
@@ -6329,8 +5915,8 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if (!CHSPEC_IS40(wlc->home_chanspec) ||
((stf != PHY_TXC1_MODE_SISO)
&& (stf != PHY_TXC1_MODE_CDD))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
- "32\n", wlc->pub->unit, __func__);
+ brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
+ wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
@@ -6338,9 +5924,9 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
/* mcs > 7 must use stf SDM */
if (stf != PHY_TXC1_MODE_SDM) {
- BCMMSG(wlc->wiphy, "wl%d: enabling "
- "SDM mode for mcs %d\n",
- wlc->pub->unit, rate);
+ brcms_dbg_mac80211(core, "wl%d: enabling "
+ "SDM mode for mcs %d\n",
+ wlc->pub->unit, rate);
stf = PHY_TXC1_MODE_SDM;
}
} else {
@@ -6351,15 +5937,15 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if ((stf > PHY_TXC1_MODE_STBC) ||
(!BRCMS_STBC_CAP_PHY(wlc)
&& (stf == PHY_TXC1_MODE_STBC))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
- "\n", wlc->pub->unit, __func__);
+ brcms_err(core, "wl%d: %s: Invalid STBC\n",
+ wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
}
} else if (is_ofdm_rate(rate)) {
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
+ brcms_err(core, "wl%d: %s: Invalid OFDM\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
@@ -6367,20 +5953,20 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
} else if (is_cck_rate(rate)) {
if ((cur_band->bandtype != BRCM_BAND_2G)
|| (stf != PHY_TXC1_MODE_SISO)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
+ brcms_err(core, "wl%d: %s: Invalid CCK\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
} else {
- wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
+ brcms_err(core, "wl%d: %s: Unknown rate type\n",
wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
+ brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
"request\n", wlc->pub->unit, __func__);
bcmerror = -EINVAL;
goto done;
@@ -6449,7 +6035,7 @@ static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
break;
default:
- wiphy_err(wlc->wiphy,
+ brcms_err(wlc->hw->d11core,
"brcms_c_cck_plcp_set: unsupported rate %d\n",
rate_500);
rate_500 = BRCM_RATE_1M;
@@ -6582,7 +6168,7 @@ static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
bw = rspec_get_bw(rspec);
/* 10Mhz is not supported yet */
if (bw < PHY_TXC1_BW_20MHZ) {
- wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
+ brcms_err(wlc->hw->d11core, "phytxctl1_calc: bw %d is "
"not supported yet, set to 20L\n", bw);
bw = PHY_TXC1_BW_20MHZ;
}
@@ -6609,7 +6195,7 @@ static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
/* get the phyctl byte from rate phycfg table */
phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
if (phycfg == -1) {
- wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
+ brcms_err(wlc->hw->d11core, "phytxctl1_calc: wrong "
"legacy OFDM/CCK rate\n");
phycfg = 0;
}
@@ -6689,8 +6275,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
/* non-AP STA should never use BCMC queue */
if (queue == TX_BCMC_FIFO) {
- wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
- "TX_BCMC!\n", wlc->pub->unit, __func__);
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: ASSERT queue == TX_BCMC!\n",
+ wlc->pub->unit, __func__);
frameid = bcmc_fid_generate(wlc, NULL, txh);
} else {
/* Increment the counter for first fragment */
@@ -6860,7 +6447,8 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!is_mcs_rate(rspec[k]))) {
- wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: IEEE80211_TX_"
"RC_MCS != is_mcs_rate(rspec)\n",
wlc->pub->unit, __func__);
}
@@ -7254,14 +6842,16 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
wlc->fragthresh[queue] =
(u16) newfragthresh;
} else {
- wiphy_err(wlc->wiphy, "wl%d: %s txop invalid "
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s txop invalid "
"for rate %d\n",
wlc->pub->unit, fifo_names[queue],
rspec2rate(rspec[0]));
}
if (dur > wlc->edcf_txop[ac])
- wiphy_err(wlc->wiphy, "wl%d: %s: %s txop "
+ brcms_err(wlc->hw->d11core,
+ "wl%d: %s: %s txop "
"exceeded phylen %d/%d dur %d/%d\n",
wlc->pub->unit, __func__,
fifo_names[queue],
@@ -7273,79 +6863,33 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
return 0;
}
-void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
- struct ieee80211_hw *hw)
-{
- u8 prio;
- uint fifo;
- struct scb *scb = &wlc->pri_scb;
- struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data);
-
- /*
- * 802.11 standard requires management traffic
- * to go at highest priority
- */
- prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
- MAXPRIO;
- fifo = prio2fifo[prio];
- if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
- return;
- brcms_c_txq_enq(wlc, scb, sdu, BRCMS_PRIO_TO_PREC(prio));
- brcms_c_send_q(wlc);
-}
-
-void brcms_c_send_q(struct brcms_c_info *wlc)
+static int brcms_c_tx(struct brcms_c_info *wlc, struct sk_buff *skb)
{
- struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
- int prec;
- u16 prec_map;
- int err = 0, i, count;
- uint fifo;
- struct brcms_txq_info *qi = wlc->pkt_queue;
- struct pktq *q = &qi->q;
- struct ieee80211_tx_info *tx_info;
-
- prec_map = wlc->tx_prec_map;
+ struct dma_pub *dma;
+ int fifo, ret = -ENOSPC;
+ struct d11txh *txh;
+ u16 frameid = INVALIDFID;
- /* Send all the enq'd pkts that we can.
- * Dequeue packets with precedence with empty HW fifo only
- */
- while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
- tx_info = IEEE80211_SKB_CB(pkt[0]);
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
- } else {
- count = 1;
- err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
- if (!err) {
- for (i = 0; i < count; i++)
- brcms_c_txfifo(wlc, fifo, pkt[i], true,
- 1);
- }
- }
+ fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
+ dma = wlc->hw->di[fifo];
+ txh = (struct d11txh *)(skb->data);
- if (err == -EBUSY) {
- brcmu_pktq_penq_head(q, prec, pkt[0]);
- /*
- * If send failed due to any other reason than a
- * change in HW FIFO condition, quit. Otherwise,
- * read the new prec_map!
- */
- if (prec_map == wlc->tx_prec_map)
- break;
- prec_map = wlc->tx_prec_map;
- }
+ if (dma->txavail == 0) {
+ /*
+ * We sometimes get a frame from mac80211 after stopping
+ * the queues. This only ever seems to be a single frame
+ * and is seems likely to be a race. TX_HEADROOM should
+ * ensure that we have enough space to handle these stray
+ * packets, so warn if there isn't. If we're out of space
+ * in the tx ring and the tx queue isn't stopped then
+ * we've really got a bug; warn loudly if that happens.
+ */
+ brcms_warn(wlc->hw->d11core,
+ "Received frame for tx with no space in DMA ring\n");
+ WARN_ON(!ieee80211_queue_stopped(wlc->pub->ieee_hw,
+ skb_get_queue_mapping(skb)));
+ return -ENOSPC;
}
-}
-
-void
-brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
- bool commit, s8 txpktpend)
-{
- u16 frameid = INVALIDFID;
- struct d11txh *txh;
-
- txh = (struct d11txh *) (p->data);
/* When a BC/MC frame is being committed to the BCMC fifo
* via DMA (NOT PIO), update ucode or BSS info as appropriate.
@@ -7353,16 +6897,6 @@ brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
if (fifo == TX_BCMC_FIFO)
frameid = le16_to_cpu(txh->TxFrameID);
- /*
- * Bump up pending count for if not using rpc. If rpc is
- * used, this will be handled in brcms_b_txfifo()
- */
- if (commit) {
- wlc->core->txpktpend[fifo] += txpktpend;
- BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
- txpktpend, wlc->core->txpktpend[fifo]);
- }
-
/* Commit BCMC sequence number in the SHM frame ID location */
if (frameid != INVALIDFID) {
/*
@@ -7372,8 +6906,55 @@ brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
}
- if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0)
+ ret = brcms_c_txfifo(wlc, fifo, skb);
+ /*
+ * The only reason for brcms_c_txfifo to fail is that
+ * there weren't any DMA descriptors, but we've already
+ * checked for that. So if it does fail, yell loudly.
+ */
+ WARN_ON_ONCE(ret);
+
+ return ret;
+}
+
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+ struct ieee80211_hw *hw)
+{
+ uint fifo;
+ struct scb *scb = &wlc->pri_scb;
+
+ fifo = brcms_ac_to_fifo(skb_get_queue_mapping(sdu));
+ brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0);
+ if (!brcms_c_tx(wlc, sdu))
+ return true;
+
+ /* packet discarded */
+ dev_kfree_skb_any(sdu);
+ return false;
+}
+
+int
+brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p)
+{
+ struct dma_pub *dma = wlc->hw->di[fifo];
+ int ret;
+ u16 queue;
+
+ ret = dma_txfast(wlc, dma, p);
+ if (ret < 0)
wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
+
+ /*
+ * Stop queue if DMA ring is full. Reserve some free descriptors,
+ * as we sometimes receive a frame from mac80211 after the queues
+ * are stopped.
+ */
+ queue = skb_get_queue_mapping(p);
+ if (dma->txavail <= TX_HEADROOM && fifo < TX_BCMC_FIFO &&
+ !ieee80211_queue_stopped(wlc->pub->ieee_hw, queue))
+ ieee80211_stop_queue(wlc->pub->ieee_hw, queue);
+
+ return ret;
}
u32
@@ -7423,19 +7004,6 @@ brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
return rts_rspec;
}
-void
-brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
-{
- wlc->core->txpktpend[fifo] -= txpktpend;
- BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
- wlc->core->txpktpend[fifo]);
-
- /* There is more room; mark precedences related to this FIFO sendable */
- wlc->tx_prec_map |= wlc->fifo2prec_map[fifo];
-
- /* figure out which bsscfg is being worked on... */
-}
-
/* Update beacon listen interval in shared memory */
static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
{
@@ -7508,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
/* fill in TSF and flag its presence */
rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
- rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
@@ -7571,7 +7139,8 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
rx_status->rate_idx = 11;
break;
default:
- wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__);
+ brcms_err(wlc->hw->d11core,
+ "%s: Unknown rate\n", __func__);
}
/*
@@ -7590,7 +7159,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
} else if (is_ofdm_rate(rspec)) {
rx_status->flag |= RX_FLAG_SHORTPRE;
} else {
- wiphy_err(wlc->wiphy, "%s: Unknown modulation\n",
+ brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n",
__func__);
}
}
@@ -7600,12 +7169,12 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
if (rxh->RxStatus1 & RXS_DECERR) {
rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
- wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
+ brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
__func__);
}
if (rxh->RxStatus1 & RXS_FCSERR) {
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_FCS_CRC\n",
+ brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_FCS_CRC\n",
__func__);
}
}
@@ -7649,9 +7218,6 @@ brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
{
uint nsyms, len = 0, kNdps;
- BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
- wlc->pub->unit, rspec2rate(ratespec), mac_len);
-
if (is_mcs_rate(ratespec)) {
uint mcs = ratespec & RSPEC_RATE_MASK;
int tot_streams = (mcs_2_txstreams(mcs) + 1) +
@@ -7883,35 +7449,6 @@ void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
}
-/* prepares pdu for transmission. returns BCM error codes */
-int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
-{
- uint fifo;
- struct d11txh *txh;
- struct ieee80211_hdr *h;
- struct scb *scb;
-
- txh = (struct d11txh *) (pdu->data);
- h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
-
- /* get the pkt queue info. This was put at brcms_c_sendctl or
- * brcms_c_send for PDU */
- fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
-
- scb = NULL;
-
- *fifop = fifo;
-
- /* return if insufficient dma resources */
- if (*wlc->core->txavail[fifo] < MAX_DMA_SEGS) {
- /* Mark precedences related to this FIFO, unsendable */
- /* A fifo is full. Clear precedences related to that FIFO */
- wlc->tx_prec_map &= ~(wlc->fifo2prec_map[fifo]);
- return -EBUSY;
- }
- return 0;
-}
-
int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
uint *blocks)
{
@@ -7974,23 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
- int timeout = 20;
-
- /* flush packet queue when requested */
- if (drop)
- brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
-
- /* wait for queue and DMA fifos to run dry */
- while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
+ int i;
- if (--timeout == 0)
- break;
- }
+ /* Kick DMA to send any pending AMPDU */
+ for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
+ if (wlc->hw->di[i])
+ dma_kick_tx(wlc->hw->di[i]);
- WARN_ON_ONCE(timeout == 0);
+ return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@@ -8032,8 +7562,6 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
uint len;
bool is_amsdu;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
/* frame starts with rxhdr */
rxh = (struct d11rxhdr *) (p->data);
@@ -8043,8 +7571,9 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
if (rxh->RxStatus1 & RXS_PBPRES) {
if (p->len < 2) {
- wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of "
- "len %d\n", wlc->pub->unit, p->len);
+ brcms_err(wlc->hw->d11core,
+ "wl%d: recv: rcvd runt of len %d\n",
+ wlc->pub->unit, p->len);
goto toss;
}
skb_pull(p, 2);
@@ -8088,17 +7617,19 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
+ bool morepending;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
skb_queue_head_init(&recv_frames);
/* gather received frames */
- while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
-
+ do {
/* !give others some time to run! */
- if (++n >= bound_limit)
+ if (n >= bound_limit)
break;
- }
+
+ morepending = dma_rx(wlc_hw->di[fifo], &recv_frames);
+ n++;
+ } while (morepending);
/* post more rbufs */
dma_rxfill(wlc_hw->di[fifo]);
@@ -8128,7 +7659,7 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
brcms_c_recv(wlc_hw->wlc, p);
}
- return n >= bound_limit;
+ return morepending;
}
/* second-level interrupt processing
@@ -8140,10 +7671,9 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
- struct wiphy *wiphy = wlc->wiphy;
if (brcms_deviceremoved(wlc)) {
- wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
+ brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
brcms_down(wlc->wl);
return false;
@@ -8153,8 +7683,8 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
macintstatus = wlc->macintstatus;
wlc->macintstatus = 0;
- BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n",
- wlc_hw->unit, macintstatus);
+ brcms_dbg_int(core, "wl%d: macintstatus 0x%x\n",
+ wlc_hw->unit, macintstatus);
WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
@@ -8164,7 +7694,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
wlc->macintstatus |= MI_TFS;
if (fatal) {
- wiphy_err(wiphy, "MI_TFS: fatal\n");
+ brcms_err(core, "MI_TFS: fatal\n");
goto fatal;
}
}
@@ -8174,7 +7704,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
- BCMMSG(wlc->wiphy, "end of ATIM window\n");
+ brcms_dbg_info(core, "end of ATIM window\n");
bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
@@ -8192,7 +7722,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
wlc_phy_noise_sample_intr(wlc_hw->band->pi);
if (macintstatus & MI_GP0) {
- wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
+ brcms_err(core, "wl%d: PSM microcode watchdog fired at %d "
"(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
@@ -8206,15 +7736,11 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
- BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
- " RF Disable Input\n", wlc_hw->unit);
+ brcms_dbg_info(core, "wl%d: BMAC Detected a change on the"
+ " RF Disable Input\n", wlc_hw->unit);
brcms_rfkill_set_hw_state(wlc->wl);
}
- /* send any enq'd tx packets. Just makes sure to jump start tx */
- if (!pktq_empty(&wlc->pkt_queue->q))
- brcms_c_send_q(wlc);
-
/* it isn't done and needs to be resched if macintstatus is non-zero */
return wlc->macintstatus != 0;
@@ -8229,7 +7755,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
u16 chanspec;
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
+ brcms_dbg_info(core, "wl%d\n", wlc->pub->unit);
chanspec = ch20mhz_chspec(ch->hw_value);
@@ -8286,9 +7812,6 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
- /* Init precedence maps for empty FIFOs */
- brcms_c_tx_prec_map_init(wlc);
-
/* read the ucode version if we have not yet done so */
if (wlc->ucode_rev == 0) {
wlc->ucode_rev =
@@ -8303,9 +7826,6 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
if (mute_tx)
brcms_b_mute(wlc->hw, true);
- /* clear tx flow control */
- brcms_c_txflowcontrol_reset(wlc);
-
/* enable the RF Disable Delay timer */
bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
@@ -8464,15 +7984,6 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
* Complete the wlc default state initializations..
*/
- /* allocate our initial queue */
- wlc->pkt_queue = brcms_c_txq_alloc(wlc);
- if (wlc->pkt_queue == NULL) {
- wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
- unit, __func__);
- err = 100;
- goto fail;
- }
-
wlc->bsscfg->wlc = wlc;
wlc->mimoft = FT_HT;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index 8debc74c54e1..fb447747c2c6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -101,9 +101,6 @@
#define DATA_BLOCK_TX_SUPR (1 << 4)
-/* 802.1D Priority to TX FIFO number for wme */
-extern const u8 prio2fifo[];
-
/* Ucode MCTL_WAKE override bits */
#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01
#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02
@@ -242,7 +239,6 @@ struct brcms_core {
/* fifo */
uint *txavail[NFIFO]; /* # tx descriptors available */
- s16 txpktpend[NFIFO]; /* tx admission control */
struct macstat *macstat_snapshot; /* mac hw prev read values */
};
@@ -382,19 +378,6 @@ struct brcms_hardware {
*/
};
-/* TX Queue information
- *
- * Each flow of traffic out of the device has a TX Queue with independent
- * flow control. Several interfaces may be associated with a single TX Queue
- * if they belong to the same flow of traffic from the device. For multi-channel
- * operation there are independent TX Queues for each channel.
- */
-struct brcms_txq_info {
- struct brcms_txq_info *next;
- struct pktq q;
- uint stopped; /* tx flow control bits */
-};
-
/*
* Principal common driver data structure.
*
@@ -435,11 +418,8 @@ struct brcms_txq_info {
* WDlast: last time wlc_watchdog() was called.
* edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits.
- * tx_prec_map: Precedence map based on HW FIFO space.
- * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
* bsscfg: set of BSS configurations, idx 0 is default and always valid.
* cfg: the primary bsscfg (can be AP or STA).
- * tx_queues: common TX Queue list.
* modulecb:
* mimoft: SIGN or 11N.
* cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode.
@@ -469,7 +449,6 @@ struct brcms_txq_info {
* tempsense_lasttime;
* tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM.
* tx_duty_cycle_cck: maximum allowed duty cycle for CCK.
- * pkt_queue: txq for transmit packets.
* wiphy:
* pri_scb: primary Station Control Block
*/
@@ -533,14 +512,9 @@ struct brcms_c_info {
u16 edcf_txop[IEEE80211_NUM_ACS];
u16 wme_retries[IEEE80211_NUM_ACS];
- u16 tx_prec_map;
- u16 fifo2prec_map[NFIFO];
struct brcms_bss_cfg *bsscfg;
- /* tx queue */
- struct brcms_txq_info *tx_queues;
-
struct modulecb *modulecb;
u8 mimoft;
@@ -585,7 +559,6 @@ struct brcms_c_info {
u16 tx_duty_cycle_ofdm;
u16 tx_duty_cycle_cck;
- struct brcms_txq_info *pkt_queue;
struct wiphy *wiphy;
struct scb pri_scb;
};
@@ -637,30 +610,13 @@ struct brcms_bss_cfg {
struct brcms_bss_info *current_bss;
};
-extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
- struct sk_buff *p,
- bool commit, s8 txpktpend);
-extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
- s8 txpktpend);
-extern void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
- struct sk_buff *sdu, uint prec);
-extern void brcms_c_print_txstatus(struct tx_status *txs);
+extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
+ struct sk_buff *p);
extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
uint *blocks);
-#if defined(DEBUG)
-extern void brcms_c_print_txdesc(struct d11txh *txh);
-#else
-static inline void brcms_c_print_txdesc(struct d11txh *txh)
-{
-}
-#endif
-
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern void brcms_c_send_q(struct brcms_c_info *wlc);
-extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
- uint *fifo);
extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
uint mac_len);
extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index abfd78822fb8..21a824232478 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
- ((lna2 &
- 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+ ((lna2 & 0x3) << 4) |
+ ((lna1 & 0x3) << 2) |
+ ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1156,6 +1157,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
}
mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
+ mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
+ mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
}
@@ -1328,6 +1331,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
+static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
+ u16 tia_gain, u16 lna2_gain)
+{
+ u32 i_thresh_l, q_thresh_l;
+ u32 i_thresh_h, q_thresh_h;
+ struct lcnphy_iq_est iq_est_h, iq_est_l;
+
+ wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
+ lna2_gain, 0);
+
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+ wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
+ return false;
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
+ return false;
+
+ i_thresh_l = (iq_est_l.i_pwr << 1);
+ i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
+
+ q_thresh_l = (iq_est_l.q_pwr << 1);
+ q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
+ if ((iq_est_h.i_pwr > i_thresh_l) &&
+ (iq_est_h.i_pwr < i_thresh_h) &&
+ (iq_est_h.q_pwr > q_thresh_l) &&
+ (iq_est_h.q_pwr < q_thresh_h))
+ return true;
+
+ return false;
+}
+
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1382,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain;
- u32 received_power, rx_pwr_threshold;
+ int tia_gain, lna2_gain, biq1_gain;
+ bool set_gain;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@@ -1368,126 +1408,134 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
- if (module == 1) {
+ WARN_ON(module != 1);
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
- for (i = 0; i < 11; i++)
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- wlc_lcnphy_rx_gain_override_enable(pi, true);
-
- tia_gain = 8;
- rx_pwr_threshold = 950;
- while (tia_gain > 0) {
- tia_gain -= 1;
- wlc_lcnphy_set_rx_gain_by_distribution(pi,
- 0, 0, 2, 2,
- (u16)
- tia_gain, 1, 0);
- udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
- received_power =
- wlc_lcnphy_measure_digital_power(pi, 2000);
- if (received_power < rx_pwr_threshold)
- break;
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ set_gain = false;
+
+ lna2_gain = 3;
+ while ((lna2_gain >= 0) && !set_gain) {
+ tia_gain = 4;
+
+ while ((tia_gain >= 0) && !set_gain) {
+ biq1_gain = 6;
+
+ while ((biq1_gain >= 0) && !set_gain) {
+ set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
+ (u16)
+ biq1_gain,
+ (u16)
+ tia_gain,
+ (u16)
+ lna2_gain);
+ biq1_gain -= 1;
+ }
+ tia_gain -= 1;
}
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+ lna2_gain -= 1;
+ }
- wlc_lcnphy_stop_tx_tone(pi);
+ if (set_gain)
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
+ else
+ result = false;
- write_phy_reg(pi, 0x631, Core1TxControl_old);
+ wlc_lcnphy_stop_tx_tone(pi);
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
- wlc_lcnphy_clear_trsw_override(pi);
+ wlc_lcnphy_clear_trsw_override(pi);
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
- for (i = 0; i < 11; i++)
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
- if (tx_gain_override_old)
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- else
- wlc_lcnphy_disable_tx_gain_override(pi);
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
- wlc_lcnphy_rx_gain_override_enable(pi, false);
- }
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
@@ -1781,6 +1829,17 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
+ 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
+
+ write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
+ write_radio_reg(pi, RADIO_2064_REG091, 0x3);
+ write_radio_reg(pi, RADIO_2064_REG038, 0x3);
+
+ write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
+ }
}
static int
@@ -1860,41 +1919,6 @@ wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
return (filt_index != -1) ? 0 : -1;
}
-void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
-{
- u8 channel = CHSPEC_CHANNEL(chanspec);
-
- wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
-
- wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
-
- or_phy_reg(pi, 0x44a, 0x44);
- write_phy_reg(pi, 0x44a, 0x80);
-
- wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
- udelay(1000);
-
- wlc_lcnphy_toggle_afe_pwdn(pi);
-
- write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20);
- write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor);
-
- if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
- mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
-
- wlc_lcnphy_load_tx_iir_filter(pi, false, 3);
- } else {
- mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
-
- wlc_lcnphy_load_tx_iir_filter(pi, false, 2);
- }
-
- wlc_lcnphy_load_tx_iir_filter(pi, true, 0);
-
- mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
-
-}
-
static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi)
{
u16 pa_gain;
@@ -1936,6 +1960,21 @@ static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
wlc_lcnphy_enable_tx_gain_override(pi);
}
+static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
+{
+ u16 m0m1;
+ struct phytbl_info tab;
+
+ tab.tbl_ptr = &m0m1;
+ tab.tbl_len = 1;
+ tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
+ tab.tbl_offset = 87;
+ tab.tbl_width = 16;
+ wlc_lcnphy_read_table(pi, &tab);
+
+ return (u8) ((m0m1 & 0xff00) >> 8);
+}
+
static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0)
{
u16 m0m1 = (u16) m0 << 8;
@@ -1995,6 +2034,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
+ mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
+ mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
+ mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2081,12 +2130,14 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
+ mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
+ u8 tssi_sel;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
@@ -2108,7 +2159,13 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ if (pi->sh->boardflags & BFL_FEM) {
+ tssi_sel = 0x1;
+ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ } else {
+ tssi_sel = 0xe;
+ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
+ }
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2144,9 +2201,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@@ -2193,6 +2251,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@@ -2811,6 +2873,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
+ u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
+
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@@ -2828,6 +2892,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
+
+ mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
+ mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
+
+ wlc_lcnphy_set_bbmult(pi, 0x0);
+
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@@ -2849,6 +2919,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
+ wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3062,6 +3133,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
+ mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@@ -3075,21 +3151,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlapi_enable_mac(pi->sh->physhim);
}
-static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
-{
- u16 m0m1;
- struct phytbl_info tab;
-
- tab.tbl_ptr = &m0m1;
- tab.tbl_len = 1;
- tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
- tab.tbl_offset = 87;
- tab.tbl_width = 16;
- wlc_lcnphy_read_table(pi, &tab);
-
- return (u8) ((m0m1 & 0xff00) >> 8);
-}
-
static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain)
{
mod_phy_reg(pi, 0x4fb,
@@ -3878,7 +3939,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@@ -3889,6 +3949,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
+ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@@ -4313,17 +4374,22 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
if (CHSPEC_IS5G(pi->radio_chanspec))
pa_gain = 0x70;
else
- pa_gain = 0x70;
+ pa_gain = 0x60;
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
+
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
for (j = 0; j < 128; j++) {
- gm_gain = gain_table[j].gm;
+ if (pi->sh->boardflags & BFL_FEM)
+ gm_gain = gain_table[j].gm;
+ else
+ gm_gain = 15;
+
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@@ -4534,7 +4600,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ if (pi->sh->boardflags & BFL_FEM)
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ else
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@@ -4545,6 +4614,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
+ write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+ write_radio_reg(pi, RADIO_2064_REG039, 0xe);
+ }
+
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4574,22 +4650,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
- tab.tbl_width = 16;
- tab.tbl_ptr = &val;
- tab.tbl_len = 1;
-
- val = 114;
- tab.tbl_offset = 0;
- wlc_lcnphy_write_table(pi, &tab);
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+ tab.tbl_width = 16;
+ tab.tbl_ptr = &val;
+ tab.tbl_len = 1;
- val = 130;
- tab.tbl_offset = 1;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 150;
+ tab.tbl_offset = 0;
+ wlc_lcnphy_write_table(pi, &tab);
- val = 6;
- tab.tbl_offset = 8;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 220;
+ tab.tbl_offset = 1;
+ wlc_lcnphy_write_table(pi, &tab);
+ }
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@@ -4946,6 +5020,44 @@ void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi)
}
}
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
+{
+ u8 channel = CHSPEC_CHANNEL(chanspec);
+
+ wlc_phy_chanspec_radio_set((struct brcms_phy_pub *)pi, chanspec);
+
+ wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
+
+ or_phy_reg(pi, 0x44a, 0x44);
+ write_phy_reg(pi, 0x44a, 0x80);
+
+ wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
+ udelay(1000);
+
+ wlc_lcnphy_toggle_afe_pwdn(pi);
+
+ write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20);
+ write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor);
+
+ if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
+ mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
+
+ wlc_lcnphy_load_tx_iir_filter(pi, false, 3);
+ } else {
+ mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
+
+ wlc_lcnphy_load_tx_iir_filter(pi, false, 2);
+ }
+
+ if (pi->sh->boardflags & BFL_FEM)
+ wlc_lcnphy_load_tx_iir_filter(pi, true, 0);
+ else
+ wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
+
+ mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
+ wlc_lcnphy_tssi_setup(pi);
+}
+
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
{
kfree(pi->u.pi_lcnphy);
@@ -4982,8 +5094,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if ((pi->sh->boardflags & BFL_FEM) &&
- (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c5..b7e95acc2084 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
};
static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
};
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 5855f4fd16dc..b0f14b7b8616 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -176,6 +176,7 @@ struct brcms_pub {
bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
struct wl_cnt *_cnt; /* low-level counters in driver */
+ struct dentry *dbgfs_dir;
};
enum wlc_par_id {
@@ -200,43 +201,6 @@ enum wlc_par_id {
/* WL11N Support */
#define AMPDU_AGG_HOST 1
-/* pri is priority encoded in the packet. This maps the Packet priority to
- * enqueue precedence as defined in wlc_prec_map
- */
-extern const u8 wlc_prio2prec_map[];
-#define BRCMS_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
-
-#define BRCMS_PREC_COUNT 16 /* Max precedence level implemented */
-
-/* Mask to describe all precedence levels */
-#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
-
-/*
- * This maps priority to one precedence higher - Used by PS-Poll response
- * packets to simulate enqueue-at-head operation, but still maintain the
- * order on the queue
- */
-#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
- BRCMS_PREC_COUNT - 1)
-
-/* Define a bitmap of precedences comprised by each AC */
-#define BRCMS_PREC_BMP_AC_BE (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
-#define BRCMS_PREC_BMP_AC_BK (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
-#define BRCMS_PREC_BMP_AC_VI (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
-#define BRCMS_PREC_BMP_AC_VO (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
- NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) | \
- NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
-
/* network protection config */
#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
@@ -319,9 +283,9 @@ extern void brcms_c_intrson(struct brcms_c_info *wlc);
extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc);
+extern bool brcms_c_isr(struct brcms_c_info *wlc);
extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
+extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
struct sk_buff *sdu,
struct ieee80211_hw *hw);
extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
@@ -350,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
- bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -368,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
index ed1d1aa71d2d..dd9162722495 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -23,6 +23,7 @@
#include "channel.h"
#include "main.h"
#include "stf.h"
+#include "debug.h"
#define MIN_SPATIAL_EXPANSION 0
#define MAX_SPATIAL_EXPANSION 1
@@ -160,8 +161,8 @@ bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val)
static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
u8 core_mask)
{
- BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
- wlc->pub->unit, Nsts, core_mask);
+ brcms_dbg_ht(wlc->hw->d11core, "wl%d: Nsts %d core_mask %x\n",
+ wlc->pub->unit, Nsts, core_mask);
if (hweight8(core_mask) > wlc->stf->txstreams)
core_mask = 0;
@@ -194,7 +195,8 @@ static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val)
int i;
u8 core_mask = 0;
- BCMMSG(wlc->wiphy, "wl%d: val %x\n", wlc->pub->unit, val);
+ brcms_dbg_ht(wlc->hw->d11core, "wl%d: val %x\n", wlc->pub->unit,
+ val);
wlc->stf->spatial_policy = (s8) val;
for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index e11ae83111e4..ae1f3ad40d45 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -246,7 +246,7 @@
#define BCMMSG(dev, fmt, args...) \
do { \
- if (brcm_msg_level & LOG_TRACE_VAL) \
+ if (brcm_msg_level & BRCM_DL_INFO) \
wiphy_err(dev, "%s: " fmt, __func__, ##args); \
} while (0)
@@ -281,7 +281,6 @@ struct ieee80211_tx_queue_params;
struct brcms_info;
struct brcms_c_info;
struct brcms_hardware;
-struct brcms_txq_info;
struct brcms_band;
struct dma_pub;
struct si_pub;
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index f0d8c04a9c8c..fb7cbcf81179 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -78,9 +78,14 @@
#define PM_OFF 0
#define PM_MAX 1
-/* Message levels */
-#define LOG_ERROR_VAL 0x00000001
-#define LOG_TRACE_VAL 0x00000002
+/* Debug levels */
+#define BRCM_DL_INFO 0x00000001
+#define BRCM_DL_MAC80211 0x00000002
+#define BRCM_DL_RX 0x00000004
+#define BRCM_DL_TX 0x00000008
+#define BRCM_DL_INT 0x00000010
+#define BRCM_DL_DMA 0x00000020
+#define BRCM_DL_HT 0x00000040
#define PM_OFF 0
#define PM_MAX 1
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index df7050abe717..d39e3e24077b 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -415,7 +415,7 @@ static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
ssid = pos + 2;
ssid_len = pos[1];
break;
- case WLAN_EID_GENERIC:
+ case WLAN_EID_VENDOR_SPECIFIC:
if (pos[1] >= 4 &&
pos[2] == 0x00 && pos[3] == 0x50 &&
pos[4] == 0xf2 && pos[5] == 1) {
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 29b8fa1adefd..d92b21a8e597 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1788,10 +1788,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
}
/* Initialize the geo */
- if (libipw_set_geo(priv->ieee, &ipw_geos[0])) {
- printk(KERN_WARNING DRV_NAME "Could not set geo\n");
- return 0;
- }
+ libipw_set_geo(priv->ieee, &ipw_geos[0]);
priv->ieee->freq_band = LIBIPW_24GHZ_BAND;
lock = LOCK_NONE;
@@ -6413,7 +6410,7 @@ out:
goto out;
}
-static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
+static void ipw2100_pci_remove_one(struct pci_dev *pci_dev)
{
struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
struct net_device *dev = priv->net_dev;
@@ -6609,7 +6606,7 @@ static struct pci_driver ipw2100_pci_driver = {
.name = DRV_NAME,
.id_table = ipw2100_pci_id_table,
.probe = ipw2100_pci_init_one,
- .remove = __devexit_p(ipw2100_pci_remove_one),
+ .remove = ipw2100_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ipw2100_suspend,
.resume = ipw2100_resume,
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 973125242490..5fe17cbab1f3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -1045,7 +1045,7 @@ typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW
IPW_ORD_POWER_MGMT_MODE, // Power mode - 0=CAM, 1=PSP
IPW_ORD_POWER_MGMT_INDEX, //NS //
IPW_ORD_COUNTRY_CODE, // IEEE country code as recv'd from beacon
- IPW_ORD_COUNTRY_CHANNELS, // channels suported by country
+ IPW_ORD_COUNTRY_CHANNELS, // channels supported by country
// IPW_ORD_COUNTRY_CHANNELS:
// For 11b the lower 2-byte are used for channels from 1-14
// and the higher 2-byte are not used.
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 768bf612533e..844f201b7b70 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -6812,7 +6812,6 @@ static int ipw_wx_get_auth(struct net_device *dev,
struct libipw_device *ieee = priv->ieee;
struct lib80211_crypt_data *crypt;
struct iw_param *param = &wrqu->param;
- int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
@@ -6822,8 +6821,7 @@ static int ipw_wx_get_auth(struct net_device *dev,
/*
* wpa_supplicant will control these internally
*/
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
case IW_AUTH_TKIP_COUNTERMEASURES:
crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
@@ -10774,7 +10772,7 @@ static void ipw_bg_link_down(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
+static int ipw_setup_deferred_work(struct ipw_priv *priv)
{
int ret = 0;
@@ -11269,10 +11267,31 @@ static const struct libipw_geo ipw_geos[] = {
}
};
+static void ipw_set_geo(struct ipw_priv *priv)
+{
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
+ if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
+ ipw_geos[j].name, 3))
+ break;
+ }
+
+ if (j == ARRAY_SIZE(ipw_geos)) {
+ IPW_WARNING("SKU [%c%c%c] not recognized.\n",
+ priv->eeprom[EEPROM_COUNTRY_CODE + 0],
+ priv->eeprom[EEPROM_COUNTRY_CODE + 1],
+ priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
+ j = 0;
+ }
+
+ libipw_set_geo(priv->ieee, &ipw_geos[j]);
+}
+
#define MAX_HW_RESTARTS 5
static int ipw_up(struct ipw_priv *priv)
{
- int rc, i, j;
+ int rc, i;
/* Age scan list entries found before suspend */
if (priv->suspend_time) {
@@ -11310,22 +11329,7 @@ static int ipw_up(struct ipw_priv *priv)
memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
- for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
- if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
- ipw_geos[j].name, 3))
- break;
- }
- if (j == ARRAY_SIZE(ipw_geos)) {
- IPW_WARNING("SKU [%c%c%c] not recognized.\n",
- priv->eeprom[EEPROM_COUNTRY_CODE + 0],
- priv->eeprom[EEPROM_COUNTRY_CODE + 1],
- priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
- j = 0;
- }
- if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
- IPW_WARNING("Could not set geography.");
- return 0;
- }
+ ipw_set_geo(priv);
if (priv->status & STATUS_RF_KILL_SW) {
IPW_WARNING("Radio disabled by module parameter.\n");
@@ -11722,7 +11726,7 @@ static const struct net_device_ops ipw_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit ipw_pci_probe(struct pci_dev *pdev,
+static int ipw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err = 0;
@@ -11895,7 +11899,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
return err;
}
-static void __devexit ipw_pci_remove(struct pci_dev *pdev)
+static void ipw_pci_remove(struct pci_dev *pdev)
{
struct ipw_priv *priv = pci_get_drvdata(pdev);
struct list_head *p, *q;
@@ -12057,7 +12061,7 @@ static struct pci_driver ipw_driver = {
.name = DRV_NAME,
.id_table = card_ids,
.probe = ipw_pci_probe,
- .remove = __devexit_p(ipw_pci_remove),
+ .remove = ipw_pci_remove,
#ifdef CONFIG_PM
.suspend = ipw_pci_suspend,
.resume = ipw_pci_resume,
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 0b22fb421735..6eede52ad8c0 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -978,7 +978,7 @@ extern void libipw_network_reset(struct libipw_network *network);
/* libipw_geo.c */
extern const struct libipw_geo *libipw_get_geo(struct libipw_device
*ieee);
-extern int libipw_set_geo(struct libipw_device *ieee,
+extern void libipw_set_geo(struct libipw_device *ieee,
const struct libipw_geo *geo);
extern int libipw_is_valid_channel(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c
index c9fe3c99cb00..218f2a32de21 100644
--- a/drivers/net/wireless/ipw2x00/libipw_geo.c
+++ b/drivers/net/wireless/ipw2x00/libipw_geo.c
@@ -132,7 +132,7 @@ u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq)
return 0;
}
-int libipw_set_geo(struct libipw_device *ieee,
+void libipw_set_geo(struct libipw_device *ieee,
const struct libipw_geo *geo)
{
memcpy(ieee->geo.name, geo->name, 3);
@@ -143,7 +143,6 @@ int libipw_set_geo(struct libipw_device *ieee,
sizeof(struct libipw_channel));
memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
sizeof(struct libipw_channel));
- return 0;
}
const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee)
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 02e057923236..95a1ca1e895c 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1108,7 +1108,7 @@ static const char *get_info_element_string(u16 id)
MFIE_STRING(ERP_INFO);
MFIE_STRING(RSN);
MFIE_STRING(EXT_SUPP_RATES);
- MFIE_STRING(GENERIC);
+ MFIE_STRING(VENDOR_SPECIFIC);
MFIE_STRING(QOS_PARAMETER);
default:
return "UNKNOWN";
@@ -1248,8 +1248,8 @@ static int libipw_parse_info_param(struct libipw_info_element
LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n");
break;
- case WLAN_EID_GENERIC:
- LIBIPW_DEBUG_MGMT("WLAN_EID_GENERIC: %d bytes\n",
+ case WLAN_EID_VENDOR_SPECIFIC:
+ LIBIPW_DEBUG_MGMT("WLAN_EID_VENDOR_SPECIFIC: %d bytes\n",
info_element->len);
if (!libipw_parse_qos_info_param_IE(info_element,
network))
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index e252acb9c862..3726cd6fcd75 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3273,7 +3273,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
+ strlcpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
@@ -3794,7 +3794,7 @@ out:
return err;
}
-static void __devexit
+static void
il3945_pci_remove(struct pci_dev *pdev)
{
struct il_priv *il = pci_get_drvdata(pdev);
@@ -3884,7 +3884,7 @@ static struct pci_driver il3945_driver = {
.name = DRV_NAME,
.id_table = il3945_hw_card_ids,
.probe = il3945_pci_probe,
- .remove = __devexit_p(il3945_pci_remove),
+ .remove = il3945_pci_remove,
.driver.pm = IL_LEGACY_PM_OPS,
};
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index 87e539894330..e0b9d7fa5de0 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -516,7 +516,7 @@ static void
il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status rx_status = {};
struct il_rx_pkt *pkt = rxb_addr(rxb);
struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index eac4dc8bc879..c3fbf6717564 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -613,7 +613,7 @@ void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status rx_status = {};
struct il_rx_pkt *pkt = rxb_addr(rxb);
struct il_rx_phy_res *phy_res;
__le32 rx_pkt_status;
@@ -686,7 +686,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+ /*rx_status.flag |= RX_FLAG_MACTIME_START; */
il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
@@ -6664,7 +6664,7 @@ out:
return err;
}
-static void __devexit
+static void
il4965_pci_remove(struct pci_dev *pdev)
{
struct il_priv *il = pci_get_drvdata(pdev);
@@ -6772,7 +6772,7 @@ static struct pci_driver il4965_driver = {
.name = DRV_NAME,
.id_table = il4965_hw_card_ids,
.probe = il4965_pci_probe,
- .remove = __devexit_p(il4965_pci_remove),
+ .remove = il4965_pci_remove,
.driver.pm = IL_LEGACY_PM_OPS,
};
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 2d092f328547..1b15b0b2292b 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -917,10 +917,6 @@ struct il4965_scd_bc_tbl {
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
#define IL4965_DEFAULT_TX_RETRY 15
/* EEPROM */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 318ed3c9fe74..90b8970eadf0 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1183,9 +1183,10 @@ EXPORT_SYMBOL(il_power_update_mode);
void
il_power_initialize(struct il_priv *il)
{
- u16 lctl = il_pcie_link_ctl(il);
+ u16 lctl;
- il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+ pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
il->power_data.debug_sleep_level_override = -1;
@@ -3957,17 +3958,21 @@ il_connection_init_rx_config(struct il_priv *il)
memset(&il->staging, 0, sizeof(il->staging));
- if (!il->vif) {
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
- } else if (il->vif->type == NL80211_IFTYPE_STATION) {
+ break;
+ case NL80211_IFTYPE_STATION:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
+ break;
+ case NL80211_IFTYPE_ADHOC:
il->staging.dev_type = RXON_DEV_TYPE_IBSS;
il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
il->staging.filter_flags =
RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
- } else {
+ break;
+ default:
IL_ERR("Unsupported interface type %d\n", il->vif->type);
return;
}
@@ -4233,9 +4238,8 @@ il_apm_init(struct il_priv *il)
* power savings, even without L1.
*/
if (il->cfg->set_l0s) {
- lctl = il_pcie_link_ctl(il);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
/* L1-ASPM enabled; disable(!) L0S */
il_set_bit(il, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
@@ -4550,8 +4554,7 @@ out:
EXPORT_SYMBOL(il_mac_add_interface);
static void
-il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
- bool mode_change)
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
lockdep_assert_held(&il->mutex);
@@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
il_force_scan_end(il);
}
- if (!mode_change)
- il_set_mode(il);
-
+ il_set_mode(il);
}
void
@@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
WARN_ON(il->vif != vif);
il->vif = NULL;
-
- il_teardown_interface(il, vif, false);
+ il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
+ il_teardown_interface(il, vif);
memset(il->bssid, 0, ETH_ALEN);
D_MAC80211("leave\n");
@@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
/* success */
- il_teardown_interface(il, vif, true);
vif->type = newtype;
vif->p2p = false;
- err = il_set_mode(il);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
+ il->iw_mode = newtype;
+ il_teardown_interface(il, vif);
err = 0;
out:
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index b4bb813362bd..a9a569f432fb 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1829,14 +1829,6 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
* PCI *
*****************************************************/
-static inline u16
-il_pcie_link_ctl(struct il_priv *il)
-{
- u16 pci_lnk_ctl;
- pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
void il_bg_watchdog(unsigned long data);
u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
@@ -2434,10 +2426,6 @@ struct il_tfd {
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
struct il_rate_info {
u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
@@ -2919,9 +2907,8 @@ do { \
#define IL_DBG(level, fmt, args...) \
do { \
if (il_get_debug_level(il) & level) \
- dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ dev_err(&il->hw->wiphy->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__ , ##args); \
} while (0)
#define il_print_hex_dump(il, level, p, len) \
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 727fbb5db9da..5cf43236421e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -133,12 +133,3 @@ config IWLWIFI_P2P
support when it is loaded.
Say Y only if you want to experiment with P2P.
-
-config IWLWIFI_EXPERIMENTAL_MFP
- bool "support MFP (802.11w) even if uCode doesn't advertise"
- depends on IWLWIFI
- help
- This option enables experimental MFP (802.11W) support
- even if the microcode doesn't advertise it.
-
- Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 75e12f29d9eb..33b3ad2e546b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -176,8 +176,8 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
+int iwlagn_txfifo_flush(struct iwl_priv *priv);
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index f2dd671d7dc8..de54713b680c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -833,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->eeprom_data->valid_rx_ant;
+ active_chains &= priv->nvm_data->valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
* priv->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->eeprom_data->valid_tx_ant & ant_msk))
+ if (!(priv->nvm_data->valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
@@ -854,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->eeprom_data->valid_tx_ant);
+ find_first_chain(priv->nvm_data->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -864,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
}
- if (active_chains != priv->eeprom_data->valid_rx_ant &&
+ if (active_chains != priv->nvm_data->valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
active_chains,
- priv->eeprom_data->valid_rx_ant);
+ priv->nvm_data->valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
@@ -1055,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->eeprom_data->valid_rx_ant;
+ data->active_chains = priv->nvm_data->valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
@@ -1086,7 +1086,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwlagn_gain_computation(
priv, average_noise,
- find_first_chain(priv->eeprom_data->valid_rx_ant));
+ find_first_chain(priv->nvm_data->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 01128c96b5d8..71ab76b2b39d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -986,8 +986,7 @@ struct iwl_rem_sta_cmd {
#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
+#define IWL_DROP_ALL BIT(1)
/*
* REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1004,14 +1003,14 @@ struct iwl_rem_sta_cmd {
* the flush operation ends when both the scheduler DMA done and TXFIFO empty
* are set.
*
- * @fifo_control: bit mask for which queues to flush
+ * @queue_control: bit mask for which queues to flush
* @flush_control: flush controls
* 0: Dump single MSDU
* 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
* 2: Dump all FIFO
*/
struct iwl_txfifo_flush_cmd {
- __le32 fifo_control;
+ __le32 queue_control;
__le16 flush_control;
__le16 reserved;
} __packed;
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 1a98fa3ab06d..5b9533eef54d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -305,7 +305,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
- u16 eeprom_ver;
+ u16 nvm_ver;
size_t eeprom_len = priv->eeprom_blob_size;
buf_size = 4 * eeprom_len + 256;
@@ -321,9 +321,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
if (!buf)
return -ENOMEM;
- eeprom_ver = priv->eeprom_data->eeprom_version;
+ nvm_ver = priv->nvm_data->nvm_version;
pos += scnprintf(buf + pos, buf_size - pos,
- "NVM version: 0x%x\n", eeprom_ver);
+ "NVM version: 0x%x\n", nvm_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -1333,17 +1333,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->eeprom_data->valid_tx_ant & ANT_A) &&
+ if ((priv->nvm_data->valid_tx_ant & ANT_A) &&
tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->eeprom_data->valid_tx_ant & ANT_B) &&
+ if ((priv->nvm_data->valid_tx_ant & ANT_B) &&
tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->eeprom_data->valid_tx_ant & ANT_C) &&
+ if ((priv->nvm_data->valid_tx_ant & ANT_C) &&
tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
@@ -2101,7 +2101,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (iwl_is_rfkill(priv))
return -EFAULT;
- iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+ iwlagn_dev_txfifo_flush(priv);
return count;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 8141f91c3725..2653a891cc7e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -789,7 +789,6 @@ struct iwl_priv {
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
struct delayed_work hw_roc_disable_work;
- enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
bool hw_roc_setup, hw_roc_start_notified;
@@ -844,7 +843,7 @@ struct iwl_priv {
void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
- struct iwl_eeprom_data *eeprom_data;
+ struct iwl_nvm_data *nvm_data;
/* eeprom blob for debugfs/testmode */
u8 *eeprom_blob;
size_t eeprom_blob_size;
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index da5862064195..8c72be3f37c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -305,8 +305,8 @@ static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
u16 temperature, voltage;
- temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature);
- voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage);
+ temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature);
+ voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage);
/* offset = temp - volt / coeff */
return (s32)(temperature -
@@ -460,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
break;
case IWL_DEVICE_FAMILY_6050:
/* Indicate calibration version to uCode. */
- if (priv->eeprom_data->calib_version >= 6)
+ if (priv->nvm_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
break;
case IWL_DEVICE_FAMILY_6150:
/* Indicate calibration version to uCode. */
- if (priv->eeprom_data->calib_version >= 6)
+ if (priv->nvm_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index bef88c1a2c9b..6ff46605ad4f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -59,7 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
/* half dBm need to multiply */
tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
- if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) {
+ if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
/*
* For the newer devices which using enhanced/extend tx power
* table in EEPROM, the format is in half dBm. driver need to
@@ -72,7 +72,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
* half-dBm format), lower the tx power based on EEPROM
*/
tx_power_cmd.global_lmt =
- priv->eeprom_data->max_tx_pwr_half_dbm;
+ priv->nvm_data->max_tx_pwr_half_dbm;
}
tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -136,7 +136,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
* 1. acquire mutex before calling
* 2. make sure rf is on and not in exit state
*/
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+int iwlagn_txfifo_flush(struct iwl_priv *priv)
{
struct iwl_txfifo_flush_cmd flush_cmd;
struct iwl_host_cmd cmd = {
@@ -146,35 +146,34 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
.data = { &flush_cmd, },
};
- might_sleep();
-
memset(&flush_cmd, 0, sizeof(flush_cmd));
- if (flush_control & BIT(IWL_RXON_CTX_BSS))
- flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
- IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
- IWL_SCD_MGMT_MSK;
- if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
- (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
- flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
- IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
- IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
- IWL_PAN_SCD_MULTICAST_MSK;
-
- if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
- flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
-
- IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
- flush_cmd.fifo_control);
- flush_cmd.flush_control = cpu_to_le16(flush_control);
+
+ flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+ IWL_SCD_MGMT_MSK;
+ if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
+ IWL_PAN_SCD_VI_MSK |
+ IWL_PAN_SCD_BE_MSK |
+ IWL_PAN_SCD_BK_MSK |
+ IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
+
+ if (priv->nvm_data->sku_cap_11n_enable)
+ flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
+
+ IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
+ flush_cmd.queue_control);
+ flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
return iwl_dvm_send_cmd(priv, &cmd);
}
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
{
mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -826,7 +825,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->eeprom_data->valid_rx_ant;
+ active_chains = priv->nvm_data->valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2d9eee93c743..3163e0f38c25 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -164,14 +164,17 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
*/
- if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (priv->nvm_data->sku_cap_11n_enable)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
- /* enable 11w if the uCode advertise */
- if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
+ /*
+ * Enable 11w if advertised by firmware and software crypto
+ * is not enabled (as the firmware will interpret some mgmt
+ * packets, so enabling it with software crypto isn't safe)
+ */
+ if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+ !iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -239,12 +242,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels)
+ if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ];
- if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels)
+ &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ];
+ &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
hw->wiphy->hw_version = priv->trans->hw_id;
@@ -651,7 +654,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ if (!(priv->nvm_data->sku_cap_11n_enable))
return -EACCES;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1019,7 +1022,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
*/
if (drop) {
IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -1032,8 +1035,8 @@ done:
}
static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type,
int duration)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1065,7 +1068,6 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
}
priv->hw_roc_channel = channel;
- priv->hw_roc_chantype = channel_type;
/* convert from ms to TU */
priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
priv->hw_roc_start_notified = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 408132cf83c1..faa05932efae 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -185,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate = info->control.rates[0].idx;
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->eeprom_data->valid_tx_ant);
+ priv->nvm_data->valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
@@ -511,7 +511,7 @@ static void iwl_bg_tx_flush(struct work_struct *work)
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
- iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+ iwlagn_dev_txfifo_flush(priv);
}
/*
@@ -776,7 +776,7 @@ int iwl_alive_start(struct iwl_priv *priv)
ieee80211_wake_queues(priv->hw);
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -1191,36 +1191,38 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
- u16 radio_cfg;
+ struct iwl_nvm_data *data = priv->nvm_data;
+ char *debug_msg;
- priv->eeprom_data->sku = priv->eeprom_data->sku;
-
- if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ if (data->sku_cap_11n_enable &&
!priv->cfg->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
- if (!priv->eeprom_data->sku) {
+ if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
+ !data->sku_cap_band_52GHz_enable) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
-
- radio_cfg = priv->eeprom_data->radio_cfg;
+ debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
+ IWL_DEBUG_INFO(priv, debug_msg,
+ data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
+ data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
+ data->sku_cap_11n_enable ? "" : "NOT", "enabled");
priv->hw_params.tx_chains_num =
- num_of_ant(priv->eeprom_data->valid_tx_ant);
+ num_of_ant(data->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
priv->hw_params.rx_chains_num = 1;
else
priv->hw_params.rx_chains_num =
- num_of_ant(priv->eeprom_data->valid_rx_ant);
+ num_of_ant(data->valid_rx_ant);
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- priv->eeprom_data->valid_tx_ant,
- priv->eeprom_data->valid_rx_ant);
+ IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ data->valid_tx_ant,
+ data->valid_rx_ant);
return 0;
}
@@ -1235,7 +1237,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_op_mode *op_mode;
u16 num_mac;
u32 ucode_flags;
- struct iwl_trans_config trans_cfg;
+ struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
REPLY_RX_MPDU_CMD,
@@ -1334,6 +1336,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/* Configure transport layer */
iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
/* At this point both hw and priv are allocated. */
SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
@@ -1377,24 +1382,24 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/* Reset chip to save power until we load uCode during "up". */
iwl_trans_stop_hw(priv->trans, false);
- priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
+ priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
priv->eeprom_blob,
priv->eeprom_blob_size);
- if (!priv->eeprom_data)
+ if (!priv->nvm_data)
goto out_free_eeprom_blob;
- if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans))
+ if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
goto out_free_eeprom;
if (iwl_eeprom_init_hw_params(priv))
goto out_free_eeprom;
/* extract MAC Address */
- memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN);
+ memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = priv->eeprom_data->n_hw_addrs;
+ num_mac = priv->nvm_data->n_hw_addrs;
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -1407,7 +1412,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
iwl_set_hw_params(priv);
- if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+ if (!(priv->nvm_data->sku_cap_ipan_enable)) {
IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1489,7 +1494,7 @@ out_destroy_workqueue:
out_free_eeprom_blob:
kfree(priv->eeprom_blob);
out_free_eeprom:
- iwl_free_eeprom_data(priv->eeprom_data);
+ iwl_free_nvm_data(priv->nvm_data);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
@@ -1508,12 +1513,8 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_tt_exit(priv);
- /*This will stop the queues, move the device to low power state */
- priv->ucode_loaded = false;
- iwl_trans_stop_device(priv->trans);
-
kfree(priv->eeprom_blob);
- iwl_free_eeprom_data(priv->eeprom_data);
+ iwl_free_nvm_data(priv->nvm_data);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -1927,8 +1928,6 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
* commands by clearing the ready bit */
clear_bit(STATUS_READY, &priv->status);
- wake_up(&priv->trans->wait_command_queue);
-
if (!ondemand) {
/*
* If firmware keep reloading, then it indicate something
@@ -1989,7 +1988,6 @@ static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
static void iwl_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- u16 radio_cfg = priv->eeprom_data->radio_cfg;
/* SKU Control */
iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
@@ -2001,13 +1999,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
/* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
+ if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
u32 reg_val =
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <<
+ priv->nvm_data->radio_cfg_type <<
CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) <<
+ priv->nvm_data->radio_cfg_step <<
CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg) <<
+ priv->nvm_data->radio_cfg_dash <<
CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
@@ -2016,9 +2014,9 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
- EEPROM_RF_CFG_STEP_MSK(radio_cfg),
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ priv->nvm_data->radio_cfg_type,
+ priv->nvm_data->radio_cfg_step,
+ priv->nvm_data->radio_cfg_dash);
} else {
WARN_ON(1);
}
@@ -2152,8 +2150,6 @@ static int __init iwl_init(void)
{
int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
ret = iwlagn_rate_control_register();
if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index a82f46c10f5e..f3dd0da60d8a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -820,7 +820,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
@@ -1448,7 +1448,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
@@ -1466,7 +1466,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1490,7 +1490,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
valid_tx_ant =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
}
start_action = tbl->action;
@@ -1624,7 +1624,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1642,7 +1642,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
@@ -1660,7 +1660,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
valid_tx_ant =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
@@ -1796,7 +1796,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1966,7 +1966,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
@@ -2700,7 +2700,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ valid_tx_ant = priv->nvm_data->valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2894,15 +2894,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->eeprom_data->valid_tx_ant &
- ~first_antenna(priv->eeprom_data->valid_tx_ant);
+ priv->nvm_data->valid_tx_ant &
+ ~first_antenna(priv->nvm_data->valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->eeprom_data->valid_tx_ant;
+ priv->nvm_data->valid_tx_ant;
}
/* as default allow aggregation for all tids */
@@ -2948,7 +2948,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
@@ -2980,7 +2980,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ valid_tx_ant = priv->nvm_data->valid_tx_ant;
}
/* Fill rest of rate table */
@@ -3014,7 +3014,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
@@ -3101,7 +3101,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->eeprom_data->valid_tx_ant;
+ valid_tx_ant = priv->nvm_data->valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3172,9 +3172,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (priv->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 5a9c325804f6..cac4f37cc427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -631,8 +631,6 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->trans->wait_command_queue);
return 0;
}
@@ -901,7 +899,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status rx_status = {};
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
@@ -951,7 +949,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+ /*rx_status.flag |= RX_FLAG_MACTIME_START;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 2830ea290502..9a891e6e60e8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -420,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) {
+ if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
IWL_WARN(priv,
"Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->eeprom_data->max_tx_pwr_half_dbm);
+ tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index bb9f6252d28f..610ed2204e1f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -660,12 +660,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
- u8 rx_ant = priv->eeprom_data->valid_rx_ant;
+ u8 rx_ant = priv->nvm_data->valid_rx_ant;
u8 rate;
bool is_active = false;
int chan_mod;
u8 active_chains;
- u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant;
+ u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
int ret;
int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -673,8 +673,9 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
const u8 *ssid = NULL;
u8 ssid_len = 0;
- if (WARN_ON_ONCE(priv->scan_request &&
- priv->scan_request->n_channels > MAX_SCAN_CHANNEL))
+ if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL &&
+ (!priv->scan_request ||
+ priv->scan_request->n_channels > MAX_SCAN_CHANNEL)))
return -EINVAL;
lockdep_assert_held(&priv->mutex);
@@ -881,7 +882,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* MIMO is not used here, but value is required */
rx_chain |=
- priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -998,7 +999,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
void iwl_init_scan_params(struct iwl_priv *priv)
{
- u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1;
+ u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index cd9b6de4273e..bdba9543c351 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -634,23 +634,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
+ rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->eeprom_data->valid_tx_ant);
+ first_antenna(priv->nvm_data->valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->eeprom_data->valid_tx_ant &
- ~first_antenna(priv->eeprom_data->valid_tx_ant);
+ priv->nvm_data->valid_tx_ant &
+ ~first_antenna(priv->nvm_data->valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->eeprom_data->valid_tx_ant;
+ priv->nvm_data->valid_tx_ant;
}
link_cmd->agg_params.agg_dis_start_th =
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index f5ca73a89870..279796419ea0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -188,7 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
rate_idx = rate_lowest_index(
- &priv->eeprom_data->bands[info->band], sta);
+ &priv->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
@@ -207,11 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->eeprom_data->valid_tx_ant));
+ first_antenna(priv->nvm_data->valid_tx_ant));
} else
priv->mgmt_tx_ant = iwl_toggle_tx_ant(
priv, priv->mgmt_tx_ant,
- priv->eeprom_data->valid_tx_ant);
+ priv->nvm_data->valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -305,7 +305,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
u8 hdr_len;
u16 len, seq_number = 0;
u8 sta_id, tid = IWL_MAX_TID_COUNT;
- bool is_agg = false;
+ bool is_agg = false, is_data_qos = false;
int txq_id;
if (info->control.vif)
@@ -378,9 +378,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- is_agg = true;
-
dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
if (unlikely(!dev_cmd))
@@ -442,6 +439,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ is_agg = true;
+ is_data_qos = true;
}
/* Copy MAC header from skb into command buffer */
@@ -474,8 +475,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
- !ieee80211_has_morefrags(fc))
+ if (is_data_qos && !ieee80211_has_morefrags(fc))
priv->tid_data[sta_id][tid].seq_number = seq_number;
spin_unlock(&priv->sta_lock);
@@ -1075,14 +1075,13 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
static void iwlagn_set_tx_status(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
- struct iwlagn_tx_resp *tx_resp,
- bool is_agg)
+ struct iwlagn_tx_resp *tx_resp)
{
- u16 status = le16_to_cpu(tx_resp->status.status);
+ u16 status = le16_to_cpu(tx_resp->status.status);
+
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->status.rates[0].count = tx_resp->failure_frame + 1;
- if (is_agg)
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
info);
@@ -1100,29 +1099,6 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
}
}
-static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
- int txq_id, int ssn, struct sk_buff_head *skbs)
-{
- if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
- tid != IWL_TID_NON_QOS &&
- txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
- /*
- * FIXME: this is a uCode bug which need to be addressed,
- * log the information and return for now.
- * Since it is can possibly happen very often and in order
- * not to fill the syslog, don't use IWL_ERR or IWL_WARN
- */
- IWL_DEBUG_TX_QUEUES(priv,
- "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
- txq_id, sta_id, tid,
- priv->tid_data[sta_id][tid].agg.txq_id);
- return 1;
- }
-
- iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
- return 0;
-}
-
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
@@ -1184,9 +1160,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
next_reclaimed);
}
- /*we can free until ssn % q.n_bd not inclusive */
- WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
- txq_id, ssn, &skbs));
+ iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
@@ -1231,7 +1206,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (is_agg && !iwl_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
- tx_resp, is_agg);
+ tx_resp);
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
@@ -1311,16 +1286,27 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
return 0;
}
+ if (unlikely(scd_flow != agg->txq_id)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed,
+ * log the information and return for now.
+ * Since it can possibly happen very often and in order
+ * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+ */
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
+ scd_flow, sta_id, tid, agg->txq_id);
+ spin_unlock(&priv->sta_lock);
+ return 0;
+ }
+
__skb_queue_head_init(&reclaimed_skbs);
/* Release all TFDs before the SSN, i.e. all TFDs in front of
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
- if (iwl_reclaim(priv, sta_id, tid, scd_flow,
- ba_resp_scd_ssn, &reclaimed_skbs)) {
- spin_unlock(&priv->sta_lock);
- return 0;
- }
+ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+ &reclaimed_skbs);
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
"sta_id = %d\n",
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 2cb1efbc5ed1..c6467e5554f5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -61,7 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib = priv->eeprom_data->xtal_calib;
+ __le16 *xtal_calib = priv->nvm_data->xtal_calib;
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -75,7 +75,7 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature;
+ cmd.radio_sensor_offset = priv->nvm_data->raw_temperature;
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
@@ -90,14 +90,14 @@ static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature;
- cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature;
+ cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature;
+ cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature;
if (!cmd.radio_sensor_offset_low) {
IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
- cmd.burntVoltageRef = priv->eeprom_data->calib_voltage;
+ cmd.burntVoltageRef = priv->nvm_data->calib_voltage;
IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -254,10 +254,10 @@ static int iwl_alive_notify(struct iwl_priv *priv)
int ret;
int i;
- iwl_trans_fw_alive(priv->trans);
+ iwl_trans_fw_alive(priv->trans, 0);
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
- priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
+ priv->nvm_data->sku_cap_ipan_enable) {
n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
} else {
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 87f465a49df1..864219d2136a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -150,7 +150,7 @@ enum iwl_led_mode {
struct iwl_base_params {
int eeprom_size;
int num_of_queues; /* def: HW dependent */
- /* for iwl_apm_init() */
+ /* for iwl_pcie_apm_init() */
u32 pll_cfg_val;
const u16 max_ll_items;
@@ -226,8 +226,8 @@ struct iwl_eeprom_params {
* @max_data_size: The maximal length of the fw data section
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
+ * @nvm_ver: NVM version
+ * @nvm_calib_ver: NVM calibration version
* @lib: pointer to the lib ops
* @base_params: pointer to basic parameters
* @ht_params: pointer to ht parameters
@@ -257,8 +257,8 @@ struct iwl_cfg {
const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
+ u16 nvm_ver;
+ u16 nvm_calib_ver;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 59a5f78402fc..dc7e26b2f383 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -25,6 +25,39 @@
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+#if !defined(__IWLWIFI_DEVICE_TRACE)
+static inline bool iwl_trace_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (ieee80211_is_data(hdr->frame_control))
+ return skb->protocol != cpu_to_be16(ETH_P_PAE);
+ return false;
+}
+
+static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+ void *rxbuf, size_t len)
+{
+ struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
+ struct ieee80211_hdr *hdr;
+
+ if (cmd->cmd != trans->rx_mpdu_cmd)
+ return len;
+
+ hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
+ trans->rx_mpdu_cmd_hdr_size);
+ if (!ieee80211_is_data(hdr->frame_control))
+ return len;
+ /* maybe try to identify EAPOL frames? */
+ return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+ ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
@@ -100,6 +133,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
__get_str(dev), __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write PRPH[%#x] = %#x)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_ioread_prph32,
+ TP_PROTO(const struct device *dev, u32 offs, u32 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] read PRPH[%#x] = %#x",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
TRACE_EVENT(iwlwifi_dev_irq,
TP_PROTO(const struct device *dev),
TP_ARGS(dev),
@@ -235,6 +302,48 @@ TRACE_EVENT(iwlwifi_dbg,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+ TP_PROTO(const struct device *dev,
+ struct sk_buff *skb,
+ void *data, size_t data_len),
+ TP_ARGS(dev, skb, data, data_len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ if (iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(data), data, data_len);
+ ),
+ TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+ TP_PROTO(const struct device *dev,
+ const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data,
+ len - iwl_rx_trace_len(trans, rxbuf, len))
+ ),
+ TP_fast_assign(
+ size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+ DEV_ASSIGN;
+ if (offs < len)
+ memcpy(__get_dynamic_array(data),
+ ((u8 *)rxbuf) + offs, len - offs);
+ ),
+ TP_printk("[%s] RX frame data", __get_str(dev))
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
@@ -270,25 +379,28 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
- TP_ARGS(dev, rxbuf, len),
+ TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, rxbuf, len)
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
),
TP_fast_assign(
DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+ memcpy(__get_dynamic_array(rxbuf), rxbuf,
+ iwl_rx_trace_len(trans, rxbuf, len));
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
- TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
DEV_ENTRY
@@ -301,14 +413,15 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
+ __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ if (!iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
TP_printk("[%s] TX %.2x (%zu bytes)",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 198634b75ed0..d3549f493a17 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1032,6 +1032,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
if (!drv->dbgfs_drv) {
IWL_ERR(drv, "failed to create debugfs directory\n");
+ ret = -ENOMEM;
goto err_free_drv;
}
@@ -1040,12 +1041,12 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
if (!drv->trans->dbgfs_dir) {
IWL_ERR(drv, "failed to create transport debugfs directory\n");
+ ret = -ENOMEM;
goto err_free_dbgfs;
}
#endif
ret = iwl_request_firmware(drv, true);
-
if (ret) {
IWL_ERR(trans, "Couldn't request the fw\n");
goto err_fw;
@@ -1060,9 +1061,8 @@ err_free_dbgfs:
err_free_drv:
#endif
kfree(drv);
- drv = NULL;
- return drv;
+ return ERR_PTR(ret);
}
void iwl_drv_stop(struct iwl_drv *drv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index f10170fe8799..471986690cf0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -116,6 +116,24 @@ struct iwl_eeprom_calib_hdr {
#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
+/* SKU Capabilities (actual values from EEPROM definition) */
+enum eeprom_sku_bits {
+ EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
+ EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
+ EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
+ EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
+ EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
+};
+
+/* radio config bits (actual values from EEPROM definition) */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+
/*
* EEPROM bands
* These are the channel numbers from each band in the order
@@ -251,7 +269,7 @@ static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
}
static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
- struct iwl_eeprom_data *data)
+ struct iwl_nvm_data *data)
{
struct iwl_eeprom_calib_hdr *hdr;
@@ -330,7 +348,7 @@ struct iwl_eeprom_enhanced_txpwr {
s8 mimo3_max;
} __packed;
-static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
+static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
struct iwl_eeprom_enhanced_txpwr *txp)
{
s8 result = 0; /* (.5 dBm) */
@@ -364,7 +382,7 @@ static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
static void
-iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
+iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
struct iwl_eeprom_enhanced_txpwr *txp,
int n_channels, s8 max_txpower_avg)
{
@@ -392,7 +410,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
}
static void iwl_eeprom_enhanced_txpower(struct device *dev,
- struct iwl_eeprom_data *data,
+ struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size,
int n_channels)
{
@@ -504,7 +522,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
static void iwl_mod_ht40_chan_info(struct device *dev,
- struct iwl_eeprom_data *data, int n_channels,
+ struct iwl_nvm_data *data, int n_channels,
enum ieee80211_band band, u16 channel,
const struct iwl_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
@@ -547,7 +565,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_eeprom_data *data,
+ struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
int band, ch_idx;
@@ -685,7 +703,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
+static int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum ieee80211_band band)
{
@@ -711,7 +729,7 @@ static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
- struct iwl_eeprom_data *data,
+ struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
enum ieee80211_band band)
{
@@ -725,7 +743,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
else
rx_chains = hweight8(data->valid_rx_ant);
- if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) {
+ if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
ht_info->ht_supported = false;
return;
}
@@ -773,7 +791,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
}
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_eeprom_data *data,
+ struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
int n_channels = iwl_init_channel_map(dev, cfg, data,
@@ -804,12 +822,13 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
/* EEPROM data functions */
-struct iwl_eeprom_data *
+struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
{
- struct iwl_eeprom_data *data;
+ struct iwl_nvm_data *data;
const void *tmp;
+ u16 radio_cfg, sku;
if (WARN_ON(!cfg || !cfg->eeprom_params))
return NULL;
@@ -849,18 +868,27 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
data->kelvin_temperature = *(__le16 *)tmp;
data->kelvin_voltage = *((__le16 *)tmp + 1);
- data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
+ radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_RADIO_CONFIG);
- data->sku = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_SKU_CAP);
+ data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
+ data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+
+ sku = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_SKU_CAP);
+ data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
+ data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
+ data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
-
- data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_VERSION);
+ data->sku_cap_11n_enable = false;
- data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg);
- data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg);
+ data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_VERSION);
/* check overrides (some devices have wrong EEPROM) */
if (cfg->valid_tx_ant)
@@ -884,20 +912,20 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
/* helper functions */
-int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
+int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans)
{
- if (data->eeprom_version >= trans->cfg->eeprom_ver ||
- data->calib_version >= trans->cfg->eeprom_calib_ver) {
- IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- data->eeprom_version, data->calib_version);
+ if (data->nvm_version >= trans->cfg->nvm_ver ||
+ data->calib_version >= trans->cfg->nvm_calib_ver) {
+ IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ data->nvm_version, data->calib_version);
return 0;
}
IWL_ERR(trans,
"Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
- data->eeprom_version, trans->cfg->eeprom_ver,
- data->calib_version, trans->cfg->eeprom_calib_ver);
+ data->nvm_version, trans->cfg->nvm_ver,
+ data->calib_version, trans->cfg->nvm_calib_ver);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(iwl_eeprom_check_version);
+EXPORT_SYMBOL_GPL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index a5e425718f56..555f0eb61d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -66,22 +66,7 @@
#include <linux/if_ether.h>
#include "iwl-trans.h"
-/* SKU Capabilities (actual values from EEPROM definition) */
-#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
-#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
-#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
-#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
-
-/* radio config bits (actual values from EEPROM definition) */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-struct iwl_eeprom_data {
+struct iwl_nvm_data {
int n_hw_addrs;
u8 hw_addr[ETH_ALEN];
@@ -93,13 +78,21 @@ struct iwl_eeprom_data {
__le16 kelvin_voltage;
__le16 xtal_calib[2];
- u16 sku;
- u16 radio_cfg;
- u16 eeprom_version;
- s8 max_tx_pwr_half_dbm;
+ bool sku_cap_band_24GHz_enable;
+ bool sku_cap_band_52GHz_enable;
+ bool sku_cap_11n_enable;
+ bool sku_cap_amt_enable;
+ bool sku_cap_ipan_enable;
+ u8 radio_cfg_type;
+ u8 radio_cfg_step;
+ u8 radio_cfg_dash;
+ u8 radio_cfg_pnum;
u8 valid_tx_ant, valid_rx_ant;
+ u16 nvm_version;
+ s8 max_tx_pwr_half_dbm;
+
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[];
};
@@ -115,22 +108,22 @@ struct iwl_eeprom_data {
* This function parses all EEPROM values we need and then
* returns a (newly allocated) struct containing all the
* relevant values for driver use. The struct must be freed
- * later with iwl_free_eeprom_data().
+ * later with iwl_free_nvm_data().
*/
-struct iwl_eeprom_data *
+struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size);
/**
- * iwl_free_eeprom_data - free EEPROM data
+ * iwl_free_nvm_data - free NVM data
* @data: the data to free
*/
-static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data)
+static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
{
kfree(data);
}
-int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
- struct iwl_trans *trans);
+int iwl_nvm_check_version(struct iwl_nvm_data *data,
+ struct iwl_trans *trans);
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 806046641747..ec48563d3c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -267,7 +267,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
+#define RX_RB_TIMEOUT (0x11)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3dfebfb8434f..cdaff9572059 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -214,84 +214,84 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
}
EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
-static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
+static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
- iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- return iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
+ u32 val = iwl_trans_read_prph(trans, ofs);
+ trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
+ return val;
}
-static inline void __iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
+static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
+ iwl_trans_write_prph(trans, ofs, val);
}
-u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
+u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&trans->reg_lock, flags);
iwl_grab_nic_access(trans);
- val = __iwl_read_prph(trans, reg);
+ val = __iwl_read_prph(trans, ofs);
iwl_release_nic_access(trans);
spin_unlock_irqrestore(&trans->reg_lock, flags);
return val;
}
EXPORT_SYMBOL_GPL(iwl_read_prph);
-void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
+void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
- __iwl_write_prph(trans, addr, val);
+ __iwl_write_prph(trans, ofs, val);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_prph);
-void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
- __iwl_write_prph(trans, reg,
- __iwl_read_prph(trans, reg) | mask);
+ __iwl_write_prph(trans, ofs,
+ __iwl_read_prph(trans, ofs) | mask);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
-void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask)
{
unsigned long flags;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
- __iwl_write_prph(trans, reg,
- (__iwl_read_prph(trans, reg) & mask) | bits);
+ __iwl_write_prph(trans, ofs,
+ (__iwl_read_prph(trans, ofs) & mask) | bits);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
-void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
- val = __iwl_read_prph(trans, reg);
- __iwl_write_prph(trans, reg, (val & ~mask));
+ val = __iwl_read_prph(trans, ofs);
+ __iwl_write_prph(trans, ofs, (val & ~mask));
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
@@ -327,11 +327,11 @@ u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+ const void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
- u32 *vals = buf;
+ const u32 *vals = buf;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 50d3819739d1..48dc753e3742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -69,12 +69,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
-u32 iwl_read_prph(struct iwl_trans *trans, u32 reg);
-void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
-void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
-void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
+u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
+void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
-void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
@@ -87,7 +87,7 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
} while (0)
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
+ const void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 9253ef1dba72..c3a4bb41e533 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -213,6 +213,9 @@
#define SCD_CONTEXT_QUEUE_OFFSET(x)\
(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+#define SCD_TX_STTS_QUEUE_OFFSET(x)\
+ (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index ff1154232885..b76532e238c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -221,14 +221,21 @@ struct iwl_device_cmd {
/**
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
- * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
* rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
- * buffer. This can save memcpy and is worth with very big comamnds.
+ * buffer (that is passed in) instead. This saves the memcpy and allows
+ * commands that are bigger than the fixed buffer to be submitted.
+ * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ * chunk internally and free it again after the command completes. This
+ * can (currently) be used only once per command.
+ * Note that a TFD entry after a DUP one cannot be a normal copied one.
*/
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
+ IWL_HCMD_DFL_DUP = BIT(1),
};
/**
@@ -348,14 +355,17 @@ struct iwl_trans;
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
* May sleep
- * @fw_alive: called when the fw sends alive notification
+ * @fw_alive: called when the fw sends alive notification. If the fw provides
+ * the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
* @stop_device:stops the whole device (embedded CPU put to reset)
* May sleep
* @wowlan_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
- * @send_cmd:send a host command
+ * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
+ * If RFkill is asserted in the middle of a SYNC host command, it must
+ * return -ERFKILL straight away.
* May sleep only if CMD_SYNC is set
* @tx: send an skb
* Must be atomic
@@ -375,6 +385,8 @@ struct iwl_trans;
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
+ * @read_prph: read a DWORD from a periphery register
+ * @write_prph: write a DWORD to a periphery register
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
@@ -385,7 +397,7 @@ struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
- void (*fw_alive)(struct iwl_trans *trans);
+ void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
void (*wowlan_suspend)(struct iwl_trans *trans);
@@ -410,6 +422,8 @@ struct iwl_trans_ops {
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
+ u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
+ void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
@@ -438,12 +452,15 @@ enum iwl_trans_state {
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
- * @wait_command_queue: the wait_queue for SYNC host commands
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_headroom: room needed for the transport's private use before the
* device_cmd for Tx - for internal use only
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ * starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ * start of the 802.11 header in the @rx_mpdu_cmd
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -457,9 +474,9 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
- bool pm_support;
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
- wait_queue_head_t wait_command_queue;
+ bool pm_support;
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
@@ -476,10 +493,6 @@ struct iwl_trans {
static inline void iwl_trans_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
- /*
- * only set the op_mode for the moment. Later on, this function will do
- * more
- */
trans->op_mode = trans_cfg->op_mode;
trans->ops->configure(trans, trans_cfg);
@@ -499,16 +512,19 @@ static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
trans->ops->stop_hw(trans, op_mode_leaving);
+ if (op_mode_leaving)
+ trans->op_mode = NULL;
+
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
- trans->ops->fw_alive(trans);
+ trans->ops->fw_alive(trans, scd_addr);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -516,6 +532,8 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
{
might_sleep();
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
return trans->ops->start_fw(trans, fw);
}
@@ -650,6 +668,17 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
return trans->ops->read32(trans, ofs);
}
+static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return trans->ops->read_prph(trans, ofs);
+}
+
+static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
+ u32 val)
+{
+ return trans->ops->write_prph(trans, ofs, val);
+}
+
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
trans->ops->set_pmi(trans, state);
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 81b83f484f08..f8620ecae6b4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -94,8 +94,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.device_family = IWL_DEVICE_FAMILY_1000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK
@@ -119,8 +119,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.device_family = IWL_DEVICE_FAMILY_100, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 9fbde32f7559..244019cec3e1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -138,8 +138,8 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
@@ -166,8 +166,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.device_family = IWL_DEVICE_FAMILY_2030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
@@ -190,8 +190,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.device_family = IWL_DEVICE_FAMILY_105, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
@@ -220,8 +220,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.device_family = IWL_DEVICE_FAMILY_135, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index d1665fa6d15a..83ca40321ff1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -92,8 +92,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.device_family = IWL_DEVICE_FAMILY_5000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_5000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK
@@ -139,8 +139,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.device_family = IWL_DEVICE_FAMILY_5000,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
.max_data_size = IWLAGN_RTC_DATA_SIZE,
- .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+ .nvm_ver = EEPROM_5050_EEPROM_VERSION,
+ .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
.base_params = &iwl5000_base_params,
.eeprom_params = &iwl5000_eeprom_params,
.ht_params = &iwl5000_ht_params,
@@ -156,8 +156,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.device_family = IWL_DEVICE_FAMILY_5150, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_5050_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.no_xtal_calib = true, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index 4a57624afc40..d4df976d4709 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -160,8 +160,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6005_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
@@ -215,8 +215,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
.eeprom_params = &iwl6000_eeprom_params, \
@@ -254,8 +254,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
.eeprom_params = &iwl6000_eeprom_params, \
@@ -306,8 +306,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
- .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6000_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK
@@ -337,8 +337,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
- .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6050_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
@@ -362,8 +362,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.device_family = IWL_DEVICE_FAMILY_6150, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
- .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
+ .nvm_ver = EEPROM_6150_EEPROM_VERSION, \
+ .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
@@ -389,8 +389,8 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.device_family = IWL_DEVICE_FAMILY_6000,
.max_inst_size = IWL60_RTC_INST_SIZE,
.max_data_size = IWL60_RTC_DATA_SIZE,
- .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .nvm_ver = EEPROM_6000_EEPROM_VERSION,
+ .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.base_params = &iwl6000_base_params,
.eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2a4675396707..c2e141af353c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -69,7 +69,6 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
-#include "iwl-trans.h"
#include "cfg.h"
#include "internal.h"
@@ -268,6 +267,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
+ int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
if (iwl_trans == NULL)
@@ -277,11 +277,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
- if (!trans_pcie->drv)
+
+ if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+ ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans;
+ }
/* register transport layer debugfs here */
- if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+ ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
+ if (ret)
goto out_free_drv;
return 0;
@@ -291,10 +295,10 @@ out_free_drv:
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
pci_set_drvdata(pdev, NULL);
- return -EFAULT;
+ return ret;
}
-static void __devexit iwl_pci_remove(struct pci_dev *pdev)
+static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -353,7 +357,7 @@ static struct pci_driver iwl_pci_driver = {
.name = DRV_NAME,
.id_table = iwl_hw_card_ids,
.probe = iwl_pci_probe,
- .remove = __devexit_p(iwl_pci_remove),
+ .remove = iwl_pci_remove,
.driver.pm = IWL_PM_OPS,
};
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 401178f44a3b..d91d2e8c62f5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics {
};
/**
- * struct iwl_rx_queue - Rx queue
+ * struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @pool:
@@ -91,7 +91,7 @@ struct isr_statistics {
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
-struct iwl_rx_queue {
+struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@ struct iwl_cmd_meta {
* 32 since we don't need so many commands pending. Since the HW
* still uses 256 BDs for DMA though, n_bd stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
* This means that we end up with the following:
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
* SW entries: | 0 | ... | 31 |
@@ -182,15 +182,17 @@ struct iwl_queue {
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
-struct iwl_pcie_tx_queue_entry {
+struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
struct iwl_device_cmd *copy_cmd;
struct sk_buff *skb;
+ /* buffer to free after command completes */
+ const void *free_buf;
struct iwl_cmd_meta meta;
};
/**
- * struct iwl_tx_queue - Tx Queue for DMA
+ * struct iwl_txq - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
* @entries: transmit entries (driver state)
@@ -203,10 +205,10 @@ struct iwl_pcie_tx_queue_entry {
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
*/
-struct iwl_tx_queue {
+struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
- struct iwl_pcie_tx_queue_entry *entries;
+ struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
@@ -236,7 +238,7 @@ struct iwl_tx_queue {
* @wd_timeout: queue watchdog timeout (jiffies)
*/
struct iwl_trans_pcie {
- struct iwl_rx_queue rxq;
+ struct iwl_rxq rxq;
struct work_struct rx_replenish;
struct iwl_trans *trans;
struct iwl_drv *drv;
@@ -258,7 +260,7 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
@@ -268,6 +270,8 @@ struct iwl_trans_pcie {
bool ucode_write_complete;
wait_queue_head_t ucode_write_waitq;
+ wait_queue_head_t wait_command_queue;
+
unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
@@ -283,13 +287,23 @@ struct iwl_trans_pcie {
unsigned long wd_timeout;
};
-/*****************************************************
-* DRIVER STATUS FUNCTIONS
-******************************************************/
-#define STATUS_HCMD_ACTIVE 0
-#define STATUS_DEVICE_ENABLED 1
-#define STATUS_TPOWER_PMI 2
-#define STATUS_INT_ENABLED 3
+/**
+ * enum iwl_pcie_status: status of the PCIe transport
+ * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_pcie_status {
+ STATUS_HCMD_ACTIVE,
+ STATUS_DEVICE_ENABLED,
+ STATUS_TPOWER_PMI,
+ STATUS_INT_ENABLED,
+ STATUS_RFKILL,
+ STATUS_FW_ERROR,
+};
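As an illustrative aside (not part of the patch), these bits live in the trans_pcie->status bitmap and are driven with the kernel's atomic bit helpers, exactly as the rx.c and trans.c hunks further below do; the example_* helpers here are hypothetical names:

/* Minimal sketch, mirroring usage elsewhere in this patch. */
static void example_mark_fw_error(struct iwl_trans_pcie *trans_pcie)
{
	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static bool example_rfkill_asserted(struct iwl_trans_pcie *trans_pcie)
{
	return test_bit(STATUS_RFKILL, &trans_pcie->status);
}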
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
@@ -301,6 +315,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
trans_specific);
}
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ * Other functions: iwl_pcie_XXX
+ */
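For example (names taken from the prototypes that follow; how they are wired into the transport ops is an assumption, not shown in this hunk):

/*
 * trans API (exposed to the core):
 *     iwl_trans_pcie_tx(), iwl_trans_pcie_send_hcmd(),
 *     iwl_trans_pcie_reclaim(), iwl_trans_pcie_txq_enable()
 * internal to the PCIe transport:
 *     iwl_pcie_rx_init(), iwl_pcie_tasklet(),
 *     iwl_pcie_hcmd_complete(), iwl_pcie_alloc_ict()
 */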
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg *cfg);
@@ -309,50 +327,43 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwl_rx_replenish(struct iwl_trans *trans);
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q);
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+void iwl_pcie_tasklet(struct iwl_trans *trans);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
-* ICT
+* ICT - interrupt handling
******************************************************/
-void iwl_reset_ict(struct iwl_trans *trans);
-void iwl_disable_ict(struct iwl_trans *trans);
-int iwl_alloc_isr_ict(struct iwl_trans *trans);
-void iwl_free_isr_ict(struct iwl_trans *trans);
-irqreturn_t iwl_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans,
- struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_tx_cmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir);
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
- struct sk_buff_head *skbs);
-int iwl_queue_space(const struct iwl_queue *q);
-
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs);
/*****************************************************
* Error handling
******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf);
-void iwl_dump_csr(struct iwl_trans *trans);
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
@@ -388,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -399,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -411,7 +422,7 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
txq->q.id);
}
-static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
@@ -423,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
return index & (q->n_window - 1);
}
-static inline const char *
-trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+ u8 cmd)
{
if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index bb69f8f90b3b..8389cd38338b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
*
* Driver sequence:
*
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc() Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
+ * are available, schedules iwl_pcie_rx_replenish
*
* -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
+ * Calls iwl_pcie_rxq_restock to refill any empty
* slots.
* ...
*
*/
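To summarize the sequence above (illustrative only; the state names are the list heads used in this file), each iwl_rx_mem_buffer cycles roughly as follows:

/*
 *   rx_used (no page attached)
 *     --iwl_pcie_rxq_alloc_rbs()-->  rx_free (page allocated + DMA mapped)
 *     --iwl_pcie_rxq_restock()---->  rxq->queue[]/bd[]  (owned by the HW)
 *     --iwl_pcie_rx_handle_rb()--->  page handed to the stack (or reused),
 *                                    buffer returned to rx_used
 */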
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
*/
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
{
int s = q->read - q->write;
if (s <= 0)
@@ -122,11 +122,28 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
return s;
}
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
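A quick worked example (illustrative): the RBD stores the buffer's bus address in 256-byte units, so the low 8 bits are assumed to be zero (the pages handed to the HW are at least 256-byte aligned):

/* dma_addr 0x12345600  ->  RBD word cpu_to_le32(0x00123456) */
__le32 rbd = iwl_pcie_dma_addr2rbd_ptr(0x12345600);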
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
unsigned long flags;
u32 reg;
@@ -176,16 +193,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
spin_unlock_irqrestore(&q->lock, flags);
}
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
@@ -195,11 +204,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct list_head *element;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
@@ -215,18 +223,18 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
return;
spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
BUG_ON(rxb && rxb->page);
/* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -243,24 +251,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(trans, rxq);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
/*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
* a page must be allocated and the RBD must point to the page. This function
* doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct list_head *element;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
@@ -308,10 +315,9 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
__free_pages(page, trans_pcie->rx_page_order);
return;
}
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
+ rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
spin_unlock_irqrestore(&rxq->lock, flags);
BUG_ON(rxb->page);
@@ -343,47 +349,227 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
}
}
+static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page,
+ trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
/*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
*
 * When moving to rx_free, a page is allocated for the slot.
*
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization)
*/
-void iwl_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
- iwl_rx_allocate(trans, GFP_KERNEL);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
- iwl_rx_allocate(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
}
-void iwl_bg_rx_replenish(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans);
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct device *dev = trans->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ /*Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
}
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+ if (trans_pcie->rx_buf_size_8k)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size|
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_pcie_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ INIT_WORK(&trans_pcie->rx_replenish,
+ iwl_pcie_rx_replenish_work);
+
+ iwl_pcie_rxq_free_rbs(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwl_pcie_rx_replenish(trans);
+
+ iwl_pcie_rx_hw_init(trans, rxq);
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ rxq->need_update = 1;
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ return 0;
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ unsigned long flags;
+
+ /*if rxq->bd is NULL, it means that nothing has been allocated,
+ * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_pcie_rxq_free_rbs(trans);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
+}
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -413,13 +599,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
break;
IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
- rxcb._offset,
- trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+ rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+ trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+ trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -445,7 +631,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
cmd_index = get_cmd_index(&txq->q, index);
if (reclaim) {
- struct iwl_pcie_tx_queue_entry *ent;
+ struct iwl_pcie_txq_entry *ent;
ent = &txq->entries[cmd_index];
cmd = ent->copy_cmd;
WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -459,6 +645,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
/* The original command isn't needed any more */
kfree(txq->entries[cmd_index].copy_cmd);
txq->entries[cmd_index].copy_cmd = NULL;
+ /* nor is the duplicated part of the command */
+ kfree(txq->entries[cmd_index].free_buf);
+ txq->entries[cmd_index].free_buf = NULL;
}
/*
@@ -472,7 +661,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (!rxcb._page_stolen)
- iwl_tx_cmd_complete(trans, &rxcb, err);
+ iwl_pcie_hcmd_complete(trans, &rxcb, err);
else
IWL_WARN(trans, "Claim null rxb?\n");
}
@@ -514,17 +703,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
spin_unlock_irqrestore(&rxq->lock, flags);
}
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 r, i;
u8 fill_rx = 0;
u32 count = 8;
@@ -532,7 +717,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* Rx interrupt, but nothing sent from uCode */
@@ -555,7 +740,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
r, i, rxb);
- iwl_rx_handle_rxbuf(trans, rxb);
+ iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
/* If there are a lot of unused frames,
@@ -564,7 +749,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish_now(trans);
count = 0;
}
}
@@ -573,39 +758,41 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwl_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish_now(trans);
else
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
}
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
*/
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
return;
}
- iwl_dump_csr(trans);
- iwl_dump_fh(trans, NULL);
+ iwl_pcie_dump_csr(trans);
+ iwl_pcie_dump_fh(trans, NULL);
+
+ set_bit(STATUS_FW_ERROR, &trans_pcie->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ wake_up(&trans_pcie->wait_command_queue);
iwl_op_mode_nic_error(trans->op_mode);
}
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -657,7 +844,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
iwl_disable_interrupts(trans);
isr_stats->hw++;
- iwl_irq_handle_error(trans);
+ iwl_pcie_irq_handle_error(trans);
handled |= CSR_INT_BIT_HW_ERR;
@@ -694,6 +881,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->rfkill++;
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL, &trans_pcie->status);
+ if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ }
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -710,17 +907,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
IWL_ERR(trans, "Microcode SW error detected. "
" Restarting 0x%X.\n", inta);
isr_stats->sw++;
- iwl_irq_handle_error(trans);
+ iwl_pcie_irq_handle_error(trans);
handled |= CSR_INT_BIT_SW_ERR;
}
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+ iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_txq_update_write_ptr(trans,
- &trans_pcie->txq[i]);
+ iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
isr_stats->wakeup++;
@@ -758,7 +954,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(trans);
+ iwl_pcie_rx_handle(trans);
/*
* Enable periodic interrupt in 8 msec only if we received
@@ -816,7 +1012,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
#define ICT_COUNT (ICT_SIZE / sizeof(u32))
/* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -829,13 +1025,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
}
}
-
/*
* allocate dram shared table, it is an aligned memory
* block of ICT_SIZE.
* also reset all data related to ICT table interrupt.
*/
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -848,7 +1043,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
/* just an API sanity check ... it is guaranteed to be aligned */
if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
return -EINVAL;
}
@@ -869,7 +1064,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
/* Device is going up inform it about using ICT interrupt table,
* also we need to tell the driver to start using ICT interrupt.
*/
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@@ -899,7 +1094,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
}
/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
@@ -910,7 +1105,7 @@ void iwl_disable_ict(struct iwl_trans *trans)
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
+static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -927,12 +1122,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
/* Discover which interrupts are active/pending */
inta = iwl_read32(trans, CSR_INT);
+ if (inta & (~inta_mask)) {
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)...Ack and ignore\n",
+ inta & (~inta_mask));
+ iwl_write32(trans, CSR_INT, inta & (~inta_mask));
+ inta &= inta_mask;
+ }
+
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
@@ -957,12 +1160,13 @@ static irqreturn_t iwl_isr(int irq, void *data)
#endif
trans_pcie->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ /* iwl_pcie_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta)
iwl_enable_interrupts(trans);
+ return IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
@@ -982,7 +1186,7 @@ none:
* the interrupt we need to service, driver will set the entries back to 0 and
* set index.
*/
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie;
@@ -1002,23 +1206,21 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* use legacy interrupt.
*/
if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_isr(irq, data);
+ irqreturn_t ret = iwl_pcie_isr(irq, data);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return ret;
}
trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here.
*/
- inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
+ inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
@@ -1067,7 +1269,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
inta &= trans_pcie->inta_mask;
trans_pcie->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ /* iwl_pcie_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index fe0fffd04304..35708b959ad6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,584 +74,8 @@
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
-/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "dvm/commands.h"
-#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
- (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
- (~(1<<(trans_pcie)->cmd_queue)))
-
-static int iwl_trans_rx_alloc(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct device *dev = trans->dev;
-
- memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
-
- spin_lock_init(&rxq->lock);
-
- if (WARN_ON(rxq->bd || rxq->rb_stts))
- return -EINVAL;
-
- /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb_stts;
-
- return 0;
-
-err_rb_stts:
- dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-err_bd:
- return -ENOMEM;
-}
-
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- int i;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rxq->pool[i].page,
- trans_pcie->rx_page_order);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-}
-
-static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
- struct iwl_rx_queue *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- if (trans_pcie->rx_buf_size_8k)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
-
-static int iwl_rx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
- int i, err;
- unsigned long flags;
-
- if (!rxq->bd) {
- err = iwl_trans_rx_alloc(trans);
- if (err)
- return err;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- iwl_trans_rxq_free_rx_bufs(trans);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- iwl_rx_replenish(trans);
-
- iwl_trans_rx_hw_init(trans, rxq);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- return 0;
-}
-
-static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- unsigned long flags;
-
- /*if rxq->bd is NULL, it means that nothing has been allocated,
- * exit now */
- if (!rxq->bd) {
- IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- iwl_trans_rxq_free_rx_bufs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
- rxq->rb_stts = NULL;
-}
-
-static int iwl_trans_rx_stop(struct iwl_trans *trans)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-}
-
-static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- if (WARN_ON(ptr->addr))
- return -EINVAL;
-
- ptr->addr = dma_alloc_coherent(trans->dev, size,
- &ptr->dma, GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
-{
- struct iwl_tx_queue *txq = (void *)data;
- struct iwl_queue *q = &txq->q;
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- u32 scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
- u8 buf[16];
- int i;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
- jiffies_to_msecs(trans_pcie->wd_timeout));
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
-
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
- iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
-
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
-
- if (i & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
-
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
- }
-
- for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd)) {
- struct iwl_tx_cmd *tx_cmd =
- (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
- IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
- get_unaligned_le32(&tx_cmd->scratch));
- }
-
- iwl_op_mode_nic_error(trans->op_mode);
-}
-
-static int iwl_trans_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, int slots_num,
- u32 txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
- int i;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
- (unsigned long)txq);
- txq->trans_pcie = trans_pcie;
-
- txq->q.n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_tx_queue_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = txq_id;
-
- return 0;
-error:
- if (txq->entries && txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-
-}
-
-static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int ret;
-
- txq->need_update = 0;
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/**
- * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- enum dma_data_direction dma_dir;
-
- if (!q->n_bd)
- return;
-
- /* In the command queue, all the TBs are mapped as BIDI
- * so unmap them as such.
- */
- if (txq_id == trans_pcie->cmd_queue)
- dma_dir = DMA_BIDIRECTIONAL;
- else
- dma_dir = DMA_TO_DEVICE;
-
- spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
- iwl_txq_free_tfd(trans, txq, dma_dir);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
- spin_unlock_bh(&txq->lock);
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct device *dev = trans->dev;
- int i;
-
- if (WARN_ON(!txq))
- return;
-
- iwl_tx_queue_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
- kfree(txq->entries[i].cmd);
- kfree(txq->entries[i].copy_cmd);
- }
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
- }
-
- kfree(txq->entries);
- txq->entries = NULL;
-
- del_timer_sync(&txq->stuck_timer);
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_trans_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
-{
- int txq_id;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* Tx queues */
- if (trans_pcie->txq) {
- for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
- iwl_tx_queue_free(trans, txq_id);
- }
-
- kfree(trans_pcie->txq);
- trans_pcie->txq = NULL;
-
- iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
-
- iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
-}
-
-/**
- * iwl_trans_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-static int iwl_trans_tx_alloc(struct iwl_trans *trans)
-{
- int ret;
- int txq_id, slots_num;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
-
- /*It is not allowed to alloc twice, so warn when this happens.
- * We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(trans_pcie->txq)) {
- ret = -EINVAL;
- goto error;
- }
-
- ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- goto error;
- }
-
- /* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(trans, "Keep Warm allocation failed\n");
- goto error;
- }
-
- trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_tx_queue), GFP_KERNEL);
- if (!trans_pcie->txq) {
- IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
- goto error;
- }
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-
-error:
- iwl_trans_pcie_tx_free(trans);
-
- return ret;
-}
-static int iwl_tx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
- bool alloc = false;
-
- if (!trans_pcie->txq) {
- ret = iwl_trans_tx_alloc(trans);
- if (ret)
- goto error;
- alloc = true;
- }
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl_write_prph(trans, SCD_TXFACT, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
- trans_pcie->kw.dma >> 4);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-error:
- /*Upon error, free only if we allocated something */
- if (alloc)
- iwl_trans_pcie_tx_free(trans);
- return ret;
-}
-
-static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
/*
* (for documentation purposes)
@@ -670,21 +94,12 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 pci_lnk_ctl;
-
- pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
- &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
+ u16 lctl;
-static void iwl_apm_config(struct iwl_trans *trans)
-{
/*
* HW bug W/A for instability in PCIe bus L0S->L1 transition.
* Check if BIOS (or OS) enabled L1-ASPM on this device.
@@ -693,29 +108,25 @@ static void iwl_apm_config(struct iwl_trans *trans)
* If not (unlikely), enable L0S, so there is at least some
* power savings, even without L1.
*/
- u16 lctl = iwl_pciexp_link_ctrl(trans);
-
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+ if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, trans->dev,
- "L1 Enabled; Disabling L0S\n");
+ dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, trans->dev,
- "L1 Disabled; Enabling L0S\n");
+ dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
}
- trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
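As an aside, the switch from the driver's private link-control flags to the generic PCI_EXP_LNKCTL_ASPM_* definitions is bit-for-bit equivalent; the values below come from the removed defines above and the standard PCI register header:

/* PCI_EXP_LNKCTL_ASPM_L0S (0x01) == PCI_CFG_LINK_CTRL_VAL_L0S_EN
 * PCI_EXP_LNKCTL_ASPM_L1  (0x02) == PCI_CFG_LINK_CTRL_VAL_L1_EN  */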
/*
* Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
-static int iwl_apm_init(struct iwl_trans *trans)
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
@@ -747,7 +158,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
- iwl_apm_config(trans);
+ iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
if (trans->cfg->base_params->pll_cfg_val)
@@ -793,7 +204,7 @@ out:
return ret;
}
-static int iwl_apm_stop_master(struct iwl_trans *trans)
+static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -811,7 +222,7 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
return ret;
}
-static void iwl_apm_stop(struct iwl_trans *trans)
+static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
@@ -819,7 +230,7 @@ static void iwl_apm_stop(struct iwl_trans *trans)
clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
/* Stop device's DMA activity */
- iwl_apm_stop_master(trans);
+ iwl_pcie_apm_stop_master(trans);
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -834,29 +245,29 @@ static void iwl_apm_stop(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-static int iwl_nic_init(struct iwl_trans *trans)
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
/* nic_init */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_apm_init(trans);
+ iwl_pcie_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_set_pwr_vmain(trans);
+ iwl_pcie_set_pwr_vmain(trans);
iwl_op_mode_nic_config(trans->op_mode);
/* Allocate the RX queue, or reset if it is already allocated */
- iwl_rx_init(trans);
+ iwl_pcie_rx_init(trans);
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_tx_init(trans))
+ if (iwl_pcie_tx_init(trans))
return -ENOMEM;
if (trans->cfg->base_params->shadow_reg_enable) {
@@ -871,7 +282,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
#define HW_READY_TIMEOUT (50)
/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_trans *trans)
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
int ret;
@@ -889,14 +300,14 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_prepare_card_hw(struct iwl_trans *trans)
+static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
- ret = iwl_set_hw_ready(trans);
+ ret = iwl_pcie_set_hw_ready(trans);
/* If the card is ready, exit 0 */
if (ret >= 0)
return 0;
@@ -906,7 +317,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
CSR_HW_IF_CONFIG_REG_PREPARE);
do {
- ret = iwl_set_hw_ready(trans);
+ ret = iwl_pcie_set_hw_ready(trans);
if (ret >= 0)
return 0;
@@ -920,7 +331,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
/*
* ucode
*/
-static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
dma_addr_t phy_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -967,7 +378,7 @@ static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
return 0;
}
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
const struct fw_desc *section)
{
u8 *v_addr;
@@ -988,8 +399,9 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
- ret = iwl_load_firmware_chunk(trans, section->offset + offset,
- p_addr, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans,
+ section->offset + offset,
+ p_addr, copy_size);
if (ret) {
IWL_ERR(trans,
"Could not load the [%d] uCode section\n",
@@ -1002,7 +414,7 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_load_given_ucode(struct iwl_trans *trans,
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
@@ -1011,7 +423,7 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
if (!image->sec[i].data)
break;
- ret = iwl_load_section(trans, i, &image->sec[i]);
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
@@ -1025,15 +437,18 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
/* This may fail if AMT took ownership of the device */
- if (iwl_prepare_card_hw(trans)) {
+ if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
+ clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
+
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
@@ -1044,7 +459,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
- ret = iwl_nic_init(trans);
+ ret = iwl_pcie_nic_init(trans);
if (ret) {
IWL_ERR(trans, "Unable to init nic\n");
return ret;
@@ -1064,125 +479,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- return iwl_load_given_ucode(trans, fw);
+ return iwl_pcie_load_given_ucode(trans, fw);
}
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- */
-static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
- struct iwl_trans_pcie __maybe_unused *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_write_prph(trans, SCD_TXFACT, mask);
-}
-
-static void iwl_tx_start(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
- int chan;
- u32 reg_val;
-
- /* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
- trans_pcie->scd_base_addr =
- iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
-
- iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans_pcie->scd_bc_tbls.dma >> 10);
-
- /* The chain extension of the SCD doesn't work well. This feature is
- * enabled by default by the HW, so we need to disable it manually.
- */
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
-
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
-{
- iwl_reset_ict(trans);
- iwl_tx_start(trans);
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-static int iwl_trans_tx_stop(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ch, txq_id, ret;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- iwl_trans_txq_set_sched(trans, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
- if (ret < 0)
- IWL_ERR(trans,
- "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
- ch,
- iwl_read_direct32(trans,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
- return 0;
- }
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++)
- iwl_tx_queue_unmap(trans, txq_id);
-
- return 0;
+ iwl_pcie_reset_ict(trans);
+ iwl_pcie_tx_start(trans, scd_addr);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1196,7 +499,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* device going down, Stop using ICT table */
- iwl_disable_ict(trans);
+ iwl_pcie_disable_ict(trans);
/*
* If a HW restart happens during firmware loading,
@@ -1206,8 +509,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* already dead.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
- iwl_trans_tx_stop(trans);
- iwl_trans_rx_stop(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(trans, APMG_CLK_DIS_REG,
@@ -1220,7 +523,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(trans);
+ iwl_pcie_apm_stop(trans);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
@@ -1245,6 +548,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1258,169 +562,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
-static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u8 wait_write_ptr = 0;
- __le16 fc = hdr->frame_control;
- u8 hdr_len = ieee80211_hdrlen(fc);
- u16 __maybe_unused wifi_seq;
-
- txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- spin_lock(&txq->lock);
-
- /* In AGG mode, the index in the ring must correspond to the WiFi
- * sequence number. This is a HW requirements to help the SCD to parse
- * the BA.
- * Check here that the packets are in the right place on the ring.
- */
-#ifdef CONFIG_IWLWIFI_DEBUG
- wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
- ((wifi_seq & 0xff) != q->write_ptr),
- "Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
-#endif
-
- /* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
-
- dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = dma_map_single(trans->dev,
- &dev_cmd->hdr, firstlen,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
- goto out_err;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
-
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
- secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
- dma_unmap_single(trans->dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- DMA_BIDIRECTIONAL);
- goto out_err;
- }
- }
-
- /* Attach buffers to TFD */
- iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
- secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
-
- dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(trans->dev,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &dev_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* start timer if queue currently empty */
- if (txq->need_update && q->read_ptr == q->write_ptr &&
- trans_pcie->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(trans, txq);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
- if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
- iwl_txq_update_write_ptr(trans, txq);
- } else {
- iwl_stop_queue(trans, txq);
- }
- }
- spin_unlock(&txq->lock);
- return 0;
- out_err:
- spin_unlock(&txq->lock);
- return -1;
-}
-
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1431,29 +572,28 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (!trans_pcie->irq_requested) {
tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)trans);
+ iwl_pcie_tasklet, (unsigned long)trans);
- iwl_alloc_isr_ict(trans);
+ iwl_pcie_alloc_ict(trans);
- err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
+ IRQF_SHARED, DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n",
trans_pcie->irq);
goto error;
}
- INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
trans_pcie->irq_requested = true;
}
- err = iwl_prepare_card_hw(trans);
+ err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
goto err_free_irq;
}
- iwl_apm_init(trans);
+ iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
@@ -1467,7 +607,7 @@ err_free_irq:
trans_pcie->irq_requested = false;
free_irq(trans_pcie->irq, trans);
error:
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
tasklet_kill(&trans_pcie->irq_tasklet);
return err;
}
@@ -1483,12 +623,14 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_apm_stop(trans);
+ iwl_pcie_apm_stop(trans);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ iwl_pcie_disable_ict(trans);
+
if (!op_mode_leaving) {
/*
* Even if we stop the HW, we still want the RF kill
@@ -1507,28 +649,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
}
}
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
- int freed = 0;
-
- spin_lock(&txq->lock);
-
- if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
- freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
- iwl_wake_queue(trans, txq);
- }
-
- spin_unlock(&txq->lock);
-}
-
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1544,6 +664,20 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
+static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+{
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+}
+
+static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
+ u32 val)
+{
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
+ ((addr & 0x0000FFFF) | (3 << 24)));
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -1575,12 +709,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_trans_pcie_tx_free(trans);
- iwl_trans_pcie_rx_free(trans);
+ iwl_pcie_tx_free(trans);
+ iwl_pcie_rx_free(trans);
if (trans_pcie->irq_requested == true) {
free_irq(trans_pcie->irq, trans);
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
}
pci_disable_msi(trans_pcie->pci_dev);
@@ -1626,10 +760,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
@@ -1673,7 +807,7 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD
}
-int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
static const u32 fh_tbl[] = {
@@ -1752,7 +886,7 @@ static const char *get_csr_string(int cmd)
#undef IWL_CMD
}
-void iwl_dump_csr(struct iwl_trans *trans)
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
int i;
static const u32 csr_tbl[] = {
@@ -1809,7 +943,6 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
@@ -1842,7 +975,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
struct iwl_queue *q;
char *buf;
int pos = 0;
@@ -1879,7 +1012,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1998,7 +1131,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
if (sscanf(buf, "%d", &csr) != 1)
return -EFAULT;
- iwl_dump_csr(trans);
+ iwl_pcie_dump_csr(trans);
return count;
}
@@ -2012,7 +1145,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_dump_fh(trans, &buf);
+ ret = pos = iwl_pcie_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
@@ -2081,7 +1214,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
- .send_cmd = iwl_trans_pcie_send_cmd,
+ .send_cmd = iwl_trans_pcie_send_hcmd,
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_trans_pcie_reclaim,
@@ -2091,7 +1224,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
- .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
#ifdef CONFIG_PM_SLEEP
.suspend = iwl_trans_pcie_suspend,
@@ -2100,6 +1233,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
+ .read_prph = iwl_trans_pcie_read_prph,
+ .write_prph = iwl_trans_pcie_write_prph,
.configure = iwl_trans_pcie_configure,
.set_pmi = iwl_trans_pcie_set_pmi,
};
@@ -2116,7 +1251,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
- if (WARN_ON(!trans))
+ if (!trans)
return NULL;
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2149,43 +1284,38 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
- dev_printk(KERN_ERR, &pdev->dev,
- "No suitable DMA available.\n");
+ dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_printk(KERN_ERR, &pdev->dev,
- "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
goto out_pci_release_regions;
}
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_base = %p\n", trans_pcie->hw_base);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "HW Revision ID = 0x%X\n", pdev->revision);
-
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
err = pci_enable_msi(pdev);
- if (err)
- dev_printk(KERN_ERR, &pdev->dev,
- "pci_enable_msi failed(0X%x)\n", err);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
trans->dev = &pdev->dev;
trans_pcie->irq = pdev->irq;
@@ -2195,16 +1325,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
- /* TODO: Move this away, not needed if not MSI */
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- }
-
/* Initialize the wait queue for commands */
- init_waitqueue_head(&trans->wait_command_queue);
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
spin_lock_init(&trans->reg_lock);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 79a4ddc002d3..6c5b867c353a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,170 @@
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
+static int iwl_queue_space(const struct iwl_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_window;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
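/*
 * Editor's note: a minimal standalone sketch (not part of this patch)
 * of the ring-buffer arithmetic described in the "Theory of operation"
 * comment above. The 2-entry reserve is what keeps a completely full
 * queue distinguishable from a completely empty one; all names below
 * are illustrative only.
 */
static int demo_queue_space(int read_ptr, int write_ptr, int n_bd, int n_window)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)	/* write pointer has wrapped around */
		s -= n_bd;
	if (s <= 0)			/* only n_window slots are usable */
		s += n_window;
	s -= 2;				/* reserve: tell "empty" from "full" */
	return s < 0 ? 0 : s;
}

/*
 * Example: n_bd = 256, n_window = 64, read_ptr = 10, write_ptr = 70
 * gives s = -60, then +64 = 4, then -2 = 2 free slots.
 */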
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+{
+ q->n_bd = count;
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
+ * and iwl_queue_dec_wrap are broken. */
+ if (WARN_ON(!is_power_of_2(count)))
+ return -EINVAL;
+
+ /* slots_num must be power-of-two size, otherwise
+ * get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+static void iwl_pcie_txq_stuck_timer(unsigned long data)
+{
+ struct iwl_txq *txq = (void *)data;
+ struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ u32 scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ u8 buf[16];
+ int i;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->q.read_ptr == txq->q.write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ jiffies_to_msecs(trans_pcie->wd_timeout));
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_read_targ_mem(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+ if (i & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ i, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+ }
+
+ for (i = q->read_ptr; i != q->write_ptr;
+ i = iwl_queue_inc_wrap(i, q->n_bd)) {
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+ IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
+ get_unaligned_le32(&tx_cmd->scratch));
+ }
+
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +246,36 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->cmd_queue)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
u32 reg = 0;
int txq_id = txq->q.id;
@@ -137,7 +321,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
txq->need_update = 0;
}
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
@@ -149,15 +333,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
return le16_to_cpu(tb->hi_n_len) >> 4;
}
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
u16 hi_n_len = len << 4;
@@ -171,19 +355,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
tfd->num_tbs = idx + 1;
}
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
return tfd->num_tbs & 0x1f;
}
-static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
+ enum dma_data_direction dma_dir)
{
int i;
int num_tbs;
/* Sanity check on number of chunks */
- num_tbs = iwl_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -200,14 +385,14 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
- dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), dma_dir);
+ dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
tfd->num_tbs = 0;
}
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
* @dma_dir - the direction of the DMA mapping
@@ -215,8 +400,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
@@ -227,8 +412,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
- iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
- dma_dir);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+ dma_dir);
/* free SKB */
if (txq->entries) {
@@ -247,10 +432,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
}
}
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset)
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ dma_addr_t addr, u16 len, u8 reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -263,7 +446,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
if (reset)
memset(tfd, 0, sizeof(*tfd));
- num_tbs = iwl_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
/* Each TFD can point to a maximum 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
@@ -279,108 +462,534 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
IWL_ERR(trans, "Unaligned address = %llx\n",
(unsigned long long)addr);
- iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
}
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For Tx queue, there are low mark and high mark limits. If, after queuing
- * the packet for Tx, free space become < low mark, Tx queue stopped. When
- * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
- * Tx queue resumed.
+static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ struct iwl_txq *txq, int slots_num,
+ u32 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
+ (unsigned long)txq);
+ txq->trans_pcie = trans_pcie;
+
+ txq->q.n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+ /*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ * Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/*
+ * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ enum dma_data_direction dma_dir;
+
+ if (!q->n_bd)
+ return;
+
+ /* In the command queue, all the TBs are mapped as BIDI
+ * so unmap them as such.
+ */
+ if (txq_id == trans_pcie->cmd_queue)
+ dma_dir = DMA_BIDIRECTIONAL;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ spin_lock_bh(&txq->lock);
+ while (q->write_ptr != q->read_ptr) {
+ iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+ spin_unlock_bh(&txq->lock);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
*
- ***************************************************/
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = trans->dev;
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_txq_unmap(trans, txq_id);
-int iwl_queue_space(const struct iwl_queue *q)
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++) {
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries[i].copy_cmd);
+ kfree(txq->entries[i].free_buf);
+ }
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ del_timer_sync(&txq->stuck_timer);
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
+ */
+static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
- int s = q->read_ptr - q->write_ptr;
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
+ iwl_write_prph(trans, SCD_TXFACT, mask);
+}
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 a;
+ int chan;
+ u32 reg_val;
+
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+ WARN_ON(scd_base_addr != 0 &&
+ scd_base_addr != trans_pcie->scd_base_addr);
+
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+ /* reset context data memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ /* reset tx status memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ for (; a < trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ trans->cfg->base_params->num_of_queues);
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+ trans_pcie->cmd_fifo);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
-/**
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ch, txq_id, ret;
+ unsigned long flags;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- /* slots_num must be power-of-two size, otherwise
- * get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
+ iwl_pcie_txq_set_sched(trans, 0);
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+ if (ret < 0)
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch,
+ iwl_read_direct32(trans,
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
+ if (!trans_pcie->txq) {
+ IWL_WARN(trans,
+ "Stopping tx queues that aren't allocated...\n");
+ return 0;
+ }
- q->write_ptr = q->read_ptr = 0;
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++)
+ iwl_pcie_txq_unmap(trans, txq_id);
return 0;
}
-static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+/*
+ * iwl_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ iwl_pcie_txq_free(trans, txq_id);
+ }
- if (txq_id != trans_pcie->cmd_queue)
- sta_id = tx_cmd->sta_id;
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+ /* It is not allowed to alloc twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_pcie_tx_free(trans);
+
+ return ret;
+}
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+
+ if (!trans_pcie->txq) {
+ ret = iwl_pcie_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(trans, SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ /* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_pcie_tx_free(trans);
+ return ret;
+}
+
+static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq)
+{
+ if (!trans_pcie->wd_timeout)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->q.read_ptr == txq->q.write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ /* n_bd is usually 256 => n_bd - 1 = 0xff */
+ int tfd_num = ssn & (txq->q.n_bd - 1);
+ struct iwl_queue *q = &txq->q;
+ int last_to_free;
+
+ /* This function is not meant to release cmd queue*/
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ return;
+
+ spin_lock(&txq->lock);
+
+ if (txq->q.read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+ txq_id, txq->q.read_ptr, tfd_num, ssn);
+
+ /* Since we free until index _not_ inclusive, the one before index is
+ * the last we will free. That one must be a used entry */
+ last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+
+ if (!iwl_queue_used(q, last_to_free)) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ q->read_ptr != tfd_num;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+ continue;
+
+ __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+
+ txq->entries[txq->q.read_ptr].skb = NULL;
+
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+
+ iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+ }
+
+ iwl_pcie_txq_progress(trans_pcie, txq);
+
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
+out:
+ spin_unlock(&txq->lock);
}
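/*
 * Editor's note: an illustrative standalone sketch (not part of this
 * patch) of the "free until index, _not_ inclusive" convention used by
 * iwl_trans_pcie_reclaim() above. The SSN is masked down to a TFD index
 * (n_bd is a power of two), entries [read_ptr, tfd_num) are released,
 * and read_ptr ends up equal to tfd_num; the names are hypothetical.
 */
static void demo_reclaim(int *read_ptr, int ssn, int n_bd)
{
	int tfd_num = ssn & (n_bd - 1);	/* e.g. ssn 0x105, n_bd 256 -> 5 */

	while (*read_ptr != tfd_num) {
		/* free the skb/TFD sitting at *read_ptr here */
		*read_ptr = (*read_ptr + 1) & (n_bd - 1);	/* inc with wrap */
	}
	/* the entry at tfd_num itself is left alone ("not inclusive") */
}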
-static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int nfreed = 0;
+
+ lockdep_assert_held(&txq->lock);
+
+ if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, q->write_ptr, q->read_ptr);
+ iwl_op_mode_nic_error(trans->op_mode);
+ }
+ }
+
+ iwl_pcie_txq_progress(trans_pcie, txq);
+}
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
u16 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -405,7 +1014,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
-static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
+ u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -424,7 +1034,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
/* Stop this Tx queue before configuring it */
- iwl_txq_set_inactive(trans, txq_id);
+ iwl_pcie_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD queue */
if (txq_id != trans_pcie->cmd_queue)
@@ -435,7 +1045,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
u16 ra_tid = BUILD_RAxTID(sta_id, tid);
/* Map receiver-address / traffic-ID to this queue */
- iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
+ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
/* enable aggregations for the queue */
iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
@@ -480,20 +1090,29 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 stts_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+ static const u32 zero_val[4] = {};
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- iwl_txq_set_inactive(trans, txq_id);
+ iwl_pcie_txq_set_inactive(trans, txq_id);
+
+ _iwl_write_targ_mem_dwords(trans, stts_addr,
+ zero_val, ARRAY_SIZE(zero_val));
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-/**
- * iwl_enqueue_hcmd - enqueue a uCode command
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data pointer
* @cmd: a pointer to the uCode command structure
*
@@ -501,15 +1120,17 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
* failed. On success, it returns the index (> 0) of the command in the
* command queue.
*/
-static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ void *dup_buf = NULL;
dma_addr_t phys_addr;
- u32 idx;
+ int idx;
u16 copy_size, cmd_size;
bool had_nocopy = false;
int i;
@@ -526,10 +1147,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
} else {
/* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy))
- return -EINVAL;
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
copy_size += cmd->len[i];
}
cmd_size += cmd->len[i];
@@ -541,8 +1185,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
* allocated into separate TFDs, then we will need to
* increase the size of the buffers.
*/
- if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
- return -EINVAL;
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
spin_lock_bh(&txq->lock);
@@ -551,7 +1199,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
- return -ENOSPC;
+ idx = -ENOSPC;
+ goto free_dup_buf;
}
idx = get_cmd_index(q, q->write_ptr);
@@ -575,7 +1224,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+ if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))
break;
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
cmd_pos += cmd->len[i];
@@ -600,7 +1250,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_HC(trans,
"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+ get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
@@ -614,28 +1264,35 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, copy_size);
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ const void *data = cmd->data[i];
+
if (!cmd->len[i])
continue;
- if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
continue;
- phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_unmap_tfd(trans, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
- cmd->len[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
}
out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
txq->need_update = 1;
@@ -648,70 +1305,18 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(trans, txq);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
out:
spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
return idx;
}
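/*
 * Editor's note: a hedged usage sketch (not part of this patch). As the
 * comment above iwl_pcie_enqueue_hcmd() says, the function returns a
 * negative errno when the command could not be queued and the command's
 * index in the queue otherwise, so callers only need to check the sign;
 * the hypothetical wrapper below mirrors what iwl_pcie_send_hcmd_async()
 * does with the return value.
 */
static int demo_send(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int idx = iwl_pcie_enqueue_hcmd(trans, cmd);

	if (idx < 0)		/* e.g. -EINVAL, -ENOSPC, -ENOMEM */
		return idx;	/* nothing was queued */
	return 0;		/* queued at slot 'idx' of the command queue */
}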
-static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
- struct iwl_tx_queue *txq)
-{
- if (!trans_pcie->wd_timeout)
- return;
-
- /*
- * if empty delete timer, otherwise move timer forward
- * since we're making progress on this queue
- */
- if (txq->q.read_ptr == txq->q.write_ptr)
- del_timer(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-}
-
-/**
- * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
- int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- lockdep_assert_held(&txq->lock);
-
- if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
- q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
- iwl_op_mode_nic_error(trans->op_mode);
- }
-
- }
-
- iwl_queue_progress(trans_pcie, txq);
-}
-
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers)
@@ -720,8 +1325,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
- int handler_status)
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -731,7 +1336,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
@@ -751,7 +1356,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
- iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+ iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -763,20 +1368,18 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
meta->source->handler_status = handler_status;
}
- iwl_hcmd_queue_reclaim(trans, txq_id, index);
+ iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->hdr.cmd));
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->hdr.cmd));
- wake_up(&trans->wait_command_queue);
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
+ wake_up(&trans_pcie->wait_command_queue);
}
meta->flags = 0;
@@ -786,7 +1389,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -795,59 +1399,59 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
-
- ret = iwl_enqueue_hcmd(trans, cmd);
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
if (ret < 0) {
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+ get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
return 0;
}
-static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))) {
IWL_ERR(trans, "Command %s: a command is already active!\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
return -EIO;
}
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
- cmd_idx = iwl_enqueue_hcmd(trans, cmd);
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+ get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
- ret = wait_event_timeout(trans->wait_command_queue,
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_tx_queue *txq =
+ struct iwl_txq *txq =
&trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
IWL_ERR(trans,
"Error sending %s: time out after %dms.\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+ get_cmd_string(trans_pcie, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans,
@@ -857,16 +1461,28 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_DEBUG_INFO(trans,
"Clearing HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
}
+ if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
ret = -EIO;
goto cancel;
}
@@ -893,64 +1509,183 @@ cancel:
return ret;
}
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
+ return -EIO;
+
+ if (test_bit(STATUS_RFKILL, &trans_pcie->status))
+ return -ERFKILL;
+
if (cmd->flags & CMD_ASYNC)
- return iwl_send_cmd_async(trans, cmd);
+ return iwl_pcie_send_hcmd_async(trans, cmd);
- return iwl_send_cmd_sync(trans, cmd);
+ /* We can still fail on RFKILL, which can be asserted while we wait */
+ return iwl_pcie_send_hcmd_sync(trans, cmd);
}
-/* Frees buffers until index _not_ inclusive */
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
- struct sk_buff_head *skbs)
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int last_to_free;
- int freed = 0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq;
+ struct iwl_queue *q;
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u8 wait_write_ptr = 0;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
- /* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
- return 0;
+ if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
- lockdep_assert_held(&txq->lock);
+ spin_lock(&txq->lock);
- /*Since we free until index _not_ inclusive, the one before index is
- * the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirement to help the SCD to parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
+ ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
+ /* Set up driver data for this TFD */
+ txq->entries[q->write_ptr].skb = skb;
+ txq->entries[q->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[q->write_ptr].meta;
- if ((index >= q->n_bd) ||
- (iwl_queue_used(q, last_to_free) == 0)) {
- IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
- q->write_ptr, q->read_ptr);
- return 0;
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(trans->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+ goto out_err;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
- if (WARN_ON(!skb_queue_empty(skbs)))
- return 0;
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+ dma_unmap_single(trans->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ goto out_err;
+ }
+ }
- for (;
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ /* Attach buffers to TFD */
+ iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
- if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
- continue;
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
- __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
- iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
- freed++;
- }
+ dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ &txq->tfds[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb,
+ skb->data + hdr_len, secondlen);
- iwl_queue_progress(trans_pcie, txq);
+ /* start timer if queue currently empty */
+ if (txq->need_update && q->read_ptr == q->write_ptr &&
+ trans_pcie->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
- return freed;
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ * The check below only decides whether the write pointer
+ * needs to be updated again or the queue must be stopped.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ } else {
+ iwl_stop_queue(trans, txq);
+ }
+ }
+ spin_unlock(&txq->lock);
+ return 0;
+out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
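
The comment block above describes how the Tx command and MAC header are
rounded up to a dword boundary, with the device told about any padding.
A standalone sketch of just that computation (the length and the flag value
are illustrative, not the real structure sizes):

/* hypothetical sketch of the firstlen rounding and MH padding flag */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FLG_MH_PAD	0x1	/* stand-in for TX_CMD_FLG_MH_PAD_MSK */

int main(void)
{
	uint16_t len = 54;			/* tx cmd + cmd header + MAC header */
	uint16_t firstlen = (len + 3) & ~3;	/* round up to a 4-byte boundary */
	uint32_t tx_flags = 0;

	if (firstlen != len)
		tx_flags |= EXAMPLE_FLG_MH_PAD;	/* tell the NIC about the pad bytes */

	printf("len=%u firstlen=%u pad=%u flags=%#x\n",
	       len, firstlen, firstlen - len, (unsigned int)tx_flags);
	return 0;
}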
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1c10b542ab23..ec6d5d6b452e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -298,6 +298,7 @@ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
const u8 *rates_eid, *ext_rates_eid;
int n = 0;
+ rcu_read_lock();
rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES);
@@ -325,6 +326,7 @@ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
*tlv++ = 0x96;
n = 4;
}
+ rcu_read_unlock();
rate_tlv->header.len = cpu_to_le16(n);
return sizeof(rate_tlv->header) + n;
@@ -436,19 +438,19 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
*/
static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type)
+ struct cfg80211_chan_def *chandef)
{
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
- channel->center_freq, channel_type);
+ chandef->chan->center_freq,
+ cfg80211_get_chandef_type(chandef));
- if (channel_type != NL80211_CHAN_NO_HT)
+ if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
goto out;
- ret = lbs_set_channel(priv, channel->hw_value);
+ ret = lbs_set_channel(priv, chandef->chan->hw_value);
out:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -1140,11 +1142,13 @@ static int lbs_associate(struct lbs_private *priv,
cmd->capability = cpu_to_le16(bss->capability);
/* add SSID TLV */
+ rcu_read_lock();
ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssid_eid)
pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
else
lbs_deb_assoc("no SSID\n");
+ rcu_read_unlock();
/* add DS param TLV */
if (bss->channel)
@@ -1734,7 +1738,7 @@ static void lbs_join_post(struct lbs_private *priv,
/* Fake DS channel IE */
*fake++ = WLAN_EID_DS_PARAMS;
*fake++ = 1;
- *fake++ = params->channel->hw_value;
+ *fake++ = params->chandef.chan->hw_value;
/* Fake IBSS params IE */
*fake++ = WLAN_EID_IBSS_PARAMS;
*fake++ = 2;
@@ -1755,7 +1759,7 @@ static void lbs_join_post(struct lbs_private *priv,
lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
bss = cfg80211_inform_bss(priv->wdev->wiphy,
- params->channel,
+ params->chandef.chan,
bssid,
0,
capability,
@@ -1782,7 +1786,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cfg80211_ibss_params *params,
struct cfg80211_bss *bss)
{
- const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
+ const u8 *rates_eid;
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
@@ -1833,7 +1837,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
cmd.bss.ds.header.len = 1;
- cmd.bss.ds.channel = params->channel->hw_value;
+ cmd.bss.ds.channel = params->chandef.chan->hw_value;
cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
cmd.bss.ibss.header.len = 2;
cmd.bss.ibss.atimwindow = 0;
@@ -1841,6 +1845,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
/* set rates to the intersection of our rates and the rates in the
bss */
+ rcu_read_lock();
+ rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
@@ -1860,6 +1866,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
}
}
}
+ rcu_read_unlock();
/* Only v8 and below support setting this */
if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
@@ -1942,7 +1949,7 @@ static int lbs_ibss_start_new(struct lbs_private *priv,
cmd.ibss.atimwindow = 0;
cmd.ds.header.id = WLAN_EID_DS_PARAMS;
cmd.ds.header.len = 1;
- cmd.ds.channel = params->channel->hw_value;
+ cmd.ds.channel = params->chandef.chan->hw_value;
/* Only v8 and below support setting probe delay */
if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
@@ -1987,18 +1994,18 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
lbs_deb_enter(LBS_DEB_CFG80211);
- if (!params->channel) {
+ if (!params->chandef.chan) {
ret = -ENOTSUPP;
goto out;
}
- ret = lbs_set_channel(priv, params->channel->hw_value);
+ ret = lbs_set_channel(priv, params->chandef.chan->hw_value);
if (ret)
goto out;
/* Search if someone is beaconing. This assumes that the
* bss list is populated already */
- bss = cfg80211_get_bss(wiphy, params->channel, params->bssid,
+ bss = cfg80211_get_bss(wiphy, params->chandef.chan, params->bssid,
params->ssid, params->ssid_len,
WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
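
The cfg.c hunks above wrap each ieee80211_bss_get_ie() lookup in an RCU
read-side critical section, since the IEs hang off the BSS entry under RCU
and the returned pointer is only valid while the lock is held. A minimal
sketch of the pattern (the helper name is made up; copy what you need before
unlocking):

/* sketch only, not a drop-in: copy the rates out under rcu_read_lock() */
static int example_copy_rates(struct cfg80211_bss *bss, u8 *out, int max)
{
	const u8 *eid;
	int n = 0;

	rcu_read_lock();
	eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
	if (eid) {
		n = min_t(int, eid[1], max);
		memcpy(out, eid + 2, n);	/* copy before dropping the lock */
	}
	rcu_read_unlock();

	return n;
}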
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 4cb234349fbf..739309e70d8b 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -588,17 +588,38 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
size = fw->size;
while (size) {
- ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
- if (ret)
- goto release;
+ timeout = jiffies + HZ;
+ while (1) {
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
- req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
- if (ret)
- goto release;
+ req_size = sdio_readb(card->func, IF_SDIO_RD_BASE,
+ &ret);
+ if (ret)
+ goto release;
+
+ req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1,
+ &ret) << 8;
+ if (ret)
+ goto release;
+
+ /*
+ * For SD8688, wait until the length is not 0, 1 or 2
+ * before downloading the first FW block, since the BOOT
+ * code writes this register to indicate the helper/FW
+ * download winner; the value could be 1 or 2 (Func1 or
+ * Func2).
+ */
+ if ((size != fw->size) || (req_size > 2))
+ break;
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIMEDOUT;
+ goto release;
+ }
+ mdelay(1);
+ }
- req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8;
- if (ret)
- goto release;
/*
lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size);
*/
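
The if_sdio change above turns a single status read into a bounded polling
loop. The underlying pattern, sketched with illustrative names (struct
example_dev and example_ready() are hypothetical, not part of the driver):

/* sketch of a bounded busy-wait: poll for up to one second with 1 ms steps */
static int example_poll_ready(struct example_dev *dev)
{
	unsigned long timeout = jiffies + HZ;	/* ~1 second deadline */

	while (!example_ready(dev)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		mdelay(1);			/* matches the driver's 1 ms step */
	}
	return 0;
}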
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 9604a1c4a74d..4bb6574f4073 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1124,7 +1124,7 @@ static void if_spi_resume_worker(struct work_struct *work)
}
}
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_probe(struct spi_device *spi)
{
struct if_spi_card *card;
struct lbs_private *priv = NULL;
@@ -1226,7 +1226,7 @@ out:
return err;
}
-static int __devexit libertas_spi_remove(struct spi_device *spi)
+static int libertas_spi_remove(struct spi_device *spi)
{
struct if_spi_card *card = spi_get_drvdata(spi);
struct lbs_private *priv = card->priv;
@@ -1285,7 +1285,7 @@ static const struct dev_pm_ops if_spi_pm_ops = {
static struct spi_driver libertas_spi_driver = {
.probe = if_spi_probe,
- .remove = __devexit_p(libertas_spi_remove),
+ .remove = libertas_spi_remove,
.driver = {
.name = "libertas_spi",
.owner = THIS_MODULE,
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 97807751ebcf..3e81264db81e 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -101,7 +101,7 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
switch (action) {
case CMD_ACT_MESH_CONFIG_START:
- ie->id = WLAN_EID_GENERIC;
+ ie->id = WLAN_EID_VENDOR_SPECIFIC;
ie->val.oui[0] = 0x00;
ie->val.oui[1] = 0x50;
ie->val.oui[2] = 0x43;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 429ca3215fdb..ff9085502bea 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -44,9 +44,9 @@ static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
-static bool fake_hw_scan;
-module_param(fake_hw_scan, bool, 0444);
-MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+static int channels = 1;
+module_param(channels, int, 0444);
+MODULE_PARM_DESC(channels, "Number of concurrent channels");
/**
* enum hwsim_regtest - the type of regulatory tests we offer
@@ -166,7 +166,9 @@ struct hwsim_vif_priv {
static inline void hwsim_check_magic(struct ieee80211_vif *vif)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- WARN_ON(vp->magic != HWSIM_VIF_MAGIC);
+ WARN(vp->magic != HWSIM_VIF_MAGIC,
+ "Invalid VIF (%p) magic %#x, %pM, %d/%d\n",
+ vif, vp->magic, vif->addr, vif->type, vif->p2p);
}
static inline void hwsim_set_magic(struct ieee80211_vif *vif)
@@ -185,7 +187,7 @@ struct hwsim_sta_priv {
u32 magic;
};
-#define HWSIM_STA_MAGIC 0x6d537748
+#define HWSIM_STA_MAGIC 0x6d537749
static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
{
@@ -205,6 +207,30 @@ static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
sp->magic = 0;
}
+struct hwsim_chanctx_priv {
+ u32 magic;
+};
+
+#define HWSIM_CHANCTX_MAGIC 0x6d53774a
+
+static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+ WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC);
+}
+
+static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+ cp->magic = HWSIM_CHANCTX_MAGIC;
+}
+
+static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
+{
+ struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
+ cp->magic = 0;
+}
+
static struct class *hwsim_class;
static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -299,6 +325,13 @@ struct mac80211_hwsim_data {
struct mac_address addresses[2];
+ struct ieee80211_channel *tmp_chan;
+ struct delayed_work roc_done;
+ struct delayed_work hw_scan;
+ struct cfg80211_scan_request *hw_scan_request;
+ struct ieee80211_vif *hw_scan_vif;
+ int scan_chan_idx;
+
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
@@ -396,7 +429,8 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
}
static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
- struct sk_buff *tx_skb)
+ struct sk_buff *tx_skb,
+ struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
@@ -423,7 +457,7 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
hdr->rt_flags = 0;
hdr->rt_rate = txrate->bitrate / 5;
- hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
+ hdr->rt_channel = cpu_to_le16(chan->center_freq);
flags = IEEE80211_CHAN_2GHZ;
if (txrate->flags & IEEE80211_RATE_ERP_G)
flags |= IEEE80211_CHAN_OFDM;
@@ -441,9 +475,9 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
}
-static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
+static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
+ const u8 *addr)
{
- struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
struct hwsim_radiotap_hdr *hdr;
u16 flags;
@@ -464,7 +498,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
(1 << IEEE80211_RADIOTAP_CHANNEL));
hdr->rt_flags = 0;
hdr->rt_rate = 0;
- hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
+ hdr->rt_channel = cpu_to_le16(chan->center_freq);
flags = IEEE80211_CHAN_2GHZ;
hdr->rt_chbitmask = cpu_to_le16(flags);
@@ -537,6 +571,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
md.ret = false;
md.addr = addr;
ieee80211_iterate_active_interfaces_atomic(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
mac80211_hwsim_addr_iter,
&md);
@@ -556,12 +591,6 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
int i;
struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
- if (data->idle) {
- wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
- dev_kfree_skb(my_skb);
- return;
- }
-
if (data->ps != PS_DISABLED)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
/* If the queue contains MAX_QUEUE skb's drop some */
@@ -629,8 +658,38 @@ nla_put_failure:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
}
+static bool hwsim_chans_compat(struct ieee80211_channel *c1,
+ struct ieee80211_channel *c2)
+{
+ if (!c1 || !c2)
+ return false;
+
+ return c1->center_freq == c2->center_freq;
+}
+
+struct tx_iter_data {
+ struct ieee80211_channel *channel;
+ bool receive;
+};
+
+static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
+ struct ieee80211_vif *vif)
+{
+ struct tx_iter_data *data = _data;
+
+ if (!vif->chanctx_conf)
+ return;
+
+ if (!hwsim_chans_compat(data->channel,
+ rcu_dereference(vif->chanctx_conf)->def.chan))
+ return;
+
+ data->receive = true;
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv, *data2;
bool ack = false;
@@ -639,15 +698,10 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct ieee80211_rx_status rx_status;
struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
- if (data->idle) {
- wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
- return false;
- }
-
memset(&rx_status, 0, sizeof(rx_status));
- rx_status.flag |= RX_FLAG_MACTIME_MPDU;
- rx_status.freq = data->channel->center_freq;
- rx_status.band = data->channel->band;
+ rx_status.flag |= RX_FLAG_MACTIME_START;
+ rx_status.freq = chan->center_freq;
+ rx_status.band = chan->band;
rx_status.rate_idx = info->control.rates[0].idx;
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
rx_status.flag |= RX_FLAG_HT;
@@ -673,17 +727,35 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
list_for_each_entry(data2, &hwsim_radios, list) {
struct sk_buff *nskb;
struct ieee80211_mgmt *mgmt;
+ struct tx_iter_data tx_iter_data = {
+ .receive = false,
+ .channel = chan,
+ };
if (data == data2)
continue;
- if (data2->idle || !data2->started ||
- !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
- data->channel->center_freq != data2->channel->center_freq ||
- !(data->group & data2->group))
+ if (!data2->started || (data2->idle && !data2->tmp_chan) ||
+ !hwsim_ps_rx_ok(data2, skb))
continue;
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!(data->group & data2->group))
+ continue;
+
+ if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
+ !hwsim_chans_compat(chan, data2->channel)) {
+ ieee80211_iterate_active_interfaces_atomic(
+ data2->hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_tx_iter, &tx_iter_data);
+ if (!tx_iter_data.receive)
+ continue;
+ }
+
+ /*
+ * reserve some space for our vendor and the normal
+ * radiotap header, since we're copying anyway
+ */
+ nskb = skb_copy_expand(skb, 64, 0, GFP_ATOMIC);
if (nskb == NULL)
continue;
@@ -701,6 +773,33 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
(data->tsf_offset - data2->tsf_offset) +
24 * 8 * 10 / txrate->bitrate);
+#if 0
+ /*
+ * Don't enable this code by default as the OUI 00:00:00
+ * is registered to Xerox so we shouldn't use it here, it
+ * might find its way into pcap files.
+ * Note that this code requires the headroom in the SKB
+ * that was allocated earlier.
+ */
+ rx_status.vendor_radiotap_oui[0] = 0x00;
+ rx_status.vendor_radiotap_oui[1] = 0x00;
+ rx_status.vendor_radiotap_oui[2] = 0x00;
+ rx_status.vendor_radiotap_subns = 127;
+ /*
+ * Radiotap vendor namespaces can (and should) also be
+ * split into fields by using the standard radiotap
+ * presence bitmap mechanism. Use just BIT(0) here for
+ * the presence bitmap.
+ */
+ rx_status.vendor_radiotap_bitmap = BIT(0);
+ /* We have 8 bytes of (dummy) data */
+ rx_status.vendor_radiotap_len = 8;
+ /* For testing, also require it to be aligned */
+ rx_status.vendor_radiotap_align = 8;
+ /* push the data */
+ memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
+#endif
+
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
}
@@ -713,18 +812,51 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *channel;
bool ack;
- struct ieee80211_tx_info *txi;
u32 _portid;
- mac80211_hwsim_monitor_rx(hw, skb);
-
- if (skb->len < 10) {
+ if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
dev_kfree_skb(skb);
return;
}
+ if (channels == 1) {
+ channel = data->channel;
+ } else if (txi->hw_queue == 4) {
+ channel = data->tmp_chan;
+ } else {
+ chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
+ if (chanctx_conf)
+ channel = chanctx_conf->def.chan;
+ else
+ channel = NULL;
+ }
+
+ if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (data->idle && !data->tmp_chan) {
+ wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (txi->control.vif)
+ hwsim_check_magic(txi->control.vif);
+ if (control->sta)
+ hwsim_check_sta_magic(control->sta);
+
+ txi->rate_driver_data[0] = channel;
+
+ mac80211_hwsim_monitor_rx(hw, skb, channel);
+
/* wmediumd mode check */
_portid = ACCESS_ONCE(wmediumd_portid);
@@ -732,15 +864,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
- ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
+ ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
if (ack && skb->len >= 16) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- mac80211_hwsim_monitor_ack(hw, hdr->addr2);
+ mac80211_hwsim_monitor_ack(channel, hdr->addr2);
}
- txi = IEEE80211_SKB_CB(skb);
-
ieee80211_tx_info_clear_status(txi);
/* frame was transmitted at most favorable rate at first attempt */
@@ -778,6 +908,13 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
__func__, ieee80211_vif_type_p2p(vif),
vif->addr);
hwsim_set_magic(vif);
+
+ vif->cab_queue = 0;
+ vif->hw_queue[IEEE80211_AC_VO] = 0;
+ vif->hw_queue[IEEE80211_AC_VI] = 1;
+ vif->hw_queue[IEEE80211_AC_BE] = 2;
+ vif->hw_queue[IEEE80211_AC_BK] = 3;
+
return 0;
}
@@ -807,14 +944,26 @@ static void mac80211_hwsim_remove_interface(
hwsim_clear_magic(vif);
}
+static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct ieee80211_channel *chan)
+{
+ u32 _pid = ACCESS_ONCE(wmediumd_portid);
+
+ mac80211_hwsim_monitor_rx(hw, skb, chan);
+
+ if (_pid)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+ mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
+ dev_kfree_skb(skb);
+}
static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_vif *vif)
{
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
- struct ieee80211_tx_info *info;
- u32 _portid;
hwsim_check_magic(vif);
@@ -826,18 +975,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
skb = ieee80211_beacon_get(hw, vif);
if (skb == NULL)
return;
- info = IEEE80211_SKB_CB(skb);
-
- mac80211_hwsim_monitor_rx(hw, skb);
-
- /* wmediumd mode check */
- _portid = ACCESS_ONCE(wmediumd_portid);
- if (_portid)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
-
- mac80211_hwsim_tx_frame_no_nl(hw, skb);
- dev_kfree_skb(skb);
+ mac80211_hwsim_tx_frame(hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
}
@@ -850,7 +990,8 @@ static void mac80211_hwsim_beacon(unsigned long arg)
return;
ieee80211_iterate_active_interfaces_atomic(
- hw, mac80211_hwsim_beacon_tx, hw);
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+ mac80211_hwsim_beacon_tx, hw);
data->beacon_timer.expires = jiffies + data->beacon_int;
add_timer(&data->beacon_timer);
@@ -877,7 +1018,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
wiphy_debug(hw->wiphy,
"%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
__func__,
- conf->channel->center_freq,
+ conf->channel ? conf->channel->center_freq : 0,
hwsim_chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
!!(conf->flags & IEEE80211_CONF_PS),
@@ -886,6 +1027,9 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
data->channel = conf->channel;
+
+ WARN_ON(data->channel && channels > 1);
+
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
del_timer(&data->beacon_timer);
@@ -963,15 +1107,17 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- wiphy_debug(hw->wiphy, " HT: op_mode=0x%x, chantype=%s\n",
- info->ht_operation_mode,
- hwsim_chantypes[info->channel_type]);
+ wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n",
+ info->ht_operation_mode);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n",
(unsigned long long) info->basic_rates);
}
+
+ if (changed & BSS_CHANGED_TXPOWER)
+ wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
}
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
@@ -1166,45 +1312,101 @@ static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
/* Not implemented, queues only on kernel side */
}
-struct hw_scan_done {
- struct delayed_work w;
- struct ieee80211_hw *hw;
-};
-
-static void hw_scan_done(struct work_struct *work)
+static void hw_scan_work(struct work_struct *work)
{
- struct hw_scan_done *hsd =
- container_of(work, struct hw_scan_done, w.work);
+ struct mac80211_hwsim_data *hwsim =
+ container_of(work, struct mac80211_hwsim_data, hw_scan.work);
+ struct cfg80211_scan_request *req = hwsim->hw_scan_request;
+ int dwell, i;
+
+ mutex_lock(&hwsim->mutex);
+ if (hwsim->scan_chan_idx >= req->n_channels) {
+ wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
+ ieee80211_scan_completed(hwsim->hw, false);
+ hwsim->hw_scan_request = NULL;
+ hwsim->hw_scan_vif = NULL;
+ hwsim->tmp_chan = NULL;
+ mutex_unlock(&hwsim->mutex);
+ return;
+ }
+
+ wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n",
+ req->channels[hwsim->scan_chan_idx]->center_freq);
+
+ hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
+ if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+ !req->n_ssids) {
+ dwell = 120;
+ } else {
+ dwell = 30;
+ /* send probes */
+ for (i = 0; i < req->n_ssids; i++) {
+ struct sk_buff *probe;
+
+ probe = ieee80211_probereq_get(hwsim->hw,
+ hwsim->hw_scan_vif,
+ req->ssids[i].ssid,
+ req->ssids[i].ssid_len,
+ req->ie_len);
+ if (!probe)
+ continue;
+
+ if (req->ie_len)
+ memcpy(skb_put(probe, req->ie_len), req->ie,
+ req->ie_len);
- ieee80211_scan_completed(hsd->hw, false);
- kfree(hsd);
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hwsim->hw, probe,
+ hwsim->tmp_chan);
+ local_bh_enable();
+ }
+ }
+ ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
+ msecs_to_jiffies(dwell));
+ hwsim->scan_chan_idx++;
+ mutex_unlock(&hwsim->mutex);
}
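
hw_scan_work() above handles one channel per invocation, dwelling 120 ms on
passive channels (or when there are no SSIDs to probe) and 30 ms otherwise
before re-queueing itself. A standalone sketch of just that dwell decision
(the channel data is made up for illustration):

/* hypothetical sketch of the per-channel dwell-time choice */
#include <stdbool.h>
#include <stdio.h>

struct chan { int freq; bool passive; };

static int dwell_ms(const struct chan *c, int n_ssids)
{
	/* passive channels, or scans with nothing to probe, get the long dwell */
	return (c->passive || n_ssids == 0) ? 120 : 30;
}

int main(void)
{
	struct chan chans[] = { { 2412, false }, { 5260, true } };
	int i;

	for (i = 0; i < 2; i++)
		printf("%d MHz: dwell %d ms\n", chans[i].freq,
		       dwell_ms(&chans[i], 1));
	return 0;
}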
static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
- struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
- int i;
-
- if (!hsd)
- return -ENOMEM;
+ struct mac80211_hwsim_data *hwsim = hw->priv;
- hsd->hw = hw;
- INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+ mutex_lock(&hwsim->mutex);
+ if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
+ mutex_unlock(&hwsim->mutex);
+ return -EBUSY;
+ }
+ hwsim->hw_scan_request = req;
+ hwsim->hw_scan_vif = vif;
+ hwsim->scan_chan_idx = 0;
+ mutex_unlock(&hwsim->mutex);
- printk(KERN_DEBUG "hwsim hw_scan request\n");
- for (i = 0; i < req->n_channels; i++)
- printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
- req->channels[i]->center_freq);
- print_hex_dump(KERN_DEBUG, "scan IEs: ", DUMP_PREFIX_OFFSET,
- 16, 1, req->ie, req->ie_len, 1);
+ wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
- ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+ ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
return 0;
}
+static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
+
+ cancel_delayed_work_sync(&hwsim->hw_scan);
+
+ mutex_lock(&hwsim->mutex);
+ ieee80211_scan_completed(hwsim->hw, true);
+ hwsim->tmp_chan = NULL;
+ hwsim->hw_scan_request = NULL;
+ hwsim->hw_scan_vif = NULL;
+ mutex_unlock(&hwsim->mutex);
+}
+
static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1235,6 +1437,111 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
mutex_unlock(&hwsim->mutex);
}
+static void hw_roc_done(struct work_struct *work)
+{
+ struct mac80211_hwsim_data *hwsim =
+ container_of(work, struct mac80211_hwsim_data, roc_done.work);
+
+ mutex_lock(&hwsim->mutex);
+ ieee80211_remain_on_channel_expired(hwsim->hw);
+ hwsim->tmp_chan = NULL;
+ mutex_unlock(&hwsim->mutex);
+
+ wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n");
+}
+
+static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
+ mutex_unlock(&hwsim->mutex);
+ return -EBUSY;
+ }
+
+ hwsim->tmp_chan = chan;
+ mutex_unlock(&hwsim->mutex);
+
+ wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
+ chan->center_freq, duration);
+
+ ieee80211_ready_on_channel(hw);
+
+ ieee80211_queue_delayed_work(hw, &hwsim->roc_done,
+ msecs_to_jiffies(duration));
+ return 0;
+}
+
+static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ cancel_delayed_work_sync(&hwsim->roc_done);
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->tmp_chan = NULL;
+ mutex_unlock(&hwsim->mutex);
+
+ wiphy_debug(hw->wiphy, "hwsim ROC canceled\n");
+
+ return 0;
+}
+
+static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ hwsim_set_chanctx_magic(ctx);
+ wiphy_debug(hw->wiphy,
+ "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
+ return 0;
+}
+
+static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ wiphy_debug(hw->wiphy,
+ "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
+ hwsim_check_chanctx_magic(ctx);
+ hwsim_clear_chanctx_magic(ctx);
+}
+
+static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ hwsim_check_chanctx_magic(ctx);
+ wiphy_debug(hw->wiphy,
+ "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
+}
+
+static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ hwsim_check_magic(vif);
+ hwsim_check_chanctx_magic(ctx);
+
+ return 0;
+}
+
+static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ hwsim_check_magic(vif);
+ hwsim_check_chanctx_magic(ctx);
+}
+
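
The chanctx callbacks above reuse the magic-cookie convention hwsim already
applies to vifs and stations: stamp a known constant into the driver-private
area on add, check it on every later callback, clear it on remove. A
standalone sketch of the idea (names are illustrative; assert() stands in
for the driver's WARN_ON()):

/* hypothetical userspace sketch of the magic-cookie debugging pattern */
#include <assert.h>
#include <stdio.h>

#define EXAMPLE_MAGIC 0x6d53774a

struct example_priv {
	unsigned int magic;
};

static void example_set_magic(struct example_priv *p)   { p->magic = EXAMPLE_MAGIC; }
static void example_clear_magic(struct example_priv *p) { p->magic = 0; }
static void example_check_magic(struct example_priv *p)
{
	assert(p->magic == EXAMPLE_MAGIC);	/* WARN_ON() equivalent in the driver */
}

int main(void)
{
	struct example_priv p = { 0 };

	example_set_magic(&p);
	example_check_magic(&p);	/* passes while the context is alive */
	example_clear_magic(&p);
	printf("magic cleared\n");
	return 0;
}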
static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
@@ -1315,7 +1622,6 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- u32 _portid;
if (!vp->assoc)
return;
@@ -1335,25 +1641,18 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
memcpy(pspoll->ta, mac, ETH_ALEN);
- /* wmediumd mode check */
- _portid = ACCESS_ONCE(wmediumd_portid);
-
- if (_portid)
- return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
-
- if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
- printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
- dev_kfree_skb(skb);
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
}
-
static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct ieee80211_vif *vif, int ps)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- u32 _portid;
if (!vp->assoc)
return;
@@ -1374,15 +1673,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
- /* wmediumd mode check */
- _portid = ACCESS_ONCE(wmediumd_portid);
-
- if (_portid)
- return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
-
- if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
- printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
- dev_kfree_skb(skb);
+ rcu_read_lock();
+ mac80211_hwsim_tx_frame(data->hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ rcu_read_unlock();
}
@@ -1423,14 +1717,17 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
if (val == PS_MANUAL_POLL) {
ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
data->ps_poll_pending = true;
} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps,
data);
} else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
ieee80211_iterate_active_interfaces(data->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_no_ps,
data);
}
@@ -1551,7 +1848,8 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
(hwsim_flags & HWSIM_TX_STAT_ACK)) {
if (skb->len >= 16) {
hdr = (struct ieee80211_hdr *) skb->data;
- mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
+ mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+ hdr->addr2);
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
@@ -1566,7 +1864,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
- struct mac80211_hwsim_data *data2;
+ struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
struct mac_address *dst;
int frame_data_len;
@@ -1574,9 +1872,9 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
struct sk_buff *skb = NULL;
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
- !info->attrs[HWSIM_ATTR_FRAME] ||
- !info->attrs[HWSIM_ATTR_RX_RATE] ||
- !info->attrs[HWSIM_ATTR_SIGNAL])
+ !info->attrs[HWSIM_ATTR_FRAME] ||
+ !info->attrs[HWSIM_ATTR_RX_RATE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL])
goto out;
dst = (struct mac_address *)nla_data(
@@ -1604,7 +1902,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* check if radio is configured properly */
- if (data2->idle || !data2->started || !data2->channel)
+ if (data2->idle || !data2->started)
goto out;
/*A frame is received from user space*/
@@ -1688,6 +1986,11 @@ static struct notifier_block hwsim_netlink_notifier = {
static int hwsim_init_netlink(void)
{
int rc;
+
+ /* userspace test API hasn't been adjusted for multi-channel */
+ if (channels > 1)
+ return 0;
+
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
rc = genl_register_family_with_ops(&hwsim_genl_family,
@@ -1710,6 +2013,10 @@ static void hwsim_exit_netlink(void)
{
int ret;
+ /* userspace test API hasn't been adjusted for multi-channel */
+ if (channels > 1)
+ return;
+
printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
/* unregister the notifier */
netlink_unregister_notifier(&hwsim_netlink_notifier);
@@ -1732,7 +2039,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
-static const struct ieee80211_iface_combination hwsim_if_comb = {
+static struct ieee80211_iface_combination hwsim_if_comb = {
.limits = hwsim_if_limits,
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
@@ -1750,10 +2057,30 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
- if (fake_hw_scan) {
+ if (channels < 1)
+ return -EINVAL;
+
+ if (channels > 1) {
+ hwsim_if_comb.num_different_channels = channels;
mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_ops.cancel_hw_scan =
+ mac80211_hwsim_cancel_hw_scan;
mac80211_hwsim_ops.sw_scan_start = NULL;
mac80211_hwsim_ops.sw_scan_complete = NULL;
+ mac80211_hwsim_ops.remain_on_channel =
+ mac80211_hwsim_roc;
+ mac80211_hwsim_ops.cancel_remain_on_channel =
+ mac80211_hwsim_croc;
+ mac80211_hwsim_ops.add_chanctx =
+ mac80211_hwsim_add_chanctx;
+ mac80211_hwsim_ops.remove_chanctx =
+ mac80211_hwsim_remove_chanctx;
+ mac80211_hwsim_ops.change_chanctx =
+ mac80211_hwsim_change_chanctx;
+ mac80211_hwsim_ops.assign_vif_chanctx =
+ mac80211_hwsim_assign_vif_chanctx;
+ mac80211_hwsim_ops.unassign_vif_chanctx =
+ mac80211_hwsim_unassign_vif_chanctx;
}
spin_lock_init(&hwsim_radio_lock);
@@ -1803,13 +2130,18 @@ static int __init init_mac80211_hwsim(void)
hw->wiphy->iface_combinations = &hwsim_if_comb;
hw->wiphy->n_iface_combinations = 1;
- if (fake_hw_scan) {
+ if (channels > 1) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->max_remain_on_channel_duration = 1000;
}
+ INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
+ INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
+
hw->channel_change_time = 1;
- hw->queues = 4;
+ hw->queues = 5;
+ hw->offchannel_tx_hw_queue = 4;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1824,7 +2156,8 @@ static int __init init_mac80211_hwsim(void)
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_WANT_MONITOR_VIF;
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_QUEUE_CONTROL;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -1874,6 +2207,34 @@ static int __init init_mac80211_hwsim(void)
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
hw->wiphy->bands[band] = sband;
+
+ if (channels == 1)
+ continue;
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
}
/* By default all radios belong to the first group */
data->group = 1;
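
The rx_mcs_map assignment above packs two bits per spatial stream into a
16-bit map (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported). A
standalone sketch that reproduces the same per-stream choices:

/* hypothetical sketch of the 2-bit-per-stream VHT MCS map packing */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t map = 0;
	int nss;

	/* streams 3, 6 and 7 advertise MCS 0-9 (value 2), the rest MCS 0-8 (1) */
	for (nss = 0; nss < 8; nss++) {
		uint16_t val = (nss == 2 || nss == 5 || nss == 6) ? 2 : 1;
		map |= val << (2 * nss);
	}

	printf("vht rx_mcs_map = 0x%04x\n", map);
	return 0;
}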
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 395f1bfd4102..68d52cfc1ebd 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -197,7 +197,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ra_list_flags);
mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
- mwifiex_write_data_complete(adapter, skb_src, 0);
+ mwifiex_write_data_complete(adapter, skb_src, 0, 0);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
@@ -256,7 +256,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
- mwifiex_write_data_complete(adapter, skb_aggr, -1);
+ mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
return -1;
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
@@ -282,13 +282,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
__func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
- mwifiex_write_data_complete(adapter, skb_aggr, ret);
+ mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
return 0;
case -EINPROGRESS:
adapter->data_sent = false;
break;
case 0:
- mwifiex_write_data_complete(adapter, skb_aggr, ret);
+ mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
break;
default:
break;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9402b93b9a36..4a97acd170f7 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -58,8 +58,7 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
else
- mwifiex_process_rx_packet(priv->adapter,
- rx_tmp_ptr);
+ mwifiex_process_rx_packet(priv, rx_tmp_ptr);
}
}
@@ -106,7 +105,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
else
- mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+ mwifiex_process_rx_packet(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -442,8 +441,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
mwifiex_handle_uap_rx_forward(priv, payload);
else
- mwifiex_process_rx_packet(priv->adapter,
- payload);
+ mwifiex_process_rx_packet(priv, payload);
}
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8e384fae3e68..b2e27723f801 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -1,7 +1,6 @@
config MWIFIEX
tristate "Marvell WiFi-Ex Driver"
depends on CFG80211
- select LIB80211
---help---
This adds support for wireless adapters based on Marvell
802.11n chipsets.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 780d3e168297..cdb11b3964e2 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -180,10 +180,8 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
static int
mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
- enum nl80211_channel_type channel_type,
- bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, bool no_cck,
- bool dont_wait_for_ack, u64 *cookie)
+ unsigned int wait, const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack, u64 *cookie)
{
struct sk_buff *skb;
u16 pkt_len;
@@ -253,7 +251,6 @@ static int
mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
unsigned int duration, u64 *cookie)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
@@ -271,15 +268,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
}
ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
- &channel_type, duration);
+ duration);
if (!ret) {
*cookie = random32() | 1;
priv->roc_cfg.cookie = *cookie;
priv->roc_cfg.chan = *chan;
- priv->roc_cfg.chan_type = channel_type;
- cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
+ cfg80211_ready_on_channel(wdev, *cookie, chan,
duration, GFP_ATOMIC);
wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
@@ -302,13 +298,11 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
return -ENOENT;
ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
- &priv->roc_cfg.chan,
- &priv->roc_cfg.chan_type, 0);
+ &priv->roc_cfg.chan, 0);
if (!ret) {
cfg80211_remain_on_channel_expired(wdev, cookie,
&priv->roc_cfg.chan,
- priv->roc_cfg.chan_type,
GFP_ATOMIC);
memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
@@ -324,6 +318,7 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_tx_power_setting type,
int mbm)
{
@@ -471,13 +466,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
flag = 1;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_reg_power;
+ max_pwr = ch->max_power;
no_of_parsed_chan = 1;
continue;
}
if (ch->hw_value == next_chan + 1 &&
- ch->max_reg_power == max_pwr) {
+ ch->max_power == max_pwr) {
next_chan++;
no_of_parsed_chan++;
} else {
@@ -488,7 +483,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
no_of_triplet++;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_reg_power;
+ max_pwr = ch->max_power;
no_of_parsed_chan = 1;
}
}
@@ -1296,21 +1291,23 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
- bss_cfg->channel =
- (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
+ bss_cfg->channel = ieee80211_frequency_to_channel(
+ params->chandef.chan->center_freq);
/* Set appropriate bands */
- if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
- if (params->channel_type == NL80211_CHAN_NO_HT)
+ if (cfg80211_get_chandef_type(&params->chandef) ==
+ NL80211_CHAN_NO_HT)
config_bands = BAND_B | BAND_G;
else
config_bands = BAND_B | BAND_G | BAND_GN;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
- if (params->channel_type == NL80211_CHAN_NO_HT)
+ if (cfg80211_get_chandef_type(&params->chandef) ==
+ NL80211_CHAN_NO_HT)
config_bands = BAND_A;
else
config_bands = BAND_AN | BAND_A;
@@ -1462,7 +1459,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
- u8 is_scanning_required = 0, config_bands = 0;
+ u8 is_scanning_required = 0;
memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
@@ -1481,19 +1478,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
/* disconnect before try to associate */
mwifiex_deauthenticate(priv, NULL);
- if (channel) {
- if (mode == NL80211_IFTYPE_STATION) {
- if (channel->band == IEEE80211_BAND_2GHZ)
- config_bands = BAND_B | BAND_G | BAND_GN;
- else
- config_bands = BAND_A | BAND_AN;
-
- if (!((config_bands | priv->adapter->fw_bands) &
- ~priv->adapter->fw_bands))
- priv->adapter->config_bands = config_bands;
- }
- }
-
/* As this is new association, clear locally stored
* keys and security related flags */
priv->sec_info.wpa_enabled = false;
@@ -1683,7 +1667,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
int index = 0, i;
u8 config_bands = 0;
- if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
if (!params->basic_rates) {
config_bands = BAND_B | BAND_G;
} else {
@@ -1708,10 +1692,12 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
}
}
- if (params->channel_type != NL80211_CHAN_NO_HT)
- config_bands |= BAND_GN;
+ if (cfg80211_get_chandef_type(&params->chandef) !=
+ NL80211_CHAN_NO_HT)
+ config_bands |= BAND_G | BAND_GN;
} else {
- if (params->channel_type == NL80211_CHAN_NO_HT)
+ if (cfg80211_get_chandef_type(&params->chandef) ==
+ NL80211_CHAN_NO_HT)
config_bands = BAND_A;
else
config_bands = BAND_AN | BAND_A;
@@ -1728,9 +1714,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
}
adapter->sec_chan_offset =
- mwifiex_chan_type_to_sec_chan_offset(params->channel_type);
- priv->adhoc_channel =
- ieee80211_frequency_to_channel(params->channel->center_freq);
+ mwifiex_chan_type_to_sec_chan_offset(
+ cfg80211_get_chandef_type(&params->chandef));
+ priv->adhoc_channel = ieee80211_frequency_to_channel(
+ params->chandef.chan->center_freq);
wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
@@ -1764,7 +1751,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
- params->channel, NULL, params->privacy);
+ params->chandef.chan, NULL,
+ params->privacy);
done:
if (!ret) {
cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
@@ -1819,12 +1807,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+ if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ atomic_read(&priv->wmm.tx_pkts_queued) >=
MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
return -EBUSY;
}
+ if (priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+ return -EBUSY;
+ }
+
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
GFP_KERNEL);
if (!priv->user_scan_cfg) {
@@ -1941,6 +1935,21 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
else
ht_info->cap &= ~IEEE80211_HT_CAP_TX_STBC;
+ if (ISSUPP_GREENFIELD(adapter->hw_dot_11n_dev_cap))
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ else
+ ht_info->cap &= ~IEEE80211_HT_CAP_GRN_FLD;
+
+ if (ISENABLED_40MHZ_INTOLERANT(adapter->hw_dot_11n_dev_cap))
+ ht_info->cap |= IEEE80211_HT_CAP_40MHZ_INTOLERANT;
+ else
+ ht_info->cap &= ~IEEE80211_HT_CAP_40MHZ_INTOLERANT;
+
+ if (ISSUPP_RXLDPC(adapter->hw_dot_11n_dev_cap))
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ else
+ ht_info->cap &= ~IEEE80211_HT_CAP_LDPC_CODING;
+
ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
@@ -2074,8 +2083,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
- dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
- ether_setup, 1);
+ dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
+ ether_setup, IEEE80211_NUM_ACS, 1);
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2116,7 +2125,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
}
sema_init(&priv->async_sem, 1);
- priv->scan_pending_on_block = false;
dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
@@ -2138,8 +2146,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_dev_debugfs_remove(priv);
#endif
- if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
@@ -2253,8 +2260,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
- wiphy->features = NL80211_FEATURE_HT_IBSS |
- NL80211_FEATURE_INACTIVITY_TIMER;
+ wiphy->features |= NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_INACTIVITY_TIMER |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
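
Note: the cfg80211.c hunks above follow cfg80211's move from a bare channel/channel_type pair to struct cfg80211_chan_def. A minimal sketch of reading the new chandef from the AP start parameters, using only the cfg80211 helpers already shown in the hunks (the function name is illustrative, not part of the driver):

	/* illustrative only; mirrors the pattern in mwifiex_cfg80211_start_ap() above */
	/* needs <net/cfg80211.h> */
	static void example_read_chandef(struct cfg80211_ap_settings *params)
	{
		struct ieee80211_channel *chan = params->chandef.chan;
		enum nl80211_channel_type type =
			cfg80211_get_chandef_type(&params->chandef);
		u8 channel = ieee80211_frequency_to_channel(chan->center_freq);

		if (chan->band == IEEE80211_BAND_2GHZ && type != NL80211_CHAN_NO_HT)
			pr_debug("2.4 GHz HT channel %u\n", channel);
	}
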
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ae9010ed58de..5f438e6c2155 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -914,21 +914,24 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
dev_err(adapter->dev, "last_cmd_index = %d\n",
adapter->dbg.last_cmd_index);
- print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_id, DBG_CMD_NUM);
- print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_act, DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_cmd_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_id),
+ adapter->dbg.last_cmd_id);
+ dev_err(adapter->dev, "last_cmd_act: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_act),
+ adapter->dbg.last_cmd_act);
dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
adapter->dbg.last_cmd_resp_index);
- print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_cmd_resp_id,
- DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_resp_id),
+ adapter->dbg.last_cmd_resp_id);
dev_err(adapter->dev, "last_event_index = %d\n",
adapter->dbg.last_event_index);
- print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
- adapter->dbg.last_event, DBG_CMD_NUM);
+ dev_err(adapter->dev, "last_event: %*ph\n",
+ (int)sizeof(adapter->dbg.last_event),
+ adapter->dbg.last_event);
dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
adapter->data_sent, adapter->cmd_sent);
@@ -946,6 +949,9 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
+
+ if (adapter->if_ops.card_reset)
+ adapter->if_ops.card_reset(adapter);
}
/*
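
Note: the cmdevt.c hunk replaces print_hex_dump_bytes() with the %*ph printk extension, which prints a small buffer as space-separated hex bytes and takes the buffer length as the field width. A short example (buffer contents made up for illustration):

	u8 last_ids[5] = { 0x10, 0x2c, 0xa4, 0x07, 0x16 };

	/* prints "last_cmd_id: 10 2c a4 07 16"; %*ph is meant for small buffers (up to ~64 bytes) */
	dev_err(adapter->dev, "last_cmd_id: %*ph\n", (int)sizeof(last_ids), last_ids);
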
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a870b5885c09..46e34aa65d1c 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -178,6 +178,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
(struct mwifiex_private *) file->private_data;
struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
+ struct netdev_queue *txq;
unsigned long page = get_zeroed_page(GFP_KERNEL);
char *p = (char *) page, fmt[64];
struct mwifiex_bss_info info;
@@ -229,8 +230,13 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
? "on" : "off"));
- p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev))
- ? "stopped" : "started"));
+ p += sprintf(p, "tx queue");
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ txq = netdev_get_tx_queue(netdev, i);
+ p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ?
+ "stopped" : "started");
+ }
+ p += sprintf(p, "\n");
ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
(unsigned long) p - page);
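
Note: with one entry per TX queue, the debugfs info file now reports something like the following (illustrative output for four WMM queues):

	tx queue 0:started 1:started 2:stopped 3:started

instead of the single "tx queue stopped/started" line it printed before.
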
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index dda588b35570..4dc8e2e9a889 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -194,6 +194,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_TXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(25))
#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26))
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
+#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
+#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
/* httxcfg bitmap
* 0 reserved
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index b5d37a8caa09..39f03ce5a5b1 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -84,18 +84,19 @@ static void scan_delay_timer_fn(unsigned long data)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->user_scan_cfg) {
- dev_dbg(priv->adapter->dev,
- "info: %s: scan aborted\n", __func__);
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
+ if (priv->scan_request) {
+ dev_dbg(priv->adapter->dev,
+ "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: scan already aborted\n");
+ }
+
kfree(priv->user_scan_cfg);
priv->user_scan_cfg = NULL;
}
-
- if (priv->scan_pending_on_block) {
- priv->scan_pending_on_block = false;
- up(&priv->async_sem);
- }
goto done;
}
@@ -387,9 +388,17 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
unsigned long dev_queue_flags;
+ unsigned int i;
spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
- netif_tx_wake_all_queues(netdev);
+
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
+
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+ }
+
spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
@@ -400,9 +409,17 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
unsigned long dev_queue_flags;
+ unsigned int i;
spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
- netif_tx_stop_all_queues(netdev);
+
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
+
+ if (!netif_tx_queue_stopped(txq))
+ netif_tx_stop_queue(txq);
+ }
+
spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
}
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 7b0858af8f5d..88664ae667ba 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -721,8 +721,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
- if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->scan_block = true;
@@ -1238,8 +1237,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
- if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
mwifiex_save_curr_bcn(priv);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index eb22dd248d54..9c802ede9c3b 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -282,6 +282,7 @@ exit_main_proc:
mwifiex_shutdown_drv(adapter);
return ret;
}
+EXPORT_SYMBOL_GPL(mwifiex_main_process);
/*
* This function frees the adapter structure.
@@ -412,49 +413,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
}
/*
- * This function fills a driver buffer.
- *
- * The function associates a given SKB with the provided driver buffer
- * and also updates some of the SKB parameters, including IP header,
- * priority and timestamp.
- */
-static void
-mwifiex_fill_buffer(struct sk_buff *skb)
-{
- struct ethhdr *eth;
- struct iphdr *iph;
- struct timeval tv;
- u8 tid = 0;
-
- eth = (struct ethhdr *) skb->data;
- switch (eth->h_proto) {
- case __constant_htons(ETH_P_IP):
- iph = ip_hdr(skb);
- tid = IPTOS_PREC(iph->tos);
- pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
- eth->h_proto, tid, skb->priority);
- break;
- case __constant_htons(ETH_P_ARP):
- pr_debug("data: ARP packet: %04x\n", eth->h_proto);
- default:
- break;
- }
-/* Offset for TOS field in the IP header */
-#define IPTOS_OFFSET 5
- tid = (tid >> IPTOS_OFFSET);
- skb->priority = tid;
- /* Record the current time the packet was queued; used to
- determine the amount of time the packet was queued in
- the driver before it was sent to the firmware.
- The delay is then sent along with the packet to the
- firmware for aggregate delay calculation for stats and
- MSDU lifetime expiry.
- */
- do_gettimeofday(&tv);
- skb->tstamp = timeval_to_ktime(tv);
-}
-
-/*
* CFG802.11 network device handler for open.
*
* Starts the data queue.
@@ -472,6 +430,14 @@ mwifiex_open(struct net_device *dev)
static int
mwifiex_close(struct net_device *dev)
{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->scan_request) {
+ dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+
return 0;
}
@@ -480,17 +446,23 @@ mwifiex_close(struct net_device *dev)
*/
int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
{
- mwifiex_wmm_add_buf_txqueue(priv, skb);
+ struct netdev_queue *txq;
+ int index = mwifiex_1d_to_wmm_queue[skb->priority];
+
+ if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) {
+ txq = netdev_get_tx_queue(priv->netdev, index);
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+ }
+ }
+
atomic_inc(&priv->adapter->tx_pending);
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
if (priv->adapter->scan_delay_cnt)
atomic_set(&priv->adapter->is_tx_received, true);
- if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- mwifiex_set_trans_start(priv->netdev);
- mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
- }
-
queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
return 0;
@@ -505,6 +477,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct sk_buff *new_skb;
struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
jiffies, priv->bss_type, priv->bss_num);
@@ -542,7 +515,16 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info = MWIFIEX_SKB_TXCB(skb);
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
- mwifiex_fill_buffer(skb);
+
+ /* Record the current time the packet was queued; used to
+ * determine the amount of time the packet was queued in
+ * the driver before it was sent to the firmware.
+ * The delay is then sent along with the packet to the
+ * firmware for aggregate delay calculation for stats and
+ * MSDU lifetime expiry.
+ */
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
mwifiex_queue_tx_pkt(priv, skb);
@@ -622,6 +604,13 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
return &priv->stats;
}
+static u16
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ skb->priority = cfg80211_classify8021d(skb);
+ return mwifiex_1d_to_wmm_queue[skb->priority];
+}
+
/* Network device handlers */
static const struct net_device_ops mwifiex_netdev_ops = {
.ndo_open = mwifiex_open,
@@ -631,6 +620,7 @@ static const struct net_device_ops mwifiex_netdev_ops = {
.ndo_tx_timeout = mwifiex_tx_timeout,
.ndo_get_stats = mwifiex_get_stats,
.ndo_set_rx_mode = mwifiex_set_multicast_list,
+ .ndo_select_queue = mwifiex_netdev_select_wmm_queue,
};
/*
@@ -830,9 +820,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (priv && priv->netdev) {
- if (!netif_queue_stopped(priv->netdev))
- mwifiex_stop_net_dev_queue(priv->netdev,
- adapter);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
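
Note: the main.c hunks route each frame to a per-AC TX queue (ndo_select_queue via cfg80211_classify8021d() and the mwifiex_1d_to_wmm_queue map) and stop only that queue once its wmm_tx_pending counter reaches MAX_TX_PENDING. The TX-completion side is not part of this excerpt; a sketch of the reverse operation, assuming a LOW_TX_PENDING watermark and that the per-AC counter is decremented when a packet has been handed to the firmware:

	/* sketch only: wake one AC queue when its backlog drains */
	int index = mwifiex_1d_to_wmm_queue[skb->priority];

	if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
		struct netdev_queue *txq = netdev_get_tx_queue(priv->netdev, index);

		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}
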
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index c2d0ab146af5..1b3cfc821940 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -115,8 +115,6 @@ enum {
#define MWIFIEX_TYPE_DATA 0
#define MWIFIEX_TYPE_EVENT 3
-#define DBG_CMD_NUM 5
-
#define MAX_BITMAP_RATES_SIZE 10
#define MAX_CHANNEL_BAND_BG 14
@@ -373,7 +371,6 @@ struct wps {
struct mwifiex_roc_cfg {
u64 cookie;
struct ieee80211_channel chan;
- enum nl80211_channel_type chan_type;